2 % (c) The GRASP/AQUA Project, Glasgow University, 1992-1996
4 \section[Lexical analysis]{Lexical analysis}
7 #include "HsVersions.h"
11 isLexCon, isLexVar, isLexId, isLexSym,
12 isLexConId, isLexConSym, isLexVarId, isLexVarSym,
13 mkTupNameStr, ifaceParseErr,
16 IfaceToken(..), lexIface, SYN_IE(IfM), thenIf, returnIf, happyError,
22 IMPORT_1_3(Char(isDigit, isAlpha, isAlphanum, isUpper,isLower, isSpace, ord))
24 #if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ <= 201
26 IMPORT_DELOOPER(IdLoop) -- get the CostCentre type&constructors from here
28 import {-# SOURCE #-} CostCentre
29 # if __GLASGOW_HASKELL__ == 202
30 import PrelBase ( Char(..) )
32 # if __GLASGOW_HASKELL__ >= 209
33 import Addr ( Addr(..) )
38 import CmdLineOpts ( opt_IgnoreIfacePragmas )
39 import Demand ( Demand(..) {- instance Read -} )
40 import UniqFM ( UniqFM, listToUFM, lookupUFM)
41 import BasicTypes ( NewOrData(..), IfaceFlavour(..) )
43 #if __GLASGOW_HASKELL__ >= 202
44 import Maybes ( MaybeErr(..) )
46 import Maybes ( Maybe(..), MaybeErr(..) )
52 import ErrUtils ( Error(..) )
53 import Outputable ( Outputable(..), PprStyle(..) )
54 import Util ( nOfThem, panic )
59 #if __GLASGOW_HASKELL__ <= 201
63 #if __GLASGOW_HASKELL__ < 209
64 import ST ( thenST, seqST )
69 %************************************************************************
71 \subsection{Lexical categories}
73 %************************************************************************
75 These functions test strings to see if they fit the lexical categories
76 defined in the Haskell report. Normally applied as in e.g. @isCon
80 isLexCon, isLexVar, isLexId, isLexSym, isLexConId, isLexConSym,
81 isLexVarId, isLexVarSym :: FAST_STRING -> Bool
-- A constructor-like lexeme is either an alphanumeric constructor
-- identifier or a constructor operator; variables likewise.
isLexCon cs = or [isLexConId cs, isLexConSym cs]
isLexVar cs = or [isLexVarId cs, isLexVarSym cs]

-- Identifiers are the alphanumeric lexemes, symbols the operator ones.
isLexId  cs = or [isLexConId cs, isLexVarId cs]
isLexSym cs = or [isLexConSym cs, isLexVarSym cs]
93 | cs == SLIT("[]") = True
94 | c == '(' = True -- (), (,), (,,), ...
95 | otherwise = isUpper c || isUpperISO c
101 | otherwise = isLower c || isLowerISO c
107 | otherwise = c == ':'
114 | otherwise = isSymbolASCII c
-- Symbol characters: the ASCII operator characters of the Haskell
-- report, and the ISO-8859-1 symbol range 0xa1..0xbf together with
-- the multiplication (0xd7) and division (0xf7) signs.
isSymbolASCII c = elem c "!#$%&*+./<=>?@\\^|~-"
isSymbolISO c = let oc = ord c in oc == 0xd7 || oc == 0xf7 || (0xa1 <= oc && oc <= 0xbf)
-- ISO-8859-1 letter ranges: upper case is 0xc0..0xde excluding the
-- multiplication sign (0xd7); lower case is 0xdf..0xff excluding the
-- division sign (0xf7).
isUpperISO c = let oc = ord c in 0xc0 <= oc && oc <= 0xde && oc /= 0xd7
isLowerISO c = let oc = ord c in 0xdf <= oc && oc <= 0xff && oc /= 0xf7
129 %************************************************************************
131 \subsection{Tuple strings -- ugh!}
133 %************************************************************************
-- Build the occurrence name of the n-tuple constructor: "()", "(,)",
-- "(,,)", ...  There is no 1-tuple, so arity 1 is a panic.
mkTupNameStr arity
  = case arity of
      0 -> SLIT("()")
      1 -> panic "Name.mkTupNameStr: 1 ???"
      2 -> _PK_ "(,)"    -- common arities pre-spelt
      3 -> _PK_ "(,,)"
      4 -> _PK_ "(,,,)"
      n -> _PK_ ("(" ++ nOfThem (n-1) ',' ++ ")")
146 %************************************************************************
148 \subsection{Data types}
150 %************************************************************************
152 The token data type, fairly un-interesting except from two constructors,
153 @ITidinfo@ and @ITtype@, which are used to lazily lex id info (arity,
154 strictness, unfolding etc) and types for id decls.
156 The Idea/Observation here is that the renamer needs to scan through
157 all of an interface file before it can continue. But only a fraction
158 of the information contained in the file turns out to be useful, so
159 delaying as much as possible of the scanning and parsing of an
160 interface file Makes Sense (Heap profiles of the compiler
show a reduction in heap usage of at least a factor of two,
164 Hence, the interface file lexer spots when value declarations are
being scanned and returns the @ITidinfo@ and @ITtype@ constructors
166 for the type and any other id info for that binding (unfolding, strictness
167 etc). These constructors are applied to the result of lexing these sub-chunks.
169 The lexing of the type and id info is all done lazily, of course, so
170 the scanning (and subsequent parsing) will be done *only* on the ids the
171 renamer finds out that it is interested in. The rest will just be junked.
172 Laziness, you know it makes sense :-)
176 = ITinterface -- keywords
196 | ITbang -- magic symbols
211 | ITvarid FAST_STRING
212 | ITconid FAST_STRING
213 | ITvarsym FAST_STRING
214 | ITconsym FAST_STRING
215 | ITqvarid (FAST_STRING,FAST_STRING,IfaceFlavour)
216 | ITqconid (FAST_STRING,FAST_STRING,IfaceFlavour)
217 | ITqvarsym (FAST_STRING,FAST_STRING,IfaceFlavour)
218 | ITqconsym (FAST_STRING,FAST_STRING,IfaceFlavour)
220 | ITtysig StringBuffer (Maybe StringBuffer)
221 -- lazily return the stream of tokens for
222 -- the info attached to an id.
223 -- Stuff for reading unfoldings
225 | ITunfold Bool -- True <=> there's an INLINE pragma on this Id
226 | ITstrict [Demand] | ITbottom
227 | ITlam | ITbiglam | ITcase | ITprim_case | ITlet | ITletrec | ITin | ITof
228 | ITcoerce_in | ITcoerce_out | ITatsign
229 | ITccall (Bool,Bool) -- (is_casm, may_gc)
231 | ITchar Char | ITstring FAST_STRING
232 | ITinteger Integer | ITdouble Double
233 | ITinteger_lit | ITfloat_lit | ITrational_lit | ITaddr_lit | ITlit_lit | ITstring_lit
234 | ITunknown String -- Used when the lexer can't make sense of it
235 | ITeof -- end of file token
236 deriving Text -- debugging
238 instance Text CostCentre -- cheat!
242 %************************************************************************
244 \subsection{The lexical analyser}
246 %************************************************************************
249 lexIface :: (IfaceToken -> IfM a) -> IfM a
252 -- if bufferExhausted buf then
255 -- _trace ("Lexer: "++[C# (currentChar# buf)]) $
256 case currentChar# buf of
257 -- whitespace and comments, ignore.
258 ' '# -> lexIface cont (stepOn buf)
259 '\t'# -> lexIface cont (stepOn buf)
260 '\n'# -> \line -> lexIface cont (stepOn buf) (line+1)
262 -- Numbers and comments
264 case lookAhead# buf 1# of
265 '-'# -> lex_comment cont (stepOnBy# buf 2#)
268 then lex_num cont (negate) (ord# c -# ord# '0'#) (incLexeme (incLexeme buf))
271 -- Leave out nested comments for now; danger of finding { and - juxtaposed by mistake?
272 -- '{' : '-' : cs -> lex_nested_comment 1{-one seen-} cs
275 case prefixMatch (stepOn buf) "..)" of
276 Just buf' -> cont ITdotdot (stepOverLexeme buf')
278 case lookAhead# buf 1# of
279 ','# -> lex_tuple cont Nothing (stepOnBy# buf 2#)
280 ')'# -> cont (ITconid SLIT("()")) (stepOnBy# buf 2#)
281 _ -> cont IToparen (stepOn buf)
283 '{'# -> cont ITocurly (stepOn buf)
284 '}'# -> cont ITccurly (stepOn buf)
285 ')'# -> cont ITcparen (stepOn buf)
287 case lookAhead# buf 1# of
288 ']'# -> cont (ITconid SLIT("[]")) (stepOnBy# buf 2#)
289 _ -> cont ITobrack (stepOn buf)
290 ']'# -> cont ITcbrack (stepOn buf)
291 ','# -> cont ITcomma (stepOn buf)
292 ';'# -> cont ITsemi (stepOn buf)
293 '\"'# -> case untilEndOfString# (stepOn buf) of
295 -- the string literal does *not* include the dquotes
296 case lexemeToFastString buf' of
297 v -> cont (ITstring v) (stepOn (stepOverLexeme buf'))
300 -- untilEndOfChar# extends the current lexeme until
301 -- it hits a non-escaped single quote. The lexeme of the
302 -- StringBuffer returned does *not* include the closing quote,
303 -- hence we augment the lexeme and make sure to add the
304 -- starting quote, before `read'ing the string.
306 case untilEndOfChar# (stepOn buf) of
307 buf' -> case reads ('\'':lexemeToString (incLexeme buf')) of
308 [ (ch, rest)] -> cont (ITchar ch) (stepOverLexeme (incLexeme buf'))
310 -- ``thingy'' form for casm
312 case lookAhead# buf 1# of
313 '`'# -> lex_cstring cont (stepOnBy# buf 2#) -- remove the `` and go.
314 _ -> lex_id cont (incLexeme buf) -- add ` to lexeme and assume
315 -- scanning an id of some sort.
318 case lookAhead# buf 1# of
319 'S'# -> case lookAhead# buf 2# of
321 lex_demand cont (stepOnUntil (not . isSpace)
322 (stepOnBy# buf 3#)) -- past _S_
323 's'# -> case prefixMatch (stepOnBy# buf 2#) "cc_" of
324 Just buf' -> lex_scc cont (stepOnUntil (not . isSpace) (stepOverLexeme buf'))
325 Nothing -> lex_keyword cont (stepOnBy# buf 1#) -- drop the '_' and assume
327 _ -> lex_keyword cont (stepOn buf)
330 if bufferExhausted (stepOn buf) then
335 if isDigit (C# c) then
336 lex_num cont (id) (ord# c -# ord# '0'#) (incLexeme buf)
-- Line comment: extend the lexeme up to (but not past) the next
-- newline, then resume the main lexer after it.
lex_comment cont buf =
  case untilChar# buf '\n'# of
    buf' -> lexIface cont (stepOverLexeme buf')
345 lex_demand cont buf =
346 -- _trace ("demand: "++[C# (currentChar# buf)]) $
347 case read_em [] buf of { (ls,buf') -> cont (ITstrict ls) (stepOverLexeme buf')}
349 -- code snatched from Demand.lhs
351 -- _trace ("read_em: "++[C# (currentChar# buf)]) $
352 case currentChar# buf of
353 'L'# -> read_em (WwLazy False : acc) (stepOn buf)
354 'A'# -> read_em (WwLazy True : acc) (stepOn buf)
355 'S'# -> read_em (WwStrict : acc) (stepOn buf)
356 'P'# -> read_em (WwPrim : acc) (stepOn buf)
357 'E'# -> read_em (WwEnum : acc) (stepOn buf)
358 ')'# -> (reverse acc, stepOn buf)
359 'U'# -> do_unpack DataType True acc (stepOnBy# buf 2#)
360 'u'# -> do_unpack DataType False acc (stepOnBy# buf 2#)
361 'N'# -> do_unpack NewType True acc (stepOnBy# buf 2#)
362 'n'# -> do_unpack NewType False acc (stepOnBy# buf 2#)
363 _ -> (reverse acc, buf)
365 do_unpack new_or_data wrapper_unpacks acc buf
366 = case read_em [] buf of
367 (stuff, rest) -> read_em (WwUnpack new_or_data wrapper_unpacks stuff : acc) rest
371 -- _trace ("scc: "++[C# (currentChar# buf)]) $
372 case currentChar# buf of
375 case prefixMatch (stepOn buf) "NO_CC\"" of
376 Just buf' -> cont (ITscc noCostCentre) (stepOverLexeme buf')
378 case prefixMatch (stepOn buf) "CURRENT_CC\"" of
379 Just buf' -> cont (ITscc useCurrentCostCentre) (stepOverLexeme buf')
381 case prefixMatch (stepOn buf) "OVERHEAD\"" of
382 Just buf' -> cont (ITscc overheadCostCentre) (stepOverLexeme buf')
384 case prefixMatch (stepOn buf) "DONT_CARE\"" of
385 Just buf' -> cont (ITscc dontCareCostCentre) (stepOverLexeme buf')
387 case prefixMatch (stepOn buf) "SUBSUMED\"" of
388 Just buf' -> cont (ITscc subsumedCosts) (stepOverLexeme buf')
390 case prefixMatch (stepOn buf) "CAFs_in_...\"" of
391 Just buf' -> cont (ITscc preludeCafsCostCentre) (stepOverLexeme buf')
393 case prefixMatch (stepOn buf) "CC_CAFs_in_..." of
395 case untilChar# (stepOverLexeme buf') '\"'# of
396 buf'' -> cont (ITscc (mkAllCafsCC ({-module-}lexemeToFastString buf'') _NIL_)) (stepOn (stepOverLexeme buf''))
398 case prefixMatch (stepOn buf) "DICTs_in_...\"" of
399 Just buf' -> cont (ITscc (preludeDictsCostCentre True)) (stepOverLexeme buf')
401 case prefixMatch (stepOn buf) "CC_DICTs_in_..." of
403 case untilChar# (stepOverLexeme buf') '\"'# of
404 buf'' -> cont (ITscc (mkAllDictsCC (lexemeToFastString buf'') _NIL_ True))
405 (stepOn (stepOverLexeme buf''))
409 case untilChar# buf '/'# of
411 let mod_name = lexemeToFastString buf' in
412 -- case untilChar# (stepOn (stepOverLexeme buf')) '/'# of
414 -- let grp_name = lexemeToFastString buf'' in
415 case untilChar# (stepOn (stepOverLexeme buf')) '\"'# of
417 -- The label may contain arbitrary characters, so it
418 -- may have been escaped etc., hence we `read' it in to get
419 -- rid of these meta-chars in the string and then pack it (again.)
420 -- ToDo: do the same for module name (single quotes allowed in m-names).
421 -- BTW, the code in this module is totally gruesome..
422 let upk_label = _UNPK_ (lexemeToFastString buf'') in
423 case reads ('"':upk_label++"\"") of
425 let cc_name = _PK_ cc_label in
426 (mkUserCC cc_name mod_name _NIL_{-grp_name-},
427 stepOn (stepOverLexeme buf''))
429 trace ("trouble lexing scc label: " ++ upk_label ++ " , ignoring")
430 (mkUserCC _NIL_ mod_name _NIL_{-grp_name-},
431 stepOn (stepOverLexeme buf''))
433 case prefixMatch (stepOn buf) "CAF:" of
435 case match_user_cc (stepOverLexeme buf') of
436 (cc, buf'') -> cont (ITscc (cafifyCC cc)) buf''
438 case match_user_cc (stepOn buf) of
439 (cc, buf'') -> cont (ITscc cc) buf''
440 c -> cont (ITunknown [C# c]) (stepOn buf)
444 lex_num :: (IfaceToken -> IfM a) ->
445 (Int -> Int) -> Int# -> IfM a
446 lex_num cont minus acc# buf =
447 -- _trace ("lex_num: "++[C# (currentChar# buf)]) $
448 case scanNumLit (I# acc#) buf of
450 case currentChar# buf' of
452 -- this case is not optimised at all, as the
453 -- presence of floating point numbers in interface
454 -- files is not that common. (ToDo)
455 case expandWhile (isDigit) (incLexeme buf') of
456 buf'' -> -- points to first non digit char
457 case reads (lexemeToString buf'') of
458 [(v,_)] -> cont (ITdouble v) (stepOverLexeme buf'')
459 _ -> cont (ITinteger (fromInt (minus acc'))) (stepOverLexeme buf')
461 -- case reads (lexemeToString buf') of
462 -- [(i,_)] -> cont (ITinteger i) (stepOverLexeme buf')
465 lex_keyword cont buf =
466 -- _trace ("lex_keyword: "++[C# (currentChar# buf)]) $
467 case currentChar# buf of
468 ':'# -> case lookAhead# buf 1# of
469 '_'# -> -- a binding, type (and other id-info) follows,
470 -- to make the parser ever so slightly, we push
472 lex_decl cont (stepOnBy# buf 2#)
473 v# -> cont (ITunknown (['_',':',C# v#])) (stepOnBy# buf 2#)
475 case expandWhile (is_kwd_char) buf of
477 let kw = lexemeToFastString buf' in
478 -- _trace ("kw: "++lexemeToString buf') $
479 case lookupUFM ifaceKeywordsFM kw of
480 Nothing -> cont (ITunknown (_UNPK_ kw)) -- (minor) sigh
481 (stepOverLexeme buf')
482 Just xx -> cont xx (stepOverLexeme buf')
485 case doDiscard False buf of -- spin until ;; is found
487 {- _trace (show (lexemeToString buf')) $ -}
488 case currentChar# buf' of
489 '\n'# -> -- newline, no id info.
490 cont (ITtysig (lexemeToBuffer (decLexeme buf')) Nothing)
491 (stepOverLexeme buf')
492 '\r'# -> -- just to be sure for those Win* boxes..
493 cont (ITtysig (lexemeToBuffer (decLexeme buf')) Nothing)
494 (stepOverLexeme buf')
496 cont (ITtysig (lexemeToBuffer (decLexeme buf')) Nothing)
497 (stepOverLexeme buf')
498 c -> -- run all over the id info
499 case doDiscard False (stepOverLexeme buf') of -- spin until ;; is found (outside a string!)
501 --_trace ((C# c):show (lexemeToString (decLexeme buf'))) $
502 --_trace (show (lexemeToString (decLexeme buf''))) $
504 if opt_IgnoreIfacePragmas then
507 Just (lexemeToBuffer (decLexeme buf''))
510 cont (ITtysig (lexemeToBuffer (decLexeme buf')) idinfo)
511 (stepOverLexeme buf'')
514 is_kwd_char c@(C# c#) =
515 isAlphanum c || -- OLD: c `elem` "_@/\\"
-- Lex a ``...'' casm literal: scan until the closing '' is matched,
-- then back the lexeme pointer up two characters so the delimiters
-- are not part of the returned string.
lex_cstring cont buf =
  case expandUntilMatch buf "\'\'" of
    buf' ->
      let lit = lexemeToFastString (setCurrentPos# buf' (negateInt# 2#))
      in cont (ITstring lit) (stepOverLexeme buf')
533 lex_tuple cont module_dot buf =
534 -- _trace ("lex_tuple: "++[C# (currentChar# buf)]) $
538 case currentChar# buf of
539 ','# -> go (n+1) (stepOn buf)
540 ')'# -> end_lex_id cont module_dot (ITconid (mkTupNameStr n)) (stepOn buf)
541 _ -> cont (ITunknown ("tuple " ++ show n)) buf
543 -- Similarly ' itself is ok inside an identifier, but not at the start
545 id_arr :: _ByteArray Int
548 newCharArray (0,255) `thenStrictlyST` \ barr ->
550 loop 256# = returnStrictlyST ()
552 if isAlphanum (C# (chr# i#)) || is_sym (chr# i#) then
553 writeCharArray barr (I# i#) '\1' `seqStrictlyST`
556 writeCharArray barr (I# i#) '\0' `seqStrictlyST`
559 loop 0# `seqStrictlyST`
560 unsafeFreezeByteArray barr)
564 _ByteArray _ arr# = id_arr
566 case ord# (indexCharArray# arr# (ord# c#)) of
570 --OLD: is_id_char c@(C# c#) = isAlphanum c || is_sym c#
574 ':'# -> True; '_'# -> True; '\''# -> True; '!'# -> True;
575 '#'# -> True; '$'# -> True; '%'# -> True;
576 '&'# -> True; '*'# -> True; '+'# -> True; '.'# -> True;
577 '/'# -> True; '<'# -> True; '='# -> True; '>'# -> True;
578 '?'# -> True; '\\'# -> True; '^'# -> True; '|'# -> True;
579 '-'# -> True; '~'# -> True; '@'# -> True; _ -> False }
581 --isAlphanum c || c `elem` ":_'!#$%&*+./<=>?@\\^|-~" -- ToDo: add ISOgraphic
584 mod_arr :: _ByteArray Int
587 newCharArray (0,255) `thenStrictlyST` \ barr ->
589 loop 256# = returnStrictlyST ()
591 if isAlphanum (C# (chr# i#)) || i# ==# (ord# '_'#) || i# ==# (ord# '\''#) then
592 writeCharArray barr (I# i#) '\1' `seqStrictlyST`
595 writeCharArray barr (I# i#) '\0' `seqStrictlyST`
598 loop 0# `seqStrictlyST`
599 unsafeFreezeByteArray barr)
602 is_mod_char (C# c#) =
604 _ByteArray _ arr# = mod_arr
606 case ord# (indexCharArray# arr# (ord# c#)) of
610 --isAlphanum c || c == '_' || c== '\'' --`elem` "_'"
613 -- _trace ("lex_id: "++[C# (currentChar# buf)]) $
614 case expandWhile (is_mod_char) buf of
616 case currentChar# buf' of
617 '.'# -> munch buf' HiFile
618 '!'# -> munch buf' HiBootFile
619 _ -> lex_id2 cont Nothing buf'
622 if not (emptyLexeme buf') then
623 -- _trace ("lex_id: "++(C# (currentChar# (stepOverLexeme buf'))):show (lexemeToFastString buf')) $
624 case lexemeToFastString buf' of
625 l@(FastString u# l# ba#) -> lex_id2 cont (Just (FastString u# l# ba#, hif))
626 (stepOn (stepOverLexeme buf'))
628 lex_id2 cont Nothing buf'
631 -- Dealt with the Module.part
632 lex_id2 cont module_dot buf =
633 -- _trace ("lex_id2: "++[C# (currentChar# buf)]) $
634 case currentChar# buf of
636 '['# -> -- Special case for []
637 case lookAhead# buf 1# of
638 ']'# -> end_lex_id cont module_dot (ITconid SLIT("[]")) (stepOnBy# buf 2#)
639 _ -> lex_id3 cont module_dot buf
641 '('# -> -- Special case for (,,,)
642 case lookAhead# buf 1# of
643 ')'# -> end_lex_id cont module_dot (ITconid SLIT("()")) (stepOnBy# buf 2#)
644 ','# -> lex_tuple cont module_dot (stepOnBy# buf 2#)
645 _ -> lex_id3 cont module_dot buf
646 ':'# -> lex_id3 cont module_dot (incLexeme buf)
649 Nothing -> lex_id3 cont module_dot buf
650 Just ghc -> -- this should be "GHC" (current home of (->))
651 case lookAhead# buf 1# of
652 '>'# -> end_lex_id cont module_dot (ITconid SLIT("->"))
654 _ -> lex_id3 cont module_dot buf
655 _ -> lex_id3 cont module_dot buf
659 -- Dealt with [], (), : special cases
661 lex_id3 cont module_dot buf =
662 -- _trace ("lex_id3: "++[C# (currentChar# buf)]) $
663 case expandWhile (is_id_char) buf of
667 end_lex_id cont module_dot (mk_var_token lexeme) new_buf
669 case _scc_ "Lex.haskellKeyword" lookupUFM haskellKeywordsFM lexeme of
670 Just kwd_token -> cont kwd_token new_buf
671 Nothing -> cont (mk_var_token lexeme) new_buf
673 lexeme = lexemeToFastString buf'
674 new_buf = stepOverLexeme buf'
678 lex_id2 module_dot [] ('[': ']': cs) = end_lex_id module_dot (ITconid SLIT("[]")) cs
679 lex_id2 module_dot [] ('(': ')': cs) = end_lex_id module_dot (ITconid SLIT("()")) cs
680 lex_id2 module_dot [] ('(': ',': cs) = lex_tuple module_dot cs
681 lex_id2 module_dot [] (':' : cs) = lex_id3 module_dot [':'] cs
682 lex_id2 module_dot xs cs = lex_id3 module_dot xs cs
685 -- Dealt with [], (), : special cases
688 lex_id3 module_dot len_xs xs cs =
689 case my_span' (is_id_char) cs of
690 (xs1,len_xs1,rest) ->
692 Just m -> end_lex_id (Just m) (mk_var_token rxs) rest --OLD:_PK_ (reverse xs))) rest
694 case _scc_ "Lex.haskellKeyword" lookupUFM haskellKeywordsFM rxs of
695 Just kwd_token -> kwd_token : lexIface rest
696 other -> token : lexIface cs end_lex_id Nothing (mk_var_token rxs) rest
698 rxs = packNChars (len_xs+len_xs1) (xs++xs1) -- OLD: _PK_ (reverse xs)
700 mk_var_token pk_str =
705 -- These tests assume a non- ISO-8859-1 impl of isUpper&isLower,
706 -- remove the second half of disjunction when using a 1.3 prelude.
708 if isUpper f then ITconid pk_str
709 else if isLower f then ITvarid pk_str
710 else if f == ':' then ITconsym pk_str
711 else if isLowerISO f then ITvarid pk_str
712 else if isUpperISO f then ITconid pk_str
716 mk_var_token xs@(f:_) | isUpper f || isUpperISO f = ITconid n
717 | f == ':' = ITconsym n
718 | isAlpha f = ITvarid n
719 | otherwise = ITvarsym n
724 end_lex_id cont Nothing token buf = cont token buf
725 end_lex_id cont (Just (m,hif)) token buf =
727 ITconid n -> cont (ITqconid (m,n,hif)) buf
728 ITvarid n -> cont (ITqvarid (m,n,hif)) buf
729 ITconsym n -> cont (ITqconsym (m,n,hif)) buf
731 -- Special case for ->
732 -- "->" by itself is a special token (ITrarrow),
733 -- but M.-> is a ITqconid
734 ITvarsym n | n == SLIT("->")
735 -> cont (ITqconsym (m,n,hif)) buf
737 ITvarsym n -> cont (ITqvarsym (m,n,hif)) buf
739 -- ITbang can't happen here I think
740 -- ITbang -> cont (ITqvarsym (m,SLIT("!"),hif)) buf
742 _ -> cont (ITunknown (show token)) buf
745 ifaceKeywordsFM :: UniqFM IfaceToken
746 ifaceKeywordsFM = listToUFM $
747 map (\ (x,y) -> (_PK_ x,y))
750 ,("letrec_", ITletrec)
751 ,("interface_", ITinterface)
752 ,("usages_", ITusages)
753 ,("versions_", ITversions)
754 ,("exports_", ITexports)
755 ,("instance_modules_", ITinstance_modules)
756 ,("instances_", ITinstances)
757 ,("fixities_", ITfixities)
758 ,("declarations_", ITdeclarations)
759 ,("pragmas_", ITpragmas)
760 ,("forall_", ITforall)
761 ,("U_", ITunfold False)
762 ,("U!_", ITunfold True)
764 ,("coerce_in_", ITcoerce_in)
765 ,("coerce_out_", ITcoerce_out)
767 ,("integer_", ITinteger_lit)
768 ,("rational_", ITrational_lit)
769 ,("addr_", ITaddr_lit)
770 ,("float_", ITfloat_lit)
771 ,("string_", ITstring_lit)
772 ,("litlit_", ITlit_lit)
773 ,("ccall_", ITccall (False, False))
774 ,("ccall_GC_", ITccall (False, True))
775 ,("casm_", ITccall (True, False))
776 ,("casm_GC_", ITccall (True, True))
779 haskellKeywordsFM = listToUFM $
780 map (\ (x,y) -> (_PK_ x,y))
783 ,("newtype", ITnewtype)
786 ,("instance", ITinstance)
787 ,("infixl", ITinfixl)
788 ,("infixr", ITinfixr)
791 ,("case#", ITprim_case)
795 ,("deriving", ITderiving)
807 -- doDiscard rips along really fast, looking for a double semicolon,
808 -- indicating the end of the pragma we're skipping
809 doDiscard inStr buf =
810 -- _trace (show (C# (currentChar# buf))) $
811 case currentChar# buf of
814 case lookAhead# buf 1# of
815 ';'# -> incLexeme (incLexeme buf)
816 _ -> doDiscard inStr (incLexeme buf)
818 doDiscard inStr (incLexeme buf)
821 odd_slashes buf flg i# =
822 case lookAhead# buf i# of
823 '\\'# -> odd_slashes buf (not flg) (i# -# 1#)
826 case lookAhead# buf (negateInt# 1#) of --backwards, actually
827 '\\'# -> -- escaping something..
828 if odd_slashes buf True (negateInt# 2#) then
829 -- odd number of slashes, " is escaped.
830 doDiscard inStr (incLexeme buf)
832 -- even number of slashes, \ is escaped.
833 doDiscard (not inStr) (incLexeme buf)
834 _ -> case inStr of -- forced to avoid build-up
835 True -> doDiscard False (incLexeme buf)
836 False -> doDiscard True (incLexeme buf)
837 _ -> doDiscard inStr (incLexeme buf)
-- | Longest prefix of the list satisfying the predicate, paired with
-- the remainder.  This is exactly 'span' from the Prelude; the
-- hand-rolled accumulate-and-reverse loop is replaced by it, keeping
-- the historical name for existing call sites.
my_span :: (a -> Bool) -> [a] -> ([a],[a])
my_span = span
-- | Like 'span', but also returns the length of the matching prefix,
-- saving callers a separate 'length' traversal.  Replaces the
-- hand-rolled accumulate-and-reverse loop with 'span'.
my_span' :: (a -> Bool) -> [a] -> ([a],Int,[a])
my_span' p xs = (pre, length pre, rest)
  where
    (pre, rest) = span p xs
856 %************************************************************************
\subsection{Other utility functions}
860 %************************************************************************
863 type IfM a = StringBuffer -> Int -> MaybeErr a Error
-- | Monadic return for 'IfM': succeed with the given value,
-- ignoring the string buffer and line-number state.
returnIf :: a -> IfM a
returnIf a _ _ = Succeeded a
868 thenIf :: IfM a -> (a -> IfM b) -> IfM b
869 m `thenIf` k = \s l ->
871 Succeeded a -> k a s l
872 Failed err -> Failed err
-- Parser failure continuation: report a parse error at the current
-- line.  The offending-token list is not threaded through yet (ToDo),
-- so an empty list is passed.
happyError _ l = Failed (ifaceParseErr l ([]::[IfaceToken]){-Todo-})
877 -----------------------------------------------------------------
-- Render an interface-file parse error for the given line number,
-- showing at most the first ten offending tokens.  The pretty-printing
-- style argument is accepted but not consulted.
ifaceParseErr l toks _sty
  = hsep [ ptext SLIT("Interface-file parse error: line")
         , int l
         , ptext SLIT("toks=")
         , text (show (take 10 toks))
         ]