2 % (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
4 \section[Lexical analysis]{Lexical analysis}
6 --------------------------------------------------------
8 There's a known bug in here:
10 If an interface file ends prematurely, Lex tries to
11 do headFS of an empty FastString.
13 An example that provokes the error is
15 f _:_ _forall_ [a] <<<END OF FILE>>>
16 --------------------------------------------------------
22 ifaceParseErr, srcParseErr,
25 Token(..), lexer, ParseResult(..), PState(..),
29 P, thenP, thenP_, returnP, mapP, failP, failMsgP,
30 getSrcLocP, getSrcFile,
31 layoutOn, layoutOff, pushContext, popContext
34 #include "HsVersions.h"
36 import Char ( isSpace, toUpper )
37 import List ( isSuffixOf )
39 import IdInfo ( InlinePragInfo(..) )
40 import PrelNames ( mkTupNameStr )
41 import CmdLineOpts ( opt_HiVersion, opt_NoHiCheck )
42 import Demand ( Demand(..) {- instance Read -} )
43 import UniqFM ( listToUFM, lookupUFM )
44 import BasicTypes ( NewOrData(..), Boxity(..) )
45 import SrcLoc ( SrcLoc, incSrcLine, srcLocFile, srcLocLine,
46 replaceSrcLine, mkSrcLoc )
48 import ErrUtils ( Message )
56 import PrelRead ( readRational__ ) -- Glasgow non-std
59 %************************************************************************
61 \subsection{Data types}
63 %************************************************************************
65 The token data type, fairly un-interesting except from one
66 constructor, @ITidinfo@, which is used to lazily lex id info (arity,
67 strictness, unfolding etc).
69 The Idea/Observation here is that the renamer needs to scan through
70 all of an interface file before it can continue. But only a fraction
71 of the information contained in the file turns out to be useful, so
72 delaying as much as possible of the scanning and parsing of an
73 interface file Makes Sense (Heap profiles of the compiler
74 show a reduction in heap usage by at least a factor of two,
77 Hence, the interface file lexer spots when value declarations are
78 being scanned and returns the @ITidinfo@ and @ITtype@ constructors
79 for the type and any other id info for that binding (unfolding, strictness
80 etc). These constructors are applied to the result of lexing these sub-chunks.
82 The lexing of the type and id info is all done lazily, of course, so
83 the scanning (and subsequent parsing) will be done *only* on the ids the
84 renamer finds out that it is interested in. The rest will just be junked.
85 Laziness, you know it makes sense :-)
-- Token type for the Haskell/interface-file lexer.
-- NOTE(review): the 'data Token' header line and many constructors are not
-- visible in this excerpt; only the constructors below are shown -- confirm
-- against the full file before relying on this listing.
89 = ITas -- Haskell keywords
115 | ITforall -- GHC extension keywords
125 | ITinterface -- interface keywords
134 | ITccall (Bool,Bool,Bool) -- (is_dyn, is_casm, may_gc)
153 | ITunfold InlinePragInfo
154 | ITstrict ([Demand], Bool)
161 | ITspecialise_prag -- Pragmas
170 | ITdotdot -- reserved symbols
184 | ITbiglam -- GHC-extension symbols
186 | ITocurly -- special symbols
188 | ITocurlybar -- {|, for type applications
189 | ITccurlybar -- |}, for type applications
-- Identifier tokens carry the lexed name; qualified forms carry
-- (module, name) pairs.
202 | ITvarid FAST_STRING -- identifiers
203 | ITconid FAST_STRING
204 | ITvarsym FAST_STRING
205 | ITconsym FAST_STRING
206 | ITqvarid (FAST_STRING,FAST_STRING)
207 | ITqconid (FAST_STRING,FAST_STRING)
208 | ITqvarsym (FAST_STRING,FAST_STRING)
209 | ITqconsym (FAST_STRING,FAST_STRING)
211 | ITipvarid FAST_STRING -- GHC extension: implicit param: ?x
-- ITpragma holds the raw, unparsed buffer of an iface pragma (lexed lazily).
213 | ITpragma StringBuffer
216 | ITstring FAST_STRING
218 | ITrational Rational
221 | ITprimstring FAST_STRING
223 | ITprimfloat Rational
224 | ITprimdouble Rational
225 | ITlitlit FAST_STRING
227 | ITunknown String -- Used when the lexer can't make sense of it
228 | ITeof -- end of file token
229 deriving Show -- debugging
232 -----------------------------------------------------------------------------
-- Map from recognised {-# ... #-} pragma names to their tokens.
-- Callers upper-case the lexeme before looking it up, so both spellings
-- of SPECIALISE/NOINLINE are listed explicitly.
236 pragmaKeywordsFM = listToUFM $
237 map (\ (x,y) -> (_PK_ x,y))
238 [( "SPECIALISE", ITspecialise_prag ),
239 ( "SPECIALIZE", ITspecialise_prag ),
240 ( "SOURCE", ITsource_prag ),
241 ( "INLINE", ITinline_prag ),
242 ( "NOINLINE", ITnoinline_prag ),
243 ( "NOTINLINE", ITnoinline_prag ),
244 ( "LINE", ITline_prag ),
245 ( "RULES", ITrules_prag ),
246 ( "RULEZ", ITrules_prag ), -- american spelling :-)
247 ( "DEPRECATED", ITdeprecated_prag )
-- Map from Haskell 98 reserved identifiers to their tokens.
-- NOTE(review): several entries (case, data, do, else, if, in, let, of,
-- then, type) are not visible in this excerpt -- confirm against the full
-- file.
250 haskellKeywordsFM = listToUFM $
251 map (\ (x,y) -> (_PK_ x,y))
252 [( "_", ITunderscore ),
255 ( "class", ITclass ),
257 ( "default", ITdefault ),
258 ( "deriving", ITderiving ),
261 ( "hiding", IThiding ),
263 ( "import", ITimport ),
265 ( "infix", ITinfix ),
266 ( "infixl", ITinfixl ),
267 ( "infixr", ITinfixr ),
268 ( "instance", ITinstance ),
270 ( "module", ITmodule ),
271 ( "newtype", ITnewtype ),
273 ( "qualified", ITqualified ),
276 ( "where", ITwhere ),
280 isSpecial :: Token -> Bool
281 -- If we see M.x, where x is a keyword, but
282 -- is special, we treat it as just plain M.x,
-- i.e. these "special ids" (as, hiding, qualified, ...) remain usable as
-- ordinary (qualified) names; lex_id3 consults this predicate for exactly
-- that purpose.
284 isSpecial ITas = True
285 isSpecial IThiding = True
286 isSpecial ITqualified = True
287 isSpecial ITforall = True
288 isSpecial ITexport = True
289 isSpecial ITlabel = True
290 isSpecial ITdynamic = True
291 isSpecial ITunsafe = True
292 isSpecial ITwith = True
293 isSpecial ITccallconv = True
294 isSpecial ITstdcallconv = True
-- NOTE(review): a final catch-all 'isSpecial _ = False' equation is expected
-- to follow; it is not visible in this excerpt -- confirm.
297 -- IMPORTANT: Keep this in synch with ParseIface.y's var_fs production! (SUP)
-- Map from GHC-extension and interface-file keywords to their tokens.
-- Only consulted when glasgow_exts is on (see lex_id).
-- Fix: removed a duplicate ("__depends", ITdepends) entry; listToUFM lets
-- later entries overwrite earlier ones, so the duplicate was redundant.
298 ghcExtensionKeywordsFM = listToUFM $
299 map (\ (x,y) -> (_PK_ x,y))
300 [ ( "forall", ITforall ),
301 ( "foreign", ITforeign ),
302 ( "export", ITexport ),
303 ( "label", ITlabel ),
304 ( "dynamic", ITdynamic ),
305 ( "unsafe", ITunsafe ),
307 ( "stdcall", ITstdcallconv),
308 ( "ccall", ITccallconv),
309 ("_ccall_", ITccall (False, False, False)),
310 ("_ccall_GC_", ITccall (False, False, True)),
311 ("_casm_", ITccall (False, True, False)),
312 ("_casm_GC_", ITccall (False, True, True)),
314 -- interface keywords
315 ("__interface", ITinterface),
317 ("__export", IT__export),
318 ("__depends", ITdepends),
319 ("__forall", IT__forall),
320 ("__letrec", ITletrec),
321 ("__coerce", ITcoerce),
322 ("__inline_me", ITinlineMe),
323 ("__inline_call", ITinlineCall),
325 ("__DEFAULT", ITdefaultbranch),
327 ("__integer", ITinteger_lit),
328 ("__float", ITfloat_lit),
329 ("__int64", ITint64_lit),
330 ("__word", ITword_lit),
331 ("__word64", ITword64_lit),
332 ("__rational", ITrational_lit),
333 ("__addr", ITaddr_lit),
334 ("__label", ITlabel_lit),
335 ("__litlit", ITlit_lit),
336 ("__string", ITstring_lit),
339 ("__fuall", ITfuall),
341 ("__P", ITspecialise),
344 ("__D", ITdeprecated),
345 ("__U", ITunfold NoInlinePragInfo),
347 ("__ccall", ITccall (False, False, False)),
348 ("__ccall_GC", ITccall (False, False, True)),
349 ("__dyn_ccall", ITccall (True, False, False)),
350 ("__dyn_ccall_GC", ITccall (True, False, True)),
351 ("__casm", ITccall (False, True, False)),
352 ("__dyn_casm", ITccall (True, True, False)),
353 ("__casm_GC", ITccall (False, True, True)),
354 ("__dyn_casm_GC", ITccall (True, True, True)),
-- Map from reserved operator symbols (.., ::, =, \, |, <-, ->, @, ~, =>, .)
-- to their tokens. NOTE(review): most entries are missing from this
-- excerpt; only the trailing "." entry is visible.
360 haskellKeySymsFM = listToUFM $
361 map (\ (x,y) -> (_PK_ x,y))
374 ,(".", ITdot) -- sadly, for 'forall a . t'
378 -----------------------------------------------------------------------------
383 - (glaexts) lexing an interface file or -fglasgow-exts
384 - (bol) pointer to beginning of line (for column calculations)
385 - (buf) pointer to beginning of token
386 - (buf) pointer to current char
387 - (atbol) flag indicating whether we're at the beginning of a line
-- Continuation-passing entry point: skip whitespace and comments (tracking
-- line number, beginning-of-line index, and the at-beginning-of-line flag),
-- then hand the next real token's start to lexBOL/lexToken via 'is_a_token'.
-- Handles one-line '--' comments, nested '{- -}' comments, pragma openers,
-- and cpp-style '#line' markers in column 0.
390 lexer :: (Token -> P a) -> P a
391 lexer cont buf s@(PState{
393 glasgow_exts = glaexts,
399 -- first, start a new lexeme and lose all the whitespace
401 tab line bol atbol (stepOverLexeme buf)
403 line = srcLocLine loc
405 tab y bol atbol buf = -- trace ("tab: " ++ show (I# y) ++ " : " ++ show (currentChar buf)) $
406 case currentChar# buf of
409 if bufferExhausted (stepOn buf)
410 then cont ITeof buf s'
411 else trace "lexer: misplaced NUL?" $
412 tab y bol atbol (stepOn buf)
414 '\n'# -> let buf' = stepOn buf
415 in tab (y +# 1#) (currentIndex# buf') 1# buf'
417 -- find comments. This got harder in Haskell 98.
418 '-'# -> let trundle n =
419 let next = lookAhead# buf n in
420 if next `eqChar#` '-'# then trundle (n +# 1#)
421 else if is_symbol next || n <# 2#
424 (stepOnUntilChar# (stepOnBy# buf n) '\n'#)
427 -- comments and pragmas. We deal with LINE pragmas here,
428 -- and throw out any unrecognised pragmas as comments. Any
429 -- pragmas we know about are dealt with later (after any layout
430 -- processing if necessary).
431 '{'# | lookAhead# buf 1# `eqChar#` '-'# ->
432 if lookAhead# buf 2# `eqChar#` '#'# then
433 if lookAhead# buf 3# `eqChar#` '#'# then is_a_token else
434 case expandWhile# is_space (setCurrentPos# buf 3#) of { buf1->
435 case expandWhile# is_ident (stepOverLexeme buf1) of { buf2->
436 let lexeme = mkFastString -- ToDo: too slow
437 (map toUpper (lexemeToString buf2)) in
438 case lookupUFM pragmaKeywordsFM lexeme of
440 line_prag skip_to_end buf2 s'
441 Just other -> is_a_token
442 Nothing -> skip_to_end (stepOnBy# buf 2#) s'
445 else skip_to_end (stepOnBy# buf 2#) s'
447 skip_to_end = nested_comment (lexer cont)
449 -- special GHC extension: we grok cpp-style #line pragmas
450 '#'# | lexemeIndex buf ==# bol -> -- the '#' must be in column 0
451 line_prag next_line (stepOn buf) s'
453 next_line buf = lexer cont (stepOnUntilChar# buf '\n'#)
455 -- tabs have been expanded beforehand
456 c | is_space c -> tab y bol atbol (stepOn buf)
457 | otherwise -> is_a_token
459 where s' = s{loc = replaceSrcLine loc y,
463 is_a_token | atbol /=# 0# = lexBOL cont buf s'
464 | otherwise = lexToken cont glaexts buf s'
466 -- {-# LINE .. #-} pragmas. yeuch.
-- Parse a LINE pragma body: a line number and optional quoted filename,
-- then update the parser state's SrcLoc accordingly and resume via 'cont'.
467 line_prag cont buf s@PState{loc=loc} =
468 case expandWhile# is_space buf of { buf1 ->
469 case scanNumLit 0 (stepOverLexeme buf1) of { (line,buf2) ->
470 -- subtract one: the line number refers to the *following* line.
471 let real_line = line - 1 in
472 case fromInteger real_line of { i@(I# l) ->
473 -- ToDo, if no filename then we skip the newline.... d'oh
474 case expandWhile# is_space buf2 of { buf3 ->
475 case currentChar# buf3 of
477 case untilEndOfString# (stepOn (stepOverLexeme buf3)) of { buf4 ->
479 file = lexemeToFastString buf4
480 new_buf = stepOn (stepOverLexeme buf4)
-- An empty filename keeps the current file; otherwise switch SrcLoc to it.
482 if nullFastString file
483 then cont new_buf s{loc = replaceSrcLine loc l}
484 else cont new_buf s{loc = mkSrcLoc file i}
486 _other -> cont (stepOverLexeme buf3) s{loc = replaceSrcLine loc l}
-- Skip a (possibly nested) {- ... -} comment, maintaining the line count
-- and bol index, then continue with 'cont'. Nesting is handled by
-- recursively wrapping 'cont' for each inner '{-'.
489 nested_comment :: P a -> P a
490 nested_comment cont buf = loop buf
493 case currentChar# buf of
494 '\NUL'# | bufferExhausted (stepOn buf) ->
495 lexError "unterminated `{-'" buf -- -}
496 '-'# | lookAhead# buf 1# `eqChar#` '}'# ->
497 cont (stepOnBy# buf 2#)
499 '{'# | lookAhead# buf 1# `eqChar#` '-'# ->
500 nested_comment (nested_comment cont) (stepOnBy# buf 2#)
502 '\n'# -> \ s@PState{loc=loc} ->
503 let buf' = stepOn buf in
504 nested_comment cont buf'
505 s{loc = incSrcLine loc, bol = currentIndex# buf',
508 _ -> nested_comment cont (stepOn buf)
510 -- When we are lexing the first token of a line, check whether we need to
511 -- insert virtual semicolons or close braces due to layout.
-- Compares the token's column against the innermost Layout context:
-- smaller column => emit virtual '}', equal column => emit ';',
-- otherwise fall through to lexToken.
513 lexBOL :: (Token -> P a) -> P a
514 lexBOL cont buf s@(PState{
516 glasgow_exts = glaexts,
521 if need_close_curly then
522 --trace ("col = " ++ show (I# col) ++ ", layout: inserting '}'") $
523 cont ITvccurly buf s{atbol = 1#, context = tail ctx}
524 else if need_semi_colon then
525 --trace ("col = " ++ show (I# col) ++ ", layout: inserting ';'") $
526 cont ITsemi buf s{atbol = 0#}
528 lexToken cont glaexts buf s{atbol = 0#}
530 col = currentIndex# buf -# bol
543 Layout n -> col ==# n
-- The main token dispatcher: examine the current character and either emit
-- a token directly or hand off to the specialised sub-lexers (lex_string,
-- lex_char, lex_num, lex_id, lex_con, lex_sym, ...). The Int# 'glaexts'
-- flag gates GHC-extension syntax such as (# #), {| |}, ``...'', ?ip and
-- trailing '#' on literals.
546 lexToken :: (Token -> P a) -> Int# -> P a
547 lexToken cont glaexts buf =
548 -- trace "lexToken" $
549 case currentChar# buf of
551 -- special symbols ----------------------------------------------------
552 '('# | flag glaexts && lookAhead# buf 1# `eqChar#` '#'#
553 -> cont IToubxparen (setCurrentPos# buf 2#)
555 -> cont IToparen (incLexeme buf)
557 ')'# -> cont ITcparen (incLexeme buf)
558 '['# -> cont ITobrack (incLexeme buf)
559 ']'# -> cont ITcbrack (incLexeme buf)
560 ','# -> cont ITcomma (incLexeme buf)
561 ';'# -> cont ITsemi (incLexeme buf)
562 '}'# -> \ s@PState{context = ctx} ->
564 (_:ctx') -> cont ITccurly (incLexeme buf) s{context=ctx'}
565 _ -> lexError "too many '}'s" buf s
566 '|'# -> case lookAhead# buf 1# of
567 '}'# | flag glaexts -> cont ITccurlybar
568 (setCurrentPos# buf 2#)
569 _ -> lex_sym cont (incLexeme buf)
572 '#'# -> case lookAhead# buf 1# of
573 ')'# | flag glaexts -> cont ITcubxparen (setCurrentPos# buf 2#)
574 '-'# -> case lookAhead# buf 2# of
575 '}'# -> cont ITclose_prag (setCurrentPos# buf 3#)
576 _ -> lex_sym cont (incLexeme buf)
577 _ -> lex_sym cont (incLexeme buf)
579 '`'# | flag glaexts && lookAhead# buf 1# `eqChar#` '`'#
580 -> lex_cstring cont (setCurrentPos# buf 2#)
582 -> cont ITbackquote (incLexeme buf)
584 '{'# -> -- look for "{-##" special iface pragma
585 case lookAhead# buf 1# of
587 -> cont ITocurlybar (setCurrentPos# buf 2#)
588 '-'# -> case lookAhead# buf 2# of
589 '#'# -> case lookAhead# buf 3# of
592 = doDiscard 0# (stepOnBy# (stepOverLexeme buf) 4#) in
593 cont (ITpragma lexeme) buf'
594 _ -> lex_prag cont (setCurrentPos# buf 3#)
595 _ -> cont ITocurly (incLexeme buf)
-- an explicit '{' turns layout off until the matching '}'
596 _ -> (layoutOff `thenP_` cont ITocurly) (incLexeme buf)
598 -- strings/characters -------------------------------------------------
599 '\"'#{-"-} -> lex_string cont glaexts [] (incLexeme buf)
600 '\''# -> lex_char (char_end cont) glaexts (incLexeme buf)
602 -- strictness and cpr pragmas and __scc treated specially.
603 '_'# | flag glaexts ->
604 case lookAhead# buf 1# of
605 '_'# -> case lookAhead# buf 2# of
607 lex_demand cont (stepOnUntil (not . isSpace)
608 (stepOnBy# buf 3#)) -- past __S
610 cont ITcprinfo (stepOnBy# buf 3#) -- past __M
613 case prefixMatch (stepOnBy# buf 3#) "cc" of
614 Just buf' -> lex_scc cont (stepOverLexeme buf')
615 Nothing -> lex_id cont glaexts buf
616 _ -> lex_id cont glaexts buf
617 _ -> lex_id cont glaexts buf
619 -- Hexadecimal and octal constants
620 '0'# | (ch `eqChar#` 'x'# || ch `eqChar#` 'X'#) && is_hexdigit ch2
621 -> readNum (after_lexnum cont glaexts) buf' is_hexdigit 16 hex
622 | (ch `eqChar#` 'o'# || ch `eqChar#` 'O'#) && is_octdigit ch2
623 -> readNum (after_lexnum cont glaexts) buf' is_octdigit 8 oct_or_dec
624 where ch = lookAhead# buf 1#
625 ch2 = lookAhead# buf 2#
626 buf' = setCurrentPos# buf 2#
629 if bufferExhausted (stepOn buf) then
632 trace "lexIface: misplaced NUL?" $
633 cont (ITunknown "\NUL") (stepOn buf)
635 '?'# | flag glaexts && is_lower (lookAhead# buf 1#) ->
636 lex_ip cont (incLexeme buf)
637 c | is_digit c -> lex_num cont glaexts 0 buf
638 | is_symbol c -> lex_sym cont buf
639 | is_upper c -> lex_con cont glaexts buf
640 | is_ident c -> lex_id cont glaexts buf
641 | otherwise -> lexError "illegal character" buf
643 -- Int# is unlifted, and therefore faster than Bool for flags.
649 -------------------------------------------------------------------------------
-- lex_prag: look up a recognised pragma keyword (upper-cased) and emit its
-- token spanning from the pragma opener through the keyword.
-- NOTE(review): the 'lex_prag cont buf' equation head is not visible in
-- this excerpt -- confirm.
653 = case expandWhile# is_space buf of { buf1 ->
654 case expandWhile# is_ident (stepOverLexeme buf1) of { buf2 ->
655 let lexeme = mkFastString (map toUpper (lexemeToString buf2)) in
656 case lookupUFM pragmaKeywordsFM lexeme of
657 Just kw -> cont kw (mergeLexemes buf buf2)
-- lexer should only hand us pragmas it already recognised
658 Nothing -> panic "lex_prag"
661 -------------------------------------------------------------------------------
-- Accumulate the characters of a string literal (in reverse, in 's') until
-- the closing quote. A trailing '#' (glasgow exts) makes it a primitive
-- string, which must be all bytes <= 0xFF. Handles "\&" no-ops and
-- backslash string gaps; everything else goes through lex_char.
664 lex_string cont glaexts s buf
665 = case currentChar# buf of
667 let buf' = incLexeme buf; s' = mkFastStringInt (reverse s) in
668 case currentChar# buf' of
669 '#'# | flag glaexts -> if all (<= 0xFF) s
670 then cont (ITprimstring s') (incLexeme buf')
671 else lexError "primitive string literal must contain only characters <= '\xFF'" buf'
672 _ -> cont (ITstring s') buf'
674 -- ignore \& in a string, deal with string gaps
675 '\\'# | next_ch `eqChar#` '&'#
676 -> lex_string cont glaexts s buf'
678 -> lex_stringgap cont glaexts s (incLexeme buf)
680 where next_ch = lookAhead# buf 1#
681 buf' = setCurrentPos# buf 2#
683 _ -> lex_char (lex_next_string cont s) glaexts buf
-- Skip a string gap (backslash, whitespace, backslash): consume whitespace
-- (bumping the line count on newlines) until the closing '\\', then resume
-- lex_string. Any non-space character inside the gap is an error.
685 lex_stringgap cont glaexts s buf
686 = let buf' = incLexeme buf in
687 case currentChar# buf of
688 '\n'# -> \st@PState{loc = loc} -> lex_stringgap cont glaexts s buf'
689 st{loc = incSrcLine loc}
690 '\\'# -> lex_string cont glaexts s buf'
691 c | is_space c -> lex_stringgap cont glaexts s buf'
692 other -> charError buf'
694 lex_next_string cont s glaexts c buf = lex_string cont glaexts (c:s) buf
-- Lex one character (escaped or literal) and pass its code point (as Int)
-- to the continuation along with the glaexts flag.
696 lex_char :: (Int# -> Int -> P a) -> Int# -> P a
697 lex_char cont glaexts buf
698 = case currentChar# buf of
699 '\\'# -> lex_escape (cont glaexts) (incLexeme buf)
700 c | is_any c -> cont glaexts (I# (ord# c)) (incLexeme buf)
701 other -> charError buf
-- Finish a character literal: require the closing quote, then emit
-- ITprimchar if a trailing '#' follows (visible case) or ITchar otherwise.
703 char_end cont glaexts c buf
704 = case currentChar# buf of
705 '\''# -> let buf' = incLexeme buf in
706 case currentChar# buf' of
708 -> cont (ITprimchar c) (incLexeme buf')
709 _ -> cont (ITchar c) buf'
-- lex_escape: decode a backslash escape to a character code and pass it to
-- 'cont'. Covers single-char escapes, control escapes (^@ .. ^_), numeric
-- escapes (hex/octal/decimal via readNum), and named ASCII escapes via
-- silly_escape_chars. NOTE(review): the equation head and some alternatives
-- are not visible in this excerpt -- confirm.
713 = let buf' = incLexeme buf in
714 case currentChar# buf of
715 'a'# -> cont (ord '\a') buf'
716 'b'# -> cont (ord '\b') buf'
717 'f'# -> cont (ord '\f') buf'
718 'n'# -> cont (ord '\n') buf'
719 'r'# -> cont (ord '\r') buf'
720 't'# -> cont (ord '\t') buf'
721 'v'# -> cont (ord '\v') buf'
722 '\\'# -> cont (ord '\\') buf'
723 '"'# -> cont (ord '\"') buf'
724 '\''# -> cont (ord '\'') buf'
725 '^'# -> let c = currentChar# buf' in
726 if c `geChar#` '@'# && c `leChar#` '_'#
727 then cont (I# (ord# c -# ord# '@'#)) (incLexeme buf')
730 'x'# -> readNum (after_charnum cont) buf' is_hexdigit 16 hex
731 'o'# -> readNum (after_charnum cont) buf' is_octdigit 8 oct_or_dec
733 -> readNum (after_charnum cont) buf is_digit 10 oct_or_dec
735 _ -> case [ (c,buf2) | (p,c) <- silly_escape_chars,
736 Just buf2 <- [prefixMatch buf p] ] of
737 (c,buf2):_ -> cont (ord c) buf2
-- Range-check a numeric character escape (0 .. 0x7FFFFFFF) before handing
-- the value on; the out-of-range branch is not visible in this excerpt.
740 after_charnum cont i buf
741 = if i >= 0 && i <= 0x7FFFFFFF
742 then cont (fromInteger i) buf
-- Generic positional-number reader: accumulate digits accepted by
-- 'is_digit' into an Integer using 'base' and the digit-value function
-- 'conv'. NOTE(review): interior lines of the local 'read' loop are
-- missing from this excerpt.
745 readNum cont buf is_digit base conv = read buf 0
747 = case currentChar# buf of { c ->
749 then read (incLexeme buf) (i*base + (toInteger (I# (conv c))))
-- Character-class predicates and digit-value conversions used by readNum.
-- NOTE(review): the heads of is_hexdigit and to_lower are not visible in
-- this excerpt.
755 || (c `geChar#` 'a'# && c `leChar#` 'f'#)
756 || (c `geChar#` 'A'# && c `leChar#` 'F'#)
758 hex c | is_digit c = ord# c -# ord# '0'#
759 | otherwise = ord# (to_lower c) -# ord# 'a'# +# 10#
760 oct_or_dec c = ord# c -# ord# '0'#
762 is_octdigit c = c `geChar#` '0'# && c `leChar#` '7'#
765 | c `geChar#` 'A'# && c `leChar#` 'Z'#
766 = chr# (ord# c -# (ord# 'A'# -# ord# 'a'#))
-- Uniform error for malformed character literals/escapes.
769 charError buf = lexError "error in character literal" buf
771 silly_escape_chars = [
808 -------------------------------------------------------------------------------
-- Lex an interface-file strictness signature (__S ...) into a [Demand]
-- plus a bottoming flag ('B' suffix). read_em decodes one demand letter
-- per character; U/u/N/n introduce a parenthesised unpacked sub-signature
-- handled by do_unpack.
810 lex_demand cont buf =
811 case read_em [] buf of { (ls,buf') ->
812 case currentChar# buf' of
813 'B'# -> cont (ITstrict (ls, True )) (incLexeme buf')
814 _ -> cont (ITstrict (ls, False)) buf'
817 -- code snatched from Demand.lhs
-- NOTE(review): the 'read_em acc buf =' equation head is not visible in
-- this excerpt.
819 case currentChar# buf of
820 'L'# -> read_em (WwLazy False : acc) (stepOn buf)
821 'A'# -> read_em (WwLazy True : acc) (stepOn buf)
822 'S'# -> read_em (WwStrict : acc) (stepOn buf)
823 'P'# -> read_em (WwPrim : acc) (stepOn buf)
824 'E'# -> read_em (WwEnum : acc) (stepOn buf)
825 ')'# -> (reverse acc, stepOn buf)
826 'U'# -> do_unpack DataType True acc (stepOnBy# buf 2#)
827 'u'# -> do_unpack DataType False acc (stepOnBy# buf 2#)
828 'N'# -> do_unpack NewType True acc (stepOnBy# buf 2#)
829 'n'# -> do_unpack NewType False acc (stepOnBy# buf 2#)
830 _ -> (reverse acc, buf)
-- Parse the demands inside an unpacking, then resume the outer signature.
832 do_unpack new_or_data wrapper_unpacks acc buf
833 = case read_em [] buf of
834 (stuff, rest) -> read_em (WwUnpack new_or_data wrapper_unpacks stuff : acc) rest
-- lex_scc: distinguish __sccC (all CAFs) from plain __scc.
-- NOTE(review): the equation head is not visible in this excerpt.
839 case currentChar# buf of
840 'C'# -> cont ITsccAllCafs (incLexeme buf)
841 other -> cont ITscc buf
843 -----------------------------------------------------------------------------
-- Lex a decimal numeric literal starting from accumulator 'acc'. A '.'
-- followed by a digit switches to floating-point lexing (mantissa plus
-- optional signed exponent), producing ITrational -- or ITprimfloat /
-- ITprimdouble for '#' / '##' suffixes under glasgow exts. Plain integers
-- are finished by after_lexnum.
846 lex_num :: (Token -> P a) -> Int# -> Integer -> P a
847 lex_num cont glaexts acc buf =
848 case scanNumLit acc buf of
850 case currentChar# buf' of
851 '.'# | is_digit (lookAhead# buf' 1#) ->
852 -- this case is not optimised at all, as the
853 -- presence of floating point numbers in interface
854 -- files is not that common. (ToDo)
855 case expandWhile# is_digit (incLexeme buf') of
856 buf2 -> -- points to first non digit char
858 let l = case currentChar# buf2 of
864 = let buf3 = incLexeme buf2 in
865 case currentChar# buf3 of
866 '-'# -> expandWhile# is_digit (incLexeme buf3)
867 '+'# -> expandWhile# is_digit (incLexeme buf3)
868 x | is_digit x -> expandWhile# is_digit buf3
871 v = readRational__ (lexemeToString l)
873 in case currentChar# l of -- glasgow exts only
874 '#'# | flag glaexts -> let l' = incLexeme l in
875 case currentChar# l' of
876 '#'# -> cont (ITprimdouble v) (incLexeme l')
877 _ -> cont (ITprimfloat v) l'
878 _ -> cont (ITrational v) l
880 _ -> after_lexnum cont glaexts acc' buf'
-- Finish an integer literal: a trailing '#' (glasgow exts) makes it an
-- unboxed ITprimint, otherwise ITinteger.
882 after_lexnum cont glaexts i buf
883 = case currentChar# buf of
884 '#'# | flag glaexts -> cont (ITprimint i) (incLexeme buf)
885 _ -> cont (ITinteger i) buf
887 -----------------------------------------------------------------------------
888 -- C "literal literal"s (i.e. things like ``NULL'', ``stdout'' etc.)
890 -- we lexemeToFastString on the bit between the ``''s, but include the
891 -- quotes in the full lexeme.
-- Lex a C "literal literal" (``NULL''): scan forward to the closing '',
-- emit the text between the quotes (back up 2 chars to drop them), and
-- keep the quotes in the merged lexeme for location purposes.
893 lex_cstring cont buf =
894 case expandUntilMatch (stepOverLexeme buf) "\'\'" of
895 Just buf' -> cont (ITlitlit (lexemeToFastString
896 (setCurrentPos# buf' (negateInt# 2#))))
897 (mergeLexemes buf buf')
898 Nothing -> lexError "unterminated ``" buf
900 -----------------------------------------------------------------------------
901 -- identifiers, symbols etc.
-- lex_ip: lex an implicit-parameter name (?x) into ITipvarid.
-- NOTE(review): the 'lex_ip cont buf =' equation head is not visible in
-- this excerpt.
904 case expandWhile# is_ident buf of
905 buf' -> cont (ITipvarid lexeme) buf'
906 where lexeme = lexemeToFastString buf'
-- Lex a lower-case identifier: slurp identifier chars (plus trailing '#'s
-- under glasgow exts), then classify it -- Haskell keyword first, then GHC
-- extension keyword (only when glaexts is on), otherwise a plain ITvarid.
908 lex_id cont glaexts buf =
909 let buf1 = expandWhile# is_ident buf in
912 case (if flag glaexts
913 then expandWhile# (eqChar# '#'#) buf1 -- slurp trailing hashes
914 else buf1) of { buf' ->
916 let lexeme = lexemeToFastString buf' in
918 case _scc_ "Lex.haskellKeyword" lookupUFM haskellKeywordsFM lexeme of {
919 Just kwd_token -> --trace ("hkeywd: "++_UNPK_(lexeme)) $
923 let var_token = cont (ITvarid lexeme) buf' in
925 if not (flag glaexts)
929 case lookupUFM ghcExtensionKeywordsFM lexeme of {
930 Just kwd_token -> cont kwd_token buf';
-- lex_sym: lex an operator symbol -- reserved symbols via haskellKeySymsFM,
-- anything else classified by mk_var_token (ITvarsym/ITconsym).
-- NOTE(review): the equation head is not visible in this excerpt.
937 case expandWhile# is_symbol buf of
938 buf' -> case lookupUFM haskellKeySymsFM lexeme of {
939 Just kwd_token -> --trace ("keysym: "++unpackFS lexeme) $
940 cont kwd_token buf' ;
941 Nothing -> --trace ("sym: "++unpackFS lexeme) $
942 cont (mk_var_token lexeme) buf'
944 where lexeme = lexemeToFastString buf'
-- Lex an upper-case name: slurp the conid (plus trailing hashes), then
-- either emit ITconid, or -- on a following '.' (not visible here) --
-- 'munch' past the dot and try to lex a qualified name via lex_qid,
-- falling back to 'just_a_conid'.
947 lex_con cont glaexts buf =
948 -- trace ("con: "{-++unpackFS lexeme-}) $
949 case expandWhile# is_ident buf of { buf1 ->
950 case slurp_trailing_hashes buf1 glaexts of { buf' ->
952 case currentChar# buf' of
957 just_a_conid = cont (ITconid lexeme) buf'
958 lexeme = lexemeToFastString buf'
959 munch = lex_qid cont glaexts lexeme (incLexeme buf') just_a_conid
-- Lex the part after "M." of a qualified name. Special-cases the built-in
-- syntax M.[], M.(), M.(,,) and M.(# ... #), and M.(->); everything else is
-- handled by lex_id3. 'just_a_conid' is the fallback continuation that
-- re-emits the already-lexed ITconid for M alone.
962 lex_qid cont glaexts mod buf just_a_conid =
963 -- trace ("quid: "{-++unpackFS lexeme-}) $
964 case currentChar# buf of
965 '['# -> -- Special case for []
966 case lookAhead# buf 1# of
967 ']'# -> cont (ITqconid (mod,SLIT("[]"))) (setCurrentPos# buf 2#)
970 '('# -> -- Special case for (,,,)
971 -- This *is* necessary to deal with e.g. "instance C PrelBase.(,,)"
972 case lookAhead# buf 1# of
973 '#'# | flag glaexts -> case lookAhead# buf 2# of
974 ','# -> lex_ubx_tuple cont mod (setCurrentPos# buf 3#)
977 ')'# -> cont (ITqconid (mod,SLIT("()"))) (setCurrentPos# buf 2#)
978 ','# -> lex_tuple cont mod (setCurrentPos# buf 2#) just_a_conid
981 '-'# -> case lookAhead# buf 1# of
982 '>'# -> cont (ITqconid (mod,SLIT("(->)"))) (setCurrentPos# buf 2#)
983 _ -> lex_id3 cont glaexts mod buf just_a_conid
984 _ -> lex_id3 cont glaexts mod buf just_a_conid
-- Lex the ordinary name after "M.": a symbol run becomes a qualified
-- operator (M.<sym>), an identifier run becomes a qualified var/con id --
-- unless it is a non-special keyword (e.g. M.where), in which case we back
-- off to 'just_a_conid' so M is re-emitted alone.
986 lex_id3 cont glaexts mod buf just_a_conid
987 | is_symbol (currentChar# buf) =
989 start_new_lexeme = stepOverLexeme buf
991 -- trace ("lex_id31 "{-++unpackFS lexeme-}) $
992 case expandWhile# is_symbol start_new_lexeme of { buf' ->
994 lexeme = lexemeToFastString buf'
995 -- real lexeme is M.<sym>
996 new_buf = mergeLexemes buf buf'
998 cont (mk_qvar_token mod lexeme) new_buf
999 -- wrong, but arguably morally right: M... is now a qvarsym
1004 start_new_lexeme = stepOverLexeme buf
1006 -- trace ("lex_id32 "{-++unpackFS lexeme-}) $
1007 case expandWhile# is_ident start_new_lexeme of { buf1 ->
1012 case slurp_trailing_hashes buf1 glaexts of { buf' ->
1015 lexeme = lexemeToFastString buf'
1016 new_buf = mergeLexemes buf buf'
1017 is_a_qvarid = cont (mk_qvar_token mod lexeme) new_buf
1019 case _scc_ "Lex.haskellKeyword" lookupUFM haskellKeywordsFM lexeme of {
1020 Nothing -> is_a_qvarid ;
1022 Just kwd_token | isSpecial kwd_token -- special ids (as, qualified, hiding) shouldn't be
1023 -> is_a_qvarid -- recognised as keywords here.
1025 -> just_a_conid -- avoid M.where etc.
-- Consume trailing '#' chars after a name, but only under glasgow exts.
-- NOTE(review): the '| otherwise = buf' guard is expected to follow; it is
-- not visible in this excerpt -- confirm.
1028 slurp_trailing_hashes buf glaexts
1029 | flag glaexts = expandWhile# (`eqChar#` '#'#) buf
-- mk_var_token: classify a lexeme by its first character into
-- ITconid / ITvarid / ITconsym / ITvarsym.
-- NOTE(review): the 'mk_var_token pk_str' equation head is not visible in
-- this excerpt.
1034 | is_upper f = ITconid pk_str
1035 | is_ident f = ITvarid pk_str
1036 | f `eqChar#` ':'# = ITconsym pk_str
1037 | otherwise = ITvarsym pk_str
1039 (C# f) = _HEAD_ pk_str
1040 -- tl = _TAIL_ pk_str
-- Qualify a lexed name: classify it with mk_var_token, then pair the
-- result with its module string in the corresponding ITq... constructor.
-- Anything unclassifiable is reported via ITunknown.
mk_qvar_token m token =
  case mk_var_token token of
    ITconid  n -> ITqconid  (m, n)
    ITvarid  n -> ITqvarid  (m, n)
    ITconsym n -> ITqconsym (m, n)
    ITvarsym n -> ITqvarsym (m, n)
    _          -> ITunknown (show token)
1052 ----------------------------------------------------------------------------
1053 Horrible stuff for dealing with M.(,,,)
-- Lex the commas of a boxed tuple constructor M.(,,,): count commas, then
-- on ')' emit the qualified tuple name via mkTupNameStr. 'back_off' is the
-- fallback when the input is not actually a tuple.
1056 lex_tuple cont mod buf back_off =
1060 case currentChar# buf of
1061 ','# -> go (n+1) (stepOn buf)
1062 ')'# -> cont (ITqconid (mod, snd (mkTupNameStr Boxed n))) (stepOn buf)
-- Like lex_tuple, but for unboxed tuples M.(# ,, #): the close is '#)'.
1065 lex_ubx_tuple cont mod buf back_off =
1069 case currentChar# buf of
1070 ','# -> go (n+1) (stepOn buf)
1071 '#'# -> case lookAhead# buf 1# of
1072 ')'# -> cont (ITqconid (mod, snd (mkTupNameStr Unboxed n)))
1078 -----------------------------------------------------------------------------
1079 doDiscard rips along really fast, looking for a '##-}',
1080 indicating the end of the pragma we're skipping
-- Skim to the '##-}' closing an iface pragma, returning (pragma text,
-- buffer after the close). 'inStr' (Int# used as a Bool) tracks whether we
-- are inside a string literal, so a "##-}" inside a string is not taken as
-- the terminator; odd_slashes counts preceding backslashes to decide
-- whether a '"' is escaped.
1083 doDiscard inStr buf =
1084 case currentChar# buf of
1085 '#'# | inStr ==# 0# ->
1086 case lookAhead# buf 1# of { '#'# ->
1087 case lookAhead# buf 2# of { '-'# ->
1088 case lookAhead# buf 3# of { '}'# ->
1089 (lexemeToBuffer buf, stepOverLexeme (setCurrentPos# buf 4#));
1090 _ -> doDiscard inStr (incLexeme buf) };
1091 _ -> doDiscard inStr (incLexeme buf) };
1092 _ -> doDiscard inStr (incLexeme buf) }
1096 odd_slashes buf flg i# =
1097 case lookAhead# buf i# of
1098 '\\'# -> odd_slashes buf (not flg) (i# -# 1#)
1101 not_inStr = if inStr ==# 0# then 1# else 0#
1103 case lookAhead# buf (negateInt# 1#) of --backwards, actually
1104 '\\'# -> -- escaping something..
1105 if odd_slashes buf True (negateInt# 2#)
1106 then -- odd number of slashes, " is escaped.
1107 doDiscard inStr (incLexeme buf)
1108 else -- even number of slashes, \ is escaped.
1109 doDiscard not_inStr (incLexeme buf)
1110 _ -> doDiscard not_inStr (incLexeme buf)
1112 '\''# | inStr ==# 0# ->
1113 case lookAhead# buf 1# of { '"'# ->
1114 case lookAhead# buf 2# of { '\''# ->
1115 doDiscard inStr (setCurrentPos# buf 3#);
1116 _ -> doDiscard inStr (incLexeme buf) };
1117 _ -> doDiscard inStr (incLexeme buf) }
1119 _ -> doDiscard inStr (incLexeme buf)
1123 -----------------------------------------------------------------------------
-- Parser/lexer state threaded through P. NOTE(review): several fields
-- (loc, bol, atbol, ...) referenced elsewhere in this file are not visible
-- in this excerpt.
1134 data PState = PState {
1136 glasgow_exts :: Int#, -- Int#-as-Bool: GHC extensions enabled?
1139 context :: [LayoutContext] -- layout context stack, innermost first
1142 type P a = StringBuffer -- Input string
1147 returnP a buf s = POk s a
-- Monadic bind for P: run 'm'; on success feed its result to 'k' with the
-- updated state, on failure propagate the error. NOTE(review): the
-- 'case m buf s of' scrutinee line is not visible in this excerpt.
1149 thenP :: P a -> (a -> P b) -> P b
1150 m `thenP` k = \ buf s ->
1152 POk s1 a -> k a buf s1
1153 PFailed err -> PFailed err
-- Sequence two parser actions, discarding the result of the first.
thenP_ :: P a -> P b -> P b
thenP_ m n = m `thenP` const n
-- Monadic map over a list in P. NOTE(review): the cons-case head and its
-- remaining lines are not visible in this excerpt.
1158 mapP :: (a -> P b) -> [a] -> P [b]
1159 mapP f [] = returnP []
1162 mapP f as `thenP` \bs ->
-- Fail with a plain-string message (wrapped into a Message via 'text').
1165 failP :: String -> P a
1166 failP msg buf s = PFailed (text msg)
-- Fail with an already-constructed Message.
1168 failMsgP :: Message -> P a
1169 failMsgP msg buf s = PFailed msg
-- Fail with a lexical-error message prefixed by the current source
-- location from the parser state.
1171 lexError :: String -> P a
1172 lexError str buf s@PState{ loc = loc }
1173 = failMsgP (hcat [ppr loc, text ": ", text str]) buf s
-- Fetch the current source location from the parser state.
getSrcLocP :: P SrcLoc
getSrcLocP _buf st@(PState{ loc = l }) = POk st l
-- Fetch the current source file name (from the SrcLoc in the state).
1178 getSrcFile :: P FAST_STRING
1179 getSrcFile buf s@(PState{ loc = loc }) = POk s (srcLocFile loc)
-- Fetch the current layout-context stack.
1181 getContext :: P [LayoutContext]
1182 getContext buf s@(PState{ context = ctx }) = POk s ctx
-- Push a new layout context onto the stack.
1184 pushContext :: LayoutContext -> P ()
1185 pushContext ctxt buf s@(PState{ context = ctx }) = POk s{context = ctxt:ctx} ()
1189 This special case in layoutOn is to handle layout contexts which are
1190 indented the same or less than the current context. This is illegal
1191 according to the Haskell spec, so we have to arrange to close the
1192 current context. eg.
1197 after the first 'where', the sequence of events is:
1199 - layout system inserts a ';' (column 0)
1200 - parser begins a new context at column 0
1201 - parser shifts ';' (legal empty declaration)
1202 - parser sees 'class': parse error (we're still in the inner context)
1204 trouble is, by the time we know we need a new context, the lexer has
1205 already generated the ';'. Hacky solution is as follows: since we
1206 know the column of the next token (it's the column number of the new
1207 context), we set the ACTUAL column number of the new context to this
1208 number plus one. Hence the next time the lexer is called, a '}' will
1209 be generated to close the new context straight away. Furthermore, we
1210 have to set the atbol flag so that the ';' that the parser shifted as
1211 part of the new context is re-generated.
1213 when the new context is *less* indented than the current one:
1215 f = f where g = g where
1218 - current context: column 12.
1219 - on seeing 'h' (column 0), the layout system inserts '}'
1220 - parser starts a new context, column 0
1221 - parser sees '}', uses it to close new context
1222 - we still need to insert another '}' followed by a ';',
1223 hence the atbol trick.
1225 There's also a special hack in here to deal with
1232 i.e. the inner context is at the same indentation level as the outer
1233 context. This is strictly illegal according to Haskell 98, but
1234 there's a lot of existing code using this style and it doesn't make
1235 any sense to disallow it, since empty 'do' lists don't make sense.
-- Open a new layout context at the current token's column. If the new
-- context is not more indented than the enclosing one (>= when 'strict',
-- > otherwise -- the empty-'do' concession), record column+1 and set atbol
-- so the next lexer call immediately closes it again; see the long comment
-- above. NOTE(review): the 'case ctx of' scrutinee lines are not visible
-- in this excerpt.
1238 layoutOn :: Bool -> P ()
1239 layoutOn strict buf s@(PState{ bol = bol, context = ctx }) =
1240 let offset = lexemeIndex buf -# bol in
1243 | if strict then prev_off >=# offset else prev_off ># offset ->
1244 --trace ("layout on, column: " ++ show (I# offset)) $
1245 POk s{ context = Layout (offset +# 1#) : ctx, atbol = 1# } ()
1247 --trace ("layout on, column: " ++ show (I# offset)) $
1248 POk s{ context = Layout offset : ctx } ()
-- Disable layout by pushing an explicit-brace (NoLayout) context.
layoutOff _buf st@(PState{ context = stack }) =
  POk st{ context = NoLayout : stack } ()
-- Pop the innermost layout context; popping an empty stack is a parse
-- error at the current location. NOTE(review): the 'case ctx of' scrutinee
-- line is not visible in this excerpt.
1255 popContext = \ buf s@(PState{ context = ctx, loc = loc }) ->
1257 (_:tl) -> POk s{ context = tl } ()
1258 [] -> PFailed (srcParseErr buf loc)
1261 Note that if the name of the file we're processing ends
1262 with `hi-boot', we accept it on faith as having the right
1263 version. This is done so that .hi-boot files that come
1264 with hsc don't have to be updated before every release,
1265 *and* it allows us to share .hi-boot files with versions
1266 of hsc that don't have .hi version checking (e.g., ghc-2.10's)
1268 If the version number is 0, the checking is also turned off.
1269 (needed to deal with GHC.hi only!)
1271 Once we can assume we're compiling with a version of ghc that
1272 supports interface file checking, we can drop the special
-- Check an interface-file version number against this compiler's
-- opt_HiVersion. Version 0 and -fno-hi-version-check both disable the
-- check; files named *.hi-boot are accepted on faith (see comment above).
1275 checkVersion :: Maybe Integer -> P ()
1276 checkVersion mb@(Just v) buf s@(PState{loc = loc})
1277 | (v==0) || (v == fromInt opt_HiVersion) || opt_NoHiCheck = POk s ()
1278 | otherwise = PFailed (ifaceVersionErr mb loc ([]::[Token]){-Todo-})
1279 checkVersion mb@Nothing buf s@(PState{loc = loc})
1280 | "hi-boot" `isSuffixOf` (_UNPK_ (srcLocFile loc)) = POk s ()
1281 | otherwise = PFailed (ifaceVersionErr mb loc ([]::[Token]){-Todo-})
1283 -----------------------------------------------------------------
-- Render an interface-file parse error, quoting the offending lexeme.
-- NOTE(review): the 'ifaceParseErr s l' equation head is not visible in
-- this excerpt.
1285 ifaceParseErr :: StringBuffer -> SrcLoc -> Message
1287 = hsep [ppr l, ptext SLIT("Interface file parse error; on input `"),
1288 text (lexemeToString s), char '\'']
-- Render an interface-file version mismatch: expected opt_HiVersion,
-- found either a concrete version or "pre ghc-3.02" when none was present.
-- NOTE(review): the 'where pp_version = case hi_vers of' lines are not
-- visible in this excerpt.
1290 ifaceVersionErr hi_vers l toks
1291 = hsep [ppr l, ptext SLIT("Interface file version error;"),
1292 ptext SLIT("Expected"), int opt_HiVersion,
1293 ptext SLIT("found "), pp_version]
1297 Nothing -> ptext SLIT("pre ghc-3.02 version")
1298 Just v -> ptext SLIT("version") <+> integer v
1300 -----------------------------------------------------------------------------
-- Render a source-file parse error; an empty offending token suggests a
-- layout problem, otherwise quote the token. NOTE(review): the equation
-- head and the 'if'/'where' header lines are not visible in this excerpt.
1302 srcParseErr :: StringBuffer -> SrcLoc -> Message
1306 then ptext SLIT(": parse error (possibly incorrect indentation)")
1307 else hcat [ptext SLIT(": parse error on input "),
1308 char '`', text token, char '\'']
1311 token = lexemeToString s