2 % (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
4 \section[Lexical analysis]{Lexical analysis}
6 --------------------------------------------------------
8 There's a known bug in here:
10 If an interface file ends prematurely, Lex tries to
11 do headFS of an empty FastString.
13 An example that provokes the error is
15 f _:_ _forall_ [a] <<<END OF FILE>>>
16 --------------------------------------------------------
19 {-# OPTIONS -#include "ctypes.h" #-}
26 Token(..), lexer, ParseResult(..), PState(..),
30 P, thenP, thenP_, returnP, mapP, failP, failMsgP,
31 getSrcLocP, getSrcFile,
32 layoutOn, layoutOff, pushContext, popContext
35 #include "HsVersions.h"
37 import Char ( ord, isSpace, toUpper )
38 import List ( isSuffixOf )
40 import IdInfo ( InlinePragInfo(..), CprInfo(..) )
41 import Name ( isLowerISO, isUpperISO )
42 import PrelMods ( mkTupNameStr, mkUbxTupNameStr )
43 import CmdLineOpts ( opt_IgnoreIfacePragmas, opt_HiVersion, opt_NoHiCheck )
44 import Demand ( Demand(..) {- instance Read -} )
45 import UniqFM ( UniqFM, listToUFM, lookupUFM)
46 import BasicTypes ( NewOrData(..) )
47 import SrcLoc ( SrcLoc, incSrcLine, srcLocFile, srcLocLine,
48 replaceSrcLine, mkSrcLoc )
50 import Maybes ( MaybeErr(..) )
51 import ErrUtils ( Message )
59 #if __GLASGOW_HASKELL__ >= 303
66 import PrelRead ( readRational__ ) -- Glasgow non-std
69 %************************************************************************
71 \subsection{Data types}
73 %************************************************************************
75 The token data type, fairly un-interesting except from one
76 constructor, @ITidinfo@, which is used to lazily lex id info (arity,
77 strictness, unfolding etc).
79 The Idea/Observation here is that the renamer needs to scan through
80 all of an interface file before it can continue. But only a fraction
81 of the information contained in the file turns out to be useful, so
82 delaying as much as possible of the scanning and parsing of an
83 interface file Makes Sense (Heap profiles of the compiler
84 show a reduction in heap usage by at least a factor of two,
87 Hence, the interface file lexer spots when value declarations are
88 being scanned and returns the @ITidinfo@ and @ITtype@ constructors
89 for the type and any other id info for that binding (unfolding, strictness
90 etc). These constructors are applied to the result of lexing these sub-chunks.
92 The lexing of the type and id info is all done lazily, of course, so
93 the scanning (and subsequent parsing) will be done *only* on the ids the
94 renamer finds out that it is interested in. The rest will just be junked.
95 Laziness, you know it makes sense :-)
-- | Token type produced by the lexer. Constructors for Haskell keywords,
-- GHC-extension keywords, interface-file keywords, symbols, identifiers
-- and literals.
-- NOTE(review): the declaration head (`data Token`) and many constructors
-- are on lines missing from this view (embedded numbering jumps); the
-- visible fragment is kept byte-identical.
99 = ITas -- Haskell keywords
125 | ITforall -- GHC extension keywords
134 | ITinterface -- interface keywords
142 | ITccall (Bool,Bool,Bool) -- (is_dyn, is_casm, may_gc)
157 | ITunfold InlinePragInfo
158 | ITstrict ([Demand], Bool)
160 | ITcprinfo (CprInfo)
164 | ITspecialise_prag -- Pragmas
172 | ITdotdot -- reserved symbols
186 | ITbiglam -- GHC-extension symbols
188 | ITocurly -- special symbols
202 | ITvarid FAST_STRING -- identifiers
203 | ITconid FAST_STRING
204 | ITvarsym FAST_STRING
205 | ITconsym FAST_STRING
-- qualified names carry (module, name) pairs
206 | ITqvarid (FAST_STRING,FAST_STRING)
207 | ITqconid (FAST_STRING,FAST_STRING)
208 | ITqvarsym (FAST_STRING,FAST_STRING)
209 | ITqconsym (FAST_STRING,FAST_STRING)
-- an ITpragma carries the raw buffer so the pragma can be lexed lazily
210 
211 | ITpragma StringBuffer
214 | ITstring FAST_STRING
216 | ITrational Rational
219 | ITprimstring FAST_STRING
221 | ITprimfloat Rational
222 | ITprimdouble Rational
223 | ITlitlit FAST_STRING
225 | ITunknown String -- Used when the lexer can't make sense of it
226 | ITeof -- end of file token
227 deriving Text -- debugging
230 -----------------------------------------------------------------------------
-- | Table mapping pragma keyword strings (upper-cased by the caller)
-- to their tokens. Both British and American spellings are accepted.
-- NOTE(review): the closing bracket of the list is on a line missing
-- from this view.
234 pragmaKeywordsFM = listToUFM $
235 map (\ (x,y) -> (_PK_ x,y))
236 [( "SPECIALISE", ITspecialise_prag ),
237 ( "SPECIALIZE", ITspecialise_prag ),
238 ( "SOURCE", ITsource_prag ),
239 ( "INLINE", ITinline_prag ),
240 ( "NOINLINE", ITnoinline_prag ),
241 ( "NOTINLINE", ITnoinline_prag ),
242 ( "LINE", ITline_prag ),
243 ( "RULES", ITrules_prag ),
244 ( "RULEZ", ITrules_prag ) -- american spelling :-)
-- | Table of the standard Haskell keywords (plus the special ids
-- "hiding"/"qualified"/"as" used in import declarations).
-- NOTE(review): several entries and the closing bracket are on lines
-- missing from this view.
247 haskellKeywordsFM = listToUFM $
248 map (\ (x,y) -> (_PK_ x,y))
249 [( "_", ITunderscore ),
252 ( "class", ITclass ),
254 ( "default", ITdefault ),
255 ( "deriving", ITderiving ),
258 ( "hiding", IThiding ),
260 ( "import", ITimport ),
262 ( "infix", ITinfix ),
263 ( "infixl", ITinfixl ),
264 ( "infixr", ITinfixr ),
265 ( "instance", ITinstance ),
267 ( "module", ITmodule ),
268 ( "newtype", ITnewtype ),
270 ( "qualified", ITqualified ),
273 ( "where", ITwhere ),
-- | Keywords only recognised with -fglasgow-exts, plus the `__`-prefixed
-- interface-file keywords (only seen when lexing .hi files).
-- NOTE(review): some entries and the closing bracket are on lines
-- missing from this view.
277 ghcExtensionKeywordsFM = listToUFM $
278 map (\ (x,y) -> (_PK_ x,y))
279 [ ( "forall", ITforall ),
280 ( "foreign", ITforeign ),
281 ( "export", ITexport ),
282 ( "label", ITlabel ),
283 ( "dynamic", ITdynamic ),
284 ( "unsafe", ITunsafe ),
285 ( "stdcall", ITstdcallconv),
286 ( "ccall", ITccallconv),
-- _ccall_ flags are (is_dyn, is_casm, may_gc); see ITccall above
287 ("_ccall_", ITccall (False, False, False)),
288 ("_ccall_GC_", ITccall (False, False, True)),
289 ("_casm_", ITccall (False, True, False)),
290 ("_casm_GC_", ITccall (False, True, True)),
292 -- interface keywords
293 ("__interface", ITinterface),
294 ("__export", IT__export),
295 ("__depends", ITdepends),
296 ("__forall", IT__forall),
297 ("__letrec", ITletrec),
298 ("__coerce", ITcoerce),
299 ("__inline_me", ITinlineMe),
300 ("__inline_call", ITinlineCall),
-- NOTE(review): duplicate entry -- "__depends" already appears above.
-- Harmless (same token) but redundant; confirm before removing.
301 ("__depends", ITdepends),
302 ("__DEFAULT", ITdefaultbranch),
304 ("__integer", ITinteger_lit),
305 ("__float", ITfloat_lit),
306 ("__rational", ITrational_lit),
307 ("__addr", ITaddr_lit),
308 ("__litlit", ITlit_lit),
309 ("__string", ITstring_lit),
312 ("__fuall", ITfuall),
314 ("__P", ITspecialise),
317 ("__U", ITunfold NoInlinePragInfo),
319 ("__ccall", ITccall (False, False, False)),
320 ("__ccall_GC", ITccall (False, False, True)),
321 ("__dyn_ccall", ITccall (True, False, False)),
322 ("__dyn_ccall_GC", ITccall (True, False, True)),
323 ("__casm", ITccall (False, True, False)),
324 ("__dyn_casm", ITccall (True, True, False)),
325 ("__casm_GC", ITccall (False, True, True)),
326 ("__dyn_casm_GC", ITccall (True, True, True)),
-- | Table of reserved operator symbols ("..", "::", "=", etc.).
-- NOTE(review): most entries are on lines missing from this view.
332 haskellKeySymsFM = listToUFM $
333 map (\ (x,y) -> (_PK_ x,y))
346 ,(".", ITdot) -- sadly, for 'forall a . t'
350 -----------------------------------------------------------------------------
355 - (glaexts) lexing an interface file or -fglasgow-exts
356 - (bol) pointer to beginning of line (for column calculations)
357 - (buf) pointer to beginning of token
358 - (buf) pointer to current char
359 - (atbol) flag indicating whether we're at the beginning of a line
-- | Main lexer entry point: skip whitespace and comments, keep the
-- line/column bookkeeping up to date, then deliver one token to `cont`.
-- NOTE(review): interior lines are missing from this view (embedded
-- numbering jumps); code kept byte-identical, comments only added.
362 lexer :: (Token -> P a) -> P a
363 lexer cont buf s@(PState{
365 glasgow_exts = glaexts,
371 -- first, start a new lexeme and lose all the whitespace
373 tab line bol atbol (stepOverLexeme buf)
375 line = srcLocLine loc
-- `tab` is the skipping loop: y = current line, bol = buffer index of the
-- beginning of the line, atbol = at-beginning-of-line flag (Int#).
377 tab y bol atbol buf = --trace ("tab: " ++ show (I# y) ++ " : " ++ show (currentChar buf)) $
378 case currentChar# buf of
381 if bufferExhausted (stepOn buf)
382 then cont ITeof buf s'
383 else trace "lexer: misplaced NUL?" $
384 tab y bol atbol (stepOn buf)
-- newline: bump line count, remember new beginning-of-line index
386 '\n'# -> let buf' = stepOn buf
387 in tab (y +# 1#) (currentIndex# buf') 1# buf'
389 -- find comments. This got harder in Haskell 98.
-- `trundle` counts dashes: "--", "---", ... open a line comment only if
-- not followed by another symbol char (Haskell 98 rule)
390 '-'# -> let trundle n =
391 let next = lookAhead# buf n in
392 if next `eqChar#` '-'# then trundle (n +# 1#)
393 else if is_symbol next || n <# 2#
395 else case untilChar# (stepOnBy# buf n) '\n'# of
396 { buf' -> tab y bol atbol (stepOverLexeme buf')
400 -- comments and pragmas. We deal with LINE pragmas here,
401 -- and throw out any unrecognised pragmas as comments. Any
402 -- pragmas we know about are dealt with later (after any layout
403 -- processing if necessary).
405 '{'# | lookAhead# buf 1# `eqChar#` '-'# ->
406 if lookAhead# buf 2# `eqChar#` '#'# then
407 if lookAhead# buf 3# `eqChar#` '#'# then is_a_token else
408 case expandWhile# is_space (setCurrentPos# buf 3#) of { buf1->
409 case expandWhile# is_ident (stepOverLexeme buf1) of { buf2->
410 let lexeme = mkFastString -- ToDo: too slow
411 (map toUpper (lexemeToString buf2)) in
412 case lookupUFM pragmaKeywordsFM lexeme of
413 Just ITline_prag -> line_prag (lexer cont) buf2 s'
414 Just other -> is_a_token
415 Nothing -> skip_to_end (stepOnBy# buf 2#)
418 else skip_to_end (stepOnBy# buf 2#)
420 skip_to_end buf = nested_comment (lexer cont) buf s'
422 -- tabs have been expanded beforehand
423 c | is_space c -> tab y bol atbol (stepOn buf)
424 | otherwise -> is_a_token
426 where s' = s{loc = replaceSrcLine loc y,
-- at beginning of line, layout processing (lexBOL) runs first
430 is_a_token | atbol /=# 0# = lexBOL cont buf s'
431 | otherwise = lexToken cont glaexts buf s'
433 -- {-# LINE .. #-} pragmas. yeuch.
-- Reads the line number (and optional file name) from a LINE pragma and
-- patches the SrcLoc in the parser state accordingly.
-- NOTE(review): the definition head and some lines are missing from this
-- view; code kept byte-identical.
435 case expandWhile# is_space buf of { buf1 ->
436 case scanNumLit 0 (stepOverLexeme buf1) of { (line,buf2) ->
437 -- subtract one: the line number refers to the *following* line.
438 let real_line = line - 1 in
439 case fromInteger real_line of { i@(I# l) ->
440 case expandWhile# is_space buf2 of { buf3 ->
441 case currentChar# buf3 of
443 case untilEndOfString# (stepOn (stepOverLexeme buf3)) of { buf4 ->
-- a file name was given: replace both file and line in the SrcLoc
444 let file = lexemeToFastString buf4 in
445 \s@PState{loc=loc} -> skipToEnd buf4 s{loc = mkSrcLoc file i}
447 other -> \s@PState{loc=loc} -> skipToEnd buf3 s{loc = replaceSrcLine loc l}
-- the rest of the pragma is skipped like a nested comment
450 skipToEnd buf = nested_comment cont buf
-- | Skip a (possibly nested) {- ... -} comment, keeping the source
-- location up to date, then continue with `cont`. Nesting is handled by
-- recursively stacking another `nested_comment` continuation.
-- NOTE(review): one or two interior lines are missing from this view.
452 nested_comment :: P a -> P a
453 nested_comment cont buf = loop buf
456 case currentChar# buf of
-- NUL + exhausted buffer means the comment never closed
457 '\NUL'# | bufferExhausted (stepOn buf) ->
458 lexError "unterminated `{-'" buf
460 '-'# | lookAhead# buf 1# `eqChar#` '}'# ->
461 cont (stepOnBy# buf 2#)
-- a nested open: push another level
463 '{'# | lookAhead# buf 1# `eqChar#` '-'# ->
464 nested_comment (nested_comment cont) (stepOnBy# buf 2#)
466 '\n'# -> \ s@PState{loc=loc} ->
467 let buf' = stepOn buf in
468 nested_comment cont buf'
469 s{loc = incSrcLine loc, bol = currentIndex# buf',
472 _ -> nested_comment cont (stepOn buf)
474 -- When we are lexing the first token of a line, check whether we need to
475 -- insert virtual semicolons or close braces due to layout.
477 lexBOL :: (Token -> P a) -> P a
478 lexBOL cont buf s@(PState{
480 glasgow_exts = glaexts,
-- compare the token's column against the enclosing layout context:
-- emit a virtual '}' if we are to its left, a ';' if exactly on it
485 if need_close_curly then
486 --trace ("col = " ++ show (I# col) ++ ", layout: inserting '}'") $
487 cont ITvccurly buf s{atbol = 1#, context = tail ctx}
488 else if need_semi_colon then
489 --trace ("col = " ++ show (I# col) ++ ", layout: inserting ';'") $
490 cont ITsemi buf s{atbol = 0#}
492 lexToken cont glaexts buf s{atbol = 0#}
-- column of the current token = buffer index minus beginning-of-line index
494 col = currentIndex# buf -# bol
507 Layout n -> col ==# n
-- | Lex a single token, dispatching on the first character. `glaexts` is
-- an Int# flag: nonzero when -fglasgow-exts (or interface-file lexing) is
-- on, enabling unboxed tuples, primitive literals, `__` keywords etc.
-- NOTE(review): interior lines are missing from this view (embedded
-- numbering jumps); code kept byte-identical, comments only added.
510 lexToken :: (Token -> P a) -> Int# -> P a
511 lexToken cont glaexts buf =
513 case currentChar# buf of
515 -- special symbols ----------------------------------------------------
-- "(#" opens an unboxed tuple when glasgow-exts is on
516 '('# | flag glaexts && lookAhead# buf 1# `eqChar#` '#'#
517 -> cont IToubxparen (setCurrentPos# buf 2#)
519 -> cont IToparen (incLexeme buf)
521 ')'# -> cont ITcparen (incLexeme buf)
522 '['# -> cont ITobrack (incLexeme buf)
523 ']'# -> cont ITcbrack (incLexeme buf)
524 ','# -> cont ITcomma (incLexeme buf)
525 ';'# -> cont ITsemi (incLexeme buf)
-- '}' pops a layout context; error if there is none to pop
527 '}'# -> \ s@PState{context = ctx} ->
529 (_:ctx') -> cont ITccurly (incLexeme buf) s{context=ctx'}
530 _ -> lexError "too many '}'s" buf s
532 '#'# -> case lookAhead# buf 1# of
533 ')'# | flag glaexts -> cont ITcubxparen (setCurrentPos# buf 2#)
534 '-'# -> case lookAhead# buf 2# of
535 '}'# -> cont ITclose_prag (setCurrentPos# buf 3#)
536 _ -> lex_sym cont (incLexeme buf)
537 _ -> lex_sym cont (incLexeme buf)
-- `` starts a C "literal literal" under glasgow-exts
539 '`'# | flag glaexts && lookAhead# buf 1# `eqChar#` '`'#
540 -> lex_cstring cont (setCurrentPos# buf 2#)
542 -> cont ITbackquote (incLexeme buf)
544 '{'# -> -- look for "{-##" special iface pragma
545 case lookAhead# buf 1# of
546 '-'# -> case lookAhead# buf 2# of
547 '#'# -> case lookAhead# buf 3# of
550 = doDiscard False (stepOnBy# (stepOverLexeme buf) 4#) in
551 cont (ITpragma lexeme) buf'
552 _ -> lex_prag cont (setCurrentPos# buf 3#)
553 _ -> cont ITocurly (incLexeme buf)
-- explicit '{' disables layout for the new context
554 _ -> (layoutOff `thenP_` cont ITocurly) (incLexeme buf)
556 -- strings/characters -------------------------------------------------
557 '\"'#{-"-} -> lex_string cont glaexts "" (incLexeme buf)
558 '\''# -> lex_char (char_end cont) glaexts (incLexeme buf)
560 -- strictness and cpr pragmas and __scc treated specially.
561 '_'# | flag glaexts ->
562 case lookAhead# buf 1# of
563 '_'# -> case lookAhead# buf 2# of
565 lex_demand cont (stepOnUntil (not . isSpace)
566 (stepOnBy# buf 3#)) -- past __S
568 lex_cpr cont (stepOnUntil (not . isSpace)
569 (stepOnBy# buf 3#)) -- past __M
571 case prefixMatch (stepOnBy# buf 3#) "cc" of
572 Just buf' -> lex_scc cont (stepOverLexeme buf')
573 Nothing -> lex_id cont glaexts buf
574 _ -> lex_id cont glaexts buf
575 _ -> lex_id cont glaexts buf
577 -- Hexadecimal and octal constants
578 '0'# | (ch `eqChar#` 'x'# || ch `eqChar#` 'X'#) && is_hexdigit ch2
579 -> readNum (after_lexnum cont glaexts) buf' is_hexdigit 16 hex
580 | (ch `eqChar#` 'o'# || ch `eqChar#` 'O'#) && is_octdigit ch2
581 -> readNum (after_lexnum cont glaexts) buf' is_octdigit 8 oct_or_dec
582 where ch = lookAhead# buf 1#
583 ch2 = lookAhead# buf 2#
584 buf' = setCurrentPos# buf 2#
587 if bufferExhausted (stepOn buf) then
590 trace "lexIface: misplaced NUL?" $
591 cont (ITunknown "\NUL") (stepOn buf)
-- general classification fallback, via the char_types table
593 c | is_digit c -> lex_num cont glaexts 0 buf
594 | is_symbol c -> lex_sym cont buf
595 | is_upper c -> lex_con cont glaexts buf
596 | is_ident c -> lex_id cont glaexts buf
597 | otherwise -> lexError "illegal character" buf
599 -- Int# is unlifted, and therefore faster than Bool for flags.
605 -------------------------------------------------------------------------------
-- lex_prag: recognise the keyword of a {-# ... #-} pragma we already
-- decided we know about (see lexer above); panics otherwise since the
-- lexer should have filtered unknown pragmas out.
-- NOTE(review): the definition head is on a line missing from this view.
609 = case expandWhile# is_space buf of { buf1 ->
610 case expandWhile# is_ident (stepOverLexeme buf1) of { buf2 ->
611 let lexeme = mkFastString (map toUpper (lexemeToString buf2)) in
612 case lookupUFM pragmaKeywordsFM lexeme of
613 Just kw -> cont kw (mergeLexemes buf buf2)
614 Nothing -> panic "lex_prag"
617 -------------------------------------------------------------------------------
-- | Lex a string literal. `s` accumulates the characters in reverse;
-- on the closing quote it is reversed into a FastString. A trailing '#'
-- (glasgow-exts) makes it a primitive string literal.
-- NOTE(review): a few interior lines are missing from this view.
620 lex_string cont glaexts s buf
621 = case currentChar# buf of
623 let buf' = incLexeme buf; s' = mkFastString (reverse s) in
624 case currentChar# buf' of
625 '#'# | flag glaexts -> cont (ITprimstring s') (incLexeme buf')
626 _ -> cont (ITstring s') buf'
628 -- ignore \& in a string, deal with string gaps
629 '\\'# | next_ch `eqChar#` '&'#
630 -> lex_string cont glaexts s buf'
632 -> lex_stringgap cont glaexts s (incLexeme buf)
634 where next_ch = lookAhead# buf 1#
635 buf' = setCurrentPos# buf 2#
637 _ -> lex_char (lex_next_string cont s) glaexts buf
-- | Skip a string gap (backslash, whitespace, backslash); newlines inside
-- the gap still bump the source line number. Any non-space character in
-- the gap is a lexical error.
639 lex_stringgap cont glaexts s buf
640 = let buf' = incLexeme buf in
641 case currentChar# buf of
642 '\n'# -> \st@PState{loc = loc} -> lex_stringgap cont glaexts s buf'
643 st{loc = incSrcLine loc}
644 '\\'# -> lex_string cont glaexts s buf'
645 c | is_space c -> lex_stringgap cont glaexts s buf'
646 other -> charError buf'
-- | Continuation used by lex_string: push the freshly lexed character
-- onto the (reversed) accumulator and keep lexing the string.
lex_next_string cont s glaexts c = lex_string cont glaexts (c:s)
-- | Lex one character of a character/string literal: either an escape
-- sequence (after '\\') or a plain printable character.
650 lex_char :: (Int# -> Char -> P a) -> Int# -> P a
651 lex_char cont glaexts buf
652 = case currentChar# buf of
653 '\\'# -> lex_escape (cont glaexts) (incLexeme buf)
654 c | is_any c -> cont glaexts (C# c) (incLexeme buf)
655 other -> charError buf
-- | Finish a character literal: expect the closing quote; a trailing '#'
-- (guard on the missing line presumably checks glaexts) yields a
-- primitive char literal.
-- NOTE(review): one interior line (the '#'# guard) is missing from this view.
657 char_end cont glaexts c buf
658 = case currentChar# buf of
659 '\''# -> let buf' = incLexeme buf in
660 case currentChar# buf' of
662 -> cont (ITprimchar c) (incLexeme buf')
663 _ -> cont (ITchar c) buf'
-- lex_escape: decode a character escape sequence (after the backslash).
-- Handles single-letter escapes, control escapes (^X), numeric escapes
-- (hex/octal/decimal) and the named ASCII escapes in silly_escape_chars.
-- NOTE(review): the definition head and some lines are missing from this
-- view; code kept byte-identical.
667 = let buf' = incLexeme buf in
668 case currentChar# buf of
669 'a'# -> cont '\a' buf'
670 'b'# -> cont '\b' buf'
671 'f'# -> cont '\f' buf'
672 'n'# -> cont '\n' buf'
673 'r'# -> cont '\r' buf'
674 't'# -> cont '\t' buf'
675 'v'# -> cont '\v' buf'
676 '\\'# -> cont '\\' buf'
677 '"'# -> cont '\"' buf'
678 '\''# -> cont '\'' buf'
-- control escape: \^@ .. \^_ map onto codes 0..31
679 '^'# -> let c = currentChar# buf' in
680 if c `geChar#` '@'# && c `leChar#` '_'#
681 then cont (C# (chr# (ord# c -# ord# '@'#))) (incLexeme buf')
684 'x'# -> readNum (after_charnum cont) buf' is_hexdigit 16 hex
685 'o'# -> readNum (after_charnum cont) buf' is_octdigit 8 oct_or_dec
687 -> readNum (after_charnum cont) buf is_digit 10 oct_or_dec
-- named escapes: pick the first entry of silly_escape_chars that
-- prefix-matches the input
689 _ -> case [ (c,buf2) | (p,c) <- silly_escape_chars,
690 Just buf2 <- [prefixMatch buf p] ] of
691 (c,buf2):_ -> cont c buf2
-- | Validate a numeric character escape: only codes 0..255 are accepted
-- (the else branch, on a line missing from this view, presumably raises
-- charError).
694 after_charnum cont i buf
695 = let int = fromInteger i in
696 if i >= 0 && i <= 255
697 then cont (chr int) buf
-- | Read an unsigned number in the given base, using `is_digit` to decide
-- which characters belong to the literal and `conv` to map a digit char
-- to its value. The helpers below classify and convert hex/octal digits.
-- NOTE(review): several interior lines are missing from this view.
700 readNum cont buf is_digit base conv = read buf 0
702 = case currentChar# buf of { c ->
704 then read (incLexeme buf) (i*base + (toInteger (I# (conv c))))
710 || (c `geChar#` 'a'# && c `leChar#` 'f'#)
711 || (c `geChar#` 'A'# && c `leChar#` 'F'#)
713 hex c | is_digit c = ord# c -# ord# '0'#
714 | otherwise = ord# (to_lower c) -# ord# 'a'# +# 10#
715 oct_or_dec c = ord# c -# ord# '0'#
717 is_octdigit c = c `geChar#` '0'# && c `leChar#` '7'#
-- to_lower: ASCII-only lower-casing for hex digit conversion
720 | c `geChar#` 'A'# && c `leChar#` 'Z'#
721 = chr# (ord# c -# (ord# 'A'# -# ord# 'a'#))
-- | Shared lexical error for malformed character/string literal content.
724 charError buf = lexError "error in character literal" buf
-- Named ASCII escapes (\NUL, \SOH, ...); list body is on lines missing
-- from this view.
726 silly_escape_chars = [
763 -------------------------------------------------------------------------------
-- | Lex a strictness-demand annotation from an interface file into an
-- ITstrict token: a list of Demands plus a "bottoming" flag (trailing 'B').
765 lex_demand cont buf =
766 case read_em [] buf of { (ls,buf') ->
767 case currentChar# buf' of
768 'B'# -> cont (ITstrict (ls, True )) (incLexeme buf')
769 _ -> cont (ITstrict (ls, False)) buf'
772 -- code snatched from Demand.lhs
-- read_em: accumulate Demand values until a non-demand char; letters
-- map onto Demand constructors, U/u/N/n open nested unpack demands.
-- NOTE(review): the read_em definition head is missing from this view.
774 case currentChar# buf of
775 'L'# -> read_em (WwLazy False : acc) (stepOn buf)
776 'A'# -> read_em (WwLazy True : acc) (stepOn buf)
777 'S'# -> read_em (WwStrict : acc) (stepOn buf)
778 'P'# -> read_em (WwPrim : acc) (stepOn buf)
779 'E'# -> read_em (WwEnum : acc) (stepOn buf)
780 ')'# -> (reverse acc, stepOn buf)
781 'U'# -> do_unpack DataType True acc (stepOnBy# buf 2#)
782 'u'# -> do_unpack DataType False acc (stepOnBy# buf 2#)
783 'N'# -> do_unpack NewType True acc (stepOnBy# buf 2#)
784 'n'# -> do_unpack NewType False acc (stepOnBy# buf 2#)
785 _ -> (reverse acc, buf)
-- recurse for the demands of the unpacked components, then continue
787 do_unpack new_or_data wrapper_unpacks acc buf
788 = case read_em [] buf of
789 (stuff, rest) -> read_em (WwUnpack new_or_data wrapper_unpacks stuff : acc) rest
-- lex_cpr: lex a CPR (constructed product result) annotation; the
-- top-level read_em must yield exactly one CPRInfo (see the ASSERT).
-- NOTE(review): the definition heads here are on lines missing from this
-- view; code kept byte-identical.
792 case read_em [] buf of { (cpr_inf,buf') ->
793 ASSERT ( null (tail cpr_inf) )
794 cont (ITcprinfo $ head cpr_inf) buf'
797 -- code snatched from lex_demand above
799 case currentChar# buf of
800 '-'# -> read_em (NoCPRInfo : acc) (stepOn buf)
801 '('# -> do_unpack acc (stepOn buf)
802 ')'# -> (reverse acc, stepOn buf)
803 _ -> (reverse acc, buf)
-- nested components become the children of a CPRInfo node
806 = case read_em [] buf of
807 (stuff, rest) -> read_em ((CPRInfo stuff) : acc) rest
-- lex_scc: after "__scc", a following 'C' means "set all CAFs cost
-- centre"; otherwise emit a plain ITscc.
-- NOTE(review): the definition head is on a line missing from this view.
811 case currentChar# buf of
812 'C'# -> cont ITsccAllCafs (incLexeme buf)
813 other -> cont ITscc buf
815 -----------------------------------------------------------------------------
-- | Lex a numeric literal: integer part via scanNumLit, then look for a
-- fractional part / exponent to decide integer vs rational, with '#'
-- suffixes selecting primitive literals under glasgow-exts.
-- NOTE(review): interior lines are missing from this view.
818 lex_num :: (Token -> P a) -> Int# -> Integer -> P a
819 lex_num cont glaexts acc buf =
820 case scanNumLit acc buf of
822 case currentChar# buf' of
-- a '.' followed by a digit makes it a floating literal
823 '.'# | is_digit (lookAhead# buf' 1#) ->
824 -- this case is not optimised at all, as the
825 -- presence of floating point numbers in interface
826 -- files is not that common. (ToDo)
827 case expandWhile# is_digit (incLexeme buf') of
828 buf2 -> -- points to first non digit char
830 let l = case currentChar# buf2 of
-- exponent part: optional sign, then digits
836 = let buf3 = incLexeme buf2 in
837 case currentChar# buf3 of
838 '-'# -> expandWhile# is_digit (incLexeme buf3)
839 '+'# -> expandWhile# is_digit (incLexeme buf3)
840 x | is_digit x -> expandWhile# is_digit buf3
843 v = readRational__ (lexemeToString l)
-- "#" -> ITprimfloat, "##" -> ITprimdouble (glasgow exts only)
845 in case currentChar# l of -- glasgow exts only
846 '#'# | flag glaexts -> let l' = incLexeme l in
847 case currentChar# l' of
848 '#'# -> cont (ITprimdouble v) (incLexeme l')
849 _ -> cont (ITprimfloat v) l'
850 _ -> cont (ITrational v) l
852 _ -> after_lexnum cont glaexts acc' buf'
-- | Finish an integer literal: a trailing '#' under glasgow-exts makes it
-- a primitive Int# literal, otherwise a plain Integer literal.
after_lexnum cont glaexts n buf =
  case currentChar# buf of
    '#'# | flag glaexts -> cont (ITprimint n) (incLexeme buf)
    _                   -> cont (ITinteger n) buf
859 -----------------------------------------------------------------------------
860 -- C "literal literal"s (i.e. things like ``NULL'', ``stdout'' etc.)
862 -- we lexemeToFastString on the bit between the ``''s, but include the
863 -- quotes in the full lexeme.
-- | Lex a C "literal literal" (``NULL'' etc.): scan to the closing '',
-- make the FastString from the text between the quotes (hence backing
-- the position up by 2), but keep the quotes in the full lexeme.
lex_cstring cont buf =
  case expandUntilMatch (stepOverLexeme buf) "\'\'" of
    Nothing   -> lexError "unterminated ``" buf
    Just end  ->
      cont (ITlitlit (lexemeToFastString (setCurrentPos# end (negateInt# 2#))))
           (mergeLexemes buf end)
872 ------------------------------------------------------------------------------
-- Character classification via a C-side lookup table (``char_types''),
-- one bit per class; is_ctype tests the given mask bit for a Char#.
-- The GHC >= 303 path uses Word8 bit operations; the older path does the
-- masking with raw primops.
-- NOTE(review): some lines (#else/#endif and two class definitions) are
-- missing from this view.
875 is_ident, is_symbol, is_any, is_upper, is_digit :: Char# -> Bool
877 {-# INLINE is_ctype #-}
878 #if __GLASGOW_HASKELL__ >= 303
879 is_ctype :: Word8 -> Char# -> Bool
880 is_ctype mask = \c ->
881 (indexWord8OffAddr (``char_types'' :: Addr) (ord (C# c)) .&. mask) /= 0
883 is_ctype :: Int -> Char# -> Bool
884 is_ctype (I# mask) = \c ->
885 let (A# ctype) = ``char_types'' :: Addr
886 flag_word = int2Word# (ord# (indexCharOffAddr# ctype (ord# c)))
888 (flag_word `and#` (int2Word# mask)) `neWord#` (int2Word# 0#)
-- class bits: 1=ident, 2=symbol, 8=space, 16=upper, 32=digit
891 is_ident = is_ctype 1
892 is_symbol = is_ctype 2
894 is_space = is_ctype 8
895 is_upper = is_ctype 16
896 is_digit = is_ctype 32
898 -----------------------------------------------------------------------------
899 -- identifiers, symbols etc.
-- | Lex an identifier: slurp ident chars (plus trailing '#'s under
-- glasgow-exts), then try the Haskell keyword table, then (with exts on)
-- the GHC extension keyword table, falling back to a var/con token.
-- NOTE(review): interior lines are missing from this view; the trailing
-- lines (928 onwards) belong to lex_sym, whose head is also missing.
901 lex_id cont glaexts buf =
902 case expandWhile# is_ident buf of { buf1 ->
904 case (if flag glaexts
905 then expandWhile# (eqChar# '#'#) buf1 -- slurp trailing hashes
906 else buf1) of { buf' ->
908 let lexeme = lexemeToFastString buf' in
910 case _scc_ "Lex.haskellKeyword" lookupUFM haskellKeywordsFM lexeme of {
911 Just kwd_token -> --trace ("hkeywd: "++_UNPK_(lexeme)) $
915 let var_token = cont (mk_var_token lexeme) buf' in
917 if not (flag glaexts)
921 case lookupUFM ghcExtensionKeywordsFM lexeme of {
922 Just kwd_token -> cont kwd_token buf';
-- lex_sym: slurp symbol chars, check the reserved-symbol table,
-- otherwise build a varsym/consym via mk_var_token
928 case expandWhile# is_symbol buf of
929 buf' -> case lookupUFM haskellKeySymsFM lexeme of {
930 Just kwd_token -> --trace ("keysym: "++unpackFS lexeme) $
931 cont kwd_token buf' ;
932 Nothing -> --trace ("sym: "++unpackFS lexeme) $
933 cont (mk_var_token lexeme) buf'
935 where lexeme = lexemeToFastString buf'
-- | Lex a constructor-like identifier; if a '.' follows (on the missing
-- case alternative) it may start a qualified name, handled by lex_qid
-- with just_a_conid as the fallback.
-- NOTE(review): interior lines are missing from this view.
938 lex_con cont glaexts buf =
939 case expandWhile# is_ident buf of { buf1 ->
940 case slurp_trailing_hashes buf1 glaexts of { buf' ->
942 case currentChar# buf' of
947 just_a_conid = --trace ("con: "++unpackFS lexeme) $
948 cont (ITconid lexeme) buf'
949 lexeme = lexemeToFastString buf'
950 munch = lex_qid cont glaexts lexeme (incLexeme buf') just_a_conid
-- | Lex the part after "Module." in a qualified name, with special cases
-- for the wired-in syntax M.[], M.(), M.(,,), M.(# #) and M.->;
-- `just_a_conid` is the fallback when it turns out not to be qualified.
-- NOTE(review): some case alternatives are on lines missing from this view.
953 lex_qid cont glaexts mod buf just_a_conid =
954 case currentChar# buf of
955 '['# -> -- Special case for []
956 case lookAhead# buf 1# of
957 ']'# -> cont (ITqconid (mod,SLIT("[]"))) (setCurrentPos# buf 2#)
960 '('# -> -- Special case for (,,,)
961 -- This *is* necessary to deal with e.g. "instance C PrelBase.(,,)"
962 case lookAhead# buf 1# of
963 '#'# | flag glaexts -> case lookAhead# buf 2# of
964 ','# -> lex_ubx_tuple cont mod (setCurrentPos# buf 3#)
967 ')'# -> cont (ITqconid (mod,SLIT("()"))) (setCurrentPos# buf 2#)
968 ','# -> lex_tuple cont mod (setCurrentPos# buf 2#) just_a_conid
971 '-'# -> case lookAhead# buf 1# of
972 '>'# -> cont (ITqconid (mod,SLIT("->"))) (setCurrentPos# buf 2#)
973 _ -> lex_id3 cont glaexts mod buf just_a_conid
974 _ -> lex_id3 cont glaexts mod buf just_a_conid
-- | Lex the name part of a qualified id M.x / M.<sym>: start a fresh
-- lexeme after the dot, slurp the name, and build a qualified token.
-- A qualified Haskell keyword (M.where) falls back to just_a_conid.
-- NOTE(review): interior lines are missing from this view.
976 lex_id3 cont glaexts mod buf just_a_conid
977 | is_symbol (currentChar# buf) =
979 start_new_lexeme = stepOverLexeme buf
981 case expandWhile# is_symbol start_new_lexeme of { buf' ->
983 lexeme = lexemeToFastString buf'
984 -- real lexeme is M.<sym>
985 new_buf = mergeLexemes buf buf'
987 cont (mk_qvar_token mod lexeme) new_buf
988 -- wrong, but arguably morally right: M... is now a qvarsym
-- the non-symbol case: a qualified varid/conid
993 start_new_lexeme = stepOverLexeme buf
995 case expandWhile# is_ident start_new_lexeme of { buf1 ->
1000 case slurp_trailing_hashes buf1 glaexts of { buf' ->
1003 lexeme = lexemeToFastString buf'
1004 new_buf = mergeLexemes buf buf'
1005 is_a_qvarid = cont (mk_qvar_token mod lexeme) new_buf
1007 case _scc_ "Lex.haskellKeyword" lookupUFM haskellKeywordsFM lexeme of {
1008 Just kwd_token -> just_a_conid; -- avoid M.where etc.
1009 Nothing -> is_a_qvarid
1010 -- TODO: special ids (as, qualified, hiding) shouldn't be
1011 -- recognised as keywords here. ie. M.as is a qualified varid.
-- | With glasgow-exts on, extend the lexeme over any trailing '#'
-- characters (for MagicHash-style names); the otherwise branch is on a
-- line missing from this view.
1015 slurp_trailing_hashes buf glaexts
1016 | flag glaexts = expandWhile# (`eqChar#` '#'#) buf
-- mk_var_token: classify a lexed name string into conid / varid /
-- consym / varsym by inspecting its first character(s).
-- NOTE(review): the definition head is on a line missing from this view.
1021 | is_upper f = ITconid pk_str
1022 -- _[A-Z] is treated as a constructor in interface files.
1023 | f `eqChar#` '_'# && not (_NULL_ tl)
1024 && (case _HEAD_ tl of { C# g -> is_upper g }) = ITconid pk_str
1025 | is_ident f = ITvarid pk_str
1026 | f `eqChar#` ':'# = ITconsym pk_str
1027 | otherwise = ITvarsym pk_str
1029 (C# f) = _HEAD_ pk_str
-- | Qualify a name token with its module: classify via mk_var_token and
-- wrap the result in the corresponding qualified constructor. Tokens that
-- are not id/sym forms degrade to ITunknown.
mk_qvar_token qual tok =
  case mk_var_token tok of
    ITvarid  n -> ITqvarid  (qual, n)
    ITconid  n -> ITqconid  (qual, n)
    ITvarsym n -> ITqvarsym (qual, n)
    ITconsym n -> ITqconsym (qual, n)
    _          -> ITunknown (show tok)
1041 ----------------------------------------------------------------------------
1042 Horrible stuff for dealing with M.(,,,)
-- lex_tuple / lex_ubx_tuple: count the commas of M.(,,,) / M.(#,,#) and
-- produce the corresponding wired-in tuple conid; `back_off` is the
-- fallback when the input turns out not to be a tuple name.
-- NOTE(review): the `go` loop heads and fallback alternatives are on
-- lines missing from this view.
1045 lex_tuple cont mod buf back_off =
1049 case currentChar# buf of
1050 ','# -> go (n+1) (stepOn buf)
1051 ')'# -> cont (ITqconid (mod, snd (mkTupNameStr n))) (stepOn buf)
1054 lex_ubx_tuple cont mod buf back_off =
1058 case currentChar# buf of
1059 ','# -> go (n+1) (stepOn buf)
1060 '#'# -> case lookAhead# buf 1# of
1061 ')'# -> cont (ITqconid (mod, snd (mkUbxTupNameStr n)))
1067 -----------------------------------------------------------------------------
1068 doDiscard rips along really fast, looking for a '#-}',
1069 indicating the end of the pragma we're skipping
-- | Skim forward to the '#-}' closing a pragma being skipped, tracking
-- whether we are inside a string literal (`inStr`) so that a "#-}" inside
-- a string does not end the scan. Returns the skipped lexeme and the
-- buffer positioned past the close.
-- NOTE(review): interior lines are missing from this view (including the
-- case alternative heads around 1074 and 1087-1088).
1072 doDiscard inStr buf =
1073 case currentChar# buf of
1075 case lookAhead# buf 1# of { '#'# ->
1076 case lookAhead# buf 2# of { '-'# ->
1077 case lookAhead# buf 3# of { '}'# ->
1078 (lexemeToBuffer buf, stepOverLexeme (setCurrentPos# buf 4#));
1079 _ -> doDiscard inStr (incLexeme buf) };
1080 _ -> doDiscard inStr (incLexeme buf) };
1081 _ -> doDiscard inStr (incLexeme buf) }
-- odd_slashes: walk backwards over consecutive backslashes to decide
-- whether the quote we just saw is escaped
1084 odd_slashes buf flg i# =
1085 case lookAhead# buf i# of
1086 '\\'# -> odd_slashes buf (not flg) (i# -# 1#)
1089 case lookAhead# buf (negateInt# 1#) of --backwards, actually
1090 '\\'# -> -- escaping something..
1091 if odd_slashes buf True (negateInt# 2#) then
1092 -- odd number of slashes, " is escaped.
1093 doDiscard inStr (incLexeme buf)
1095 -- even number of slashes, \ is escaped.
1096 doDiscard (not inStr) (incLexeme buf)
1097 _ -> case inStr of -- forced to avoid build-up
1098 True -> doDiscard False (incLexeme buf)
1099 False -> doDiscard True (incLexeme buf)
1100 _ -> doDiscard inStr (incLexeme buf)
1104 -----------------------------------------------------------------------------
-- | Parser/lexer state threaded through P: the glasgow-exts flag (Int#
-- for speed), the layout-context stack, and (on missing lines) the
-- source location, beginning-of-line index and atbol flag.
1115 data PState = PState {
1117 glasgow_exts :: Int#,
1120 context :: [LayoutContext]
-- P is a CPS-ish parser monad over the input StringBuffer and PState
1123 type P a = StringBuffer -- Input string
-- monadic return for P
1128 returnP a buf s = POk s a
-- | Monadic bind for P: run m, thread the state, pass the result to k;
-- failures short-circuit.
-- NOTE(review): the `case m buf s of` line is missing from this view.
1130 thenP :: P a -> (a -> P b) -> P b
1131 m `thenP` k = \ buf s ->
1133 POk s1 a -> k a buf s1
1134 PFailed err -> PFailed err
-- | Sequence two parsers, discarding the first one's result.
thenP_ :: P a -> P b -> P b
m `thenP_` k = m `thenP` const k
-- | Monadic map over a list in the P monad.
-- NOTE(review): the cons-case head and result line are missing from this view.
1139 mapP :: (a -> P b) -> [a] -> P [b]
1140 mapP f [] = returnP []
1143 mapP f as `thenP` \bs ->
-- | Fail with a plain string message (wrapped as a doc via `text`).
1146 failP :: String -> P a
1147 failP msg buf s = PFailed (text msg)
-- | Fail with an already-constructed Message.
1149 failMsgP :: Message -> P a
1150 failMsgP msg buf s = PFailed msg
-- | Fail with a lexical error message prefixed by the current SrcLoc.
1152 lexError :: String -> P a
1153 lexError str buf s@PState{ loc = loc }
1154 = failMsgP (hcat [ppr loc, text ": ", text str]) buf s
-- Small state accessors/mutators in the P monad.
-- | Current source location.
1156 getSrcLocP :: P SrcLoc
1157 getSrcLocP buf s@(PState{ loc = loc }) = POk s loc
-- | File name of the current source location.
1159 getSrcFile :: P FAST_STRING
1160 getSrcFile buf s@(PState{ loc = loc }) = POk s (srcLocFile loc)
-- | The current layout-context stack.
1162 getContext :: P [LayoutContext]
1163 getContext buf s@(PState{ context = ctx }) = POk s ctx
-- | Push a new layout context onto the stack.
1165 pushContext :: LayoutContext -> P ()
1166 pushContext ctxt buf s@(PState{ context = ctx }) = POk s{context = ctxt:ctx} ()
1170 This special case in layoutOn is to handle layout contexts which are
1171 indented the same or less than the current context. This is illegal
1172 according to the Haskell spec, so we have to arrange to close the
1173 current context. eg.
1178 after the first 'where', the sequence of events is:
1180 - layout system inserts a ';' (column 0)
1181 - parser begins a new context at column 0
1182 - parser shifts ';' (legal empty declaration)
1183 - parser sees 'class': parse error (we're still in the inner context)
1185 trouble is, by the time we know we need a new context, the lexer has
1186 already generated the ';'. Hacky solution is as follows: since we
1187 know the column of the next token (it's the column number of the new
1188 context), we set the ACTUAL column number of the new context to this
1189 number plus one. Hence the next time the lexer is called, a '}' will
1190 be generated to close the new context straight away. Furthermore, we
1191 have to set the atbol flag so that the ';' that the parser shifted as
1192 part of the new context is re-generated.
1194 when the new context is *less* indented than the current one:
1196 f = f where g = g where
1199 - current context: column 12.
1200 - on seeing 'h' (column 0), the layout system inserts '}'
1201 - parser starts a new context, column 0
1202 - parser sees '}', uses it to close new context
1203 - we still need to insert another '}' followed by a ';',
1204 hence the atbol trick.
1206 There's also a special hack in here to deal with
1213 i.e. the inner context is at the same indentation level as the outer
1214 context. This is strictly illegal according to Haskell 98, but
1215 there's a lot of existing code using this style and it doesn't make
1216 any sense to disallow it, since empty 'do' lists don't make sense.
-- | Open a new layout context at the current token's column. If the new
-- context is not more indented than the enclosing one (>= under `strict`,
-- > otherwise), record it at column+1 and set atbol so a closing '}' and
-- ';' get generated straight away -- see the long comment above.
-- NOTE(review): the case head inspecting the enclosing context is on
-- lines missing from this view.
1219 layoutOn :: Bool -> P ()
1220 layoutOn strict buf s@(PState{ bol = bol, context = ctx }) =
1221 let offset = lexemeIndex buf -# bol in
1224 | if strict then prev_off >=# offset else prev_off ># offset ->
1225 --trace ("layout on, column: " ++ show (I# offset)) $
1226 POk s{ context = Layout (offset +# 1#) : ctx, atbol = 1# } ()
1228 --trace ("layout on, column: " ++ show (I# offset)) $
1229 POk s{ context = Layout offset : ctx } ()
-- | Push a NoLayout context (entered on an explicit open brace).
1232 layoutOff buf s@(PState{ context = ctx }) =
1233 POk s{ context = NoLayout:ctx } ()
-- | Pop the innermost layout context; panics if the stack is empty.
-- NOTE(review): the `case ctx of` line is missing from this view.
1236 popContext = \ buf s@(PState{ context = ctx }) ->
1238 (_:tl) -> POk s{ context = tl } ()
1239 [] -> panic "Lex.popContext: empty context"
1242 Note that if the name of the file we're processing ends
1243 with `hi-boot', we accept it on faith as having the right
1244 version. This is done so that .hi-boot files that come
1245 with hsc don't have to be updated before every release,
1246 *and* it allows us to share .hi-boot files with versions
1247 of hsc that don't have .hi version checking (e.g., ghc-2.10's)
1249 If the version number is 0, the checking is also turned off.
1250 (needed to deal with GHC.hi only!)
1252 Once we can assume we're compiling with a version of ghc that
1253 supports interface file checking, we can drop the special
-- | Check an interface-file version stamp against the compiler's
-- opt_HiVersion. Accepted when: the stamp is 0 (checking disabled, needed
-- for GHC.hi), it matches exactly, -fno-hi-version-check is set, or there
-- is no stamp at all but the file is an .hi-boot file (taken on faith --
-- see the comment above).
checkVersion :: Maybe Integer -> P ()
checkVersion mb buf s@(PState{loc = loc})
  | acceptable = POk s ()
  | otherwise  = PFailed (ifaceVersionErr mb loc ([]::[Token]){-Todo-})
  where
    acceptable = case mb of
      Just v  -> v == 0 || v == fromInt opt_HiVersion || opt_NoHiCheck
      Nothing -> "hi-boot" `isSuffixOf` (_UNPK_ (srcLocFile loc))
1264 -----------------------------------------------------------------
-- Error-message builders for interface-file problems.
-- NOTE(review): the ifaceParseErr equation head and the `where pp_version`
-- lines are missing from this view.
1266 ifaceParseErr :: StringBuffer -> SrcLoc -> Message
1268 = hsep [ppr l, ptext SLIT("Interface file parse error; on input `"),
1269 text (lexemeToString s), char '\'']
-- | Version-mismatch message: reports expected vs found version.
1271 ifaceVersionErr hi_vers l toks
1272 = hsep [ppr l, ptext SLIT("Interface file version error;"),
1273 ptext SLIT("Expected"), int opt_HiVersion,
1274 ptext SLIT("found "), pp_version]
1278 Nothing -> ptext SLIT("pre ghc-3.02 version")
1279 Just v -> ptext SLIT("version") <+> integer v