2 % (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
4 \section[Lexical analysis]{Lexical analysis}
6 --------------------------------------------------------
8 There's a known bug in here:
10 If an interface file ends prematurely, Lex tries to
11 do headFS of an empty FastString.
13 An example that provokes the error is
15 f _:_ _forall_ [a] <<<END OF FILE>>>
16 --------------------------------------------------------
19 {-# OPTIONS -#include "ctypes.h" #-}
26 Token(..), lexer, ParseResult(..), PState(..),
30 P, thenP, thenP_, returnP, mapP, failP, failMsgP,
31 getSrcLocP, getSrcFile,
32 layoutOn, layoutOff, pushContext, popContext
35 #include "HsVersions.h"
37 import Char ( ord, isSpace, toUpper )
38 import List ( isSuffixOf )
40 import IdInfo ( InlinePragInfo(..), CprInfo(..) )
41 import Name ( isLowerISO, isUpperISO )
42 import PrelMods ( mkTupNameStr, mkUbxTupNameStr )
43 import CmdLineOpts ( opt_IgnoreIfacePragmas, opt_HiVersion, opt_NoHiCheck )
44 import Demand ( Demand(..) {- instance Read -} )
45 import UniqFM ( UniqFM, listToUFM, lookupUFM)
46 import BasicTypes ( NewOrData(..) )
47 import SrcLoc ( SrcLoc, incSrcLine, srcLocFile, srcLocLine,
48 replaceSrcLine, mkSrcLoc )
50 import Maybes ( MaybeErr(..) )
51 import ErrUtils ( Message )
59 #if __GLASGOW_HASKELL__ >= 303
66 import PrelRead ( readRational__ ) -- Glasgow non-std
69 %************************************************************************
71 \subsection{Data types}
73 %************************************************************************
75 The token data type, fairly un-interesting except from one
76 constructor, @ITidinfo@, which is used to lazily lex id info (arity,
77 strictness, unfolding etc).
79 The Idea/Observation here is that the renamer needs to scan through
80 all of an interface file before it can continue. But only a fraction
81 of the information contained in the file turns out to be useful, so
82 delaying as much as possible of the scanning and parsing of an
83 interface file Makes Sense (Heap profiles of the compiler
84 show a reduction in heap usage by at least a factor of two,
87 Hence, the interface file lexer spots when value declarations are
88 being scanned and returns the @ITidinfo@ and @ITtype@ constructors
89 for the type and any other id info for that binding (unfolding, strictness
90 etc). These constructors are applied to the result of lexing these sub-chunks.
92 The lexing of the type and id info is all done lazily, of course, so
93 the scanning (and subsequent parsing) will be done *only* on the ids the
94 renamer finds out that it is interested in. The rest will just be junked.
95 Laziness, you know it makes sense :-)
99 = ITas -- Haskell keywords
125 | ITforall -- GHC extension keywords
134 | ITinterface -- interface keywords
142 | ITccall (Bool,Bool,Bool) -- (is_dyn, is_casm, may_gc)
157 | ITunfold InlinePragInfo
158 | ITstrict ([Demand], Bool)
160 | ITcprinfo (CprInfo)
164 | ITspecialise_prag -- Pragmas
172 | ITdotdot -- reserved symbols
186 | ITbiglam -- GHC-extension symbols
188 | ITocurly -- special symbols
202 | ITvarid FAST_STRING -- identifiers
203 | ITconid FAST_STRING
204 | ITvarsym FAST_STRING
205 | ITconsym FAST_STRING
206 | ITqvarid (FAST_STRING,FAST_STRING)
207 | ITqconid (FAST_STRING,FAST_STRING)
208 | ITqvarsym (FAST_STRING,FAST_STRING)
209 | ITqconsym (FAST_STRING,FAST_STRING)
211 | ITpragma StringBuffer
214 | ITstring FAST_STRING
216 | ITrational Rational
219 | ITprimstring FAST_STRING
221 | ITprimfloat Rational
222 | ITprimdouble Rational
223 | ITlitlit FAST_STRING
225 | ITunknown String -- Used when the lexer can't make sense of it
226 | ITeof -- end of file token
227 deriving Text -- debugging
230 -----------------------------------------------------------------------------
234 pragmaKeywordsFM = listToUFM $
235 map (\ (x,y) -> (_PK_ x,y))
236 [( "SPECIALISE", ITspecialise_prag ),
237 ( "SPECIALIZE", ITspecialise_prag ),
238 ( "SOURCE", ITsource_prag ),
239 ( "INLINE", ITinline_prag ),
240 ( "NOINLINE", ITnoinline_prag ),
241 ( "NOTINLINE", ITnoinline_prag ),
242 ( "LINE", ITline_prag ),
243 ( "RULES", ITrules_prag ),
244 ( "RULEZ", ITrules_prag ) -- american spelling :-)
247 haskellKeywordsFM = listToUFM $
248 map (\ (x,y) -> (_PK_ x,y))
249 [( "_", ITunderscore ),
252 ( "class", ITclass ),
254 ( "default", ITdefault ),
255 ( "deriving", ITderiving ),
258 ( "hiding", IThiding ),
260 ( "import", ITimport ),
262 ( "infix", ITinfix ),
263 ( "infixl", ITinfixl ),
264 ( "infixr", ITinfixr ),
265 ( "instance", ITinstance ),
267 ( "module", ITmodule ),
268 ( "newtype", ITnewtype ),
270 ( "qualified", ITqualified ),
273 ( "where", ITwhere ),
-- | Keyword table for GHC extensions (foreign imports, _ccall_ variants)
-- and the interface-file pseudo-keywords (the "__"-prefixed names).
-- Built with 'listToUFM' keyed on packed strings.
-- NOTE(review): some interior entries are missing from this view of the
-- file; the visible entries are documented as-is.
277 ghcExtensionKeywordsFM = listToUFM $
278 map (\ (x,y) -> (_PK_ x,y))
279 [ ( "forall", ITforall ),
280 ( "foreign", ITforeign ),
281 ( "export", ITexport ),
282 ( "label", ITlabel ),
283 ( "dynamic", ITdynamic ),
284 ( "unsafe", ITunsafe ),
285 ( "stdcall", ITstdcallconv),
286 ( "ccall", ITccallconv),
287 ("_ccall_", ITccall (False, False, False)),
288 ("_ccall_GC_", ITccall (False, False, True)),
289 ("_casm_", ITccall (False, True, False)),
290 ("_casm_GC_", ITccall (False, True, True)),
292 -- interface keywords
293 ("__interface", ITinterface),
294 ("__export", IT__export),
295 ("__depends", ITdepends),
296 ("__forall", IT__forall),
297 ("__letrec", ITletrec),
298 ("__coerce", ITcoerce),
299 ("__inline_me", ITinlineMe),
300 ("__inline_call", ITinlineCall),
-- NOTE(review): duplicate "__depends" entry -- also listed above (295).
-- listToUFM keeps only one binding per key, so this line is redundant
-- and could be dropped.
301 ("__depends", ITdepends),
302 ("__DEFAULT", ITdefaultbranch),
304 ("__integer", ITinteger_lit),
305 ("__float", ITfloat_lit),
306 ("__rational", ITrational_lit),
307 ("__addr", ITaddr_lit),
308 ("__litlit", ITlit_lit),
309 ("__string", ITstring_lit),
312 ("__fuall", ITfuall),
314 ("__P", ITspecialise),
317 ("__U", ITunfold NoInlinePragInfo),
-- The _ccall_/_casm_ flags are (is_dyn, is_casm, may_gc), per the
-- ITccall constructor's comment in the Token type.
319 ("__ccall", ITccall (False, False, False)),
320 ("__ccall_GC", ITccall (False, False, True)),
321 ("__dyn_ccall", ITccall (True, False, False)),
322 ("__dyn_ccall_GC", ITccall (True, False, True)),
323 ("__casm", ITccall (False, True, False)),
324 ("__dyn_casm", ITccall (True, True, False)),
325 ("__casm_GC", ITccall (False, True, True)),
326 ("__dyn_casm_GC", ITccall (True, True, True)),
332 haskellKeySymsFM = listToUFM $
333 map (\ (x,y) -> (_PK_ x,y))
346 ,(".", ITdot) -- sadly, for 'forall a . t'
350 -----------------------------------------------------------------------------
355 - (glaexts) lexing an interface file or -fglasgow-exts
356 - (bol) pointer to beginning of line (for column calculations)
357 - (buf) pointer to beginning of token
358 - (buf) pointer to current char
359 - (atbol) flag indicating whether we're at the beginning of a line
362 lexer :: (Token -> P a) -> P a
363 lexer cont buf s@(PState{
365 glasgow_exts = glaexts,
371 -- first, start a new lexeme and lose all the whitespace
372 = tab line bol atbol (stepOverLexeme buf)
374 line = srcLocLine loc
376 tab y bol atbol buf = --trace ("tab: " ++ show (I# y) ++ " : " ++ show (currentChar buf)) $
377 case currentChar# buf of
380 if bufferExhausted (stepOn buf)
381 then cont ITeof buf s'
382 else trace "lexer: misplaced NUL?" $
383 tab y bol atbol (stepOn buf)
385 '\n'# -> let buf' = stepOn buf
386 in tab (y +# 1#) (currentIndex# buf') 1# buf'
388 -- find comments. This got harder in Haskell 98.
389 '-'# -> let trundle n =
390 let next = lookAhead# buf n in
391 if next `eqChar#` '-'# then trundle (n +# 1#)
392 else if is_symbol next || n <# 2#
394 else case untilChar# (stepOnBy# buf n) '\n'# of
395 { buf' -> tab y bol atbol (stepOverLexeme buf')
399 -- comments and pragmas. We deal with LINE pragmas here,
400 -- and throw out any unrecognised pragmas as comments. Any
401 -- pragmas we know about are dealt with later (after any layout
402 -- processing if necessary).
404 '{'# | lookAhead# buf 1# `eqChar#` '-'# ->
405 if lookAhead# buf 2# `eqChar#` '#'# then
406 if lookAhead# buf 3# `eqChar#` '#'# then is_a_token else
407 case expandWhile# is_space (setCurrentPos# buf 3#) of { buf1->
408 case expandWhile# is_ident (stepOverLexeme buf1) of { buf2->
409 let lexeme = mkFastString -- ToDo: too slow
410 (map toUpper (lexemeToString buf2)) in
411 case lookupUFM pragmaKeywordsFM lexeme of
412 Just ITline_prag -> line_prag (lexer cont) buf2 s'
413 Just other -> is_a_token
414 Nothing -> skip_to_end (stepOnBy# buf 2#)
417 else skip_to_end (stepOnBy# buf 2#)
419 skip_to_end buf = nested_comment (lexer cont) buf s'
421 -- tabs have been expanded beforehand
422 c | is_space c -> tab y bol atbol (stepOn buf)
423 | otherwise -> is_a_token
425 where s' = s{loc = replaceSrcLine loc y,
429 is_a_token | atbol /=# 0# = lexBOL cont buf s'
430 | otherwise = lexToken cont glaexts buf s'
432 -- {-# LINE .. #-} pragmas. yeuch.
434 case expandWhile# is_space buf of { buf1 ->
435 case scanNumLit 0 (stepOverLexeme buf1) of { (line,buf2) ->
436 -- subtract one: the line number refers to the *following* line.
437 let real_line = line - 1 in
438 case fromInteger real_line of { i@(I# l) ->
439 case expandWhile# is_space buf2 of { buf3 ->
440 case currentChar# buf3 of
442 case untilEndOfString# (stepOn (stepOverLexeme buf3)) of { buf4 ->
443 let file = lexemeToFastString buf4 in
444 \s@PState{loc=loc} -> skipToEnd buf4 s{loc = mkSrcLoc file i}
446 other -> \s@PState{loc=loc} -> skipToEnd buf3 s{loc = replaceSrcLine loc l}
449 skipToEnd buf = nested_comment cont buf
451 nested_comment :: P a -> P a
452 nested_comment cont buf = loop buf
455 case currentChar# buf of
456 '\NUL'# | bufferExhausted (stepOn buf) ->
457 lexError "unterminated `{-'" buf
459 '-'# | lookAhead# buf 1# `eqChar#` '}'# ->
460 cont (stepOnBy# buf 2#)
462 '{'# | lookAhead# buf 1# `eqChar#` '-'# ->
463 nested_comment (nested_comment cont) (stepOnBy# buf 2#)
465 '\n'# -> \ s@PState{loc=loc} ->
466 let buf' = stepOn buf in
467 nested_comment cont buf'
468 s{loc = incSrcLine loc, bol = currentIndex# buf',
471 _ -> nested_comment cont (stepOn buf)
473 -- When we are lexing the first token of a line, check whether we need to
474 -- insert virtual semicolons or close braces due to layout.
476 lexBOL :: (Token -> P a) -> P a
477 lexBOL cont buf s@(PState{
479 glasgow_exts = glaexts,
484 if need_close_curly then
485 --trace ("col = " ++ show (I# col) ++ ", layout: inserting '}'") $
486 cont ITvccurly buf s{atbol = 1#, context = tail ctx}
487 else if need_semi_colon then
488 --trace ("col = " ++ show (I# col) ++ ", layout: inserting ';'") $
489 cont ITsemi buf s{atbol = 0#}
491 lexToken cont glaexts buf s{atbol = 0#}
493 col = currentIndex# buf -# bol
506 Layout n -> col ==# n
509 lexToken :: (Token -> P a) -> Int# -> P a
510 lexToken cont glaexts buf =
513 case currentChar# buf of
515 -- special symbols ----------------------------------------------------
516 '('# | flag glaexts && lookAhead# buf 1# `eqChar#` '#'#
517 -> cont IToubxparen (setCurrentPos# buf 2#)
519 -> cont IToparen (incLexeme buf)
521 ')'# -> cont ITcparen (incLexeme buf)
522 '['# -> cont ITobrack (incLexeme buf)
523 ']'# -> cont ITcbrack (incLexeme buf)
524 ','# -> cont ITcomma (incLexeme buf)
525 ';'# -> cont ITsemi (incLexeme buf)
527 '}'# -> \ s@PState{context = ctx} ->
529 (_:ctx') -> cont ITccurly (incLexeme buf) s{context=ctx'}
530 _ -> lexError "too many '}'s" buf s
532 '#'# -> case lookAhead# buf 1# of
533 ')'# | flag glaexts -> cont ITcubxparen (setCurrentPos# buf 2#)
534 '-'# -> case lookAhead# buf 2# of
535 '}'# -> cont ITclose_prag (setCurrentPos# buf 3#)
536 _ -> lex_sym cont (incLexeme buf)
537 _ -> lex_sym cont (incLexeme buf)
539 '`'# | flag glaexts && lookAhead# buf 1# `eqChar#` '`'#
540 -> lex_cstring cont (setCurrentPos# buf 2#)
542 -> cont ITbackquote (incLexeme buf)
544 '{'# -> -- look for "{-##" special iface pragma
545 case lookAhead# buf 1# of
546 '-'# -> case lookAhead# buf 2# of
547 '#'# -> case lookAhead# buf 3# of
550 = doDiscard False (stepOnBy# (stepOverLexeme buf) 4#) in
551 cont (ITpragma lexeme) buf'
552 _ -> lex_prag cont (setCurrentPos# buf 3#)
553 _ -> cont ITocurly (incLexeme buf)
554 _ -> (layoutOff `thenP_` cont ITocurly) (incLexeme buf)
556 -- strings/characters -------------------------------------------------
557 '\"'#{-"-} -> lex_string cont glaexts "" (incLexeme buf)
558 '\''# -> lex_char (char_end cont) glaexts (incLexeme buf)
560 -- strictness and cpr pragmas and __scc treated specially.
561 '_'# | flag glaexts ->
562 case lookAhead# buf 1# of
563 '_'# -> case lookAhead# buf 2# of
565 lex_demand cont (stepOnUntil (not . isSpace)
566 (stepOnBy# buf 3#)) -- past __S
568 lex_cpr cont (stepOnUntil (not . isSpace)
569 (stepOnBy# buf 3#)) -- past __M
571 case prefixMatch (stepOnBy# buf 3#) "cc" of
572 Just buf' -> lex_scc cont (stepOverLexeme buf')
573 Nothing -> lex_id cont glaexts buf
574 _ -> lex_id cont glaexts buf
575 _ -> lex_id cont glaexts buf
577 -- Hexadecimal and octal constants
578 '0'# | (ch `eqChar#` 'x'# || ch `eqChar#` 'X'#) && is_hexdigit ch2
579 -> readNum (after_lexnum cont glaexts) buf' is_hexdigit 16 hex
580 | (ch `eqChar#` 'o'# || ch `eqChar#` 'O'#) && is_octdigit ch2
581 -> readNum (after_lexnum cont glaexts) buf' is_octdigit 8 oct_or_dec
582 where ch = lookAhead# buf 1#
583 ch2 = lookAhead# buf 2#
584 buf' = setCurrentPos# buf 2#
587 if bufferExhausted (stepOn buf) then
590 trace "lexIface: misplaced NUL?" $
591 cont (ITunknown "\NUL") (stepOn buf)
593 c | is_digit c -> lex_num cont glaexts 0 buf
594 | is_symbol c -> lex_sym cont buf
595 | is_upper c -> lex_con cont glaexts buf
596 | is_ident c -> lex_id cont glaexts buf
597 | otherwise -> lexError "illegal character" buf
599 -- Int# is unlifted, and therefore faster than Bool for flags.
605 -------------------------------------------------------------------------------
609 = case expandWhile# is_space buf of { buf1 ->
610 case expandWhile# is_ident (stepOverLexeme buf1) of { buf2 ->
611 let lexeme = mkFastString (map toUpper (lexemeToString buf2)) in
612 case lookupUFM pragmaKeywordsFM lexeme of
613 Just kw -> cont kw (mergeLexemes buf buf2)
614 Nothing -> panic "lex_prag"
617 -------------------------------------------------------------------------------
620 lex_string cont glaexts s buf
621 = case currentChar# buf of
623 let buf' = incLexeme buf; s' = mkFastString (reverse s) in
624 case currentChar# buf' of
625 '#'# | flag glaexts -> cont (ITprimstring s') (incLexeme buf')
626 _ -> cont (ITstring s') buf'
628 -- ignore \& in a string, deal with string gaps
629 '\\'# | next_ch `eqChar#` '&'#
630 -> lex_string cont glaexts s buf'
632 -> lex_stringgap cont glaexts s (incLexeme buf)
634 where next_ch = lookAhead# buf 1#
635 buf' = setCurrentPos# buf 2#
637 _ -> lex_char (lex_next_string cont s) glaexts buf
639 lex_stringgap cont glaexts s buf
640 = let buf' = incLexeme buf in
641 case currentChar# buf of
642 '\n'# -> \st@PState{loc = loc} -> lex_stringgap cont glaexts s buf'
643 st{loc = incSrcLine loc}
644 '\\'# -> lex_string cont glaexts s buf'
645 c | is_space c -> lex_stringgap cont glaexts s buf'
646 other -> charError buf'
-- | Continuation used while lexing a string literal: prepend the freshly
-- lexed character 'c' onto the accumulator 's' (built in reverse) and
-- continue with 'lex_string'.
648 lex_next_string cont s glaexts c buf = lex_string cont glaexts (c:s) buf
-- | Lex a single character: a backslash starts an escape sequence
-- (handled by 'lex_escape'); any other character accepted by 'is_any'
-- is passed to the continuation directly; anything else is a lexical
-- error in a character literal.
650 lex_char :: (Int# -> Char -> P a) -> Int# -> P a
651 lex_char cont glaexts buf
652 = case currentChar# buf of
653 '\\'# -> lex_escape (cont glaexts) (incLexeme buf)
654 c | is_any c -> cont glaexts (C# c) (incLexeme buf)
655 other -> charError buf
-- | Finish a character literal: expects the closing single quote, then
-- emits either ITprimchar (unboxed char, GHC extension) or ITchar.
-- NOTE(review): a guard line is missing from this view between the
-- inner case and the ITprimchar branch -- presumably
-- "'#'# | flag glaexts" selecting the primchar case; confirm against
-- the full source.
657 char_end cont glaexts c buf
658 = case currentChar# buf of
659 '\''# -> let buf' = incLexeme buf in
660 case currentChar# buf' of
662 -> cont (ITprimchar c) (incLexeme buf')
663 _ -> cont (ITchar c) buf'
667 = let buf' = incLexeme buf in
668 case currentChar# buf of
669 'a'# -> cont '\a' buf'
670 'b'# -> cont '\b' buf'
671 'f'# -> cont '\f' buf'
672 'n'# -> cont '\n' buf'
673 'r'# -> cont '\r' buf'
674 't'# -> cont '\t' buf'
675 'v'# -> cont '\v' buf'
676 '\\'# -> cont '\\' buf'
677 '"'# -> cont '\"' buf'
678 '\''# -> cont '\'' buf'
679 '^'# -> let c = currentChar# buf' in
680 if c `geChar#` '@'# && c `leChar#` '_'#
681 then cont (C# (chr# (ord# c -# ord# '@'#))) (incLexeme buf')
684 'x'# -> readNum (after_charnum cont) buf' is_hexdigit 16 hex
685 'o'# -> readNum (after_charnum cont) buf' is_octdigit 8 oct_or_dec
687 -> readNum (after_charnum cont) buf is_digit 10 oct_or_dec
689 _ -> case [ (c,buf2) | (p,c) <- silly_escape_chars,
690 Just buf2 <- [prefixMatch buf p] ] of
691 (c,buf2):_ -> cont c buf2
694 after_charnum cont i buf
695 = let int = fromInteger i in
696 if i >= 0 && i <= 255
697 then cont (chr int) buf
700 readNum cont buf is_digit base conv = read buf 0
702 = case currentChar# buf of { c ->
704 then read (incLexeme buf) (i*base + (toInteger (I# (conv c))))
710 || (c `geChar#` 'a'# && c `leChar#` 'f'#)
711 || (c `geChar#` 'A'# && c `leChar#` 'F'#)
713 hex c | is_digit c = ord# c -# ord# '0'#
714 | otherwise = ord# (to_lower c) -# ord# 'a'# +# 10#
715 oct_or_dec c = ord# c -# ord# '0'#
717 is_octdigit c = c `geChar#` '0'# && c `leChar#` '7'#
720 | c `geChar#` 'A'# && c `leChar#` 'Z'#
721 = chr# (ord# c -# (ord# 'A'# -# ord# 'a'#))
-- | Report a malformed character literal at the current buffer position.
724 charError buf = lexError "error in character literal" buf
726 silly_escape_chars = [
763 -------------------------------------------------------------------------------
765 lex_demand cont buf =
766 case read_em [] buf of { (ls,buf') ->
767 case currentChar# buf' of
768 'B'# -> cont (ITstrict (ls, True )) (incLexeme buf')
769 _ -> cont (ITstrict (ls, False)) buf'
772 -- code snatched from Demand.lhs
774 case currentChar# buf of
775 'L'# -> read_em (WwLazy False : acc) (stepOn buf)
776 'A'# -> read_em (WwLazy True : acc) (stepOn buf)
777 'S'# -> read_em (WwStrict : acc) (stepOn buf)
778 'P'# -> read_em (WwPrim : acc) (stepOn buf)
779 'E'# -> read_em (WwEnum : acc) (stepOn buf)
780 ')'# -> (reverse acc, stepOn buf)
781 'U'# -> do_unpack DataType True acc (stepOnBy# buf 2#)
782 'u'# -> do_unpack DataType False acc (stepOnBy# buf 2#)
783 'N'# -> do_unpack NewType True acc (stepOnBy# buf 2#)
784 'n'# -> do_unpack NewType False acc (stepOnBy# buf 2#)
785 _ -> (reverse acc, buf)
787 do_unpack new_or_data wrapper_unpacks acc buf
788 = case read_em [] buf of
789 (stuff, rest) -> read_em (WwUnpack new_or_data wrapper_unpacks stuff : acc) rest
792 case read_em [] buf of { (cpr_inf,buf') ->
793 ASSERT ( null (tail cpr_inf) )
794 cont (ITcprinfo $ head cpr_inf) buf'
797 -- code snatched from lex_demand above
799 case currentChar# buf of
800 '-'# -> read_em (NoCPRInfo : acc) (stepOn buf)
801 '('# -> do_unpack acc (stepOn buf)
802 ')'# -> (reverse acc, stepOn buf)
803 _ -> (reverse acc, buf)
806 = case read_em [] buf of
807 (stuff, rest) -> read_em ((CPRInfo stuff) : acc) rest
811 case currentChar# buf of
812 'C'# -> cont ITsccAllCafs (incLexeme buf)
813 other -> cont ITscc buf
815 -----------------------------------------------------------------------------
818 lex_num :: (Token -> P a) -> Int# -> Integer -> P a
819 lex_num cont glaexts acc buf =
820 case scanNumLit acc buf of
822 case currentChar# buf' of
823 '.'# | is_digit (lookAhead# buf' 1#) ->
824 -- this case is not optimised at all, as the
825 -- presence of floating point numbers in interface
826 -- files is not that common. (ToDo)
827 case expandWhile# is_digit (incLexeme buf') of
828 buf2 -> -- points to first non digit char
830 let l = case currentChar# buf2 of
836 = let buf3 = incLexeme buf2 in
837 case currentChar# buf3 of
838 '-'# -> expandWhile# is_digit (incLexeme buf3)
839 '+'# -> expandWhile# is_digit (incLexeme buf3)
840 x | is_digit x -> expandWhile# is_digit buf3
843 v = readRational__ (lexemeToString l)
845 in case currentChar# l of -- glasgow exts only
846 '#'# | flag glaexts -> let l' = incLexeme l in
847 case currentChar# l' of
848 '#'# -> cont (ITprimdouble v) (incLexeme l')
849 _ -> cont (ITprimfloat v) l'
850 _ -> cont (ITrational v) l
852 _ -> after_lexnum cont glaexts acc' buf'
-- | After lexing an integer: a trailing '#' (with Glasgow extensions on)
-- makes it an unboxed-int literal (ITprimint), otherwise ITinteger.
854 after_lexnum cont glaexts i buf
855 = case currentChar# buf of
856 '#'# | flag glaexts -> cont (ITprimint i) (incLexeme buf)
857 _ -> cont (ITinteger i) buf
859 -----------------------------------------------------------------------------
860 -- C "literal literal"s (i.e. things like ``NULL'', ``stdout'' etc.)
862 -- we lexemeToFastString on the bit between the ``''s, but include the
863 -- quotes in the full lexeme.
-- | Lex a C "literal literal" (e.g. ``NULL''): scan until the closing
-- "''", take the text between the quotes (backing off 2 chars to drop
-- the closing quotes) as the ITlitlit payload, but keep the quotes in
-- the lexeme by merging the buffers.
865 lex_cstring cont buf =
866 case expandUntilMatch (stepOverLexeme buf) "\'\'" of
867 buf' -> cont (ITlitlit (lexemeToFastString
868 (setCurrentPos# buf' (negateInt# 2#))))
869 (mergeLexemes buf buf')
871 ------------------------------------------------------------------------------
874 is_ident, is_symbol, is_any, is_upper, is_digit :: Char# -> Bool
876 {-# INLINE is_ctype #-}
877 #if __GLASGOW_HASKELL__ >= 303
878 is_ctype :: Word8 -> Char# -> Bool
879 is_ctype mask = \c ->
880 (indexWord8OffAddr (``char_types'' :: Addr) (ord (C# c)) .&. mask) /= 0
882 is_ctype :: Int -> Char# -> Bool
883 is_ctype (I# mask) = \c ->
884 let (A# ctype) = ``char_types'' :: Addr
885 flag_word = int2Word# (ord# (indexCharOffAddr# ctype (ord# c)))
887 (flag_word `and#` (int2Word# mask)) `neWord#` (int2Word# 0#)
890 is_ident = is_ctype 1
891 is_symbol = is_ctype 2
893 is_space = is_ctype 8
894 is_upper = is_ctype 16
895 is_digit = is_ctype 32
897 -----------------------------------------------------------------------------
898 -- identifiers, symbols etc.
900 lex_id cont glaexts buf =
901 case expandWhile# is_ident buf of { buf1 ->
903 case (if flag glaexts
904 then expandWhile# (eqChar# '#'#) buf1 -- slurp trailing hashes
905 else buf1) of { buf' ->
907 let lexeme = lexemeToFastString buf' in
909 case _scc_ "Lex.haskellKeyword" lookupUFM haskellKeywordsFM lexeme of {
910 Just kwd_token -> --trace ("hkeywd: "++_UNPK_(lexeme)) $
914 let var_token = cont (mk_var_token lexeme) buf' in
916 if not (flag glaexts)
920 case lookupUFM ghcExtensionKeywordsFM lexeme of {
921 Just kwd_token -> cont kwd_token buf';
927 case expandWhile# is_symbol buf of
928 buf' -> case lookupUFM haskellKeySymsFM lexeme of {
929 Just kwd_token -> --trace ("keysym: "++unpackFS lexeme) $
930 cont kwd_token buf' ;
931 Nothing -> --trace ("sym: "++unpackFS lexeme) $
932 cont (mk_var_token lexeme) buf'
934 where lexeme = lexemeToFastString buf'
937 lex_con cont glaexts buf =
938 case expandWhile# is_ident buf of { buf1 ->
939 case slurp_trailing_hashes buf1 glaexts of { buf' ->
941 case currentChar# buf' of
946 just_a_conid = --trace ("con: "++unpackFS lexeme) $
947 cont (ITconid lexeme) buf'
948 lexeme = lexemeToFastString buf'
949 munch = lex_qid cont glaexts lexeme (incLexeme buf') just_a_conid
952 lex_qid cont glaexts mod buf just_a_conid =
953 case currentChar# buf of
954 '['# -> -- Special case for []
955 case lookAhead# buf 1# of
956 ']'# -> cont (ITqconid (mod,SLIT("[]"))) (setCurrentPos# buf 2#)
959 '('# -> -- Special case for (,,,)
960 -- This *is* necessary to deal with e.g. "instance C PrelBase.(,,)"
961 case lookAhead# buf 1# of
962 '#'# | flag glaexts -> case lookAhead# buf 2# of
963 ','# -> lex_ubx_tuple cont mod (setCurrentPos# buf 3#)
966 ')'# -> cont (ITqconid (mod,SLIT("()"))) (setCurrentPos# buf 2#)
967 ','# -> lex_tuple cont mod (setCurrentPos# buf 2#) just_a_conid
970 '-'# -> case lookAhead# buf 1# of
971 '>'# -> cont (ITqconid (mod,SLIT("->"))) (setCurrentPos# buf 2#)
972 _ -> lex_id3 cont glaexts mod buf just_a_conid
973 _ -> lex_id3 cont glaexts mod buf just_a_conid
975 lex_id3 cont glaexts mod buf just_a_conid
976 | is_symbol (currentChar# buf) =
978 start_new_lexeme = stepOverLexeme buf
980 case expandWhile# is_symbol start_new_lexeme of { buf' ->
982 lexeme = lexemeToFastString buf'
983 -- real lexeme is M.<sym>
984 new_buf = mergeLexemes buf buf'
986 cont (mk_qvar_token mod lexeme) new_buf
987 -- wrong, but arguably morally right: M... is now a qvarsym
992 start_new_lexeme = stepOverLexeme buf
994 case expandWhile# is_ident start_new_lexeme of { buf1 ->
999 case slurp_trailing_hashes buf1 glaexts of { buf' ->
1002 lexeme = lexemeToFastString buf'
1003 new_buf = mergeLexemes buf buf'
1004 is_a_qvarid = cont (mk_qvar_token mod lexeme) new_buf
1006 case _scc_ "Lex.haskellKeyword" lookupUFM haskellKeywordsFM lexeme of {
1007 Just kwd_token -> just_a_conid; -- avoid M.where etc.
1008 Nothing -> is_a_qvarid
1009 -- TODO: special ids (as, qualified, hiding) shouldn't be
1010 -- recognised as keywords here. ie. M.as is a qualified varid.
1014 slurp_trailing_hashes buf glaexts
1015 | flag glaexts = expandWhile# (`eqChar#` '#'#) buf
1020 | is_upper f = ITconid pk_str
1021 -- _[A-Z] is treated as a constructor in interface files.
1022 | f `eqChar#` '_'# && not (_NULL_ tl)
1023 && (case _HEAD_ tl of { C# g -> is_upper g }) = ITconid pk_str
1024 | is_ident f = ITvarid pk_str
1025 | f `eqChar#` ':'# = ITconsym pk_str
1026 | otherwise = ITvarsym pk_str
1028 (C# f) = _HEAD_ pk_str
-- | Qualify a token: run 'mk_var_token' on the name and pair the result
-- with module name 'm', mapping each unqualified token constructor to
-- its qualified (ITq...) counterpart. Any other token shape is
-- unexpected here and becomes ITunknown.
1031 mk_qvar_token m token =
1032 case mk_var_token token of
1033 ITconid n -> ITqconid (m,n)
1034 ITvarid n -> ITqvarid (m,n)
1035 ITconsym n -> ITqconsym (m,n)
1036 ITvarsym n -> ITqvarsym (m,n)
1037 _ -> ITunknown (show token)
1040 ----------------------------------------------------------------------------
1041 Horrible stuff for dealing with M.(,,,)
1044 lex_tuple cont mod buf back_off =
1048 case currentChar# buf of
1049 ','# -> go (n+1) (stepOn buf)
1050 ')'# -> cont (ITqconid (mod, snd (mkTupNameStr n))) (stepOn buf)
1053 lex_ubx_tuple cont mod buf back_off =
1057 case currentChar# buf of
1058 ','# -> go (n+1) (stepOn buf)
1059 '#'# -> case lookAhead# buf 1# of
1060 ')'# -> cont (ITqconid (mod, snd (mkUbxTupNameStr n)))
1066 -----------------------------------------------------------------------------
1067 doDiscard rips along really fast, looking for a '#-}',
1068 indicating the end of the pragma we're skipping
1071 doDiscard inStr buf =
1072 case currentChar# buf of
1074 case lookAhead# buf 1# of { '#'# ->
1075 case lookAhead# buf 2# of { '-'# ->
1076 case lookAhead# buf 3# of { '}'# ->
1077 (lexemeToBuffer buf, stepOverLexeme (setCurrentPos# buf 4#));
1078 _ -> doDiscard inStr (incLexeme buf) };
1079 _ -> doDiscard inStr (incLexeme buf) };
1080 _ -> doDiscard inStr (incLexeme buf) }
1083 odd_slashes buf flg i# =
1084 case lookAhead# buf i# of
1085 '\\'# -> odd_slashes buf (not flg) (i# -# 1#)
1088 case lookAhead# buf (negateInt# 1#) of --backwards, actually
1089 '\\'# -> -- escaping something..
1090 if odd_slashes buf True (negateInt# 2#) then
1091 -- odd number of slashes, " is escaped.
1092 doDiscard inStr (incLexeme buf)
1094 -- even number of slashes, \ is escaped.
1095 doDiscard (not inStr) (incLexeme buf)
1096 _ -> case inStr of -- forced to avoid build-up
1097 True -> doDiscard False (incLexeme buf)
1098 False -> doDiscard True (incLexeme buf)
1099 _ -> doDiscard inStr (incLexeme buf)
1103 -----------------------------------------------------------------------------
1114 data PState = PState {
1116 glasgow_exts :: Int#,
1119 context :: [LayoutContext]
1122 type P a = StringBuffer -- Input string
1127 returnP a buf s = POk s a
1129 thenP :: P a -> (a -> P b) -> P b
1130 m `thenP` k = \ buf s ->
1132 POk s1 a -> k a buf s1
1133 PFailed err -> PFailed err
1135 thenP_ :: P a -> P b -> P b
1136 m `thenP_` k = m `thenP` \_ -> k
1138 mapP :: (a -> P b) -> [a] -> P [b]
1139 mapP f [] = returnP []
1142 mapP f as `thenP` \bs ->
-- | Fail the parse with a plain string message (wrapped into a doc with
-- 'text'); the buffer and parser state are ignored.
1145 failP :: String -> P a
1146 failP msg buf s = PFailed (text msg)
-- | Fail the parse with a pre-built 'Message'; the buffer and parser
-- state are ignored.
1148 failMsgP :: Message -> P a
1149 failMsgP msg buf s = PFailed msg
-- | Fail with a lexical-error message prefixed by the current source
-- location taken from the parser state.
1151 lexError :: String -> P a
1152 lexError str buf s@PState{ loc = loc }
1153 = failMsgP (hcat [ppr loc, text ": ", text str]) buf s
-- | Return the current source location from the parser state.
1155 getSrcLocP :: P SrcLoc
1156 getSrcLocP buf s@(PState{ loc = loc }) = POk s loc
-- | Return the name of the file currently being lexed, extracted from
-- the source location in the parser state.
1158 getSrcFile :: P FAST_STRING
1159 getSrcFile buf s@(PState{ loc = loc }) = POk s (srcLocFile loc)
-- | Return the current stack of layout contexts.
1161 getContext :: P [LayoutContext]
1162 getContext buf s@(PState{ context = ctx }) = POk s ctx
-- | Push a new layout context onto the context stack in the parser state.
1164 pushContext :: LayoutContext -> P ()
1165 pushContext ctxt buf s@(PState{ context = ctxt':ctx }) is not intended; the
-- original simply conses: see below.
1165 pushContext ctxt buf s@(PState{ context = ctx }) = POk s{context = ctxt:ctx} ()
1169 This special case in layoutOn is to handle layout contexts with are
1170 indented the same or less than the current context. This is illegal
1171 according to the Haskell spec, so we have to arrange to close the
1172 current context. eg.
1177 after the first 'where', the sequence of events is:
1179 - layout system inserts a ';' (column 0)
1180 - parser begins a new context at column 0
1181 - parser shifts ';' (legal empty declaration)
1182 - parser sees 'class': parse error (we're still in the inner context)
1184 trouble is, by the time we know we need a new context, the lexer has
1185 already generated the ';'. Hacky solution is as follows: since we
1186 know the column of the next token (it's the column number of the new
1187 context), we set the ACTUAL column number of the new context to this
1188 number plus one. Hence the next time the lexer is called, a '}' will
1189 be generated to close the new context straight away. Furthermore, we
1190 have to set the atbol flag so that the ';' that the parser shifted as
1191 part of the new context is re-generated.
1193 when the new context is *less* indented than the current one:
1195 f = f where g = g where
1198 - current context: column 12.
1199 - on seeing 'h' (column 0), the layout system inserts '}'
1200 - parser starts a new context, column 0
1201 - parser sees '}', uses it to close new context
1202 - we still need to insert another '}' followed by a ';',
1203 hence the atbol trick.
1205 There's also a special hack in here to deal with
1212 i.e. the inner context is at the same indentation level as the outer
1213 context. This is strictly illegal according to Haskell 98, but
1214 there's a lot of existing code using this style and it doesn't make
1215 any sense to disallow it, since empty 'do' lists don't make sense.
1218 layoutOn :: Bool -> P ()
1219 layoutOn strict buf s@(PState{ bol = bol, context = ctx }) =
1220 let offset = lexemeIndex buf -# bol in
1223 | if strict then prev_off >=# offset else prev_off ># offset ->
1224 --trace ("layout on, column: " ++ show (I# offset)) $
1225 POk s{ context = Layout (offset +# 1#) : ctx, atbol = 1# } ()
1227 --trace ("layout on, column: " ++ show (I# offset)) $
1228 POk s{ context = Layout offset : ctx } ()
-- | Disable layout processing by pushing an explicit NoLayout context
-- (used when an explicit '{' is seen, see lexToken).
1231 layoutOff buf s@(PState{ context = ctx }) =
1232 POk s{ context = NoLayout:ctx } ()
-- | Pop the top layout context; it is a panic (compiler bug) to pop from
-- an empty context stack.
-- NOTE(review): the "case ctx of" scrutinee line is missing from this
-- view of the file between the two visible alternatives.
1235 popContext = \ buf s@(PState{ context = ctx }) ->
1237 (_:tl) -> POk s{ context = tl } ()
1238 [] -> panic "Lex.popContext: empty context"
1241 Note that if the name of the file we're processing ends
1242 with `hi-boot', we accept it on faith as having the right
1243 version. This is done so that .hi-boot files that come
1244 with hsc don't have to be updated before every release,
1245 *and* it allows us to share .hi-boot files with versions
1246 of hsc that don't have .hi version checking (e.g., ghc-2.10's)
1248 If the version number is 0, the checking is also turned off.
1249 (needed to deal with GHC.hi only!)
1251 Once we can assume we're compiling with a version of ghc that
1252 supports interface file checking, we can drop the special
-- | Check an interface-file version number against the compiler's
-- expected version ('opt_HiVersion'). Checking is skipped when the
-- version is 0 or -fno-hi-version-check is on. With no version at all
-- (Nothing), .hi-boot files are accepted on faith (see the note above);
-- anything else is a version error.
1255 checkVersion :: Maybe Integer -> P ()
1256 checkVersion mb@(Just v) buf s@(PState{loc = loc})
1257 | (v==0) || (v == fromInt opt_HiVersion) || opt_NoHiCheck = POk s ()
1258 | otherwise = PFailed (ifaceVersionErr mb loc ([]::[Token]){-Todo-})
1259 checkVersion mb@Nothing buf s@(PState{loc = loc})
1260 | "hi-boot" `isSuffixOf` (_UNPK_ (srcLocFile loc)) = POk s ()
1261 | otherwise = PFailed (ifaceVersionErr mb loc ([]::[Token]){-Todo-})
1263 -----------------------------------------------------------------
1265 ifaceParseErr :: StringBuffer -> SrcLoc -> Message
1267 = hsep [ppr l, ptext SLIT("Interface file parse error; on input `"),
1268 text (lexemeToString s), char '\'']
1270 ifaceVersionErr hi_vers l toks
1271 = hsep [ppr l, ptext SLIT("Interface file version error;"),
1272 ptext SLIT("Expected"), int opt_HiVersion,
1273 ptext SLIT("found "), pp_version]
1277 Nothing -> ptext SLIT("pre ghc-3.02 version")
1278 Just v -> ptext SLIT("version") <+> integer v