1 -----------------------------------------------------------------------------
2 -- (c) The University of Glasgow, 2006
6 -- This is a combination of an Alex-generated lexer from a regex
7 -- definition, with some hand-coded bits.
9 -- Completely accurate information about token-spans within the source
10 -- file is maintained. Every token has a start and end SrcLoc attached to it.
12 -----------------------------------------------------------------------------
16 -- - parsing integers is a bit slow
17 -- - readRational is a bit slow
19 -- Known bugs that were also in the previous version:
20 -- - M... should be 3 tokens, not 1.
21 -- - pragma-end should be only valid in a pragma
25 -- The above warning suppression flag is a temporary kludge.
26 -- While working on this module you are encouraged to remove it and fix
27 -- any warnings in the module. See
28 -- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
32 Token(..), lexer, pragState, mkPState, PState(..),
33 P(..), ParseResult(..), getSrcLoc,
34 failLocMsgP, failSpanMsgP, srcParseFail,
36 popContext, pushCurrentContext, setLastToken, setSrcLoc,
37 getLexState, popLexState, pushLexState,
38 extension, standaloneDerivingEnabled, bangPatEnabled,
42 #include "HsVersions.h"
54 import Util ( maybePrefixMatch, readRational )
58 import Data.Char ( chr, isSpace )
62 #if __GLASGOW_HASKELL__ >= 605
63 import Data.Char ( GeneralCategory(..), generalCategory, isPrint, isUpper )
65 import Compat.Unicode ( GeneralCategory(..), generalCategory, isPrint, isUpper )
69 $unispace = \x05 -- Trick Alex into handling Unicode. See alexGetChar.
70 $whitechar = [\ \n\r\f\v\xa0 $unispace]
71 $white_no_nl = $whitechar # \n
75 $unidigit = \x03 -- Trick Alex into handling Unicode. See alexGetChar.
76 $decdigit = $ascdigit -- for now, should really be $digit (ToDo)
77 $digit = [$ascdigit $unidigit]
79 $special = [\(\)\,\;\[\]\`\{\}]
80 $ascsymbol = [\!\#\$\%\&\*\+\.\/\<\=\>\?\@\\\^\|\-\~ \xa1-\xbf \xd7 \xf7]
81 $unisymbol = \x04 -- Trick Alex into handling Unicode. See alexGetChar.
82 $symbol = [$ascsymbol $unisymbol] # [$special \_\:\"\']
84 $unilarge = \x01 -- Trick Alex into handling Unicode. See alexGetChar.
85 $asclarge = [A-Z \xc0-\xd6 \xd8-\xde]
86 $large = [$asclarge $unilarge]
88 $unismall = \x02 -- Trick Alex into handling Unicode. See alexGetChar.
89 $ascsmall = [a-z \xdf-\xf6 \xf8-\xff]
90 $small = [$ascsmall $unismall \_]
92 $unigraphic = \x06 -- Trick Alex into handling Unicode. See alexGetChar.
93 $graphic = [$small $large $symbol $digit $special $unigraphic \:\"\']
96 $hexit = [$decdigit A-F a-f]
97 $symchar = [$symbol \:]
99 $idchar = [$small $large $digit \']
101 $docsym = [\| \^ \* \$]
103 @varid = $small $idchar*
104 @conid = $large $idchar*
106 @varsym = $symbol $symchar*
107 @consym = \: $symchar*
109 @decimal = $decdigit+
111 @hexadecimal = $hexit+
112 @exponent = [eE] [\-\+]? @decimal
114 -- we support the hierarchical module name extension:
117 @floating_point = @decimal \. @decimal @exponent? | @decimal @exponent
119 -- normal signed numerical literals can only be explicitly negative,
120 -- not explicitly positive (contrast @exponent)
122 @signed = @negative ?
126 -- everywhere: skip whitespace and comments
128 $tab+ { warn Opt_WarnTabs (text "Tab character") }
130 -- Everywhere: deal with nested comments. We explicitly rule out
131 -- pragmas, "{-#", so that we don't accidentally treat them as comments.
132 -- (this can happen even though pragmas will normally take precedence due to
133 -- longest-match, because pragmas aren't valid in every state, but comments
134 -- are). We also rule out nested Haddock comments, if the -haddock flag is
137 "{-" / { isNormalComment } { nested_comment lexToken }
139 -- Single-line comments are a bit tricky. Haskell 98 says that two or
140 -- more dashes followed by a symbol should be parsed as a varsym, so we
141 -- have to exclude those.
143 -- Since Haddock comments aren't valid in every state, we need to rule them
146 -- The following two rules match comments that begin with two dashes, but
147 -- continue with a different character. The rules test that this character
148 -- is not a symbol (in which case we'd have a varsym), and that it's not a
149 -- space followed by a Haddock comment symbol (docsym) (in which case we'd
150 -- have a Haddock comment). The rules then munch the rest of the line.
153 "--" [^$symbol : \ ] .* ;
155 -- Next, match Haddock comments if the -haddock flag is not set
157 "-- " $docsym .* / { ifExtension (not . haddockEnabled) } ;
159 -- Now, when we've matched comments that begin with 2 dashes and continue
160 -- with a different character, we need to match comments that begin with three
161 -- or more dashes (which clearly can't be Haddock comments). We only need to
162 -- make sure that the first non-dash character isn't a symbol, and munch the
165 "---"\-* [^$symbol :] .* ;
167 -- Since the previous rules all match dashes followed by at least one
168 -- character, we also need to match a whole line filled with just dashes.
170 "--"\-* / { atEOL } ;
172 -- We need this rule since none of the other single line comment rules
173 -- actually match this case.
177 -- 'bol' state: beginning of a line. Slurp up all the whitespace (including
178 -- blank lines) until we find a non-whitespace character, then do layout
181 -- One slight wibble here: what if the line begins with {-#? In
182 -- theory, we have to lex the pragma to see if it's one we recognise,
183 -- and if it is, then we backtrack and do_bol, otherwise we treat it
184 -- as a nested comment. We don't bother with this: if the line begins
185 -- with {-#, then we'll assume it's a pragma we know about and go for do_bol.
188 ^\# (line)? { begin line_prag1 }
189 ^\# pragma .* \n ; -- GCC 3.3 CPP generated, apparently
190 ^\# \! .* \n ; -- #!, for scripts
194 -- after a layout keyword (let, where, do, of), we begin a new layout
195 -- context if the curly brace is missing.
196 -- Careful! This stuff is quite delicate.
197 <layout, layout_do> {
198 \{ / { notFollowedBy '-' } { pop_and open_brace }
199 -- we might encounter {-# here, but {- has been handled already
201 ^\# (line)? { begin line_prag1 }
204 -- do is treated in a subtly different way, see new_layout_context
205 <layout> () { new_layout_context True }
206 <layout_do> () { new_layout_context False }
208 -- after a new layout context which was found to be to the left of the
209 -- previous context, we have generated a '{' token, and we now need to
210 -- generate a matching '}' token.
211 <layout_left> () { do_layout_left }
213 <0,option_prags> \n { begin bol }
215 "{-#" $whitechar* (line|LINE) { begin line_prag2 }
217 -- single-line line pragmas, of the form
218 -- # <line> "<file>" <extra-stuff> \n
219 <line_prag1> $decdigit+ { setLine line_prag1a }
220 <line_prag1a> \" [$graphic \ ]* \" { setFile line_prag1b }
221 <line_prag1b> .* { pop }
223 -- Haskell-style line pragmas, of the form
224 -- {-# LINE <line> "<file>" #-}
225 <line_prag2> $decdigit+ { setLine line_prag2a }
226 <line_prag2a> \" [$graphic \ ]* \" { setFile line_prag2b }
227 <line_prag2b> "#-}"|"-}" { pop }
228 -- NOTE: accept -} at the end of a LINE pragma, for compatibility
229 -- with older versions of GHC which generated these.
231 -- We only want RULES pragmas to be picked up when explicit forall
232 -- syntax is enabled, because the contents of the pragma always
233 -- use it. If it's not on then we're sure to get a parse error.
234 -- (ToDo: we should really emit a warning when ignoring pragmas)
235 -- XXX Now that we can enable this without the -fglasgow-exts hammer,
236 -- is it better just to let the parse error happen?
238 "{-#" $whitechar* (RULES|rules) / { ifExtension explicitForallEnabled } { token ITrules_prag }
241 "{-#" $whitechar* (INLINE|inline) { token (ITinline_prag True) }
242 "{-#" $whitechar* (NO(T?)INLINE|no(t?)inline)
243 { token (ITinline_prag False) }
244 "{-#" $whitechar* (SPECIALI[SZ]E|speciali[sz]e)
245 { token ITspec_prag }
246 "{-#" $whitechar* (SPECIALI[SZ]E|speciali[sz]e)
247 $whitechar* (INLINE|inline) { token (ITspec_inline_prag True) }
248 "{-#" $whitechar* (SPECIALI[SZ]E|speciali[sz]e)
249 $whitechar* (NO(T?)INLINE|no(t?)inline)
250 { token (ITspec_inline_prag False) }
251 "{-#" $whitechar* (SOURCE|source) { token ITsource_prag }
252 "{-#" $whitechar* (DEPRECATED|deprecated)
253 { token ITdeprecated_prag }
254 "{-#" $whitechar* (SCC|scc) { token ITscc_prag }
255 "{-#" $whitechar* (GENERATED|generated)
256 { token ITgenerated_prag }
257 "{-#" $whitechar* (CORE|core) { token ITcore_prag }
258 "{-#" $whitechar* (UNPACK|unpack) { token ITunpack_prag }
260 "{-#" $whitechar* (DOCOPTIONS|docoptions)
261 / { ifExtension haddockEnabled } { lex_string_prag ITdocOptions }
263 "{-#" { nested_comment lexToken }
265 -- ToDo: should only be valid inside a pragma:
266 "#-}" { token ITclose_prag}
270 "{-#" $whitechar* (OPTIONS|options) { lex_string_prag IToptions_prag }
271 "{-#" $whitechar* (OPTIONS_GHC|options_ghc)
272 { lex_string_prag IToptions_prag }
273 "{-#" $whitechar* (LANGUAGE|language) { token ITlanguage_prag }
274 "{-#" $whitechar* (INCLUDE|include) { lex_string_prag ITinclude_prag }
278 -- This is to catch things like {-# OPTIONS OPTIONS_HUGS ...
279 "{-#" $whitechar* $idchar+ { nested_comment lexToken }
282 -- '0' state: ordinary lexemes
287 "-- " $docsym / { ifExtension haddockEnabled } { multiline_doc_comment }
288 "{-" \ ? $docsym / { ifExtension haddockEnabled } { nested_doc_comment }
294 "[:" / { ifExtension parrEnabled } { token ITopabrack }
295 ":]" / { ifExtension parrEnabled } { token ITcpabrack }
299 "[|" / { ifExtension thEnabled } { token ITopenExpQuote }
300 "[e|" / { ifExtension thEnabled } { token ITopenExpQuote }
301 "[p|" / { ifExtension thEnabled } { token ITopenPatQuote }
302 "[d|" / { ifExtension thEnabled } { layout_token ITopenDecQuote }
303 "[t|" / { ifExtension thEnabled } { token ITopenTypQuote }
304 "|]" / { ifExtension thEnabled } { token ITcloseQuote }
305 \$ @varid / { ifExtension thEnabled } { skip_one_varid ITidEscape }
306 "$(" / { ifExtension thEnabled } { token ITparenEscape }
310 "(|" / { ifExtension arrowsEnabled `alexAndPred` notFollowedBySymbol }
311 { special IToparenbar }
312 "|)" / { ifExtension arrowsEnabled } { special ITcparenbar }
316 \? @varid / { ifExtension ipEnabled } { skip_one_varid ITdupipvarid }
320 "(#" / { ifExtension unboxedTuplesEnabled `alexAndPred` notFollowedBySymbol }
321 { token IToubxparen }
322 "#)" / { ifExtension unboxedTuplesEnabled }
323 { token ITcubxparen }
327 "{|" / { ifExtension genericsEnabled } { token ITocurlybar }
328 "|}" / { ifExtension genericsEnabled } { token ITccurlybar }
332 \( { special IToparen }
333 \) { special ITcparen }
334 \[ { special ITobrack }
335 \] { special ITcbrack }
336 \, { special ITcomma }
337 \; { special ITsemi }
338 \` { special ITbackquote }
345 @qual @varid { idtoken qvarid }
346 @qual @conid { idtoken qconid }
348 @conid { idtoken conid }
352 @qual @varid "#"+ / { ifExtension magicHashEnabled } { idtoken qvarid }
353 @qual @conid "#"+ / { ifExtension magicHashEnabled } { idtoken qconid }
354 @varid "#"+ / { ifExtension magicHashEnabled } { varid }
355 @conid "#"+ / { ifExtension magicHashEnabled } { idtoken conid }
361 @qual @varsym { idtoken qvarsym }
362 @qual @consym { idtoken qconsym }
367 -- For the normal boxed literals we need to be careful
368 -- when trying to stay close to Haskell 98
370 -- Normal integral literals (:: Num a => a, from Integer)
371 @decimal { tok_num positive 0 0 decimal }
372 0[oO] @octal { tok_num positive 2 2 octal }
373 0[xX] @hexadecimal { tok_num positive 2 2 hexadecimal }
375 -- Normal rational literals (:: Fractional a => a, from Rational)
376 @floating_point { strtoken tok_float }
380 -- Unboxed ints (:: Int#)
381 -- It's simpler (and faster?) to give separate cases to the negatives,
382 -- especially considering octal/hexadecimal prefixes.
383 @decimal \# / { ifExtension magicHashEnabled } { tok_primint positive 0 1 decimal }
384 0[oO] @octal \# / { ifExtension magicHashEnabled } { tok_primint positive 2 3 octal }
385 0[xX] @hexadecimal \# / { ifExtension magicHashEnabled } { tok_primint positive 2 3 hexadecimal }
386 @negative @decimal \# / { ifExtension magicHashEnabled } { tok_primint negative 1 2 decimal }
387 @negative 0[oO] @octal \# / { ifExtension magicHashEnabled } { tok_primint negative 3 4 octal }
388 @negative 0[xX] @hexadecimal \# / { ifExtension magicHashEnabled } { tok_primint negative 3 4 hexadecimal }
390 -- Unboxed floats and doubles (:: Float#, :: Double#)
391 -- prim_{float,double} work with signed literals
392 @signed @floating_point \# / { ifExtension magicHashEnabled } { init_strtoken 1 tok_primfloat }
393 @signed @floating_point \# \# / { ifExtension magicHashEnabled } { init_strtoken 2 tok_primdouble }
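-- For example, with the MagicHash extension enabled, 3.14# is lexed as a
-- Float# literal (ITprimfloat) and -2.5## as a Double# literal (ITprimdouble).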
396 -- Strings and chars are lexed by hand-written code. The reason is
397 -- that even if we recognise the string or char here in the regex
398 -- lexer, we would still have to parse the string afterward in order
399 -- to convert it to a String.
402 \" { lex_string_tok }
406 -- -----------------------------------------------------------------------------
410 = ITas -- Haskell keywords
434 | ITscc -- ToDo: remove (we use {-# SCC "..." #-} now)
436 | ITforall -- GHC extension keywords
451 | ITinline_prag Bool -- True <=> INLINE, False <=> NOINLINE
452 | ITspec_prag -- SPECIALISE
453 | ITspec_inline_prag Bool -- SPECIALISE INLINE (or NOINLINE)
460 | ITcore_prag -- hdaume: core annotations
463 | IToptions_prag String
464 | ITinclude_prag String
467 | ITdotdot -- reserved symbols
483 | ITbiglam -- GHC-extension symbols
485 | ITocurly -- special symbols
487 | ITocurlybar -- {|, for type applications
488 | ITccurlybar -- |}, for type applications
492 | ITopabrack -- [:, for parallel arrays with -fparr
493 | ITcpabrack -- :], for parallel arrays with -fparr
504 | ITvarid FastString -- identifiers
506 | ITvarsym FastString
507 | ITconsym FastString
508 | ITqvarid (FastString,FastString)
509 | ITqconid (FastString,FastString)
510 | ITqvarsym (FastString,FastString)
511 | ITqconsym (FastString,FastString)
513 | ITdupipvarid FastString -- GHC extension: implicit param: ?x
515 | ITpragma StringBuffer
518 | ITstring FastString
520 | ITrational Rational
523 | ITprimstring FastString
525 | ITprimfloat Rational
526 | ITprimdouble Rational
528 -- MetaHaskell extension tokens
529 | ITopenExpQuote -- [| or [e|
530 | ITopenPatQuote -- [p|
531 | ITopenDecQuote -- [d|
532 | ITopenTypQuote -- [t|
534 | ITidEscape FastString -- $x
535 | ITparenEscape -- $(
539 -- Arrow notation extension
546 | ITLarrowtail -- -<<
547 | ITRarrowtail -- >>-
549 | ITunknown String -- Used when the lexer can't make sense of it
550 | ITeof -- end of file token
552 -- Documentation annotations
553 | ITdocCommentNext String -- something beginning '-- |'
554 | ITdocCommentPrev String -- something beginning '-- ^'
555 | ITdocCommentNamed String -- something beginning '-- $'
556 | ITdocSection Int String -- a section heading
557 | ITdocOptions String -- doc options (prune, ignore-exports, etc)
560 deriving Show -- debugging
563 isSpecial :: Token -> Bool
564 -- If we see M.x, where x is a keyword, but
565 -- is special, we treat it as just plain M.x,
567 isSpecial ITas = True
568 isSpecial IThiding = True
569 isSpecial ITqualified = True
570 isSpecial ITforall = True
571 isSpecial ITexport = True
572 isSpecial ITlabel = True
573 isSpecial ITdynamic = True
574 isSpecial ITsafe = True
575 isSpecial ITthreadsafe = True
576 isSpecial ITunsafe = True
577 isSpecial ITccallconv = True
578 isSpecial ITstdcallconv = True
579 isSpecial ITmdo = True
580 isSpecial ITfamily = True
583 -- the bitmap provided as the third component indicates whether the
584 -- corresponding extension keyword is valid under the extension options
585 -- provided to the compiler; if the extension corresponding to *any* of the
586 -- bits set in the bitmap is enabled, the keyword is valid (this setup
587 -- facilitates using a keyword in two different extensions that can be
588 -- activated independently)
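-- As an illustration only (this binding is not used by the lexer): a keyword
-- whose third component is the union of two extension bits is accepted when
-- either of those extensions is enabled.  The bit numbers below are made up
-- for the example.
_exampleKeywordBitmapCheck :: Bool
_exampleKeywordBitmapCheck =
    let keyword_exts = bit 3 .|. bit 5 :: Int   -- hypothetical third component
        enabled_exts = bit 5 :: Int             -- only the second extension is on
    in  keyword_exts .&. enabled_exts /= 0      -- True: the keyword is recognised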
590 reservedWordsFM = listToUFM $
591 map (\(x, y, z) -> (mkFastString x, (y, z)))
592 [( "_", ITunderscore, 0 ),
594 ( "case", ITcase, 0 ),
595 ( "class", ITclass, 0 ),
596 ( "data", ITdata, 0 ),
597 ( "default", ITdefault, 0 ),
598 ( "deriving", ITderiving, 0 ),
600 ( "else", ITelse, 0 ),
601 ( "hiding", IThiding, 0 ),
603 ( "import", ITimport, 0 ),
605 ( "infix", ITinfix, 0 ),
606 ( "infixl", ITinfixl, 0 ),
607 ( "infixr", ITinfixr, 0 ),
608 ( "instance", ITinstance, 0 ),
610 ( "module", ITmodule, 0 ),
611 ( "newtype", ITnewtype, 0 ),
613 ( "qualified", ITqualified, 0 ),
614 ( "then", ITthen, 0 ),
615 ( "type", ITtype, 0 ),
616 ( "where", ITwhere, 0 ),
617 ( "_scc_", ITscc, 0 ), -- ToDo: remove
619 ( "forall", ITforall, bit explicitForallBit),
620 ( "mdo", ITmdo, bit recursiveDoBit),
621 ( "family", ITfamily, bit tyFamBit),
623 ( "foreign", ITforeign, bit ffiBit),
624 ( "export", ITexport, bit ffiBit),
625 ( "label", ITlabel, bit ffiBit),
626 ( "dynamic", ITdynamic, bit ffiBit),
627 ( "safe", ITsafe, bit ffiBit),
628 ( "threadsafe", ITthreadsafe, bit ffiBit),
629 ( "unsafe", ITunsafe, bit ffiBit),
630 ( "stdcall", ITstdcallconv, bit ffiBit),
631 ( "ccall", ITccallconv, bit ffiBit),
632 ( "dotnet", ITdotnet, bit ffiBit),
634 ( "rec", ITrec, bit arrowsBit),
635 ( "proc", ITproc, bit arrowsBit)
638 reservedSymsFM :: UniqFM (Token, Int -> Bool)
639 reservedSymsFM = listToUFM $
640 map (\ (x,y,z) -> (mkFastString x,(y,z)))
641 [ ("..", ITdotdot, always)
642 -- (:) is a reserved op, meaning only list cons
643 ,(":", ITcolon, always)
644 ,("::", ITdcolon, always)
645 ,("=", ITequal, always)
646 ,("\\", ITlam, always)
647 ,("|", ITvbar, always)
648 ,("<-", ITlarrow, always)
649 ,("->", ITrarrow, always)
651 ,("~", ITtilde, always)
652 ,("=>", ITdarrow, always)
653 ,("-", ITminus, always)
654 ,("!", ITbang, always)
656 -- For data T (a::*) = MkT
657 ,("*", ITstar, \i -> kindSigsEnabled i || tyFamEnabled i)
658 -- For 'forall a . t'
659 ,(".", ITdot, explicitForallEnabled)
661 ,("-<", ITlarrowtail, arrowsEnabled)
662 ,(">-", ITrarrowtail, arrowsEnabled)
663 ,("-<<", ITLarrowtail, arrowsEnabled)
664 ,(">>-", ITRarrowtail, arrowsEnabled)
666 #if __GLASGOW_HASKELL__ >= 605
667 ,("∷", ITdcolon, unicodeSyntaxEnabled)
668 ,("⇒", ITdarrow, unicodeSyntaxEnabled)
669 ,("∀", ITforall, \i -> unicodeSyntaxEnabled i &&
670 explicitForallEnabled i)
671 ,("→", ITrarrow, unicodeSyntaxEnabled)
672 ,("←", ITlarrow, unicodeSyntaxEnabled)
673 ,("⋯", ITdotdot, unicodeSyntaxEnabled)
674 -- ToDo: ideally, → and ∷ should be "specials", so that they cannot
675 -- form part of a large operator. This would let us have a better
676 -- syntax for kinds: ɑ∷*→* would be a legal kind signature. (maybe).
680 -- -----------------------------------------------------------------------------
683 type Action = SrcSpan -> StringBuffer -> Int -> P (Located Token)
685 special :: Token -> Action
686 special tok span _buf len = return (L span tok)
688 token, layout_token :: Token -> Action
689 token t span buf len = return (L span t)
690 layout_token t span buf len = pushLexState layout >> return (L span t)
692 idtoken :: (StringBuffer -> Int -> Token) -> Action
693 idtoken f span buf len = return (L span $! (f buf len))
695 skip_one_varid :: (FastString -> Token) -> Action
696 skip_one_varid f span buf len
697 = return (L span $! f (lexemeToFastString (stepOn buf) (len-1)))
699 strtoken :: (String -> Token) -> Action
700 strtoken f span buf len =
701 return (L span $! (f $! lexemeToString buf len))
703 init_strtoken :: Int -> (String -> Token) -> Action
704 -- like strtoken, but drops the last N character(s)
705 init_strtoken drop f span buf len =
706 return (L span $! (f $! lexemeToString buf (len-drop)))
708 begin :: Int -> Action
709 begin code _span _str _len = do pushLexState code; lexToken
712 pop _span _buf _len = do popLexState; lexToken
714 pop_and :: Action -> Action
715 pop_and act span buf len = do popLexState; act span buf len
717 {-# INLINE nextCharIs #-}
718 nextCharIs buf p = not (atEnd buf) && p (currentChar buf)
720 notFollowedBy char _ _ _ (AI _ _ buf)
721 = nextCharIs buf (/=char)
723 notFollowedBySymbol _ _ _ (AI _ _ buf)
724 = nextCharIs buf (`notElem` "!#$%&*+./<=>?@\\^|-~")
726 -- We must reject doc comments as being ordinary comments everywhere.
727 -- In some cases the doc comment will be selected as the lexeme due to
728 -- maximal munch, but not always, because the nested comment rule is
729 -- valid in all states, but the doc-comment rules are only valid in
730 -- the non-layout states.
731 isNormalComment bits _ _ (AI _ _ buf)
732 | haddockEnabled bits = notFollowedByDocOrPragma
733 | otherwise = nextCharIs buf (/='#')
735 notFollowedByDocOrPragma
736 = not $ spaceAndP buf (`nextCharIs` (`elem` "|^*$#"))
738 spaceAndP buf p = p buf || nextCharIs buf (==' ') && p (snd (nextChar buf))
740 haddockDisabledAnd p bits _ _ (AI _ _ buf)
741 = if haddockEnabled bits then False else (p buf)
743 atEOL _ _ _ (AI _ _ buf) = atEnd buf || currentChar buf == '\n'
745 ifExtension pred bits _ _ _ = pred bits
747 multiline_doc_comment :: Action
748 multiline_doc_comment span buf _len = withLexedDocType (worker "")
750 worker commentAcc input docType oneLine = case alexGetChar input of
752 | oneLine -> docCommentEnd input commentAcc docType buf span
753 | otherwise -> case checkIfCommentLine input' of
754 Just input -> worker ('\n':commentAcc) input docType False
755 Nothing -> docCommentEnd input commentAcc docType buf span
756 Just (c, input) -> worker (c:commentAcc) input docType oneLine
757 Nothing -> docCommentEnd input commentAcc docType buf span
759 checkIfCommentLine input = check (dropNonNewlineSpace input)
761 check input = case alexGetChar input of
762 Just ('-', input) -> case alexGetChar input of
763 Just ('-', input) -> case alexGetChar input of
764 Just (c, _) | c /= '-' -> Just input
769 dropNonNewlineSpace input = case alexGetChar input of
771 | isSpace c && c /= '\n' -> dropNonNewlineSpace input'
776 Nested comments require traversing by hand; they can't be parsed
777 using regular expressions.
779 nested_comment :: P (Located Token) -> Action
780 nested_comment cont span _str _len = do
784 go 0 input = do setInput input; cont
785 go n input = case alexGetChar input of
786 Nothing -> errBrace input span
787 Just ('-',input) -> case alexGetChar input of
788 Nothing -> errBrace input span
789 Just ('\125',input) -> go (n-1) input
790 Just (c,_) -> go n input
791 Just ('\123',input) -> case alexGetChar input of
792 Nothing -> errBrace input span
793 Just ('-',input) -> go (n+1) input
794 Just (c,_) -> go n input
795 Just (c,input) -> go n input
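-- A standalone sketch of the same idea over a plain String (not used by the
-- lexer, which works on the StringBuffer via alexGetChar above): the nesting
-- depth is tracked with a counter, which is exactly what a single regular
-- expression cannot express.  '\123' and '\125' are '{' and '}', written as
-- escapes for the same reason as in the code above.
_skipNestedCommentExample :: String -> Maybe String  -- the input after the comment
_skipNestedCommentExample ('\123':'-':rest) = go (1 :: Int) rest
  where
    go 0 cs              = Just cs
    go n ('-':'\125':cs) = go (n-1) cs    -- a close delimiter ends one level
    go n ('\123':'-':cs) = go (n+1) cs    -- an open delimiter starts another
    go n (_:cs)          = go n cs
    go _ []              = Nothing        -- unterminated comment
_skipNestedCommentExample _ = Nothing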
797 nested_doc_comment :: Action
798 nested_doc_comment span buf _len = withLexedDocType (go "")
800 go commentAcc input docType _ = case alexGetChar input of
801 Nothing -> errBrace input span
802 Just ('-',input) -> case alexGetChar input of
803 Nothing -> errBrace input span
804 Just ('\125',input@(AI end _ buf2)) ->
805 docCommentEnd input commentAcc docType buf span
806 Just (c,_) -> go ('-':commentAcc) input docType False
807 Just ('\123', input) -> case alexGetChar input of
808 Nothing -> errBrace input span
809 Just ('-',input) -> do
811 let cont = do input <- getInput; go commentAcc input docType False
812 nested_comment cont span buf _len
813 Just (c,_) -> go ('\123':commentAcc) input docType False
814 Just (c,input) -> go (c:commentAcc) input docType False
816 withLexedDocType lexDocComment = do
817 input@(AI _ _ buf) <- getInput
818 case prevChar buf ' ' of
819 '|' -> lexDocComment input ITdocCommentNext False
820 '^' -> lexDocComment input ITdocCommentPrev False
821 '$' -> lexDocComment input ITdocCommentNamed False
822 '*' -> lexDocSection 1 input
824 lexDocSection n input = case alexGetChar input of
825 Just ('*', input) -> lexDocSection (n+1) input
826 Just (c, _) -> lexDocComment input (ITdocSection n) True
827 Nothing -> do setInput input; lexToken -- eof reached, lex it normally
830 -------------------------------------------------------------------------------
831 -- This function is quite tricky. We can't just return a new token; we also
832 -- need to update the state of the parser. Why? Because the token is longer
833 -- than what was lexed by Alex, and the lexToken function doesn't know this, so
834 -- it writes the wrong token length to the parser state. This function is
835 -- called afterwards, so it can just update the state.
837 -- This is complicated by the fact that Haddock tokens can span multiple lines,
838 -- which is something that the original lexer didn't account for.
839 -- I have added last_line_len in the parser state which represents the length
840 -- of the part of the token that is on the last line. It is now used for layout
841 -- calculation in pushCurrentContext instead of last_len. last_len is, like it
842 -- was before, the full length of the token, and it is now only used for error
845 docCommentEnd :: AlexInput -> String -> (String -> Token) -> StringBuffer ->
846 SrcSpan -> P (Located Token)
847 docCommentEnd input commentAcc docType buf span = do
849 let (AI loc last_offs nextBuf) = input
850 comment = reverse commentAcc
851 span' = mkSrcSpan (srcSpanStart span) loc
852 last_len = byteDiff buf nextBuf
854 last_line_len = if (last_offs - last_len < 0)
858 span `seq` setLastToken span' last_len last_line_len
859 return (L span' (docType comment))
861 errBrace (AI end _ _) span = failLocMsgP (srcSpanStart span) end "unterminated `{-'"
863 open_brace, close_brace :: Action
864 open_brace span _str _len = do
866 setContext (NoLayout:ctx)
867 return (L span ITocurly)
868 close_brace span _str _len = do
870 return (L span ITccurly)
872 qvarid buf len = ITqvarid $! splitQualName buf len
873 qconid buf len = ITqconid $! splitQualName buf len
875 splitQualName :: StringBuffer -> Int -> (FastString,FastString)
876 -- takes a StringBuffer and a length, and returns the module name
877 -- and identifier parts of a qualified name. Splits at the *last* dot,
878 -- because of hierarchical module names.
879 splitQualName orig_buf len = split orig_buf orig_buf
882 | orig_buf `byteDiff` buf >= len = done dot_buf
883 | c == '.' = found_dot buf'
884 | otherwise = split buf' dot_buf
886 (c,buf') = nextChar buf
888 -- Careful: we might get names like M....
889 -- so, if the character after the dot is not upper-case, this is
890 -- the end of the qualifier part.
891 found_dot buf -- buf points after the '.'
892 | isUpper c = split buf' buf
893 | otherwise = done buf
895 (c,buf') = nextChar buf
898 (lexemeToFastString orig_buf (qual_size - 1),
899 lexemeToFastString dot_buf (len - qual_size))
901 qual_size = orig_buf `byteDiff` dot_buf
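-- For example, qualified lexemes are split like this:
--
--     "M.x"             ==>  ("M", "x")
--     "Data.Map.empty"  ==>  ("Data.Map", "empty")
--     "M...."           ==>  ("M", "...")
--
-- An illustrative String analogue of the rule above (not used by the lexer,
-- which works on the StringBuffer directly): remember the position just after
-- the last dot that was followed by an upper-case letter.  It assumes the
-- input contains at least one dot, as guaranteed by the @qual macro.
_splitQualNameString :: String -> (String, String)
_splitQualNameString str = walk str 0 0
  where
    walk [] _ lastDot = done lastDot
    walk ('.':c:_) pos _
      | not (isUpper c) = done (pos + 1)          -- qualifier ends at this dot
    walk ('.':cs) pos _ = walk cs (pos + 1) (pos + 1)
    walk (_:cs) pos lastDot = walk cs (pos + 1) lastDot
    done lastDot = (take (lastDot - 1) str, drop lastDot str)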
904 case lookupUFM reservedWordsFM fs of
905 Just (keyword,0) -> do
907 return (L span keyword)
908 Just (keyword,exts) -> do
909 b <- extension (\i -> exts .&. i /= 0)
910 if b then do maybe_layout keyword
911 return (L span keyword)
912 else return (L span (ITvarid fs))
913 _other -> return (L span (ITvarid fs))
915 fs = lexemeToFastString buf len
917 conid buf len = ITconid fs
918 where fs = lexemeToFastString buf len
920 qvarsym buf len = ITqvarsym $! splitQualName buf len
921 qconsym buf len = ITqconsym $! splitQualName buf len
923 varsym = sym ITvarsym
924 consym = sym ITconsym
926 sym con span buf len =
927 case lookupUFM reservedSymsFM fs of
928 Just (keyword,exts) -> do
930 if b then return (L span keyword)
931 else return (L span $! con fs)
932 _other -> return (L span $! con fs)
934 fs = lexemeToFastString buf len
936 -- Variations on the integral numeric literal.
937 tok_integral :: (Integer -> Token)
938 -> (Integer -> Integer)
939 -- -> (StringBuffer -> StringBuffer) -> (Int -> Int)
941 -> (Integer, (Char->Int)) -> Action
942 tok_integral itint transint transbuf translen (radix,char_to_int) span buf len =
943 return $ L span $ itint $! transint $ parseUnsignedInteger
944 (offsetBytes transbuf buf) (subtract translen len) radix char_to_int
946 -- some conveniences for use with tok_integral
947 tok_num = tok_integral ITinteger
948 tok_primint = tok_integral ITprimint
951 decimal = (10,octDecDigit)
952 octal = (8,octDecDigit)
953 hexadecimal = (16,hexDigit)
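-- For example, the rule   0[xX] @hexadecimal \#   above uses
--     tok_primint positive 2 3 hexadecimal
-- i.e. skip 2 leading bytes ("0x") before parsing and subtract 3 from the
-- length ("0x" plus the trailing '#'), so that parseUnsignedInteger sees
-- only the hexadecimal digits.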
955 -- readRational can understand negative rationals, exponents, everything.
956 tok_float str = ITrational $! readRational str
957 tok_primfloat str = ITprimfloat $! readRational str
958 tok_primdouble str = ITprimdouble $! readRational str
960 -- -----------------------------------------------------------------------------
963 -- we're at the first token on a line, insert layout tokens if necessary
965 do_bol span _str _len = do
969 --trace "layout: inserting '}'" $ do
971 -- do NOT pop the lex state, we might have a ';' to insert
972 return (L span ITvccurly)
974 --trace "layout: inserting ';'" $ do
976 return (L span ITsemi)
981 -- certain keywords put us in the "layout" state, where we might
982 -- add an opening curly brace.
983 maybe_layout ITdo = pushLexState layout_do
984 maybe_layout ITmdo = pushLexState layout_do
985 maybe_layout ITof = pushLexState layout
986 maybe_layout ITlet = pushLexState layout
987 maybe_layout ITwhere = pushLexState layout
988 maybe_layout ITrec = pushLexState layout
989 maybe_layout _ = return ()
991 -- Pushing a new implicit layout context. If the indentation of the
992 -- next token is not greater than the previous layout context, then
993 -- Haskell 98 says that the new layout context should be empty; that is
994 -- the lexer must generate {}.
996 -- We are slightly more lenient than this: when the new context is started
997 -- by a 'do', then we allow the new context to be at the same indentation as
998 -- the previous context. This is what the 'strict' argument is for.
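-- For example, in
--
--     f x = y  where
--     y = x
--
-- the token after 'where' ('y') is not indented further than the enclosing
-- top-level context, so under the Haskell 98 rule (strict = True) we return
-- ITvocurly and then, via the layout_left state, a matching ITvccurly: the
-- 'where' gets an empty binding group.  With 'do' (strict = False) a block
-- starting at the same indentation as the enclosing context is allowed, and
-- {} is only generated when it starts strictly to the left of it.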
1000 new_layout_context strict span _buf _len = do
1002 (AI _ offset _) <- getInput
1005 Layout prev_off : _ |
1006 (strict && prev_off >= offset ||
1007 not strict && prev_off > offset) -> do
1008 -- token is indented to the left of the previous context.
1009 -- we must generate a {} sequence now.
1010 pushLexState layout_left
1011 return (L span ITvocurly)
1013 setContext (Layout offset : ctx)
1014 return (L span ITvocurly)
1016 do_layout_left span _buf _len = do
1018 pushLexState bol -- we must be at the start of a line
1019 return (L span ITvccurly)
1021 -- -----------------------------------------------------------------------------
1024 setLine :: Int -> Action
1025 setLine code span buf len = do
1026 let line = parseUnsignedInteger buf len 10 octDecDigit
1027 setSrcLoc (mkSrcLoc (srcSpanFile span) (fromIntegral line - 1) 0)
1028 -- subtract one: the line number refers to the *following* line
1033 setFile :: Int -> Action
1034 setFile code span buf len = do
1035 let file = lexemeToFastString (stepOn buf) (len-2)
1036 setSrcLoc (mkSrcLoc file (srcSpanEndLine span) (srcSpanEndCol span))
1042 -- -----------------------------------------------------------------------------
1043 -- Options, includes and language pragmas.
1045 lex_string_prag :: (String -> Token) -> Action
1046 lex_string_prag mkTok span buf len
1047 = do input <- getInput
1051 return (L (mkSrcSpan start end) tok)
1053 = if isString input "#-}"
1054 then do setInput input
1055 return (mkTok (reverse acc))
1056 else case alexGetChar input of
1057 Just (c,i) -> go (c:acc) i
1058 Nothing -> err input
1059 isString i [] = True
1061 = case alexGetChar i of
1062 Just (c,i') | c == x -> isString i' xs
1064 err (AI end _ _) = failLocMsgP (srcSpanStart span) end "unterminated options pragma"
1067 -- -----------------------------------------------------------------------------
1070 -- This stuff is horrible. I hates it.
1072 lex_string_tok :: Action
1073 lex_string_tok span buf len = do
1074 tok <- lex_string ""
1076 return (L (mkSrcSpan (srcSpanStart span) end) tok)
1078 lex_string :: String -> P Token
1081 case alexGetChar' i of
1082 Nothing -> lit_error
1086 magicHash <- extension magicHashEnabled
1090 case alexGetChar' i of
1094 then failMsgP "primitive string literal must contain only characters <= \'\\xFF\'"
1095 else let s' = mkZFastString (reverse s) in
1096 return (ITprimstring s')
1097 -- mkZFastString is a hack to avoid encoding the
1098 -- string in UTF-8. We just want the exact bytes.
1100 return (ITstring (mkFastString (reverse s)))
1102 return (ITstring (mkFastString (reverse s)))
1105 | Just ('&',i) <- next -> do
1106 setInput i; lex_string s
1107 | Just (c,i) <- next, is_space c -> do
1108 setInput i; lex_stringgap s
1109 where next = alexGetChar' i
1115 lex_stringgap s = do
1118 '\\' -> lex_string s
1119 c | is_space c -> lex_stringgap s
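-- For example, in the literal "foo\   \bar" the backslash, white space and
-- closing backslash form a string gap, which denotes nothing, so the result
-- is "foobar".  Similarly "\&" denotes the empty string, which is how
-- "\137\&9" can be written without the digits running together.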
1123 lex_char_tok :: Action
1124 -- Here we are basically parsing character literals, such as 'x' or '\n'
1125 -- but, when Template Haskell is on, we additionally spot
1126 -- 'x and ''T, returning ITvarQuote and ITtyQuote respectively,
1127 -- but WITHOUT CONSUMING the x or T part (the parser does that).
1128 -- So we have to do two characters of lookahead: when we see 'x we need to
1129 -- see if there's a trailing quote
1130 lex_char_tok span buf len = do -- We've seen '
1131 i1 <- getInput -- Look ahead to first character
1132 let loc = srcSpanStart span
1133 case alexGetChar' i1 of
1134 Nothing -> lit_error
1136 Just ('\'', i2@(AI end2 _ _)) -> do -- We've seen ''
1137 th_exts <- extension thEnabled
1140 return (L (mkSrcSpan loc end2) ITtyQuote)
1143 Just ('\\', i2@(AI end2 _ _)) -> do -- We've seen 'backslash
1145 lit_ch <- lex_escape
1146 mc <- getCharOrFail -- Trailing quote
1147 if mc == '\'' then finish_char_tok loc lit_ch
1148 else do setInput i2; lit_error
1150 Just (c, i2@(AI end2 _ _))
1151 | not (isAny c) -> lit_error
1154 -- We've seen 'x, where x is a valid character
1155 -- (i.e. not newline etc) but not a quote or backslash
1156 case alexGetChar' i2 of -- Look ahead one more character
1157 Nothing -> lit_error
1158 Just ('\'', i3) -> do -- We've seen 'x'
1160 finish_char_tok loc c
1161 _other -> do -- We've seen 'x not followed by quote
1162 -- If TH is on, just parse the quote only
1163 th_exts <- extension thEnabled
1164 let (AI end _ _) = i1
1165 if th_exts then return (L (mkSrcSpan loc end) ITvarQuote)
1166 else do setInput i2; lit_error
1168 finish_char_tok :: SrcLoc -> Char -> P (Located Token)
1169 finish_char_tok loc ch -- We've already seen the closing quote
1170 -- Just need to check for trailing #
1171 = do magicHash <- extension magicHashEnabled
1172 i@(AI end _ _) <- getInput
1173 if magicHash then do
1174 case alexGetChar' i of
1175 Just ('#',i@(AI end _ _)) -> do
1177 return (L (mkSrcSpan loc end) (ITprimchar ch))
1179 return (L (mkSrcSpan loc end) (ITchar ch))
1181 return (L (mkSrcSpan loc end) (ITchar ch))
1183 lex_char :: Char -> AlexInput -> P Char
1186 '\\' -> do setInput inp; lex_escape
1187 c | isAny c -> do setInput inp; return c
1190 isAny c | c > '\xff' = isPrint c
1191 | otherwise = is_any c
1193 lex_escape :: P Char
1207 '^' -> do c <- getCharOrFail
1208 if c >= '@' && c <= '_'
1209 then return (chr (ord c - ord '@'))
1212 'x' -> readNum is_hexdigit 16 hexDigit
1213 'o' -> readNum is_octdigit 8 octDecDigit
1214 x | is_digit x -> readNum2 is_digit 10 octDecDigit (octDecDigit x)
1218 case alexGetChar' i of
1219 Nothing -> lit_error
1221 case alexGetChar' i2 of
1222 Nothing -> do setInput i2; lit_error
1224 let str = [c1,c2,c3] in
1225 case [ (c,rest) | (p,c) <- silly_escape_chars,
1226 Just rest <- [maybePrefixMatch p str] ] of
1227 (escape_char,[]):_ -> do
1230 (escape_char,_:_):_ -> do
1235 readNum :: (Char -> Bool) -> Int -> (Char -> Int) -> P Char
1236 readNum is_digit base conv = do
1240 then readNum2 is_digit base conv (conv c)
1241 else do setInput i; lit_error
1243 readNum2 is_digit base conv i = do
1246 where read i input = do
1247 case alexGetChar' input of
1248 Just (c,input') | is_digit c -> do
1249 read (i*base + conv c) input'
1251 if i >= 0 && i <= 0x10FFFF
1252 then do setInput input; return (chr i)
1255 silly_escape_chars = [
1292 -- before calling lit_error, ensure that the current input is pointing to
1293 -- the position of the error in the buffer. This is so that we can report
1294 -- a correct location to the user, but also so we can detect UTF-8 decoding
1295 -- errors if they occur.
1296 lit_error = lexError "lexical error in string/character literal"
1298 getCharOrFail :: P Char
1301 case alexGetChar' i of
1302 Nothing -> lexError "unexpected end-of-file in string/character literal"
1303 Just (c,i) -> do setInput i; return c
1305 -- -----------------------------------------------------------------------------
1308 warn :: DynFlag -> SDoc -> Action
1309 warn option warning srcspan _buf _len = do
1310 addWarning option srcspan warning
1313 -- -----------------------------------------------------------------------------
1324 SrcSpan -- The start and end of the text span related to
1325 -- the error. Might be used in environments which can
1326 -- show this span, e.g. by highlighting it.
1327 Message -- The error message
1329 data PState = PState {
1330 buffer :: StringBuffer,
1332 messages :: Messages,
1333 last_loc :: SrcSpan, -- pos of previous token
1334 last_offs :: !Int, -- offset of the previous token from the
1335 -- beginning of the current line.
1336 -- \t is equal to 8 spaces.
1337 last_len :: !Int, -- len of previous token
1338 last_line_len :: !Int,
1339 loc :: SrcLoc, -- current loc (end of prev token + 1)
1340 extsBitmap :: !Int, -- bitmap that determines permitted extensions
1341 context :: [LayoutContext],
1344 -- last_loc and last_len are used when generating error messages,
1345 -- and in pushCurrentContext only. Sigh, if only Happy passed the
1346 -- current token to happyError, we could at least get rid of last_len.
1347 -- Getting rid of last_loc would require finding another way to
1348 -- implement pushCurrentContext (which is only called from one place).
1350 newtype P a = P { unP :: PState -> ParseResult a }
1352 instance Monad P where
1358 returnP a = P $ \s -> POk s a
1360 thenP :: P a -> (a -> P b) -> P b
1361 (P m) `thenP` k = P $ \ s ->
1363 POk s1 a -> (unP (k a)) s1
1364 PFailed span err -> PFailed span err
1366 failP :: String -> P a
1367 failP msg = P $ \s -> PFailed (last_loc s) (text msg)
1369 failMsgP :: String -> P a
1370 failMsgP msg = P $ \s -> PFailed (last_loc s) (text msg)
1372 failLocMsgP :: SrcLoc -> SrcLoc -> String -> P a
1373 failLocMsgP loc1 loc2 str = P $ \s -> PFailed (mkSrcSpan loc1 loc2) (text str)
1375 failSpanMsgP :: SrcSpan -> String -> P a
1376 failSpanMsgP span msg = P $ \s -> PFailed span (text msg)
1378 extension :: (Int -> Bool) -> P Bool
1379 extension p = P $ \s -> POk s (p $! extsBitmap s)
1382 getExts = P $ \s -> POk s (extsBitmap s)
1384 setSrcLoc :: SrcLoc -> P ()
1385 setSrcLoc new_loc = P $ \s -> POk s{loc=new_loc} ()
1387 getSrcLoc :: P SrcLoc
1388 getSrcLoc = P $ \s@(PState{ loc=loc }) -> POk s loc
1390 setLastToken :: SrcSpan -> Int -> Int -> P ()
1391 setLastToken loc len line_len = P $ \s -> POk s {
1394 last_line_len=line_len
1397 data AlexInput = AI SrcLoc {-#UNPACK#-}!Int StringBuffer
1399 alexInputPrevChar :: AlexInput -> Char
1400 alexInputPrevChar (AI _ _ buf) = prevChar buf '\n'
1402 alexGetChar :: AlexInput -> Maybe (Char,AlexInput)
1403 alexGetChar (AI loc ofs s)
1405 | otherwise = adj_c `seq` loc' `seq` ofs' `seq` s' `seq`
1406 --trace (show (ord c)) $
1407 Just (adj_c, (AI loc' ofs' s'))
1408 where (c,s') = nextChar s
1409 loc' = advanceSrcLoc loc c
1410 ofs' = advanceOffs c ofs
1418 other_graphic = '\x6'
1421 | c <= '\x06' = non_graphic
1423 -- Alex doesn't handle Unicode, so when a Unicode
1424 -- character is encountered we output these values
1425 -- with the actual character value hidden in the state.
1427 case generalCategory c of
1428 UppercaseLetter -> upper
1429 LowercaseLetter -> lower
1430 TitlecaseLetter -> upper
1431 ModifierLetter -> other_graphic
1432 OtherLetter -> other_graphic
1433 NonSpacingMark -> other_graphic
1434 SpacingCombiningMark -> other_graphic
1435 EnclosingMark -> other_graphic
1436 DecimalNumber -> digit
1437 LetterNumber -> other_graphic
1438 OtherNumber -> other_graphic
1439 ConnectorPunctuation -> other_graphic
1440 DashPunctuation -> other_graphic
1441 OpenPunctuation -> other_graphic
1442 ClosePunctuation -> other_graphic
1443 InitialQuote -> other_graphic
1444 FinalQuote -> other_graphic
1445 OtherPunctuation -> other_graphic
1446 MathSymbol -> symbol
1447 CurrencySymbol -> symbol
1448 ModifierSymbol -> symbol
1449 OtherSymbol -> symbol
1451 _other -> non_graphic
1453 -- This version does not squash Unicode characters; it is used when
1455 alexGetChar' :: AlexInput -> Maybe (Char,AlexInput)
1456 alexGetChar' (AI loc ofs s)
1458 | otherwise = c `seq` loc' `seq` ofs' `seq` s' `seq`
1459 --trace (show (ord c)) $
1460 Just (c, (AI loc' ofs' s'))
1461 where (c,s') = nextChar s
1462 loc' = advanceSrcLoc loc c
1463 ofs' = advanceOffs c ofs
1465 advanceOffs :: Char -> Int -> Int
1466 advanceOffs '\n' offs = 0
1467 advanceOffs '\t' offs = (offs `quot` 8 + 1) * 8
1468 advanceOffs _ offs = offs + 1
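-- e.g.  advanceOffs '\t' 3 == 8    -- a tab advances to the next multiple of 8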
1470 getInput :: P AlexInput
1471 getInput = P $ \s@PState{ loc=l, last_offs=o, buffer=b } -> POk s (AI l o b)
1473 setInput :: AlexInput -> P ()
1474 setInput (AI l o b) = P $ \s -> POk s{ loc=l, last_offs=o, buffer=b } ()
1476 pushLexState :: Int -> P ()
1477 pushLexState ls = P $ \s@PState{ lex_state=l } -> POk s{lex_state=ls:l} ()
1479 popLexState :: P Int
1480 popLexState = P $ \s@PState{ lex_state=ls:l } -> POk s{ lex_state=l } ls
1482 getLexState :: P Int
1483 getLexState = P $ \s@PState{ lex_state=ls:l } -> POk s ls
1485 -- for reasons of efficiency, flags indicating language extensions (eg,
1486 -- -fglasgow-exts or -fparr) are represented by a bitmap stored in an unboxed
1489 genericsBit, ffiBit, parrBit :: Int
1490 genericsBit = 0 -- {| and |}
1496 explicitForallBit = 7 -- the 'forall' keyword and '.' symbol
1497 bangPatBit = 8 -- Tells the parser to understand bang-patterns
1498 -- (doesn't affect the lexer)
1499 tyFamBit = 9 -- indexed type families: 'family' keyword and kind sigs
1500 haddockBit = 10 -- Lex and parse Haddock comments
1501 magicHashBit = 11 -- # in both functions and operators
1502 kindSigsBit = 12 -- Kind signatures on type variables
1503 recursiveDoBit = 13 -- mdo
1504 unicodeSyntaxBit = 14 -- the forall symbol, arrow symbols, etc
1505 unboxedTuplesBit = 15 -- (# and #)
1506 standaloneDerivingBit = 16 -- standalone instance deriving declarations
1508 genericsEnabled, ffiEnabled, parrEnabled :: Int -> Bool
1510 genericsEnabled flags = testBit flags genericsBit
1511 ffiEnabled flags = testBit flags ffiBit
1512 parrEnabled flags = testBit flags parrBit
1513 arrowsEnabled flags = testBit flags arrowsBit
1514 thEnabled flags = testBit flags thBit
1515 ipEnabled flags = testBit flags ipBit
1516 explicitForallEnabled flags = testBit flags explicitForallBit
1517 bangPatEnabled flags = testBit flags bangPatBit
1518 tyFamEnabled flags = testBit flags tyFamBit
1519 haddockEnabled flags = testBit flags haddockBit
1520 magicHashEnabled flags = testBit flags magicHashBit
1521 kindSigsEnabled flags = testBit flags kindSigsBit
1522 recursiveDoEnabled flags = testBit flags recursiveDoBit
1523 unicodeSyntaxEnabled flags = testBit flags unicodeSyntaxBit
1524 unboxedTuplesEnabled flags = testBit flags unboxedTuplesBit
1525 standaloneDerivingEnabled flags = testBit flags standaloneDerivingBit
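-- Usage sketch (a hypothetical helper, not part of this module): inside the
-- P monad these predicates are consumed via 'extension', for example
_withHaddockExample :: P a -> P a -> P a
_withHaddockExample yes no = do
    on <- extension haddockEnabled      -- True iff the haddock bit is set
    if on then yes else no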
1527 -- PState for parsing options pragmas
1529 pragState :: StringBuffer -> SrcLoc -> PState
1533 messages = emptyMessages,
1534 -- XXX defaultDynFlags is not right, but we don't have a real
1536 dflags = defaultDynFlags,
1537 last_loc = mkSrcSpan loc loc,
1544 lex_state = [bol, option_prags, 0]
1548 -- create a parse state
1550 mkPState :: StringBuffer -> SrcLoc -> DynFlags -> PState
1551 mkPState buf loc flags =
1555 messages = emptyMessages,
1556 last_loc = mkSrcSpan loc loc,
1561 extsBitmap = fromIntegral bitmap,
1563 lex_state = [bol, 0]
1564 -- we begin in the layout state if toplev_layout is set
1567 bitmap = genericsBit `setBitIf` dopt Opt_Generics flags
1568 .|. ffiBit `setBitIf` dopt Opt_ForeignFunctionInterface flags
1569 .|. parrBit `setBitIf` dopt Opt_PArr flags
1570 .|. arrowsBit `setBitIf` dopt Opt_Arrows flags
1571 .|. thBit `setBitIf` dopt Opt_TemplateHaskell flags
1572 .|. ipBit `setBitIf` dopt Opt_ImplicitParams flags
1573 .|. explicitForallBit `setBitIf` dopt Opt_ScopedTypeVariables flags
1574 .|. explicitForallBit `setBitIf` dopt Opt_PolymorphicComponents flags
1575 .|. explicitForallBit `setBitIf` dopt Opt_ExistentialQuantification flags
1576 .|. explicitForallBit `setBitIf` dopt Opt_Rank2Types flags
1577 .|. explicitForallBit `setBitIf` dopt Opt_RankNTypes flags
1578 .|. bangPatBit `setBitIf` dopt Opt_BangPatterns flags
1579 .|. tyFamBit `setBitIf` dopt Opt_TypeFamilies flags
1580 .|. haddockBit `setBitIf` dopt Opt_Haddock flags
1581 .|. magicHashBit `setBitIf` dopt Opt_MagicHash flags
1582 .|. kindSigsBit `setBitIf` dopt Opt_KindSignatures flags
1583 .|. recursiveDoBit `setBitIf` dopt Opt_RecursiveDo flags
1584 .|. unicodeSyntaxBit `setBitIf` dopt Opt_UnicodeSyntax flags
1585 .|. unboxedTuplesBit `setBitIf` dopt Opt_UnboxedTuples flags
1586 .|. standaloneDerivingBit `setBitIf` dopt Opt_StandaloneDeriving flags
1588 setBitIf :: Int -> Bool -> Int
1589 b `setBitIf` cond | cond = bit b
1592 addWarning :: DynFlag -> SrcSpan -> SDoc -> P ()
1593 addWarning option srcspan warning
1594 = P $ \s@PState{messages=(ws,es), dflags=d} ->
1595 let warning' = mkWarnMsg srcspan alwaysQualify warning
1596 ws' = if dopt option d then ws `snocBag` warning' else ws
1597 in POk s{messages=(ws', es)} ()
1599 getMessages :: PState -> Messages
1600 getMessages PState{messages=ms} = ms
1602 getContext :: P [LayoutContext]
1603 getContext = P $ \s@PState{context=ctx} -> POk s ctx
1605 setContext :: [LayoutContext] -> P ()
1606 setContext ctx = P $ \s -> POk s{context=ctx} ()
1609 popContext = P $ \ s@(PState{ buffer = buf, context = ctx,
1610 loc = loc, last_len = len, last_loc = last_loc }) ->
1612 (_:tl) -> POk s{ context = tl } ()
1613 [] -> PFailed last_loc (srcParseErr buf len)
1615 -- Push a new layout context at the indentation of the last token read.
1616 -- This is only used at the outer level of a module when the 'module'
1617 -- keyword is missing.
1618 pushCurrentContext :: P ()
1619 pushCurrentContext = P $ \ s@PState{ last_offs=offs, last_line_len=len, context=ctx } ->
1620 POk s{context = Layout (offs-len) : ctx} ()
1621 --trace ("off: " ++ show offs ++ ", len: " ++ show len) $ POk s{context = Layout (offs-len) : ctx} ()
1623 getOffside :: P Ordering
1624 getOffside = P $ \s@PState{last_offs=offs, context=stk} ->
1625 let ord = case stk of
1626 (Layout n:_) -> compare offs n
1630 -- ---------------------------------------------------------------------------
1631 -- Construct a parse error
1634 :: StringBuffer -- current buffer (placed just after the last token)
1635 -> Int -- length of the previous token
1638 = hcat [ if null token
1639 then ptext SLIT("parse error (possibly incorrect indentation)")
1640 else hcat [ptext SLIT("parse error on input "),
1641 char '`', text token, char '\'']
1643 where token = lexemeToString (offsetBytes (-len) buf) len
1645 -- Report a parse failure, giving the span of the previous token as
1646 -- the location of the error. This is the entry point for errors
1647 -- detected during parsing.
1649 srcParseFail = P $ \PState{ buffer = buf, last_len = len,
1650 last_loc = last_loc } ->
1651 PFailed last_loc (srcParseErr buf len)
1653 -- A lexical error is reported at a particular position in the source file,
1654 -- not over a token range.
1655 lexError :: String -> P a
1658 i@(AI end _ buf) <- getInput
1659 reportLexError loc end buf str
1661 -- -----------------------------------------------------------------------------
1662 -- This is the top-level function: called from the parser each time a
1663 -- new token is to be read from the input.
1665 lexer :: (Located Token -> P a) -> P a
1667 tok@(L span tok__) <- lexToken
1668 -- trace ("token: " ++ show tok__) $ do
1671 lexToken :: P (Located Token)
1673 inp@(AI loc1 _ buf) <- getInput
1676 case alexScanUser exts inp sc of
1677 AlexEOF -> do let span = mkSrcSpan loc1 loc1
1678 setLastToken span 0 0
1679 return (L span ITeof)
1680 AlexError (AI loc2 _ buf) -> do
1681 reportLexError loc1 loc2 buf "lexical error"
1682 AlexSkip inp2 _ -> do
1685 AlexToken inp2@(AI end _ buf2) len t -> do
1687 let span = mkSrcSpan loc1 end
1688 let bytes = byteDiff buf buf2
1689 span `seq` setLastToken span bytes bytes
1692 reportLexError loc1 loc2 buf str
1693 | atEnd buf = failLocMsgP loc1 loc2 (str ++ " at end of input")
1696 c = fst (nextChar buf)
1698 if c == '\0' -- decoding errors are mapped to '\0', see utf8DecodeChar#
1699 then failLocMsgP loc2 loc2 (str ++ " (UTF-8 decoding error)")
1700 else failLocMsgP loc1 loc2 (str ++ " at character " ++ show c)