X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Fcompiler%2Fparser%2FLexer.x;h=4c1b48efc0845f3009dc19f4d8e5e26eadb36103;hb=274a7b1ab7cf30fd1ceaddd02f991eaf694cc37c;hp=264b7249bb18216aac556a8255db980503c5e7c3;hpb=2129fa6fc4afd7f7b0c767f8c0c14b9ab5508ec2;p=ghc-hetmet.git diff --git a/ghc/compiler/parser/Lexer.x b/ghc/compiler/parser/Lexer.x index 264b724..4c1b48e 100644 --- a/ghc/compiler/parser/Lexer.x +++ b/ghc/compiler/parser/Lexer.x @@ -1,5 +1,5 @@ ----------------------------------------------------------------------------- --- (c) The University of Glasgow, 2003 +-- (c) The University of Glasgow, 2006 -- -- GHC's lexer. -- @@ -22,15 +22,16 @@ { module Lexer ( - Token(..), Token__(..), lexer, mkPState, showPFailed, - P(..), ParseResult(..), setSrcLocFor, getSrcLoc, - failMsgP, failLocMsgP, srcParseFail, - popContext, pushCurrentContext, + Token(..), lexer, pragState, mkPState, PState(..), + P(..), ParseResult(..), getSrcLoc, + failLocMsgP, failSpanMsgP, srcParseFail, + popContext, pushCurrentContext, setLastToken, setSrcLoc, + getLexState, popLexState, pushLexState, + extension, bangPatEnabled ) where #include "HsVersions.h" -import ForeignCall ( Safety(..) ) import ErrUtils ( Message ) import Outputable import StringBuffer @@ -38,40 +39,49 @@ import FastString import FastTypes import SrcLoc import UniqFM -import CmdLineOpts +import DynFlags import Ctype -import Util ( maybePrefixMatch ) +import Util ( maybePrefixMatch, readRational ) import DATA_BITS -import Char +import Data.Char ( chr ) import Ratio -import TRACE +--import TRACE + +#if __GLASGOW_HASKELL__ >= 605 +import Data.Char ( GeneralCategory(..), generalCategory, isPrint, isUpper ) +#else +import Compat.Unicode ( GeneralCategory(..), generalCategory, isPrint, isUpper ) +#endif } -$whitechar = [\ \t\n\r\f\v] +$unispace = \x05 +$whitechar = [\ \t\n\r\f\v\xa0 $unispace] $white_no_nl = $whitechar # \n $ascdigit = 0-9 -$unidigit = \x01 +$unidigit = \x03 +$decdigit = $ascdigit -- for now, should really be $digit (ToDo) $digit = [$ascdigit $unidigit] $special = [\(\)\,\;\[\]\`\{\}] $ascsymbol = [\!\#\$\%\&\*\+\.\/\<\=\>\?\@\\\^\|\-\~] -$unisymbol = \x02 +$unisymbol = \x04 $symbol = [$ascsymbol $unisymbol] # [$special \_\:\"\'] -$unilarge = \x03 +$unilarge = \x01 $asclarge = [A-Z \xc0-\xd6 \xd8-\xde] $large = [$asclarge $unilarge] -$unismall = \x04 +$unismall = \x02 $ascsmall = [a-z \xdf-\xf6 \xf8-\xff] $small = [$ascsmall $unismall \_] -$graphic = [$small $large $symbol $digit $special \:\"\'] +$unigraphic = \x06 +$graphic = [$small $large $symbol $digit $special $unigraphic \:\"\'] $octit = 0-7 -$hexit = [$digit A-F a-f] +$hexit = [$decdigit A-F a-f] $symchar = [$symbol \:] $nl = [\n\r] $idchar = [$small $large $digit \'] @@ -82,7 +92,7 @@ $idchar = [$small $large $digit \'] @varsym = $symbol $symchar* @consym = \: $symchar* -@decimal = $digit+ +@decimal = $decdigit+ @octal = $octit+ @hexadecimal = $hexit+ @exponent = [eE] [\-\+]? @decimal @@ -109,7 +119,8 @@ $white_no_nl+ ; -- have to exclude those. -- The regex says: "munch all the characters after the dashes, as long as -- the first one is not a symbol". -"--"\-* ([^$symbol] .*)? ; +"--"\-* [^$symbol :] .* ; +"--"\-* / { atEOL } ; -- 'bol' state: beginning of a line. Slurp up all the whitespace (including -- blank lines) until we find a non-whitespace character, then do layout @@ -124,6 +135,7 @@ $white_no_nl+ ; \n ; ^\# (line)? { begin line_prag1 } ^\# pragma .* \n ; -- GCC 3.3 CPP generated, apparently + ^\# \! 
.* \n ; -- #!, for scripts () { do_bol } } @@ -146,41 +158,63 @@ $white_no_nl+ ; -- generate a matching '}' token. () { do_layout_left } -<0,glaexts> \n { begin bol } +<0,option_prags,glaexts> \n { begin bol } "{-#" $whitechar* (line|LINE) { begin line_prag2 } -- single-line line pragmas, of the form -- # "" \n - $digit+ { set_line line_prag1a } - \" [$graphic \ ]* \" { set_file line_prag1b } + $decdigit+ { setLine line_prag1a } + \" [$graphic \ ]* \" { setFile line_prag1b } .* { pop } -- Haskell-style line pragmas, of the form -- {-# LINE "" #-} - $digit+ { set_line line_prag2a } - \" [$graphic \ ]* \" { set_file line_prag2b } - "#-}" { pop } + $decdigit+ { setLine line_prag2a } + \" [$graphic \ ]* \" { setFile line_prag2b } + "#-}"|"-}" { pop } + -- NOTE: accept -} at the end of a LINE pragma, for compatibility + -- with older versions of GHC which generated these. + +-- We only want RULES pragmas to be picked up when -fglasgow-exts +-- is on, because the contents of the pragma is always written using +-- glasgow-exts syntax (using forall etc.), so if glasgow exts are not +-- enabled, we're sure to get a parse error. +-- (ToDo: we should really emit a warning when ignoring pragmas) + + "{-#" $whitechar* (RULES|rules) { token ITrules_prag } -<0,glaexts> { +<0,option_prags,glaexts> { + "{-#" $whitechar* (INLINE|inline) { token (ITinline_prag True) } + "{-#" $whitechar* (NO(T?)INLINE|no(t?)inline) + { token (ITinline_prag False) } + "{-#" $whitechar* (SPECIALI[SZ]E|speciali[sz]e) + { token ITspec_prag } "{-#" $whitechar* (SPECIALI[SZ]E|speciali[sz]e) - { token ITspecialise_prag } + $whitechar* (INLINE|inline) { token (ITspec_inline_prag True) } + "{-#" $whitechar* (SPECIALI[SZ]E|speciali[sz]e) + $whitechar* (NO(T?)INLINE|no(t?)inline) + { token (ITspec_inline_prag False) } "{-#" $whitechar* (SOURCE|source) { token ITsource_prag } - "{-#" $whitechar* (INLINE|inline) { token ITinline_prag } - "{-#" $whitechar* (NO(T?)INLINE|no(t?)inline) - { token ITnoinline_prag } - "{-#" $whitechar* (RULES|rules) { token ITrules_prag } "{-#" $whitechar* (DEPRECATED|deprecated) { token ITdeprecated_prag } "{-#" $whitechar* (SCC|scc) { token ITscc_prag } "{-#" $whitechar* (CORE|core) { token ITcore_prag } - + "{-#" $whitechar* (UNPACK|unpack) { token ITunpack_prag } + "{-#" { nested_comment } -- ToDo: should only be valid inside a pragma: "#-}" { token ITclose_prag} } + { + "{-#" $whitechar* (OPTIONS|options) { lex_string_prag IToptions_prag } + "{-#" $whitechar* (OPTIONS_GHC|options_ghc) + { lex_string_prag IToptions_prag } + "{-#" $whitechar* (LANGUAGE|language) { token ITlanguage_prag } + "{-#" $whitechar* (INCLUDE|include) { lex_string_prag ITinclude_prag } +} -- '0' state: ordinary lexemes -- 'glaexts' state: glasgow extensions (postfix '#', etc.) 
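
The INLINE, NOINLINE and SPECIALISE INLINE pragma rules above all produce a token carrying a Bool. A minimal standalone sketch of that keyword-to-token mapping over plain Strings (the constructors below are illustrative stand-ins for ITinline_prag and friends, not the lexer's own code):

import Data.Char (toLower)

-- Stand-ins for ITinline_prag, ITspec_prag and ITrules_prag.
data PragTok = InlinePrag Bool        -- True <=> INLINE, False <=> NO[T]INLINE
             | SpecPrag
             | RulesPrag
             | UnknownPrag String
  deriving Show

pragWord :: String -> PragTok
pragWord w = case map toLower w of
  "inline"     -> InlinePrag True
  "noinline"   -> InlinePrag False
  "notinline"  -> InlinePrag False
  "specialise" -> SpecPrag
  "specialize" -> SpecPrag
  "rules"      -> RulesPrag
  other        -> UnknownPrag other
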
@@ -204,7 +238,8 @@ $white_no_nl+ ; } <0,glaexts> { - "(|" / { ifExtension arrowsEnabled } { special IToparenbar } + "(|" / { ifExtension arrowsEnabled `alexAndPred` notFollowedBySymbol } + { special IToparenbar } "|)" / { ifExtension arrowsEnabled } { special ITcparenbar } } @@ -214,13 +249,13 @@ $white_no_nl+ ; } { - "(#" { token IToubxparen } + "(#" / { notFollowedBySymbol } { token IToubxparen } "#)" { token ITcubxparen } "{|" { token ITocurlybar } "|}" { token ITccurlybar } } -<0,glaexts> { +<0,option_prags,glaexts> { \( { special IToparen } \) { special ITcparen } \[ { special ITobrack } @@ -233,7 +268,7 @@ $white_no_nl+ ; \} { close_brace } } -<0,glaexts> { +<0,option_prags,glaexts> { @qual @varid { check_qvarid } @qual @conid { idtoken qconid } @varid { varid } @@ -297,9 +332,7 @@ unsafeAt arr i = arr ! i -- ----------------------------------------------------------------------------- -- The token type -data Token = T SrcLoc{-start-} SrcLoc{-end-} Token__ - -data Token__ +data Token = ITas -- Haskell keywords | ITcase | ITclass @@ -334,22 +367,26 @@ data Token__ | ITsafe | ITthreadsafe | ITunsafe - | ITwith | ITstdcallconv | ITccallconv | ITdotnet | ITmdo - | ITspecialise_prag -- Pragmas + -- Pragmas + | ITinline_prag Bool -- True <=> INLINE, False <=> NOINLINE + | ITspec_prag -- SPECIALISE + | ITspec_inline_prag Bool -- SPECIALISE INLINE (or NOINLINE) | ITsource_prag - | ITinline_prag - | ITnoinline_prag | ITrules_prag | ITdeprecated_prag | ITline_prag | ITscc_prag | ITcore_prag -- hdaume: core annotations + | ITunpack_prag | ITclose_prag + | IToptions_prag String + | ITinclude_prag String + | ITlanguage_prag | ITdotdot -- reserved symbols | ITcolon @@ -414,26 +451,25 @@ data Token__ | ITprimdouble Rational -- MetaHaskell extension tokens - | ITopenExpQuote -- [| or [e| - | ITopenPatQuote -- [p| - | ITopenDecQuote -- [d| - | ITopenTypQuote -- [t| - | ITcloseQuote -- |] - | ITidEscape FastString -- $x - | ITparenEscape -- $( - | ITreifyType - | ITreifyDecl - | ITreifyFixity + | ITopenExpQuote -- [| or [e| + | ITopenPatQuote -- [p| + | ITopenDecQuote -- [d| + | ITopenTypQuote -- [t| + | ITcloseQuote -- |] + | ITidEscape FastString -- $x + | ITparenEscape -- $( + | ITvarQuote -- ' + | ITtyQuote -- '' -- Arrow notation extension | ITproc | ITrec - | IToparenbar -- (| - | ITcparenbar -- |) - | ITlarrowtail -- -< - | ITrarrowtail -- >- - | ITLarrowtail -- -<< - | ITRarrowtail -- >>- + | IToparenbar -- (| + | ITcparenbar -- |) + | ITlarrowtail -- -< + | ITrarrowtail -- >- + | ITLarrowtail -- -<< + | ITRarrowtail -- >>- | ITunknown String -- Used when the lexer can't make sense of it | ITeof -- end of file token @@ -441,7 +477,7 @@ data Token__ deriving Show -- debugging #endif -isSpecial :: Token__ -> Bool +isSpecial :: Token -> Bool -- If we see M.x, where x is a keyword, but -- is special, we treat is as just plain M.x, -- not as a keyword. 
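
A standalone sketch of that M.x rule over plain Strings (the keyword sets below are small illustrative subsets of reservedWordsFM and isSpecial, chosen from the tokens visible above):

import qualified Data.Set as Set

-- Keywords isSpecial accepts: they only mean something in special positions,
-- so a qualified occurrence such as M.unsafe stays one qualified-variable token.
specialKws :: Set.Set String
specialKws = Set.fromList ["safe", "unsafe", "threadsafe", "dynamic", "ccall", "stdcall", "mdo"]

-- Ordinary reserved words (subset): qualified by a module name they are
-- re-lexed (check_qvarid's try_again) so the keyword comes out on its own.
plainKws :: Set.Set String
plainKws = Set.fromList ["case", "class", "data", "where", "let", "do"]

-- Does "M.var" survive as a single qualified-variable token?
staysQualified :: String -> Bool
staysQualified var = var `Set.member` specialKws || var `Set.notMember` plainKws

-- staysQualified "unsafe" == True   -- M.unsafe is just a qualified variable
-- staysQualified "case"   == False  -- M.case is re-lexed: module name, dot, keyword
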
@@ -455,7 +491,6 @@ isSpecial ITdynamic = True isSpecial ITsafe = True isSpecial ITthreadsafe = True isSpecial ITunsafe = True -isSpecial ITwith = True isSpecial ITccallconv = True isSpecial ITstdcallconv = True isSpecial ITmdo = True @@ -497,11 +532,8 @@ reservedWordsFM = listToUFM $ ( "where", ITwhere, 0 ), ( "_scc_", ITscc, 0 ), -- ToDo: remove - ( "forall", ITforall, bit glaExtsBit), + ( "forall", ITforall, bit tvBit), ( "mdo", ITmdo, bit glaExtsBit), - ( "reifyDecl", ITreifyDecl, bit thBit), - ( "reifyType", ITreifyType, bit thBit), - ( "reifyFixity",ITreifyFixity, bit thBit), ( "foreign", ITforeign, bit ffiBit), ( "export", ITexport, bit ffiBit), @@ -514,8 +546,6 @@ reservedWordsFM = listToUFM $ ( "ccall", ITccallconv, bit ffiBit), ( "dotnet", ITdotnet, bit ffiBit), - ( "with", ITwith, bit withBit), - ( "rec", ITrec, bit arrowsBit), ( "proc", ITproc, bit arrowsBit) ] @@ -538,52 +568,67 @@ reservedSymsFM = listToUFM $ ,("!", ITbang, 0) ,("*", ITstar, bit glaExtsBit) -- For data T (a::*) = MkT - ,(".", ITdot, bit glaExtsBit) -- For 'forall a . t' + ,(".", ITdot, bit tvBit) -- For 'forall a . t' ,("-<", ITlarrowtail, bit arrowsBit) ,(">-", ITrarrowtail, bit arrowsBit) ,("-<<", ITLarrowtail, bit arrowsBit) ,(">>-", ITRarrowtail, bit arrowsBit) + +#if __GLASGOW_HASKELL__ >= 605 + ,("λ", ITlam, bit glaExtsBit) + ,("∷", ITdcolon, bit glaExtsBit) + ,("⇒", ITdarrow, bit glaExtsBit) + ,("∀", ITforall, bit glaExtsBit) + ,("→", ITrarrow, bit glaExtsBit) + ,("←", ITlarrow, bit glaExtsBit) + ,("⋯", ITdotdot, bit glaExtsBit) +#endif ] -- ----------------------------------------------------------------------------- -- Lexer actions -type Action = SrcLoc -> SrcLoc -> StringBuffer -> Int -> P Token +type Action = SrcSpan -> StringBuffer -> Int -> P (Located Token) -special :: Token__ -> Action -special tok loc end _buf len = return (T loc end tok) +special :: Token -> Action +special tok span _buf len = return (L span tok) -token, layout_token :: Token__ -> Action -token t loc end buf len = return (T loc end t) -layout_token t loc end buf len = pushLexState layout >> return (T loc end t) +token, layout_token :: Token -> Action +token t span buf len = return (L span t) +layout_token t span buf len = pushLexState layout >> return (L span t) -idtoken :: (StringBuffer -> Int -> Token__) -> Action -idtoken f loc end buf len = return (T loc end $! (f buf len)) +idtoken :: (StringBuffer -> Int -> Token) -> Action +idtoken f span buf len = return (L span $! (f buf len)) -skip_one_varid :: (FastString -> Token__) -> Action -skip_one_varid f loc end buf len - = return (T loc end $! f (lexemeToFastString (stepOn buf) (len-1))) +skip_one_varid :: (FastString -> Token) -> Action +skip_one_varid f span buf len + = return (L span $! f (lexemeToFastString (stepOn buf) (len-1))) -strtoken :: (String -> Token__) -> Action -strtoken f loc end buf len = - return (T loc end $! (f $! lexemeToString buf len)) +strtoken :: (String -> Token) -> Action +strtoken f span buf len = + return (L span $! (f $! lexemeToString buf len)) -init_strtoken :: Int -> (String -> Token__) -> Action +init_strtoken :: Int -> (String -> Token) -> Action -- like strtoken, but drops the last N character(s) -init_strtoken drop f loc end buf len = - return (T loc end $! (f $! lexemeToString buf (len-drop))) +init_strtoken drop f span buf len = + return (L span $! (f $! 
lexemeToString buf (len-drop))) begin :: Int -> Action -begin code _loc _end _str _len = do pushLexState code; lexToken +begin code _span _str _len = do pushLexState code; lexToken pop :: Action -pop _loc _end _buf _len = do popLexState; lexToken +pop _span _buf _len = do popLexState; lexToken pop_and :: Action -> Action -pop_and act loc end buf len = do popLexState; act loc end buf len +pop_and act span buf len = do popLexState; act span buf len + +notFollowedBy char _ _ _ (AI _ _ buf) = atEnd buf || currentChar buf /= char -notFollowedBy char _ _ _ (_,buf) = atEnd buf || currentChar buf /= char +notFollowedBySymbol _ _ _ (AI _ _ buf) + = atEnd buf || currentChar buf `notElem` "!#$%&*+./<=>?@\\^|-~" + +atEOL _ _ _ (AI _ _ buf) = atEnd buf || currentChar buf == '\n' ifExtension pred bits _ _ _ = pred bits @@ -592,7 +637,7 @@ ifExtension pred bits _ _ _ = pred bits using regular expressions. -} nested_comment :: Action -nested_comment loc _end _str _len = do +nested_comment span _str _len = do input <- getInput go 1 input where go 0 input = do setInput input; lexToken @@ -613,21 +658,21 @@ nested_comment loc _end _str _len = do Just (c,input) -> go n input c -> go n input - err input = do failLocMsgP loc (fst input) "unterminated `{-'" + err (AI end _ _) = failLocMsgP (srcSpanStart span) end "unterminated `{-'" open_brace, close_brace :: Action -open_brace loc end _str _len = do +open_brace span _str _len = do ctx <- getContext setContext (NoLayout:ctx) - return (T loc end ITocurly) -close_brace loc end _str _len = do + return (L span ITocurly) +close_brace span _str _len = do popContext - return (T loc end ITccurly) + return (L span ITccurly) -- We have to be careful not to count M. as a qualified name -- when is a keyword. We hack around this by catching -- the offending tokens afterward, and re-lexing in a different state. -check_qvarid loc end buf len = do +check_qvarid span buf len = do case lookupUFM reservedWordsFM var of Just (keyword,exts) | not (isSpecial keyword) -> @@ -640,10 +685,11 @@ check_qvarid loc end buf len = do _other -> return token where (mod,var) = splitQualName buf len - token = T loc end (ITqvarid (mod,var)) + token = L span (ITqvarid (mod,var)) try_again = do - setInput (loc,buf) + (AI _ offs _) <- getInput + setInput (AI (srcSpanStart span) (offs-len) buf) pushLexState bad_qvarid lexToken @@ -654,35 +700,41 @@ splitQualName :: StringBuffer -> Int -> (FastString,FastString) -- takes a StringBuffer and a length, and returns the module name -- and identifier parts of a qualified name. Splits at the *last* dot, -- because of hierarchical module names. -splitQualName orig_buf len = split orig_buf 0 0 +splitQualName orig_buf len = split orig_buf orig_buf where - split buf dot_off n - | n == len = done dot_off - | lookAhead buf n == '.' = split2 buf n (n+1) - | otherwise = split buf dot_off (n+1) + split buf dot_buf + | orig_buf `byteDiff` buf >= len = done dot_buf + | c == '.' = found_dot buf' + | otherwise = split buf' dot_buf + where + (c,buf') = nextChar buf -- careful, we might get names like M.... -- so, if the character after the dot is not upper-case, this is -- the end of the qualifier part. - split2 buf dot_off n - | isUpper (lookAhead buf n) = split buf dot_off (n+1) - | otherwise = done dot_off - - done dot_off = - (lexemeToFastString orig_buf dot_off, - lexemeToFastString (stepOnBy (dot_off+1) orig_buf) (len - dot_off -1)) - -varid loc end buf len = + found_dot buf -- buf points after the '.' 
+ | isUpper c = split buf' buf + | otherwise = done buf + where + (c,buf') = nextChar buf + + done dot_buf = + (lexemeToFastString orig_buf (qual_size - 1), + lexemeToFastString dot_buf (len - qual_size)) + where + qual_size = orig_buf `byteDiff` dot_buf + +varid span buf len = case lookupUFM reservedWordsFM fs of Just (keyword,0) -> do maybe_layout keyword - return (T loc end keyword) + return (L span keyword) Just (keyword,exts) -> do b <- extension (\i -> exts .&. i /= 0) if b then do maybe_layout keyword - return (T loc end keyword) - else return (T loc end (ITvarid fs)) - _other -> return (T loc end (ITvarid fs)) + return (L span keyword) + else return (L span (ITvarid fs)) + _other -> return (L span (ITvarid fs)) where fs = lexemeToFastString buf len @@ -695,62 +747,56 @@ qconsym buf len = ITqconsym $! splitQualName buf len varsym = sym ITvarsym consym = sym ITconsym -sym con loc end buf len = +sym con span buf len = case lookupUFM reservedSymsFM fs of - Just (keyword,0) -> return (T loc end keyword) + Just (keyword,0) -> return (L span keyword) Just (keyword,exts) -> do b <- extension (\i -> exts .&. i /= 0) - if b then return (T loc end keyword) - else return (T loc end $! con fs) - _other -> return (T loc end $! con fs) + if b then return (L span keyword) + else return (L span $! con fs) + _other -> return (L span $! con fs) where fs = lexemeToFastString buf len -tok_decimal loc end buf len - = return (T loc end (ITinteger $! parseInteger buf len 10 oct_or_dec)) +tok_decimal span buf len + = return (L span (ITinteger $! parseInteger buf len 10 octDecDigit)) -tok_octal loc end buf len - = return (T loc end (ITinteger $! parseInteger (stepOnBy 2 buf) (len-2) 8 oct_or_dec)) +tok_octal span buf len + = return (L span (ITinteger $! parseInteger (offsetBytes 2 buf) (len-2) 8 octDecDigit)) -tok_hexadecimal loc end buf len - = return (T loc end (ITinteger $! parseInteger (stepOnBy 2 buf) (len-2) 16 hex)) +tok_hexadecimal span buf len + = return (L span (ITinteger $! parseInteger (offsetBytes 2 buf) (len-2) 16 hexDigit)) -prim_decimal loc end buf len - = return (T loc end (ITprimint $! parseInteger buf (len-1) 10 oct_or_dec)) +prim_decimal span buf len + = return (L span (ITprimint $! parseInteger buf (len-1) 10 octDecDigit)) -prim_octal loc end buf len - = return (T loc end (ITprimint $! parseInteger (stepOnBy 2 buf) (len-3) 8 oct_or_dec)) +prim_octal span buf len + = return (L span (ITprimint $! parseInteger (offsetBytes 2 buf) (len-3) 8 octDecDigit)) -prim_hexadecimal loc end buf len - = return (T loc end (ITprimint $! parseInteger (stepOnBy 2 buf) (len-3) 16 hex)) +prim_hexadecimal span buf len + = return (L span (ITprimint $! parseInteger (offsetBytes 2 buf) (len-3) 16 hexDigit)) -tok_float str = ITrational $! readRational__ str -prim_float str = ITprimfloat $! readRational__ str -prim_double str = ITprimdouble $! readRational__ str - -parseInteger :: StringBuffer -> Int -> Integer -> (Char->Int) -> Integer -parseInteger buf len radix to_int - = go 0 0 - where go i x | i == len = x - | otherwise = go (i+1) (x * radix + toInteger (to_int (lookAhead buf i))) +tok_float str = ITrational $! readRational str +prim_float str = ITprimfloat $! readRational str +prim_double str = ITprimdouble $! 
readRational str -- ----------------------------------------------------------------------------- -- Layout processing -- we're at the first token on a line, insert layout tokens if necessary do_bol :: Action -do_bol loc end _str _len = do - pos <- getOffside end +do_bol span _str _len = do + pos <- getOffside case pos of LT -> do --trace "layout: inserting '}'" $ do popContext -- do NOT pop the lex state, we might have a ';' to insert - return (T loc end ITvccurly) + return (L span ITvccurly) EQ -> do --trace "layout: inserting ';'" $ do popLexState - return (T loc end ITsemi) + return (L span ITsemi) GT -> do popLexState lexToken @@ -774,9 +820,9 @@ maybe_layout _ = return () -- by a 'do', then we allow the new context to be at the same indentation as -- the previous context. This is what the 'strict' argument is for. -- -new_layout_context strict loc end _buf _len = do +new_layout_context strict span _buf _len = do popLexState - let offset = srcLocCol loc + (AI _ offset _) <- getInput ctx <- getContext case ctx of Layout prev_off : _ | @@ -785,51 +831,77 @@ new_layout_context strict loc end _buf _len = do -- token is indented to the left of the previous context. -- we must generate a {} sequence now. pushLexState layout_left - return (T loc end ITvocurly) + return (L span ITvocurly) other -> do setContext (Layout offset : ctx) - return (T loc end ITvocurly) + return (L span ITvocurly) -do_layout_left loc end _buf _len = do +do_layout_left span _buf _len = do popLexState pushLexState bol -- we must be at the start of a line - return (T loc end ITvccurly) + return (L span ITvccurly) -- ----------------------------------------------------------------------------- -- LINE pragmas -set_line :: Int -> Action -set_line code loc end buf len = do - let line = parseInteger buf len 10 oct_or_dec - setSrcLoc (mkSrcLoc (srcLocFile end) (fromIntegral line - 1) 0) +setLine :: Int -> Action +setLine code span buf len = do + let line = parseInteger buf len 10 octDecDigit + setSrcLoc (mkSrcLoc (srcSpanFile span) (fromIntegral line - 1) 0) -- subtract one: the line number refers to the *following* line popLexState pushLexState code lexToken -set_file :: Int -> Action -set_file code loc end buf len = do +setFile :: Int -> Action +setFile code span buf len = do let file = lexemeToFastString (stepOn buf) (len-2) - setSrcLoc (mkSrcLoc file (srcLocLine end) (srcLocCol end)) + setSrcLoc (mkSrcLoc file (srcSpanEndLine span) (srcSpanEndCol span)) popLexState pushLexState code lexToken + +-- ----------------------------------------------------------------------------- +-- Options, includes and language pragmas. + +lex_string_prag :: (String -> Token) -> Action +lex_string_prag mkTok span buf len + = do input <- getInput + start <- getSrcLoc + tok <- go [] input + end <- getSrcLoc + return (L (mkSrcSpan start end) tok) + where go acc input + = if isString input "#-}" + then do setInput input + return (mkTok (reverse acc)) + else case alexGetChar input of + Just (c,i) -> go (c:acc) i + Nothing -> err input + isString i [] = True + isString i (x:xs) + = case alexGetChar i of + Just (c,i') | c == x -> isString i' xs + _other -> False + err (AI end _ _) = failLocMsgP (srcSpanStart span) end "unterminated options pragma" + + -- ----------------------------------------------------------------------------- -- Strings & Chars -- This stuff is horrible. I hates it. 
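
-- An aside on lex_string_prag just above: it accumulates the raw pragma body
-- character by character until the closing "#-}".  A standalone sketch of
-- that loop over a plain String (helper name illustrative, not this module's):

import Data.List (stripPrefix)

pragmaBody :: String -> Maybe (String, String)    -- (body, rest after "#-}")
pragmaBody = go []
  where
    go acc s
      | Just rest <- stripPrefix "#-}" s = Just (reverse acc, rest)
      | c:cs      <- s                   = go (c:acc) cs
      | otherwise                        = Nothing  -- unterminated pragma

-- pragmaBody " -cpp -fffi #-} rest" == Just (" -cpp -fffi ", " rest")
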
lex_string_tok :: Action -lex_string_tok loc end buf len = do +lex_string_tok span buf len = do tok <- lex_string "" end <- getSrcLoc - return (T loc end tok) + return (L (mkSrcSpan (srcSpanStart span) end) tok) -lex_string :: String -> P Token__ +lex_string :: String -> P Token lex_string s = do i <- getInput - case alexGetChar i of + case alexGetChar' i of Nothing -> lit_error Just ('"',i) -> do @@ -838,14 +910,15 @@ lex_string s = do if glaexts then do i <- getInput - case alexGetChar i of + case alexGetChar' i of Just ('#',i) -> do setInput i if any (> '\xFF') s then failMsgP "primitive string literal must contain only characters <= \'\\xFF\'" - else let s' = mkFastStringNarrow (reverse s) in - -- always a narrow string/byte array + else let s' = mkZFastString (reverse s) in return (ITprimstring s') + -- mkZFastString is a hack to avoid encoding the + -- string in UTF-8. We just want the exact bytes. _other -> return (ITstring (mkFastString (reverse s))) else @@ -856,12 +929,11 @@ lex_string s = do setInput i; lex_string s | Just (c,i) <- next, is_space c -> do setInput i; lex_stringgap s - where next = alexGetChar i - - Just _ -> do - c <- lex_char - lex_string (c:s) + where next = alexGetChar' i + Just (c, i) -> do + c' <- lex_char c i + lex_string (c':s) lex_stringgap s = do c <- getCharOrFail @@ -872,35 +944,75 @@ lex_stringgap s = do lex_char_tok :: Action -lex_char_tok loc _end buf len = do - c <- lex_char - mc <- getCharOrFail - case mc of - '\'' -> do - glaexts <- extension glaExtsEnabled - if glaexts - then do - i@(end,_) <- getInput - case alexGetChar i of - Just ('#',i@(end,_)) -> do +-- Here we are basically parsing character literals, such as 'x' or '\n' +-- but, when Template Haskell is on, we additionally spot +-- 'x and ''T, returning ITvarQuote and ITtyQuote respectively, +-- but WIHTOUT CONSUMING the x or T part (the parser does that). +-- So we have to do two characters of lookahead: when we see 'x we need to +-- see if there's a trailing quote +lex_char_tok span buf len = do -- We've seen ' + i1 <- getInput -- Look ahead to first character + let loc = srcSpanStart span + case alexGetChar' i1 of + Nothing -> lit_error + + Just ('\'', i2@(AI end2 _ _)) -> do -- We've seen '' + th_exts <- extension thEnabled + if th_exts then do + setInput i2 + return (L (mkSrcSpan loc end2) ITtyQuote) + else lit_error + + Just ('\\', i2@(AI end2 _ _)) -> do -- We've seen 'backslash + setInput i2 + lit_ch <- lex_escape + mc <- getCharOrFail -- Trailing quote + if mc == '\'' then finish_char_tok loc lit_ch + else do setInput i2; lit_error + + Just (c, i2@(AI end2 _ _)) + | not (isAny c) -> lit_error + | otherwise -> + + -- We've seen 'x, where x is a valid character + -- (i.e. 
not newline etc) but not a quote or backslash + case alexGetChar' i2 of -- Look ahead one more character + Nothing -> lit_error + Just ('\'', i3) -> do -- We've seen 'x' + setInput i3 + finish_char_tok loc c + _other -> do -- We've seen 'x not followed by quote + -- If TH is on, just parse the quote only + th_exts <- extension thEnabled + let (AI end _ _) = i1 + if th_exts then return (L (mkSrcSpan loc end) ITvarQuote) + else do setInput i2; lit_error + +finish_char_tok :: SrcLoc -> Char -> P (Located Token) +finish_char_tok loc ch -- We've already seen the closing quote + -- Just need to check for trailing # + = do glaexts <- extension glaExtsEnabled + i@(AI end _ _) <- getInput + if glaexts then do + case alexGetChar' i of + Just ('#',i@(AI end _ _)) -> do setInput i - return (T loc end (ITprimchar c)) + return (L (mkSrcSpan loc end) (ITprimchar ch)) _other -> - return (T loc end (ITchar c)) + return (L (mkSrcSpan loc end) (ITchar ch)) else do - end <- getSrcLoc - return (T loc end (ITchar c)) - - _other -> lit_error + return (L (mkSrcSpan loc end) (ITchar ch)) -lex_char :: P Char -lex_char = do - mc <- getCharOrFail - case mc of - '\\' -> lex_escape - c | is_any c -> return c +lex_char :: Char -> AlexInput -> P Char +lex_char c inp = do + case c of + '\\' -> do setInput inp; lex_escape + c | isAny c -> do setInput inp; return c _other -> lit_error +isAny c | c > '\xff' = isPrint c + | otherwise = is_any c + lex_escape :: P Char lex_escape = do c <- getCharOrFail @@ -920,17 +1032,17 @@ lex_escape = do then return (chr (ord c - ord '@')) else lit_error - 'x' -> readNum is_hexdigit 16 hex - 'o' -> readNum is_octdigit 8 oct_or_dec - x | is_digit x -> readNum2 is_digit 10 oct_or_dec (oct_or_dec x) + 'x' -> readNum is_hexdigit 16 hexDigit + 'o' -> readNum is_octdigit 8 octDecDigit + x | is_digit x -> readNum2 is_digit 10 octDecDigit (octDecDigit x) c1 -> do i <- getInput - case alexGetChar i of + case alexGetChar' i of Nothing -> lit_error Just (c2,i2) -> - case alexGetChar i2 of - Nothing -> lit_error + case alexGetChar' i2 of + Nothing -> do setInput i2; lit_error Just (c3,i3) -> let str = [c1,c2,c3] in case [ (c,rest) | (p,c) <- silly_escape_chars, @@ -945,40 +1057,24 @@ lex_escape = do readNum :: (Char -> Bool) -> Int -> (Char -> Int) -> P Char readNum is_digit base conv = do + i <- getInput c <- getCharOrFail if is_digit c then readNum2 is_digit base conv (conv c) - else lit_error + else do setInput i; lit_error readNum2 is_digit base conv i = do input <- getInput read i input where read i input = do - case alexGetChar input of + case alexGetChar' input of Just (c,input') | is_digit c -> do read (i*base + conv c) input' _other -> do - setInput input if i >= 0 && i <= 0x10FFFF - then return (chr i) + then do setInput input; return (chr i) else lit_error -is_hexdigit c - = is_digit c - || (c >= 'a' && c <= 'f') - || (c >= 'A' && c <= 'F') - -hex c | is_digit c = ord c - ord '0' - | otherwise = ord (to_lower c) - ord 'a' + 10 - -oct_or_dec c = ord c - ord '0' - -is_octdigit c = c >= '0' && c <= '7' - -to_lower c - | c >= 'A' && c <= 'Z' = chr (ord c - (ord 'A' - ord 'a')) - | otherwise = c - silly_escape_chars = [ ("NUL", '\NUL'), ("SOH", '\SOH'), @@ -1016,64 +1112,20 @@ silly_escape_chars = [ ("DEL", '\DEL') ] +-- before calling lit_error, ensure that the current input is pointing to +-- the position of the error in the buffer. This is so that we can report +-- a correct location to the user, but also so we can detect UTF-8 decoding +-- errors if they occur. 
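
-- A standalone sketch of the numeric-escape bound enforced by readNum2 above:
-- digits are accumulated in the given base and the code point is accepted
-- only if it does not exceed 0x10FFFF (helper name illustrative):

import Data.Char (chr, digitToInt)

numericEscape :: Int -> String -> Maybe Char
numericEscape base ds
  | n <= 0x10FFFF = Just (chr n)
  | otherwise     = Nothing                 -- out of range: lexical error
  where n = foldl (\acc d -> acc * base + digitToInt d) 0 ds

-- numericEscape 16 "41"      == Just 'A'   -- '\x41'
-- numericEscape 10 "1114112" == Nothing    -- one past the Unicode maximum
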
lit_error = lexError "lexical error in string/character literal" getCharOrFail :: P Char getCharOrFail = do i <- getInput - case alexGetChar i of + case alexGetChar' i of Nothing -> lexError "unexpected end-of-file in string/character literal" Just (c,i) -> do setInput i; return c -- ----------------------------------------------------------------------------- --- Floats - -readRational :: ReadS Rational -- NB: doesn't handle leading "-" -readRational r = do - (n,d,s) <- readFix r - (k,t) <- readExp s - return ((n%1)*10^^(k-d), t) - where - readFix r = do - (ds,s) <- lexDecDigits r - (ds',t) <- lexDotDigits s - return (read (ds++ds'), length ds', t) - - readExp (e:s) | e `elem` "eE" = readExp' s - readExp s = return (0,s) - - readExp' ('+':s) = readDec s - readExp' ('-':s) = do - (k,t) <- readDec s - return (-k,t) - readExp' s = readDec s - - readDec s = do - (ds,r) <- nonnull isDigit s - return (foldl1 (\n d -> n * 10 + d) [ ord d - ord '0' | d <- ds ], - r) - - lexDecDigits = nonnull isDigit - - lexDotDigits ('.':s) = return (span isDigit s) - lexDotDigits s = return ("",s) - - nonnull p s = do (cs@(_:_),t) <- return (span p s) - return (cs,t) - -readRational__ :: String -> Rational -- NB: *does* handle a leading "-" -readRational__ top_s - = case top_s of - '-' : xs -> - (read_me xs) - xs -> read_me xs - where - read_me s - = case (do { (x,"") <- readRational s ; return x }) of - [x] -> x - [] -> error ("readRational__: no parse:" ++ top_s) - _ -> error ("readRational__: ambiguous parse:" ++ top_s) - --- ----------------------------------------------------------------------------- -- The Parse Monad data LayoutContext @@ -1083,25 +1135,28 @@ data LayoutContext data ParseResult a = POk PState a | PFailed - SrcLoc SrcLoc -- The start and end of the text span related to + SrcSpan -- The start and end of the text span related to -- the error. Might be used in environments which can -- show this span, e.g. by highlighting it. Message -- The error message -showPFailed loc1 loc2 err - = showSDoc (hcat [ppr loc1, text ": ", err]) - data PState = PState { buffer :: StringBuffer, - last_loc :: SrcLoc, -- pos of previous token - last_len :: !Int, -- len of previous token + last_loc :: SrcSpan, -- pos of previous token + last_offs :: !Int, -- offset of the previous token from the + -- beginning of the current line. + -- \t is equal to 8 spaces. + last_len :: !Int, -- len of previous token loc :: SrcLoc, -- current loc (end of prev token + 1) extsBitmap :: !Int, -- bitmap that determines permitted extensions context :: [LayoutContext], lex_state :: [Int] } -- last_loc and last_len are used when generating error messages, - -- and in pushCurrentContext only. + -- and in pushCurrentContext only. Sigh, if only Happy passed the + -- current token to happyError, we could at least get rid of last_len. + -- Getting rid of last_loc would require finding another way to + -- implement pushCurrentContext (which is only called from one place). 
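
-- The P monad defined below is the usual state-passing parser: a function
-- from PState to ParseResult.  A minimal self-contained model of that shape
-- (simplified stand-in types, not the definitions in this module):

data Result s a = Ok s a | Failed String

newtype Parse s a = Parse { runParse :: s -> Result s a }

retParse :: a -> Parse s a
retParse a = Parse $ \s -> Ok s a               -- cf. returnP

thenParse :: Parse s a -> (a -> Parse s b) -> Parse s b
Parse m `thenParse` k = Parse $ \s ->
  case m s of
    Ok s' a    -> runParse (k a) s'
    Failed msg -> Failed msg                    -- failure short-circuits, cf. thenP
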
newtype P a = P { unP :: PState -> ParseResult a } @@ -1116,17 +1171,20 @@ returnP a = P $ \s -> POk s a thenP :: P a -> (a -> P b) -> P b (P m) `thenP` k = P $ \ s -> case m s of - POk s1 a -> (unP (k a)) s1 - PFailed l1 l2 err -> PFailed l1 l2 err + POk s1 a -> (unP (k a)) s1 + PFailed span err -> PFailed span err failP :: String -> P a -failP msg = P $ \s -> PFailed (last_loc s) (loc s) (text msg) +failP msg = P $ \s -> PFailed (last_loc s) (text msg) failMsgP :: String -> P a -failMsgP msg = P $ \s -> PFailed (last_loc s) (loc s) (text msg) +failMsgP msg = P $ \s -> PFailed (last_loc s) (text msg) failLocMsgP :: SrcLoc -> SrcLoc -> String -> P a -failLocMsgP loc1 loc2 str = P $ \s -> PFailed loc1 loc2 (text str) +failLocMsgP loc1 loc2 str = P $ \s -> PFailed (mkSrcSpan loc1 loc2) (text str) + +failSpanMsgP :: SrcSpan -> String -> P a +failSpanMsgP span msg = P $ \s -> PFailed span (text msg) extension :: (Int -> Bool) -> P Bool extension p = P $ \s -> POk s (p $! extsBitmap s) @@ -1137,38 +1195,85 @@ getExts = P $ \s -> POk s (extsBitmap s) setSrcLoc :: SrcLoc -> P () setSrcLoc new_loc = P $ \s -> POk s{loc=new_loc} () --- tmp, for supporting stuff in RdrHsSyn. The scope better not include --- any calls to the lexer, because it assumes things about the SrcLoc. -setSrcLocFor :: SrcLoc -> P a -> P a -setSrcLocFor new_loc scope = P $ \s@PState{ loc = old_loc } -> - case unP scope s{loc=new_loc} of - PFailed l1 l2 msg -> PFailed l1 l2 msg - POk _ r -> POk s r - getSrcLoc :: P SrcLoc getSrcLoc = P $ \s@(PState{ loc=loc }) -> POk s loc -setLastToken :: SrcLoc -> Int -> P () +setLastToken :: SrcSpan -> Int -> P () setLastToken loc len = P $ \s -> POk s{ last_loc=loc, last_len=len } () -type AlexInput = (SrcLoc,StringBuffer) +data AlexInput = AI SrcLoc {-#UNPACK#-}!Int StringBuffer alexInputPrevChar :: AlexInput -> Char -alexInputPrevChar (_,s) = prevChar s '\n' +alexInputPrevChar (AI _ _ buf) = prevChar buf '\n' alexGetChar :: AlexInput -> Maybe (Char,AlexInput) -alexGetChar (loc,s) +alexGetChar (AI loc ofs s) + | atEnd s = Nothing + | otherwise = adj_c `seq` loc' `seq` ofs' `seq` s' `seq` + Just (adj_c, (AI loc' ofs' s')) + where (c,s') = nextChar s + loc' = advanceSrcLoc loc c + ofs' = advanceOffs c ofs + + non_graphic = '\x0' + upper = '\x1' + lower = '\x2' + digit = '\x3' + symbol = '\x4' + space = '\x5' + other_graphic = '\x6' + + adj_c + | c <= '\x06' = non_graphic + | c <= '\xff' = c + | otherwise = + case generalCategory c of + UppercaseLetter -> upper + LowercaseLetter -> lower + TitlecaseLetter -> upper + ModifierLetter -> other_graphic + OtherLetter -> other_graphic + NonSpacingMark -> other_graphic + SpacingCombiningMark -> other_graphic + EnclosingMark -> other_graphic + DecimalNumber -> digit + LetterNumber -> other_graphic + OtherNumber -> other_graphic + ConnectorPunctuation -> other_graphic + DashPunctuation -> other_graphic + OpenPunctuation -> other_graphic + ClosePunctuation -> other_graphic + InitialQuote -> other_graphic + FinalQuote -> other_graphic + OtherPunctuation -> other_graphic + MathSymbol -> symbol + CurrencySymbol -> symbol + ModifierSymbol -> symbol + OtherSymbol -> symbol + Space -> space + _other -> non_graphic + +-- This version does not squash unicode characters, it is used when +-- lexing strings. 
+alexGetChar' :: AlexInput -> Maybe (Char,AlexInput) +alexGetChar' (AI loc ofs s) | atEnd s = Nothing - | otherwise = c `seq` loc' `seq` s' `seq` Just (c, (loc', s')) - where c = currentChar s - loc' = advanceSrcLoc loc c - s' = stepOn s + | otherwise = c `seq` loc' `seq` ofs' `seq` s' `seq` + Just (c, (AI loc' ofs' s')) + where (c,s') = nextChar s + loc' = advanceSrcLoc loc c + ofs' = advanceOffs c ofs + +advanceOffs :: Char -> Int -> Int +advanceOffs '\n' offs = 0 +advanceOffs '\t' offs = (offs `quot` 8 + 1) * 8 +advanceOffs _ offs = offs + 1 getInput :: P AlexInput -getInput = P $ \s@PState{ loc=l, buffer=b } -> POk s (l,b) +getInput = P $ \s@PState{ loc=l, last_offs=o, buffer=b } -> POk s (AI l o b) setInput :: AlexInput -> P () -setInput (l,b) = P $ \s -> POk s{ loc=l, buffer=b } () +setInput (AI l o b) = P $ \s -> POk s{ loc=l, last_offs=o, buffer=b } () pushLexState :: Int -> P () pushLexState ls = P $ \s@PState{ lex_state=l } -> POk s{lex_state=ls:l} () @@ -1187,19 +1292,38 @@ glaExtsBit, ffiBit, parrBit :: Int glaExtsBit = 0 ffiBit = 1 parrBit = 2 -withBit = 3 arrowsBit = 4 thBit = 5 ipBit = 6 +tvBit = 7 -- Scoped type variables enables 'forall' keyword +bangPatBit = 8 -- Tells the parser to understand bang-patterns + -- (doesn't affect the lexer) glaExtsEnabled, ffiEnabled, parrEnabled :: Int -> Bool glaExtsEnabled flags = testBit flags glaExtsBit ffiEnabled flags = testBit flags ffiBit -withEnabled flags = testBit flags withBit parrEnabled flags = testBit flags parrBit arrowsEnabled flags = testBit flags arrowsBit thEnabled flags = testBit flags thBit ipEnabled flags = testBit flags ipBit +tvEnabled flags = testBit flags tvBit +bangPatEnabled flags = testBit flags bangPatBit + +-- PState for parsing options pragmas +-- +pragState :: StringBuffer -> SrcLoc -> PState +pragState buf loc = + PState { + buffer = buf, + last_loc = mkSrcSpan loc loc, + last_offs = 0, + last_len = 0, + loc = loc, + extsBitmap = 0, + context = [], + lex_state = [bol, option_prags, 0] + } + -- create a parse state -- @@ -1207,7 +1331,8 @@ mkPState :: StringBuffer -> SrcLoc -> DynFlags -> PState mkPState buf loc flags = PState { buffer = buf, - last_loc = loc, + last_loc = mkSrcSpan loc loc, + last_offs = 0, last_len = 0, loc = loc, extsBitmap = fromIntegral bitmap, @@ -1218,11 +1343,12 @@ mkPState buf loc flags = where bitmap = glaExtsBit `setBitIf` dopt Opt_GlasgowExts flags .|. ffiBit `setBitIf` dopt Opt_FFI flags - .|. withBit `setBitIf` dopt Opt_With flags .|. parrBit `setBitIf` dopt Opt_PArr flags .|. arrowsBit `setBitIf` dopt Opt_Arrows flags .|. thBit `setBitIf` dopt Opt_TH flags .|. ipBit `setBitIf` dopt Opt_ImplicitParams flags + .|. tvBit `setBitIf` dopt Opt_ScopedTypeVariables flags + .|. bangPatBit `setBitIf` dopt Opt_BangPatterns flags -- setBitIf :: Int -> Bool -> Int b `setBitIf` cond | cond = bit b @@ -1239,19 +1365,19 @@ popContext = P $ \ s@(PState{ buffer = buf, context = ctx, loc = loc, last_len = len, last_loc = last_loc }) -> case ctx of (_:tl) -> POk s{ context = tl } () - [] -> PFailed last_loc loc (srcParseErr buf len) + [] -> PFailed last_loc (srcParseErr buf len) -- Push a new layout context at the indentation of the last token read. -- This is only used at the outer level of a module when the 'module' -- keyword is missing. 
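
-- An aside on the extension bitmap above: each dynamic flag sets one bit in
-- extsBitmap, and predicates such as glaExtsEnabled or bangPatEnabled simply
-- test that bit.  A self-contained sketch using Data.Bits (primed names are
-- illustrative copies of the constants above):

import Data.Bits (bit, testBit, (.|.))

glaExtsBit', thBit', bangPatBit' :: Int
glaExtsBit' = 0
thBit'      = 5
bangPatBit' = 8

setBitIf' :: Int -> Bool -> Int
b `setBitIf'` cond | cond      = bit b
                   | otherwise = 0

mkBitmap :: Bool -> Bool -> Bool -> Int
mkBitmap glaExts th bangPats =
      glaExtsBit' `setBitIf'` glaExts
  .|. thBit'      `setBitIf'` th
  .|. bangPatBit' `setBitIf'` bangPats

-- testBit (mkBitmap True False True) bangPatBit' == True
-- testBit (mkBitmap True False True) thBit'      == False
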
pushCurrentContext :: P () -pushCurrentContext = P $ \ s@PState{ last_loc=loc, context=ctx } -> - POk s{ context = Layout (srcLocCol loc) : ctx} () +pushCurrentContext = P $ \ s@PState{ last_offs=offs, last_len=len, context=ctx } -> + POk s{context = Layout (offs-len) : ctx} () -getOffside :: SrcLoc -> P Ordering -getOffside loc = P $ \s@PState{context=stk} -> +getOffside :: P Ordering +getOffside = P $ \s@PState{last_offs=offs, context=stk} -> let ord = case stk of - (Layout n:_) -> compare (srcLocCol loc) n + (Layout n:_) -> compare offs n _ -> GT in POk s ord @@ -1268,49 +1394,64 @@ srcParseErr buf len else hcat [ptext SLIT("parse error on input "), char '`', text token, char '\''] ] - where token = lexemeToString (stepOnBy (-len) buf) len + where token = lexemeToString (offsetBytes (-len) buf) len -- Report a parse failure, giving the span of the previous token as -- the location of the error. This is the entry point for errors -- detected during parsing. srcParseFail :: P a srcParseFail = P $ \PState{ buffer = buf, last_len = len, - last_loc = last_loc, loc = loc } -> - PFailed last_loc loc (srcParseErr buf len) + last_loc = last_loc } -> + PFailed last_loc (srcParseErr buf len) -- A lexical error is reported at a particular position in the source file, --- not over a token range. TODO: this is slightly wrong, because we record --- the error at the character position following the one which caused the --- error. We should somehow back up by one character. +-- not over a token range. lexError :: String -> P a lexError str = do loc <- getSrcLoc - failLocMsgP loc loc str + i@(AI end _ buf) <- getInput + reportLexError loc end buf str -- ----------------------------------------------------------------------------- -- This is the top-level function: called from the parser each time a -- new token is to be read from the input. -lexer :: (Token -> P a) -> P a +lexer :: (Located Token -> P a) -> P a lexer cont = do - tok@(T _ _ tok__) <- lexToken + tok@(L _ tok__) <- lexToken --trace ("token: " ++ show tok__) $ do cont tok -lexToken :: P Token +lexToken :: P (Located Token) lexToken = do - inp@(loc1,buf) <- getInput + inp@(AI loc1 _ buf) <- getInput sc <- getLexState exts <- getExts case alexScanUser exts inp sc of - AlexEOF -> do setLastToken loc1 0 - return (T loc1 loc1 ITeof) - AlexError (loc2,_) -> do failLocMsgP loc1 loc2 "lexical error" + AlexEOF -> do let span = mkSrcSpan loc1 loc1 + setLastToken span 0 + return (L span ITeof) + AlexError (AI loc2 _ buf) -> do + reportLexError loc1 loc2 buf "lexical error" AlexSkip inp2 _ -> do setInput inp2 lexToken - AlexToken inp2@(end,buf2) len t -> do + AlexToken inp2@(AI end _ buf2) len t -> do setInput inp2 - setLastToken loc1 len - t loc1 end buf len + let span = mkSrcSpan loc1 end + let bytes = byteDiff buf buf2 + span `seq` setLastToken span bytes + t span buf bytes + +-- ToDo: Alex reports the buffer at the start of the erroneous lexeme, +-- but it would be more informative to report the location where the +-- error was actually discovered, especially if this is a decoding +-- error. +reportLexError loc1 loc2 buf str = + let + c = fst (nextChar buf) + in + if c == '\0' -- decoding errors are mapped to '\0', see utf8DecodeChar# + then failLocMsgP loc2 loc2 "UTF-8 decoding error" + else failLocMsgP loc1 loc2 (str ++ " at character " ++ show c) }
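
As a closing illustration of the layout machinery (do_bol, getOffside and the Layout contexts above), here is a standalone sketch of the offside decision; the types are simplified stand-ins, not the definitions in this file:

data Ctx = NoLayoutCtx | LayoutCtx Int           -- stand-in for LayoutContext

data LayoutTok = VCurlyClose | VSemi | NoTok     -- ITvccurly / ITsemi / keep lexing
  deriving Show

-- Column of the first token on a line versus the innermost layout context:
-- exactly the three-way split made by do_bol.
offside :: Int -> [Ctx] -> LayoutTok
offside col (LayoutCtx n : _) = case compare col n of
  LT -> VCurlyClose    -- token is left of the context: close the block
  EQ -> VSemi          -- same column: next item in the block
  GT -> NoTok          -- further right: continuation of the previous item
offside _ _ = NoTok    -- explicit braces or empty stack: no layout token

-- offside 1 [LayoutCtx 5] == VCurlyClose
-- offside 5 [LayoutCtx 5] == VSemi
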