-- - M... should be 3 tokens, not 1.
-- - pragma-end should be only valid in a pragma
+-- qualified operator NOTES.
+--
+-- - If M.(+) is a single lexeme, then..
+-- - Probably (+) should be a single lexeme too, for consistency.
+-- Otherwise ( + ) would be a prefix operator, but M.( + ) would not be.
+-- - But we have to rule out reserved operators, otherwise (..) becomes
+-- a different lexeme.
+-- - Should we therefore also rule out reserved operators in the qualified
+-- form? This is quite difficult to achieve. We don't do it for
+-- qualified varids.
+
{
{-# OPTIONS -w #-}
-- The above warning suppression flag is a temporary kludge.
-- Note that Alex itself generates code with some unused bindings and
-- without type signatures, so removing the flag might not be possible.
+{-# OPTIONS_GHC -funbox-strict-fields #-}
+
module Lexer (
Token(..), lexer, pragState, mkPState, PState(..),
P(..), ParseResult(..), getSrcLoc,
popContext, pushCurrentContext, setLastToken, setSrcLoc,
getLexState, popLexState, pushLexState,
extension, standaloneDerivingEnabled, bangPatEnabled,
- addWarning
+ addWarning,
+ lexTokenStream
) where
import Bag
import Control.Monad
import Data.Bits
-import Data.Char ( chr, ord, isSpace )
+import Data.Char
import Data.Ratio
import Debug.Trace
-
-#if __GLASGOW_HASKELL__ >= 605
-import Data.Char ( GeneralCategory(..), generalCategory, isPrint, isUpper )
-#else
-import Compat.Unicode ( GeneralCategory(..), generalCategory, isPrint, isUpper )
-#endif
}
$unispace = \x05 -- Trick Alex into handling Unicode. See alexGetChar.
-$whitechar = [\ \n\r\f\v\xa0 $unispace]
+$whitechar = [\ \n\r\f\v $unispace]
$white_no_nl = $whitechar # \n
$tab = \t
$digit = [$ascdigit $unidigit]
$special = [\(\)\,\;\[\]\`\{\}]
-$ascsymbol = [\!\#\$\%\&\*\+\.\/\<\=\>\?\@\\\^\|\-\~ \xa1-\xbf \xd7 \xf7]
+$ascsymbol = [\!\#\$\%\&\*\+\.\/\<\=\>\?\@\\\^\|\-\~]
$unisymbol = \x04 -- Trick Alex into handling Unicode. See alexGetChar.
$symbol = [$ascsymbol $unisymbol] # [$special \_\:\"\']
$unilarge = \x01 -- Trick Alex into handling Unicode. See alexGetChar.
-$asclarge = [A-Z \xc0-\xd6 \xd8-\xde]
+$asclarge = [A-Z]
$large = [$asclarge $unilarge]
$unismall = \x02 -- Trick Alex into handling Unicode. See alexGetChar.
-$ascsmall = [a-z \xdf-\xf6 \xf8-\xff]
+$ascsmall = [a-z]
$small = [$ascsmall $unismall \_]
$unigraphic = \x06 -- Trick Alex into handling Unicode. See alexGetChar.
-- space followed by a Haddock comment symbol (docsym) (in which case we'd
-- have a Haddock comment). The rules then munch the rest of the line.
-"-- " ~[$docsym \#] .* ;
-"--" [^$symbol : \ ] .* ;
+"-- " ~[$docsym \#] .* { lineCommentToken }
+"--" [^$symbol : \ ] .* { lineCommentToken }
-- Next, match Haddock comments if no -haddock flag
-"-- " [$docsym \#] .* / { ifExtension (not . haddockEnabled) } ;
+"-- " [$docsym \#] .* / { ifExtension (not . haddockEnabled) } { lineCommentToken }
-- Now, when we've matched comments that begin with 2 dashes and continue
-- with a different character, we need to match comments that begin with three
-- make sure that the first non-dash character isn't a symbol, and munch the
-- rest of the line.
-"---"\-* [^$symbol :] .* ;
+"---"\-* [^$symbol :] .* { lineCommentToken }
-- Since the previous rules all match dashes followed by at least one
-- character, we also need to match a whole line filled with just dashes.
-"--"\-* / { atEOL } ;
+"--"\-* / { atEOL } { lineCommentToken }
-- We need this rule since none of the other single line comment rules
-- actually match this case.
-"-- " / { atEOL } ;
+"-- " / { atEOL } { lineCommentToken }
-- 'bol' state: beginning of a line. Slurp up all the whitespace (including
-- blank lines) until we find a non-whitespace character, then do layout
-- NOTE: accept -} at the end of a LINE pragma, for compatibility
-- with older versions of GHC which generated these.
--- We only want RULES pragmas to be picked up when explicit forall
--- syntax is enabled is on, because the contents of the pragma always
--- uses it. If it's not on then we're sure to get a parse error.
--- (ToDo: we should really emit a warning when ignoring pragmas)
--- XXX Now that we can enable this without the -fglasgow-exts hammer,
--- is it better just to let the parse error happen?
-<0>
- "{-#" $whitechar* (RULES|rules) / { ifExtension explicitForallEnabled } { token ITrules_prag }
-
<0,option_prags> {
+ "{-#" $whitechar* (RULES|rules) { rulePrag }
"{-#" $whitechar* (INLINE|inline) { token (ITinline_prag True) }
"{-#" $whitechar* (NO(T?)INLINE|no(t?)inline)
{ token (ITinline_prag False) }
$whitechar* (NO(T?)INLINE|no(t?)inline)
{ token (ITspec_inline_prag False) }
"{-#" $whitechar* (SOURCE|source) { token ITsource_prag }
+ "{-#" $whitechar* (WARNING|warning)
+ { token ITwarning_prag }
"{-#" $whitechar* (DEPRECATED|deprecated)
{ token ITdeprecated_prag }
"{-#" $whitechar* (SCC|scc) { token ITscc_prag }
{ token ITgenerated_prag }
"{-#" $whitechar* (CORE|core) { token ITcore_prag }
"{-#" $whitechar* (UNPACK|unpack) { token ITunpack_prag }
+ "{-#" $whitechar* (ANN|ann) { token ITann_prag }
- "{-#" { nested_comment lexToken }
+ -- We ignore all these pragmas, but don't generate a warning for them
+ -- CFILES is a hugs-only thing.
+ "{-#" $whitechar* (OPTIONS_HUGS|options_hugs|OPTIONS_NHC98|options_nhc98|OPTIONS_JHC|options_jhc|CFILES|cfiles)
+ { nested_comment lexToken }
-- ToDo: should only be valid inside a pragma:
- "#-}" { token ITclose_prag}
+ "#-}" { endPrag }
}
<option_prags> {
}
<0> {
- "-- #" .* ;
+ -- In the "0" mode we ignore these pragmas
+ "{-#" $whitechar* (OPTIONS|options|OPTIONS_GHC|options_ghc|OPTIONS_HADDOCK|options_haddock|LANGUAGE|language|INCLUDE|include)
+ { nested_comment lexToken }
+}
+
+<0> {
+ "-- #" .* { lineCommentToken }
}
<0,option_prags> {
- -- This is to catch things like {-# OPTIONS OPTIONS_HUGS ...
- "{-#" $whitechar* $idchar+ { nested_comment lexToken }
+ "{-#" { warnThen Opt_WarnUnrecognisedPragmas (text "Unrecognised pragma")
+ (nested_comment lexToken) }
}
-- '0' state: ordinary lexemes
@conid "#"+ / { ifExtension magicHashEnabled } { idtoken conid }
}
--- ToDo: M.(,,,)
-
+-- ToDo: - move `var` and (sym) into lexical syntax?
+-- - remove backquote from $special?
<0> {
- @qual @varsym { idtoken qvarsym }
- @qual @consym { idtoken qconsym }
- @varsym { varsym }
- @consym { consym }
+ @qual @varsym / { ifExtension oldQualOps } { idtoken qvarsym }
+ @qual @consym / { ifExtension oldQualOps } { idtoken qconsym }
+ @qual \( @varsym \) / { ifExtension newQualOps } { idtoken prefixqvarsym }
+ @qual \( @consym \) / { ifExtension newQualOps } { idtoken prefixqconsym }
+ @varsym { varsym }
+ @consym { consym }
}
-- For the normal boxed literals we need to be careful
-- when trying to be close to Haskell98
<0> {
-- Normal integral literals (:: Num a => a, from Integer)
- @decimal { tok_num positive 0 0 decimal }
- 0[oO] @octal { tok_num positive 2 2 octal }
- 0[xX] @hexadecimal { tok_num positive 2 2 hexadecimal }
+ @decimal { tok_num positive 0 0 decimal }
+ 0[oO] @octal { tok_num positive 2 2 octal }
+ 0[xX] @hexadecimal { tok_num positive 2 2 hexadecimal }
-- Normal rational literals (:: Fractional a => a, from Rational)
- @floating_point { strtoken tok_float }
+ @floating_point { strtoken tok_float }
}
<0> {
- -- Unboxed ints (:: Int#)
+ -- Unboxed ints (:: Int#) and words (:: Word#)
-- It's simpler (and faster?) to give separate cases to the negatives,
-- especially considering octal/hexadecimal prefixes.
- @decimal \# / { ifExtension magicHashEnabled } { tok_primint positive 0 1 decimal }
- 0[oO] @octal \# / { ifExtension magicHashEnabled } { tok_primint positive 2 3 octal }
- 0[xX] @hexadecimal \# / { ifExtension magicHashEnabled } { tok_primint positive 2 3 hexadecimal }
- @negative @decimal \# / { ifExtension magicHashEnabled } { tok_primint negative 1 2 decimal }
- @negative 0[oO] @octal \# / { ifExtension magicHashEnabled } { tok_primint negative 3 4 octal }
+ @decimal \# / { ifExtension magicHashEnabled } { tok_primint positive 0 1 decimal }
+ 0[oO] @octal \# / { ifExtension magicHashEnabled } { tok_primint positive 2 3 octal }
+ 0[xX] @hexadecimal \# / { ifExtension magicHashEnabled } { tok_primint positive 2 3 hexadecimal }
+ @negative @decimal \# / { ifExtension magicHashEnabled } { tok_primint negative 1 2 decimal }
+ @negative 0[oO] @octal \# / { ifExtension magicHashEnabled } { tok_primint negative 3 4 octal }
@negative 0[xX] @hexadecimal \# / { ifExtension magicHashEnabled } { tok_primint negative 3 4 hexadecimal }
+ @decimal \# \# / { ifExtension magicHashEnabled } { tok_primword 0 2 decimal }
+ 0[oO] @octal \# \# / { ifExtension magicHashEnabled } { tok_primword 2 4 octal }
+ 0[xX] @hexadecimal \# \# / { ifExtension magicHashEnabled } { tok_primword 2 4 hexadecimal }
+
-- Unboxed floats and doubles (:: Float#, :: Double#)
-- prim_{float,double} work with signed literals
@signed @floating_point \# / { ifExtension magicHashEnabled } { init_strtoken 1 tok_primfloat }
| ITspec_inline_prag Bool -- SPECIALISE INLINE (or NOINLINE)
| ITsource_prag
| ITrules_prag
+ | ITwarning_prag
| ITdeprecated_prag
| ITline_prag
| ITscc_prag
| ITgenerated_prag
| ITcore_prag -- hdaume: core annotations
| ITunpack_prag
+ | ITann_prag
| ITclose_prag
| IToptions_prag String
| ITinclude_prag String
| ITvocurly
| ITvccurly
| ITobrack
- | ITopabrack -- [:, for parallel arrays with -fparr
- | ITcpabrack -- :], for parallel arrays with -fparr
+ | ITopabrack -- [:, for parallel arrays with -XParr
+ | ITcpabrack -- :], for parallel arrays with -XParr
| ITcbrack
| IToparen
| ITcparen
| ITqconid (FastString,FastString)
| ITqvarsym (FastString,FastString)
| ITqconsym (FastString,FastString)
+ | ITprefixqvarsym (FastString,FastString)
+ | ITprefixqconsym (FastString,FastString)
| ITdupipvarid FastString -- GHC extension: implicit param: ?x
| ITprimchar Char
| ITprimstring FastString
| ITprimint Integer
+ | ITprimword Integer
| ITprimfloat Rational
| ITprimdouble Rational
| ITdocSection Int String -- a section heading
| ITdocOptions String -- doc options (prune, ignore-exports, etc)
| ITdocOptionsOld String -- doc options declared "-- # ..."-style
+ | ITlineComment String -- comment starting by "--"
+ | ITblockComment String -- comment in {- -}
#ifdef DEBUG
deriving Show -- debugging
( "where", ITwhere, 0 ),
( "_scc_", ITscc, 0 ), -- ToDo: remove
- ( "forall", ITforall, bit explicitForallBit),
+ ( "forall", ITforall, bit explicitForallBit .|. bit inRulePragBit),
( "mdo", ITmdo, bit recursiveDoBit),
( "family", ITfamily, bit tyFamBit),
( "group", ITgroup, bit transformComprehensionsBit),
-- For data T (a::*) = MkT
,("*", ITstar, \i -> kindSigsEnabled i || tyFamEnabled i)
-- For 'forall a . t'
- ,(".", ITdot, explicitForallEnabled)
+ ,(".", ITdot, \i -> explicitForallEnabled i || inRulePrag i)
,("-<", ITlarrowtail, arrowsEnabled)
,(">-", ITrarrowtail, arrowsEnabled)
,("-<<", ITLarrowtail, arrowsEnabled)
,(">>-", ITRarrowtail, arrowsEnabled)
-#if __GLASGOW_HASKELL__ >= 605
,("∷", ITdcolon, unicodeSyntaxEnabled)
,("⇒", ITdarrow, unicodeSyntaxEnabled)
,("∀", ITforall, \i -> unicodeSyntaxEnabled i &&
-- ToDo: ideally, → and ∷ should be "specials", so that they cannot
-- form part of a large operator. This would let us have a better
-- syntax for kinds: ɑ∷*→* would be a legal kind signature. (maybe).
-#endif
]
-- -----------------------------------------------------------------------------
| otherwise -> input
Nothing -> input
+lineCommentToken :: Action
+lineCommentToken span buf len = do
+ b <- extension rawTokenStreamEnabled
+ if b then strtoken ITlineComment span buf len else lexToken
+
{-
nested comments require traversing by hand, they can't be parsed
using regular expressions.
nested_comment :: P (Located Token) -> Action
nested_comment cont span _str _len = do
input <- getInput
- go (1::Int) input
+ go "" (1::Int) input
where
- go 0 input = do setInput input; cont
- go n input = case alexGetChar input of
+ go commentAcc 0 input = do setInput input
+ b <- extension rawTokenStreamEnabled
+ if b
+ then docCommentEnd input commentAcc ITblockComment _str span
+ else cont
+ go commentAcc n input = case alexGetChar input of
Nothing -> errBrace input span
Just ('-',input) -> case alexGetChar input of
Nothing -> errBrace input span
- Just ('\125',input) -> go (n-1) input
- Just (_,_) -> go n input
+ Just ('\125',input) -> go commentAcc (n-1) input
+ Just (_,_) -> go ('-':commentAcc) n input
Just ('\123',input) -> case alexGetChar input of
Nothing -> errBrace input span
- Just ('-',input) -> go (n+1) input
- Just (_,_) -> go n input
- Just (_,input) -> go n input
+ Just ('-',input) -> go ('-':'\123':commentAcc) (n+1) input
+ Just (_,_) -> go ('\123':commentAcc) n input
+ Just (c,input) -> go (c:commentAcc) n input
nested_doc_comment :: Action
nested_doc_comment span buf _len = withLexedDocType (go "")
Just (_, _) -> lexDocComment input (ITdocSection n) True
Nothing -> do setInput input; lexToken -- eof reached, lex it normally
+-- RULES pragmas turn on the forall and '.' keywords, and we turn them
+-- off again at the end of the pragma.
+rulePrag :: Action
+rulePrag span buf len = do
+ setExts (.|. bit inRulePragBit)
+ return (L span ITrules_prag)
+
+endPrag :: Action
+endPrag span buf len = do
+ setExts (.&. complement (bit inRulePragBit))
+ return (L span ITclose_prag)
+
-- docCommentEnd
-------------------------------------------------------------------------------
-- This function is quite tricky. We can't just return a new token, we also
popContext
return (L span ITccurly)
-qvarid buf len = ITqvarid $! splitQualName buf len
-qconid buf len = ITqconid $! splitQualName buf len
+qvarid buf len = ITqvarid $! splitQualName buf len False
+qconid buf len = ITqconid $! splitQualName buf len False
-splitQualName :: StringBuffer -> Int -> (FastString,FastString)
+splitQualName :: StringBuffer -> Int -> Bool -> (FastString,FastString)
-- takes a StringBuffer and a length, and returns the module name
-- and identifier parts of a qualified name. Splits at the *last* dot,
-- because of hierarchical module names.
-splitQualName orig_buf len = split orig_buf orig_buf
+splitQualName orig_buf len parens = split orig_buf orig_buf
where
split buf dot_buf
| orig_buf `byteDiff` buf >= len = done dot_buf
done dot_buf =
(lexemeToFastString orig_buf (qual_size - 1),
- lexemeToFastString dot_buf (len - qual_size))
+ if parens -- Prelude.(+)
+ then lexemeToFastString (stepOn dot_buf) (len - qual_size - 2)
+ else lexemeToFastString dot_buf (len - qual_size))
where
qual_size = orig_buf `byteDiff` dot_buf
conid buf len = ITconid fs
where fs = lexemeToFastString buf len
-qvarsym buf len = ITqvarsym $! splitQualName buf len
-qconsym buf len = ITqconsym $! splitQualName buf len
+qvarsym buf len = ITqvarsym $! splitQualName buf len False
+qconsym buf len = ITqconsym $! splitQualName buf len False
+prefixqvarsym buf len = ITprefixqvarsym $! splitQualName buf len True
+prefixqconsym buf len = ITprefixqconsym $! splitQualName buf len True
varsym = sym ITvarsym
consym = sym ITconsym
-- some conveniences for use with tok_integral
tok_num = tok_integral ITinteger
tok_primint = tok_integral ITprimint
+tok_primword = tok_integral ITprimword positive
positive = id
negative = negate
decimal = (10,octDecDigit)
c | isAny c -> do setInput inp; return c
_other -> lit_error
-isAny c | c > '\xff' = isPrint c
+isAny c | c > '\x7f' = isPrint c
| otherwise = is_any c
lex_escape :: P Char
'x' -> readNum is_hexdigit 16 hexDigit
'o' -> readNum is_octdigit 8 octDecDigit
- x | is_digit x -> readNum2 is_digit 10 octDecDigit (octDecDigit x)
+ x | is_decdigit x -> readNum2 is_decdigit 10 octDecDigit (octDecDigit x)
c1 -> do
i <- getInput
addWarning option srcspan warning
lexToken
+warnThen :: DynFlag -> SDoc -> Action -> Action
+warnThen option warning action srcspan buf len = do
+ addWarning option srcspan warning
+ action srcspan buf len
+
-- -----------------------------------------------------------------------------
-- The Parse Monad
getExts :: P Int
getExts = P $ \s -> POk s (extsBitmap s)
+setExts :: (Int -> Int) -> P ()
+setExts f = P $ \s -> POk s{ extsBitmap = f (extsBitmap s) } ()
+
setSrcLoc :: SrcLoc -> P ()
setSrcLoc new_loc = P $ \s -> POk s{loc=new_loc} ()
adj_c
| c <= '\x06' = non_graphic
- | c <= '\xff' = c
+ | c <= '\x7f' = c
-- Alex doesn't handle Unicode, so when Unicode
-- character is encountered we output these values
-- with the actual character value hidden in the state.
LowercaseLetter -> lower
TitlecaseLetter -> upper
ModifierLetter -> other_graphic
- OtherLetter -> other_graphic
+ OtherLetter -> lower -- see #1103
NonSpacingMark -> other_graphic
SpacingCombiningMark -> other_graphic
EnclosingMark -> other_graphic
DecimalNumber -> digit
LetterNumber -> other_graphic
OtherNumber -> other_graphic
- ConnectorPunctuation -> other_graphic
- DashPunctuation -> other_graphic
+ ConnectorPunctuation -> symbol
+ DashPunctuation -> symbol
OpenPunctuation -> other_graphic
ClosePunctuation -> other_graphic
InitialQuote -> other_graphic
FinalQuote -> other_graphic
- OtherPunctuation -> other_graphic
+ OtherPunctuation -> symbol
MathSymbol -> symbol
CurrencySymbol -> symbol
ModifierSymbol -> symbol
getLexState = P $ \s@PState{ lex_state=ls:_ } -> POk s ls
-- for reasons of efficiency, flags indicating language extensions (eg,
--- -fglasgow-exts or -fparr) are represented by a bitmap stored in an unboxed
+-- -fglasgow-exts or -XParr) are represented by a bitmap stored in an unboxed
-- integer
genericsBit, ffiBit, parrBit :: Int
-- (doesn't affect the lexer)
tyFamBit = 9 -- indexed type families: 'family' keyword and kind sigs
haddockBit = 10 -- Lex and parse Haddock comments
-magicHashBit = 11 -- # in both functions and operators
+magicHashBit = 11 -- "#" in both functions and operators
kindSigsBit = 12 -- Kind signatures on type variables
recursiveDoBit = 13 -- mdo
unicodeSyntaxBit = 14 -- the forall symbol, arrow symbols, etc
standaloneDerivingBit = 16 -- standalone instance deriving declarations
transformComprehensionsBit = 17
qqBit = 18 -- enable quasiquoting
+inRulePragBit = 19
+rawTokenStreamBit = 20 -- producing a token stream with all comments included
+newQualOpsBit = 21 -- Haskell' qualified operator syntax, e.g. Prelude.(+)
genericsEnabled, ffiEnabled, parrEnabled :: Int -> Bool
always _ = True
standaloneDerivingEnabled flags = testBit flags standaloneDerivingBit
transformComprehensionsEnabled flags = testBit flags transformComprehensionsBit
qqEnabled flags = testBit flags qqBit
+inRulePrag flags = testBit flags inRulePragBit
+rawTokenStreamEnabled flags = testBit flags rawTokenStreamBit
+newQualOps flags = testBit flags newQualOpsBit
+oldQualOps flags = not (newQualOps flags)
-- PState for parsing options pragmas
--
-pragState :: StringBuffer -> SrcLoc -> PState
-pragState buf loc =
+pragState :: DynFlags -> StringBuffer -> SrcLoc -> PState
+pragState dynflags buf loc =
PState {
- buffer = buf,
+ buffer = buf,
messages = emptyMessages,
- -- XXX defaultDynFlags is not right, but we don't have a real
- -- dflags handy
- dflags = defaultDynFlags,
+ dflags = dynflags,
last_loc = mkSrcSpan loc loc,
last_offs = 0,
last_len = 0,
.|. qqBit `setBitIf` dopt Opt_QuasiQuotes flags
.|. ipBit `setBitIf` dopt Opt_ImplicitParams flags
.|. explicitForallBit `setBitIf` dopt Opt_ScopedTypeVariables flags
+ .|. explicitForallBit `setBitIf` dopt Opt_LiberalTypeSynonyms flags
.|. explicitForallBit `setBitIf` dopt Opt_PolymorphicComponents flags
.|. explicitForallBit `setBitIf` dopt Opt_ExistentialQuantification flags
.|. explicitForallBit `setBitIf` dopt Opt_Rank2Types flags
.|. unicodeSyntaxBit `setBitIf` dopt Opt_UnicodeSyntax flags
.|. unboxedTuplesBit `setBitIf` dopt Opt_UnboxedTuples flags
.|. standaloneDerivingBit `setBitIf` dopt Opt_StandaloneDeriving flags
- .|. transformComprehensionsBit `setBitIf` dopt Opt_TransformListComp flags
+ .|. transformComprehensionsBit `setBitIf` dopt Opt_TransformListComp flags
+ .|. rawTokenStreamBit `setBitIf` dopt Opt_KeepRawTokenStream flags
+ .|. newQualOpsBit `setBitIf` dopt Opt_NewQualifiedOperators flags
--
setBitIf :: Int -> Bool -> Int
b `setBitIf` cond | cond = bit b
if c == '\0' -- decoding errors are mapped to '\0', see utf8DecodeChar#
then failLocMsgP loc2 loc2 (str ++ " (UTF-8 decoding error)")
else failLocMsgP loc1 loc2 (str ++ " at character " ++ show c)
+
+lexTokenStream :: StringBuffer -> SrcLoc -> DynFlags -> ParseResult [Located Token]
+lexTokenStream buf loc dflags = unP go initState
+ where initState = mkPState buf loc (dopt_set (dopt_unset dflags Opt_Haddock) Opt_KeepRawTokenStream)
+ go = do
+ ltok <- lexer return
+ case ltok of
+ L _ ITeof -> return []
+ _ -> liftM (ltok:) go
}