*.o.cmd
*.depend*
log
+tags
autom4te.cache
config.log
/libraries/haskeline/
/libraries/haskell2010/
/libraries/haskell98/
+/libraries/hoopl/
/libraries/hpc/
/libraries/integer-gmp/
/libraries/integer-simple/
# -----------------------------------------------------------------------------
# specific generated files
+/bindist-list
+/bindistprep/
+/bindisttest/HelloWorld
+/bindisttest/a/
+/bindisttest/install\ dir/
+/bindisttest/output
/ch01.html
/ch02.html
/compiler/cmm/CmmLex.hs
/docs/man/ghc.1
/docs/users_guide/ug-book.xml
/docs/users_guide/ug-ent.xml
+/docs/users_guide/users_guide.xml
+/docs/users_guide/users_guide/
+/docs/users_guide/what_glasgow_exts_does.gen.xml
/driver/ghci/ghc-pkg-inplace
/driver/ghci/ghci-inplace
/driver/mangler/dist/ghc-asm
/libraries/bin-package-db/ghc.mk
/libraries/bootstrapping.conf
/libraries/prologue.txt
+/libraries/doc-index*.html
+/libraries/frames.html
+/libraries/haddock-util.js
+/libraries/hslogo-16.png
+/libraries/index-frames.html
+/libraries/index.html
+/libraries/minus.gif
+/libraries/ocean.css
+/libraries/plus.gif
+/libraries/synopsis.png
/libraries/stamp/
/libraries/time/
/libraries/*/dist-boot/
/rts/sm/Evac_thr.c
/rts/sm/Scav_thr.c
/stage3.package.conf
+/testsuite_summary.txt
/testlog
/utils/*/dist*/
/utils/ext-core/Driver
Quick Start for developers
- http://hackage.haskell.org/trac/ghc/wiki/Building/Hacking
-
+ http://hackage.haskell.org/trac/ghc/wiki/Building/Hacking
+
This section on the wiki will get you up and running with a
- serviceable build tree in no time:
-
+ serviceable build tree in no time.
+
+ Don't skip this! By default, GHC builds with all optimizations
+ and profiling; most hackers will want a quicker build, so creating
+ a mk/build.mk file and knowing how to rebuild only parts of GHC is
+ very important.
+
This is part of the "Building GHC" section of the wiki, which
has more detailed information on GHC's build system should you
need it.
which contains GHC itself and the "boot" libraries.
- 2. Check out the source code from darcs
- ---------------------------------------
+ 2. Check out the source code from git
+ -------------------------------------
- The recommended way to get a darcs checkout is to start off by
- downloading a snapshot with a name like:
+ First get the GHC git repository:
- ghc-HEAD-2009-09-09-ghc-corelibs-testsuite.tar.bz2
+ $ git clone http://darcs.haskell.org/ghc.git/
- from:
-
- http://darcs.haskell.org/
-
- and then untar it and bring it up-to-date with:
-
- $ cd ghc
- $ ./darcs-all get
-
-
- Alternatively you can use darcs to get the repos, but it will take a
- lot longer. First get the GHC darcs repository:
-
- $ darcs get http://darcs.haskell.org/ghc/
-
- Then run the darcs-all script in that repository
+ Then run the sync-all script in that repository
to get the other repositories:
$ cd ghc
- $ chmod +x darcs-all
- $ ./darcs-all get
+ $ ./sync-all get
This checks out the "boot" packages.
is itself written in Haskell. For instructions on how to port GHC to a
new platform, see the Building Guide.
-If you're building from darcs sources (as opposed to a source
+If you're building from git sources (as opposed to a source
distribution) then you also need to install Happy [4] and Alex [5].
For building library documentation, you'll need Haddock [6]. To build
$ make install
The "perl boot" step is only necessary if this is a tree checked out
-from darcs. For source distributions downloaded from GHC's web site,
+from git. For source distributions downloaded from GHC's web site,
this step has already been performed.
These steps give you the default build, which includes everything
optimised and built in various ways (eg. profiling libs are built).
-It can take a long time. To customise the build, see the file
-HACKING.
+It can take a long time. To customise the build, see the file HACKING.
AC_MSG_CHECKING([Setting up $2, $3, $4 and $5])
case $$1 in
i386-apple-darwin)
- # By default, gcc on OS X will generate SSE
- # instructions, which need things 16-byte aligned,
- # but we don't 16-byte align things. Thus drop
- # back to generic i686 compatibility. Trac #2983.
- $2="$$2 -march=i686 -m32"
- $3="$$3 -march=i686 -m32"
+ $2="$$2 -m32"
+ $3="$$3 -m32"
$4="$$4 -arch i386"
- $5="$$5 -march=i686 -m32"
+ $5="$$5 -m32"
;;
x86_64-apple-darwin)
$2="$$2 -m64"
;;
esac
- case $$1 in
- i386-apple-darwin|x86_64-apple-darwin)
- # We support back to OS X 10.5
- $2="$$2 -isysroot /Developer/SDKs/MacOSX10.5.sdk -mmacosx-version-min=10.5"
- $3="$$3 -isysroot /Developer/SDKs/MacOSX10.5.sdk -mmacosx-version-min=10.5"
- $4="$$4 -macosx_version_min 10.5"
- $5="$$5 -isysroot /Developer/SDKs/MacOSX10.5.sdk -mmacosx-version-min=10.5"
- ;;
- esac
-
# If gcc knows about the stack protector, turn it off.
# Otherwise the stack-smash handler gets triggered.
echo 'int main(void) {return 0;}' > conftest.c
])# FP_PROG_LD_X
+# FP_PROG_LD_BUILD_ID
+# -------------------
+
+# Sets the output variable LdHasBuildId to YES if ld supports
+# --build-id, or NO otherwise.
+AC_DEFUN([FP_PROG_LD_BUILD_ID],
+[
+AC_CACHE_CHECK([whether ld understands --build-id], [fp_cv_ld_build_id],
+[echo 'foo() {}' > conftest.c
+${CC-cc} -c conftest.c
+if ${LdCmd} -r --build-id=none -o conftest2.o conftest.o > /dev/null 2>&1; then
+ fp_cv_ld_build_id=yes
+else
+ fp_cv_ld_build_id=no
+fi
+rm -rf conftest*])
+if test "$fp_cv_ld_build_id" = yes; then
+ LdHasBuildId=YES
+else
+ LdHasBuildId=NO
+fi
+AC_SUBST([LdHasBuildId])
+])# FP_PROG_LD_BUILD_ID
+
+
# FP_PROG_LD_IS_GNU
# -----------------
# Sets the output variable LdIsGNULd to YES or NO, depending on whether it is
# Determine which extra flags we need to pass gcc when we invoke it
# to compile .hc code.
#
-# Some OSs (Mandrake Linux, in particular) configure GCC with
-# -momit-leaf-frame-pointer on by default. If this is the case, we
-# need to turn it off for mangling to work. The test is currently a
-# bit crude, using only the version number of gcc.
-#
# -fwrapv is needed for gcc to emit well-behaved code in the presence of
# integer wrap around. (Trac #952)
#
-# -fno-unit-at-a-time or -fno-toplevel-reoder is necessary to avoid gcc
-# reordering things in the module and confusing the manger and/or splitter.
-# (eg. Trac #1427)
-#
AC_DEFUN([FP_GCC_EXTRA_FLAGS],
[AC_REQUIRE([FP_HAVE_GCC])
AC_CACHE_CHECK([for extra options to pass gcc when compiling via C], [fp_cv_gcc_extra_opts],
FP_COMPARE_VERSIONS([$fp_cv_gcc_version], [-ge], [3.4],
[fp_cv_gcc_extra_opts="$fp_cv_gcc_extra_opts -fwrapv"],
[])
- case $TargetPlatform in
- i386-*|x86_64-*)
- FP_COMPARE_VERSIONS([$fp_cv_gcc_version], [-ge], [3.2],
- [fp_cv_gcc_extra_opts="$fp_cv_gcc_extra_opts -mno-omit-leaf-frame-pointer"],
- [])
- FP_COMPARE_VERSIONS([$fp_cv_gcc_version], [-ge], [3.4],
- [FP_COMPARE_VERSIONS([$fp_cv_gcc_version], [-ge], [4.2],
- [fp_cv_gcc_extra_opts="$fp_cv_gcc_extra_opts -fno-toplevel-reorder"],
- [fp_cv_gcc_extra_opts="$fp_cv_gcc_extra_opts -fno-unit-at-a-time"]
- )],
- [])
- ;;
- sparc-*-solaris2)
- FP_COMPARE_VERSIONS([$fp_cv_gcc_version], [-ge], [4.2],
- [fp_cv_gcc_extra_opts="$fp_cv_gcc_extra_opts -fno-toplevel-reorder"],
- [])
- ;;
- esac
])
AC_SUBST([GccExtraViaCOpts],$fp_cv_gcc_extra_opts)
])
hppa*)
$2="hppa"
;;
- i386)
+ i386|i486|i586|i686)
$2="i386"
;;
ia64)
# --------------------------------
# converts vendor from gnu to ghc naming, and assigns the result to $target_var
AC_DEFUN([GHC_CONVERT_VENDOR],[
-$2="$1"
+ case "$1" in
+ pc|gentoo) # like i686-pc-linux-gnu and i686-gentoo-freebsd8
+ $2="unknown"
+ ;;
+ *)
        # pass through by default
+ $2="$1"
+ ;;
+ esac
])
# GHC_CONVERT_OS(os, target_var)
freebsd|netbsd|openbsd|dragonfly|osf1|osf3|hpux|linuxaout|kfreebsdgnu|freebsd2|solaris2|cygwin32|mingw32|darwin|gnu|nextstep2|nextstep3|sunos4|ultrix|irix|aix|haiku)
$2="$1"
;;
+ freebsd8) # like i686-gentoo-freebsd8
+ $2="freebsd"
+ ;;
*)
echo "Unknown OS $1"
exit 1
.PHONY: test_bindist
test_bindist:
"$(RM)" $(RM_OPTS_REC) bindisttest/$(BIN_DIST_INST_SUBDIR)
- "$(RM)" $(RM_OPTS_REC) bindisttest/a/b/c/*
+ "$(RM)" $(RM_OPTS_REC) bindisttest/a
"$(RM)" $(RM_OPTS) bindisttest/HelloWorld
"$(RM)" $(RM_OPTS) bindisttest/HelloWorld.o
"$(RM)" $(RM_OPTS) bindisttest/HelloWorld.hi
# NB. tar has funny interpretation of filenames sometimes (thinking
# c:/foo is a remote file), so it's safer to bzip and then pipe into
# tar rather than using tar -xjf:
+ mkdir bindisttest/a
+ mkdir bindisttest/a/b
+ mkdir bindisttest/a/b/c
cd bindisttest/a/b/c/ && $(BZIP2_CMD) -cd ../../../../$(BIN_DIST_TEST_TAR_BZ2) | $(TAR_CMD) -xf -
$(SHELL) bindisttest/checkBinaries.sh $(ProjectVersion)
ifeq "$(Windows)" "YES"
use Cwd;
my %required_tag;
+my $validate;
$required_tag{"-"} = 1;
+$validate = 0;
while ($#ARGV ne -1) {
my $arg = shift @ARGV;
if ($arg =~ /^--required-tag=(.*)/) {
$required_tag{$1} = 1;
}
+ elsif ($arg =~ /^--validate$/) {
+ $validate = 1;
+ }
else {
die "Bad arg: $arg";
}
}
+{
+ local $/ = undef;
+ open FILE, "packages" or die "Couldn't open file: $!";
+ binmode FILE;
+ my $string = <FILE>;
+ close FILE;
+
+ if ($string =~ /\r/) {
+ print STDERR <<EOF;
+Found ^M in packages.
+Perhaps you need to run
+ git config --global core.autocrlf false
+and re-check out the tree?
+EOF
+ exit 1;
+ }
+}
+
# Create libraries/*/{ghc.mk,GNUmakefile}
system("/usr/bin/perl", "-w", "boot-pkgs") == 0
or die "Running boot-pkgs failed: $?";
# If $tag is not "-" then it is an optional repository, so its
# absence isn't an error.
if (defined($required_tag{$tag})) {
- # We would like to just check for an _darcs directory here,
- # but in an lndir tree we avoid making _darcs directories,
+ # We would like to just check for a .git directory here,
+ # but in an lndir tree we avoid making .git directories,
# so it doesn't exist. We therefore require that every repo
# has a LICENSE file instead.
if (! -f "$dir/LICENSE") {
print STDERR "Error: $dir/LICENSE doesn't exist.\n";
- die "Maybe you haven't done './darcs-all get'?";
+ die "Maybe you haven't done './sync-all get'?";
}
}
}
}
}
-# Alas, darcs doesn't handle file permissions, so fix a few of them.
-for my $file ("boot", "darcs-all", "validate") {
- if (-f $file) {
- chmod 0755, $file
- or die "Can't chmod 0755 $file: $!";
- }
+if ($validate eq 0 && ! -f "mk/build.mk") {
+ print <<EOF;
+
+WARNING: You don't have a mk/build.mk file.
+
+By default a standard GHC build will be done, which uses optimisation
+and builds the profiling libraries. This will take a long time, so may
+not be what you want if you are developing GHC or the libraries, rather
+than simply building it to use it.
+
+For information on creating a mk/build.mk file, please see:
+ http://hackage.haskell.org/trac/ghc/wiki/Building/Using#Buildconfiguration
+
+EOF
}
+
if (-d "libraries/$package/_darcs") {
print "Ignoring libraries/$package as it looks like a darcs checkout\n"
}
+ elsif (-d "libraries/$package/.git") {
+ print "Ignoring libraries/$package as it looks like a git checkout\n"
+ }
else {
if (! -d "libraries/stamp") {
mkdir "libraries/stamp";
or die "Failed to open $pkgs: $!";
while (<PKGS>) {
chomp;
+ s/\r//g;
if (/.+/) {
push @library_dirs, "$package/$_";
}
-- ** The SpecInfo type
SpecInfo(..),
+ emptySpecInfo,
isEmptySpecInfo, specInfoFreeVars,
specInfoRules, seqSpecInfo, setSpecInfoHead,
specInfo, setSpecInfo,
combineSrcSpans :: SrcSpan -> SrcSpan -> SrcSpan
combineSrcSpans (UnhelpfulSpan _) r = r -- this seems more useful
combineSrcSpans l (UnhelpfulSpan _) = l
-combineSrcSpans start end
- = case line1 `compare` line2 of
- EQ -> case col1 `compare` col2 of
- EQ -> SrcSpanPoint file line1 col1
- LT -> SrcSpanOneLine file line1 col1 col2
- GT -> SrcSpanOneLine file line1 col2 col1
- LT -> SrcSpanMultiLine file line1 col1 line2 col2
- GT -> SrcSpanMultiLine file line2 col2 line1 col1
+combineSrcSpans span1 span2
+ = if line_start == line_end
+ then if col_start == col_end
+ then SrcSpanPoint file line_start col_start
+ else SrcSpanOneLine file line_start col_start col_end
+ else SrcSpanMultiLine file line_start col_start line_end col_end
where
- line1 = srcSpanStartLine start
- col1 = srcSpanStartCol start
- line2 = srcSpanEndLine end
- col2 = srcSpanEndCol end
- file = srcSpanFile start
+ (line_start, col_start) = min (srcSpanStartLine span1, srcSpanStartCol span1)
+ (srcSpanStartLine span2, srcSpanStartCol span2)
+ (line_end, col_end) = max (srcSpanEndLine span1, srcSpanEndCol span1)
+ (srcSpanEndLine span2, srcSpanEndCol span2)
+ file = srcSpanFile span1
\end{code}
%************************************************************************
mkAsmTempLabel,
- mkModuleInitLabel,
- mkPlainModuleInitLabel,
- mkModuleInitTableLabel,
+ mkPlainModuleInitLabel,
mkSplitMarkerLabel,
mkDirty_MUT_VAR_Label,
mkRtsPrimOpLabel,
mkRtsSlowTickyCtrLabel,
- moduleRegdLabel,
- moduleRegTableLabel,
-
- mkSelectorInfoLabel,
+ mkSelectorInfoLabel,
mkSelectorEntryLabel,
mkCmmInfoLabel,
mkDeadStripPreventer,
mkHpcTicksLabel,
- mkHpcModuleNameLabel,
hasCAF,
infoLblToEntryLbl, entryLblToInfoLbl, cvtToClosureLbl, cvtToSRTLbl,
| StringLitLabel
{-# UNPACK #-} !Unique
- | ModuleInitLabel
- Module -- the module name
- String -- its "way"
- -- at some point we might want some kind of version number in
- -- the module init label, to guard against compiling modules in
- -- the wrong order. We can't use the interface file version however,
- -- because we don't always recompile modules which depend on a module
- -- whose version has changed.
-
- | PlainModuleInitLabel -- without the version & way info
+ | PlainModuleInitLabel -- without the version & way info
Module
- | ModuleInitTableLabel -- table of imported modules to init
- Module
-
- | ModuleRegdLabel
-
| CC_Label CostCentre
| CCS_Label CostCentreStack
-- | Per-module table of tick locations
| HpcTicksLabel Module
- -- | Per-module name of the module for Hpc
- | HpcModuleNameLabel
-
-- | Label of an StgLargeSRT
| LargeSRTLabel
{-# UNPACK #-} !Unique
-- Constructing Code Coverage Labels
mkHpcTicksLabel = HpcTicksLabel
-mkHpcModuleNameLabel = HpcModuleNameLabel
-- Constructing labels used for dynamic linking
mkAsmTempLabel :: Uniquable a => a -> CLabel
mkAsmTempLabel a = AsmTempLabel (getUnique a)
-mkModuleInitLabel :: Module -> String -> CLabel
-mkModuleInitLabel mod way = ModuleInitLabel mod way
-
mkPlainModuleInitLabel :: Module -> CLabel
mkPlainModuleInitLabel mod = PlainModuleInitLabel mod
-mkModuleInitTableLabel :: Module -> CLabel
-mkModuleInitTableLabel mod = ModuleInitTableLabel mod
-
-moduleRegdLabel = ModuleRegdLabel
-moduleRegTableLabel = ModuleInitTableLabel
-
-
-- -----------------------------------------------------------------------------
-- Converting between info labels and entry/ret labels.
cvtToClosureLbl (IdLabel n c InfoTable) = IdLabel n c Closure
cvtToClosureLbl (IdLabel n c Entry) = IdLabel n c Closure
cvtToClosureLbl (IdLabel n c ConEntry) = IdLabel n c Closure
+cvtToClosureLbl (IdLabel n c RednCounts) = IdLabel n c Closure
cvtToClosureLbl l@(IdLabel n c Closure) = l
cvtToClosureLbl l
= pprPanic "cvtToClosureLbl" (pprCLabel l)
needsCDecl (LargeBitmapLabel _) = False
needsCDecl (IdLabel _ _ _) = True
needsCDecl (CaseLabel _ _) = True
-needsCDecl (ModuleInitLabel _ _) = True
-needsCDecl (PlainModuleInitLabel _) = True
-needsCDecl (ModuleInitTableLabel _) = True
-needsCDecl ModuleRegdLabel = False
+needsCDecl (PlainModuleInitLabel _) = True
needsCDecl (StringLitLabel _) = False
needsCDecl (AsmTempLabel _) = False
needsCDecl (CC_Label _) = True
needsCDecl (CCS_Label _) = True
needsCDecl (HpcTicksLabel _) = True
-needsCDecl HpcModuleNameLabel = False
-- | Check whether a label is a local temporary for native code generation
externallyVisibleCLabel (CaseLabel _ _) = False
externallyVisibleCLabel (StringLitLabel _) = False
externallyVisibleCLabel (AsmTempLabel _) = False
-externallyVisibleCLabel (ModuleInitLabel _ _) = True
externallyVisibleCLabel (PlainModuleInitLabel _)= True
-externallyVisibleCLabel (ModuleInitTableLabel _)= False
-externallyVisibleCLabel ModuleRegdLabel = False
-externallyVisibleCLabel (RtsLabel _) = True
+externallyVisibleCLabel (RtsLabel _) = True
externallyVisibleCLabel (CmmLabel _ _ _) = True
externallyVisibleCLabel (ForeignLabel{}) = True
externallyVisibleCLabel (IdLabel name _ _) = isExternalName name
externallyVisibleCLabel (CCS_Label _) = True
externallyVisibleCLabel (DynamicLinkerLabel _ _) = False
externallyVisibleCLabel (HpcTicksLabel _) = True
-externallyVisibleCLabel HpcModuleNameLabel = False
-externallyVisibleCLabel (LargeBitmapLabel _) = False
+externallyVisibleCLabel (LargeBitmapLabel _) = False
externallyVisibleCLabel (LargeSRTLabel _) = False
-- -----------------------------------------------------------------------------
labelType (RtsLabel (RtsApFast _)) = CodeLabel
labelType (CaseLabel _ CaseReturnInfo) = DataLabel
labelType (CaseLabel _ _) = CodeLabel
-labelType (ModuleInitLabel _ _) = CodeLabel
labelType (PlainModuleInitLabel _) = CodeLabel
-labelType (ModuleInitTableLabel _) = DataLabel
labelType (LargeSRTLabel _) = DataLabel
labelType (LargeBitmapLabel _) = DataLabel
labelType (ForeignLabel _ _ _ IsFunction) = CodeLabel
CmmLabel pkg _ _ -> True
#endif
- ModuleInitLabel m _ -> not opt_Static && this_pkg /= (modulePackageId m)
PlainModuleInitLabel m -> not opt_Static && this_pkg /= (modulePackageId m)
- ModuleInitTableLabel m -> not opt_Static && this_pkg /= (modulePackageId m)
-
+
-- Note that DynamicLinkerLabels do NOT require dynamic linking themselves.
_ -> False
pprCLbl (RtsLabel (RtsSlowTickyCtr pat))
= ptext (sLit "SLOW_CALL_") <> text pat <> ptext (sLit "_ctr")
-pprCLbl ModuleRegdLabel
- = ptext (sLit "_module_registered")
-
pprCLbl (ForeignLabel str _ _ _)
= ftext str
pprCLbl (CC_Label cc) = ppr cc
pprCLbl (CCS_Label ccs) = ppr ccs
-pprCLbl (ModuleInitLabel mod way)
- = ptext (sLit "__stginit_") <> ppr mod
- <> char '_' <> text way
-
pprCLbl (PlainModuleInitLabel mod)
= ptext (sLit "__stginit_") <> ppr mod
-pprCLbl (ModuleInitTableLabel mod)
- = ptext (sLit "__stginittable_") <> ppr mod
-
pprCLbl (HpcTicksLabel mod)
= ptext (sLit "_hpc_tickboxes_") <> ppr mod <> ptext (sLit "_hpc")
-pprCLbl HpcModuleNameLabel
- = ptext (sLit "_hpc_module_name_str")
-
ppIdFlavor :: Name -> IdLabelInfo -> SDoc
ppIdFlavor n x = pp_cSEP <> closureSuffix' n <>
(case x of
#endif
module Cmm
- ( CmmGraph(..), CmmBlock
+ ( CmmGraph, GenCmmGraph(..), CmmBlock
, CmmStackInfo(..), CmmTopInfo(..), Cmm, CmmTop
, CmmReplGraph, CmmFwdRewrite, CmmBwdRewrite
+ , modifyGraph
, lastNode, replaceLastNode, insertBetween
, ofBlockMap, toBlockMap, insertBlock
, ofBlockList, toBlockList, bodyToBlockList
-------------------------------------------------
-- CmmBlock, CmmGraph and Cmm
-data CmmGraph = CmmGraph { g_entry :: BlockId, g_graph :: Graph CmmNode C C }
+type CmmGraph = GenCmmGraph CmmNode
+data GenCmmGraph n = CmmGraph { g_entry :: BlockId, g_graph :: Graph n C C }
type CmmBlock = Block CmmNode C C
type CmmReplGraph e x = FuelUniqSM (Maybe (Graph CmmNode e x))
-------------------------------------------------
-- Manipulating CmmGraphs
+modifyGraph :: (Graph n C C -> Graph n' C C) -> GenCmmGraph n -> GenCmmGraph n'
+modifyGraph f g = CmmGraph {g_entry=g_entry g, g_graph=f (g_graph g)}
+
toBlockMap :: CmmGraph -> LabelMap CmmBlock
toBlockMap (CmmGraph {g_graph=GMany NothingO body NothingO}) = body
-- Running dataflow analysis and/or rewrites
-- Constructing forward and backward analysis-only pass
-analFwd :: Monad m => DataflowLattice f -> FwdTransfer CmmNode f -> FwdPass m CmmNode f
-analBwd :: Monad m => DataflowLattice f -> BwdTransfer CmmNode f -> BwdPass m CmmNode f
+analFwd :: Monad m => DataflowLattice f -> FwdTransfer n f -> FwdPass m n f
+analBwd :: Monad m => DataflowLattice f -> BwdTransfer n f -> BwdPass m n f
analFwd lat xfer = analRewFwd lat xfer noFwdRewrite
analBwd lat xfer = analRewBwd lat xfer noBwdRewrite
-- Constructing forward and backward analysis + rewrite pass
-analRewFwd :: Monad m => DataflowLattice f -> FwdTransfer CmmNode f -> FwdRewrite m CmmNode f -> FwdPass m CmmNode f
-analRewBwd :: Monad m => DataflowLattice f -> BwdTransfer CmmNode f -> BwdRewrite m CmmNode f -> BwdPass m CmmNode f
+analRewFwd :: Monad m => DataflowLattice f -> FwdTransfer n f -> FwdRewrite m n f -> FwdPass m n f
+analRewBwd :: Monad m => DataflowLattice f -> BwdTransfer n f -> BwdRewrite m n f -> BwdPass m n f
analRewFwd lat xfer rew = FwdPass {fp_lattice = lat, fp_transfer = xfer, fp_rewrite = rew}
analRewBwd lat xfer rew = BwdPass {bp_lattice = lat, bp_transfer = xfer, bp_rewrite = rew}
-- Running forward and backward dataflow analysis + optional rewrite
-dataflowPassFwd :: CmmGraph -> [(BlockId, f)] -> FwdPass FuelUniqSM CmmNode f -> FuelUniqSM (CmmGraph, BlockEnv f)
+dataflowPassFwd :: NonLocal n => GenCmmGraph n -> [(BlockId, f)] -> FwdPass FuelUniqSM n f -> FuelUniqSM (GenCmmGraph n, BlockEnv f)
dataflowPassFwd (CmmGraph {g_entry=entry, g_graph=graph}) facts fwd = do
(graph, facts, NothingO) <- analyzeAndRewriteFwd fwd (JustC [entry]) graph (mkFactBase (fp_lattice fwd) facts)
return (CmmGraph {g_entry=entry, g_graph=graph}, facts)
-dataflowPassBwd :: CmmGraph -> [(BlockId, f)] -> BwdPass FuelUniqSM CmmNode f -> FuelUniqSM (CmmGraph, BlockEnv f)
+dataflowPassBwd :: NonLocal n => GenCmmGraph n -> [(BlockId, f)] -> BwdPass FuelUniqSM n f -> FuelUniqSM (GenCmmGraph n, BlockEnv f)
dataflowPassBwd (CmmGraph {g_entry=entry, g_graph=graph}) facts bwd = do
(graph, facts, NothingO) <- analyzeAndRewriteBwd bwd (JustC [entry]) graph (mkFactBase (bp_lattice bwd) facts)
return (CmmGraph {g_entry=entry, g_graph=graph}, facts)
cpsTop hsc_env (CmmProc h@(TopInfo {stack_info=StackInfo {arg_space=entry_off}}) l g) =
do
-- Why bother doing it this early?
- -- g <- dual_rewrite Opt_D_dump_cmmz "spills and reloads"
+ -- g <- dual_rewrite run Opt_D_dump_cmmz "spills and reloads"
-- (dualLivenessWithInsertion callPPs) g
-- g <- run $ insertLateReloads g -- Duplicate reloads just before uses
- -- g <- dual_rewrite Opt_D_dump_cmmz "Dead Assignment Elimination"
+ -- g <- dual_rewrite runOptimization Opt_D_dump_cmmz "Dead Assignment Elimination"
-- (removeDeadAssignmentsAndReloads callPPs) g
dump Opt_D_dump_cmmz "Pre common block elimination" g
g <- return $ elimCommonBlocks g
----------- Spills and reloads -------------------
g <-
-- pprTrace "pre Spills" (ppr g) $
- dual_rewrite Opt_D_dump_cmmz "spills and reloads"
+ dual_rewrite run Opt_D_dump_cmmz "spills and reloads"
(dualLivenessWithInsertion procPoints) g
-- Insert spills at defns; reloads at return points
g <-
-- pprTrace "pre insertLateReloads" (ppr g) $
- run $ insertLateReloads g -- Duplicate reloads just before uses
+ runOptimization $ insertLateReloads g -- Duplicate reloads just before uses
dump Opt_D_dump_cmmz "Post late reloads" g
g <-
-- pprTrace "post insertLateReloads" (ppr g) $
- dual_rewrite Opt_D_dump_cmmz "Dead Assignment Elimination"
+ dual_rewrite runOptimization Opt_D_dump_cmmz "Dead Assignment Elimination"
(removeDeadAssignmentsAndReloads procPoints) g
-- Remove redundant reloads (and any other redundant asst)
where dflags = hsc_dflags hsc_env
mbpprTrace x y z = if dopt Opt_D_dump_cmmz dflags then pprTrace x y z else z
dump f txt g = dumpIfSet_dyn dflags f txt (ppr g)
-
- run = runFuelIO (hsc_OptFuel hsc_env)
-
- dual_rewrite flag txt pass g =
+ -- Runs a required transformation/analysis
+ run = runInfiniteFuelIO (hsc_OptFuel hsc_env)
+ -- Runs an optional transformation/analysis (and should
+ -- thus be subject to optimization fuel)
+ runOptimization = runFuelIO (hsc_OptFuel hsc_env)
+
+ -- pass 'run' or 'runOptimization' for 'r'
+ dual_rewrite r flag txt pass g =
do dump flag ("Pre " ++ txt) g
- g <- run $ pass g
+ g <- r $ pass g
dump flag ("Post " ++ txt) $ g
return g
| CmmRegOff CmmReg Int
-- CmmRegOff reg i
-- ** is shorthand only, meaning **
- -- CmmMachOp (MO_S_Add rep (CmmReg reg) (CmmLit (CmmInt i rep)))
- -- where rep = cmmRegType reg
+ -- CmmMachOp (MO_Add rep) [x, CmmLit (CmmInt (fromIntegral i) rep)]
+ -- where rep = typeWidth (cmmRegType reg)
instance Eq CmmExpr where -- Equality ignores the types
CmmLit l1 == CmmLit l2 = l1==l2
cmmExprType (CmmMachOp op args) = machOpResultType op (map cmmExprType args)
cmmExprType (CmmRegOff reg _) = cmmRegType reg
cmmExprType (CmmStackSlot _ _) = bWord -- an address
+-- Careful though: what is stored at the stack slot may be bigger than
+-- an address
cmmLitType :: CmmLit -> CmmType
cmmLitType (CmmInt _ width) = cmmBits width
import Constants
import FastString
-import Control.Monad
import Data.Maybe
-- -----------------------------------------------------------------------------
lintCmmExpr :: CmmExpr -> CmmLint CmmType
lintCmmExpr (CmmLoad expr rep) = do
_ <- lintCmmExpr expr
- when (widthInBytes (typeWidth rep) >= wORD_SIZE) $
- cmmCheckWordAddress expr
+ -- Disabled, if we have the inlining phase before the lint phase,
+ -- we can have funny offsets due to pointer tagging. -- EZY
+ -- when (widthInBytes (typeWidth rep) >= wORD_SIZE) $
+ -- cmmCheckWordAddress expr
return rep
lintCmmExpr expr@(CmmMachOp op args) = do
tys <- mapM lintCmmExpr args
-- This expression should be an address from which a word can be loaded:
-- check for funny-looking sub-word offsets.
-cmmCheckWordAddress :: CmmExpr -> CmmLint ()
-cmmCheckWordAddress e@(CmmMachOp op [arg, CmmLit (CmmInt i _)])
+_cmmCheckWordAddress :: CmmExpr -> CmmLint ()
+_cmmCheckWordAddress e@(CmmMachOp op [arg, CmmLit (CmmInt i _)])
| isOffsetOp op && notNodeReg arg && i `rem` fromIntegral wORD_SIZE /= 0
= cmmLintDubiousWordOffset e
-cmmCheckWordAddress e@(CmmMachOp op [CmmLit (CmmInt i _), arg])
+_cmmCheckWordAddress e@(CmmMachOp op [CmmLit (CmmInt i _), arg])
| isOffsetOp op && notNodeReg arg && i `rem` fromIntegral wORD_SIZE /= 0
= cmmLintDubiousWordOffset e
-cmmCheckWordAddress _
+_cmmCheckWordAddress _
= return ()
-- No warnings for unaligned arithmetic with the node register,
kill :: DefinerOfLocalRegs a => a -> RegSet -> RegSet
kill a live = foldRegsDefd delOneFromUniqSet live a
+-- Testing!
xferLive :: BwdTransfer CmmNode CmmLive
xferLive = mkBTransfer3 fst mid lst
where fst _ f = f
mid :: CmmNode O O -> CmmLive -> CmmLive
- mid n f = gen_kill n $ case n of CmmUnsafeForeignCall {} -> emptyRegSet
- _ -> f
+ mid n f = gen_kill n f
lst :: CmmNode O C -> FactBase CmmLive -> CmmLive
lst n f = gen_kill n $ case n of CmmCall {} -> emptyRegSet
CmmForeignCall {} -> emptyRegSet
A MidForeign call is used for *unsafe* foreign calls;
a LastForeign call is used for *safe* foreign calls.
Unsafe ones are easy: think of them as a "fat machine instruction".
+In particular, they do *not* kill all live registers (there was a bit
+of code in GHC that conservatively assumed otherwise.)
Safe ones are trickier. A safe foreign call
r = f(x)
cmmMiniInlineStmts uses (stmt:stmts)
= stmt : cmmMiniInlineStmts uses stmts
-lookForInline u expr (stmt : rest)
+lookForInline u expr stmts = lookForInline' u expr regset stmts
+ where regset = foldRegsUsed extendRegSet emptyRegSet expr
+
+lookForInline' u expr regset (stmt : rest)
| Just 1 <- lookupUFM (countUses stmt) u, ok_to_inline
= Just (inlineStmt u expr stmt : rest)
| ok_to_skip
- = case lookForInline u expr rest of
+ = case lookForInline' u expr regset rest of
Nothing -> Nothing
Just stmts -> Just (stmt:stmts)
CmmCall{} -> hasNoGlobalRegs expr
_ -> True
- -- We can skip over assignments to other tempoararies, because we
- -- know that expressions aren't side-effecting and temporaries are
- -- single-assignment.
+ -- Expressions aren't side-effecting. Temporaries may or may not
+ -- be single-assignment depending on the source (the old code
+ -- generator creates single-assignment code, but hand-written Cmm
+ -- and Cmm from the new code generator is not single-assignment.)
+ -- So we do an extra check to make sure that the register being
+ -- changed is not one we were relying on. I don't know how much of a
+ -- performance hit this is (we have to create a regset for every
+ -- instruction.) -- EZY
ok_to_skip = case stmt of
CmmNop -> True
- CmmAssign (CmmLocal (LocalReg u' _)) rhs | u' /= u -> True
+ CmmComment{} -> True
+ CmmAssign (CmmLocal r@(LocalReg u' _)) rhs | u' /= u && not (r `elemRegSet` regset) -> True
CmmAssign g@(CmmGlobal _) rhs -> not (g `regUsedIn` expr)
_other -> False
where check live id x = if id == entry then noLiveOnEntry id (in_regs live) x else x
middle :: CmmNode O O -> DualLive -> DualLive
- middle m live = changeStack updSlots $ changeRegs (xferLiveMiddle m) (changeRegs regs_in live)
- where xferLiveMiddle = case getBTransfer3 xferLive of (_, middle, _) -> middle
- regs_in :: RegSet -> RegSet
- regs_in live = case m of CmmUnsafeForeignCall {} -> emptyRegSet
- _ -> live
+ middle m = changeStack updSlots
+ . changeRegs updRegs
+ where -- Reuse middle of liveness analysis from CmmLive
+ updRegs = case getBTransfer3 xferLive of (_, middle, _) -> middle m
+
updSlots live = foldSlotsUsed reload (foldSlotsDefd spill live m) m
spill live s@(RegSlot r, _, _) = check s $ deleteFromRegSet live r
spill live _ = live
, copyInOflow, copyInSlot, copyOutOflow, copyOutSlot
-- Reexport of needed Cmm stuff
, Convention(..), ForeignConvention(..), ForeignTarget(..)
- , CmmStackInfo(..), CmmTopInfo(..), CmmGraph(..)
+ , CmmStackInfo(..), CmmTopInfo(..), CmmGraph, GenCmmGraph(..)
, Cmm, CmmTop
)
where
-- the optimiser with varying amount of fuel to find out the exact number of
-- steps where a bug is introduced in the output.
module OptimizationFuel
- ( OptimizationFuel, amountOfFuel, tankFilledTo, anyFuelLeft, oneLessFuel
+ ( OptimizationFuel, amountOfFuel, tankFilledTo, unlimitedFuel, anyFuelLeft, oneLessFuel
, OptFuelState, initOptFuelState
, FuelConsumer, FuelUsingMonad, FuelState
, fuelGet, fuelSet, lastFuelPass, setFuelPass
, fuelExhausted, fuelDec1, tryWithFuel
- , runFuelIO, fuelConsumingPass
+ , runFuelIO, runInfiniteFuelIO, fuelConsumingPass
, FuelUniqSM
, liftUniq
)
import Control.Monad
import StaticFlags (opt_Fuel)
import UniqSupply
-#ifdef DEBUG
import Panic
-#endif
import Compiler.Hoopl
import Compiler.Hoopl.GHC (getFuel, setFuel)
anyFuelLeft :: OptimizationFuel -> Bool
oneLessFuel :: OptimizationFuel -> OptimizationFuel
+unlimitedFuel :: OptimizationFuel
-#ifdef DEBUG
newtype OptimizationFuel = OptimizationFuel Int
deriving Show
anyFuelLeft (OptimizationFuel f) = f > 0
oneLessFuel (OptimizationFuel f) = ASSERT (f > 0) (OptimizationFuel (f - 1))
-#else
--- type OptimizationFuel = State# () -- would like this, but it won't work
-data OptimizationFuel = OptimizationFuel
- deriving Show
-tankFilledTo _ = OptimizationFuel
-amountOfFuel _ = maxBound
-
-anyFuelLeft _ = True
-oneLessFuel _ = OptimizationFuel
-#endif
+unlimitedFuel = OptimizationFuel infiniteFuel
data FuelState = FuelState { fs_fuel :: OptimizationFuel, fs_lastpass :: String }
newtype FuelUniqSM a = FUSM { unFUSM :: FuelState -> UniqSM (a, FuelState) }
writeIORef (fuel_ref fs) fuel'
return a
+-- ToDo: Do we need the pass_ref when we are doing infinite fuel
+-- transformations?
+runInfiniteFuelIO :: OptFuelState -> FuelUniqSM a -> IO a
+runInfiniteFuelIO fs (FUSM f) =
+ do pass <- readIORef (pass_ref fs)
+ u <- mkSplitUniqSupply 'u'
+ let (a, FuelState _ pass') = initUs_ u $ f (FuelState unlimitedFuel pass)
+ writeIORef (pass_ref fs) pass'
+ return a
+
instance Monad FuelUniqSM where
FUSM f >>= k = FUSM (\s -> f s >>= \(a, s') -> unFUSM (k a) s')
return a = FUSM (\s -> return (a, s))
import Constants
import BasicTypes
import CLabel
+import Util
-- The rest
import Data.List
then pprDataExterns info $$
pprWordArray (entryLblToInfoLbl clbl) info
else empty) $$
- (case blocks of
- [] -> empty
- -- the first block doesn't get a label:
- (BasicBlock _ stmts : rest) -> vcat [
+ (vcat [
blankLine,
extern_decls,
(if (externallyVisibleCLabel clbl)
then mkFN_ else mkIF_) (pprCLabel clbl) <+> lbrace,
nest 8 temp_decls,
nest 8 mkFB_,
- nest 8 (vcat (map pprStmt stmts)) $$
- vcat (map pprBBlock rest),
+ case blocks of
+ [] -> empty
+ -- the first block doesn't get a label:
+ (BasicBlock _ stmts : rest) ->
+ nest 8 (vcat (map pprStmt stmts)) $$
+ vcat (map pprBBlock rest),
nest 8 mkFE_,
rbrace ]
)
pprStringInCStyle :: [Word8] -> SDoc
pprStringInCStyle s = doubleQuotes (text (concatMap charToC s))
-charToC :: Word8 -> String
-charToC w =
- case chr (fromIntegral w) of
- '\"' -> "\\\""
- '\'' -> "\\\'"
- '\\' -> "\\\\"
- c | c >= ' ' && c <= '~' -> [c]
- | otherwise -> ['\\',
- chr (ord '0' + ord c `div` 64),
- chr (ord '0' + ord c `div` 8 `mod` 8),
- chr (ord '0' + ord c `mod` 8)]
-
-- ---------------------------------------------------------------------------
-- Initialising static objects with floating-point numbers. We can't
-- just emit the floating point number, because C will cast it to an int
-- in update frame CAF/DICT functions will be
-- subsumed by this enclosing cc
{ enterCostCentre cl_info cc body
- ; stmtsC [CmmComment $ mkFastString $ showSDoc $ ppr body]
; cgExpr body }
}
--
-----------------------------------------------------------------------------
-module CgHpc (cgTickBox, initHpc, hpcTable) where
+module CgHpc (cgTickBox, hpcTable) where
import OldCmm
import CLabel
import Module
import OldCmmUtils
-import CgUtils
import CgMonad
-import CgForeignCall
-import ForeignCall
-import ClosureInfo
-import FastString
import HscTypes
-import Panic
-import BasicTypes
-
-import Data.Char
-import Data.Word
cgTickBox :: Module -> Int -> Code
cgTickBox mod n = do
hpcTable :: Module -> HpcInfo -> Code
hpcTable this_mod (HpcInfo hpc_tickCount _) = do
- emitData ReadOnlyData
- [ CmmDataLabel mkHpcModuleNameLabel
- , CmmString $ map (fromIntegral . ord)
- (full_name_str)
- ++ [0]
- ]
emitData Data $ [ CmmDataLabel (mkHpcTicksLabel this_mod)
] ++
[ CmmStaticLit (CmmInt 0 W64)
| _ <- take hpc_tickCount [0::Int ..]
]
- where
- module_name_str = moduleNameString (Module.moduleName this_mod)
- full_name_str = if modulePackageId this_mod == mainPackageId
- then module_name_str
- else packageIdString (modulePackageId this_mod) ++ "/" ++
- module_name_str
hpcTable _ (NoHpcInfo {}) = error "TODO: impossible"
-
-initHpc :: Module -> HpcInfo -> Code
-initHpc this_mod (HpcInfo tickCount hashNo)
- = do { id <- newTemp bWord
- ; emitForeignCall'
- PlayRisky
- [CmmHinted id NoHint]
- (CmmCallee
- (CmmLit $ CmmLabel $ mkForeignLabel mod_alloc Nothing ForeignLabelInThisPackage IsFunction)
- CCallConv
- )
- [ CmmHinted (mkLblExpr mkHpcModuleNameLabel) AddrHint
- , CmmHinted (word32 tickCount) NoHint
- , CmmHinted (word32 hashNo) NoHint
- , CmmHinted (CmmLit $ CmmLabel $ mkHpcTicksLabel $ this_mod) AddrHint
- ]
- (Just [])
- NoC_SRT -- No SRT b/c we PlayRisky
- CmmMayReturn
- }
- where
- word32 i = CmmLit (CmmInt (fromIntegral (fromIntegral i :: Word32)) W32)
- mod_alloc = mkFastString "hs_hpc_module"
-initHpc _ (NoHpcInfo {}) = panic "initHpc: NoHpcInfo"
-
costCentreFrom,
curCCS, curCCSAddr,
emitCostCentreDecl, emitCostCentreStackDecl,
- emitRegisterCC, emitRegisterCCS,
- emitSetCCC, emitCCS,
+ emitSetCCC, emitCCS,
-- Lag/drag/void stuff
ldvEnter, ldvEnterClosure, ldvRecordCreate
(ws,ms) = SIZEOF_CostCentreStack `divMod` wORD_SIZE
-- ---------------------------------------------------------------------------
--- Registering CCs and CCSs
-
--- (cc)->link = CC_LIST;
--- CC_LIST = (cc);
--- (cc)->ccID = CC_ID++;
-
-emitRegisterCC :: CostCentre -> Code
-emitRegisterCC cc = do
- { tmp <- newTemp cInt
- ; stmtsC [
- CmmStore (cmmOffsetB cc_lit oFFSET_CostCentre_link)
- (CmmLoad cC_LIST bWord),
- CmmStore cC_LIST cc_lit,
- CmmAssign (CmmLocal tmp) (CmmLoad cC_ID cInt),
- CmmStore (cmmOffsetB cc_lit oFFSET_CostCentre_ccID) (CmmReg (CmmLocal tmp)),
- CmmStore cC_ID (cmmRegOffB (CmmLocal tmp) 1)
- ]
- }
- where
- cc_lit = CmmLit (CmmLabel (mkCCLabel cc))
-
--- (ccs)->prevStack = CCS_LIST;
--- CCS_LIST = (ccs);
--- (ccs)->ccsID = CCS_ID++;
-
-emitRegisterCCS :: CostCentreStack -> Code
-emitRegisterCCS ccs = do
- { tmp <- newTemp cInt
- ; stmtsC [
- CmmStore (cmmOffsetB ccs_lit oFFSET_CostCentreStack_prevStack)
- (CmmLoad cCS_LIST bWord),
- CmmStore cCS_LIST ccs_lit,
- CmmAssign (CmmLocal tmp) (CmmLoad cCS_ID cInt),
- CmmStore (cmmOffsetB ccs_lit oFFSET_CostCentreStack_ccsID) (CmmReg (CmmLocal tmp)),
- CmmStore cCS_ID (cmmRegOffB (CmmLocal tmp) 1)
- ]
- }
- where
- ccs_lit = CmmLit (CmmLabel (mkCCSLabel ccs))
-
-
-cC_LIST, cC_ID :: CmmExpr
-cC_LIST = CmmLit (CmmLabel (mkCmmDataLabel rtsPackageId (fsLit "CC_LIST")))
-cC_ID = CmmLit (CmmLabel (mkCmmDataLabel rtsPackageId (fsLit "CC_ID")))
-
-cCS_LIST, cCS_ID :: CmmExpr
-cCS_LIST = CmmLit (CmmLabel (mkCmmDataLabel rtsPackageId (fsLit "CCS_LIST")))
-cCS_ID = CmmLit (CmmLabel (mkCmmDataLabel rtsPackageId (fsLit "CCS_ID")))
-
--- ---------------------------------------------------------------------------
-- Set the current cost centre stack
emitSetCCC :: CostCentre -> Code
import CLabel
import OldCmm
-import OldCmmUtils
import OldPprCmm
import StgSyn
codeGen :: DynFlags
-> Module
-> [TyCon]
- -> [Module] -- directly-imported modules
- -> CollectedCCs -- (Local/global) cost-centres needing declaring/registering.
+ -> CollectedCCs -- (Local/global) cost-centres needing declaring/registering.
-> [(StgBinding,[(Id,[Id])])] -- Bindings to convert, with SRTs
-> HpcInfo
-> IO [Cmm] -- Output
-- possible for object splitting to split up the
-- pieces later.
-codeGen dflags this_mod data_tycons imported_mods
- cost_centre_info stg_binds hpc_info
+codeGen dflags this_mod data_tycons cost_centre_info stg_binds hpc_info
= do
{ showPass dflags "CodeGen"
{ cmm_binds <- mapM (getCmm . cgTopBinding dflags) stg_binds
; cmm_tycons <- mapM cgTyCon data_tycons
; cmm_init <- getCmm (mkModuleInit dflags cost_centre_info
- this_mod imported_mods hpc_info)
- ; return (cmm_binds ++ concat cmm_tycons ++ [cmm_init])
+ this_mod hpc_info)
+ ; return (cmm_init : cmm_binds ++ concat cmm_tycons)
}
-- Put datatype_stuff after code_stuff, because the
-- datatype closure table (for enumeration types) to
-- (say) PrelBase_True_closure, which is defined in
-- code_stuff
+ -- Note [codegen-split-init] the cmm_init block must
+ -- come FIRST. This is because when -split-objs is on
+ -- we need to combine this block with its
+ -- initialisation routines; see Note
+ -- [pipeline-split-init].
+
; dumpIfSet_dyn dflags Opt_D_dump_cmm "Cmm" (pprCmms code_stuff)
; return code_stuff }
-\end{code}
-
-%************************************************************************
-%* *
-\subsection[codegen-init]{Module initialisation code}
-%* *
-%************************************************************************
-
-/* -----------------------------------------------------------------------------
- Module initialisation
-
- The module initialisation code looks like this, roughly:
-
- FN(__stginit_Foo) {
- JMP_(__stginit_Foo_1_p)
- }
-
- FN(__stginit_Foo_1_p) {
- ...
- }
-
- We have one version of the init code with a module version and the
- 'way' attached to it. The version number helps to catch cases
- where modules are not compiled in dependency order before being
- linked: if a module has been compiled since any modules which depend on
- it, then the latter modules will refer to a different version in their
- init blocks and a link error will ensue.
-
- The 'way' suffix helps to catch cases where modules compiled in different
- ways are linked together (eg. profiled and non-profiled).
-
- We provide a plain, unadorned, version of the module init code
- which just jumps to the version with the label and way attached. The
- reason for this is that when using foreign exports, the caller of
- startupHaskell() must supply the name of the init function for the "top"
- module in the program, and we don't want to require that this name
- has the version and way info appended to it.
- -------------------------------------------------------------------------- */
-
-We initialise the module tree by keeping a work-stack,
- * pointed to by Sp
- * that grows downward
- * Sp points to the last occupied slot
-
-\begin{code}
-mkModuleInit
+mkModuleInit
:: DynFlags
-> CollectedCCs -- cost centre info
-> Module
- -> [Module]
- -> HpcInfo
+ -> HpcInfo
-> Code
-mkModuleInit dflags cost_centre_info this_mod imported_mods hpc_info
- = do { -- Allocate the static boolean that records if this
- -- module has been registered already
- emitData Data [CmmDataLabel moduleRegdLabel,
- CmmStaticLit zeroCLit]
+mkModuleInit dflags cost_centre_info this_mod hpc_info
+ = do { -- Allocate the static boolean that records if this
; whenC (opt_Hpc) $
hpcTable this_mod hpc_info
- -- we emit a recursive descent module search for all modules
- -- and *choose* to chase it in :Main, below.
- -- In this way, Hpc enabled modules can interact seamlessly with
- -- not Hpc enabled moduled, provided Main is compiled with Hpc.
-
- ; emitSimpleProc real_init_lbl $ do
- { ret_blk <- forkLabelledCode ret_code
-
- ; init_blk <- forkLabelledCode $ do
- { mod_init_code; stmtC (CmmBranch ret_blk) }
-
- ; stmtC (CmmCondBranch (cmmNeWord (CmmLit zeroCLit) mod_reg_val)
- ret_blk)
- ; stmtC (CmmBranch init_blk)
- }
-
- -- Make the "plain" procedure jump to the "real" init procedure
- ; emitSimpleProc plain_init_lbl jump_to_init
-
- -- When compiling the module in which the 'main' function lives,
- -- (that is, this_mod == main_mod)
- -- we inject an extra stg_init procedure for stg_init_ZCMain, for the
- -- RTS to invoke. We must consult the -main-is flag in case the
- -- user specified a different function to Main.main
-
- -- Notice that the recursive descent is optional, depending on what options
- -- are enabled.
-
- ; whenC (this_mod == main_mod)
- (emitSimpleProc plain_main_init_lbl rec_descent_init)
- }
- where
- -- The way string we attach to the __stginit label to catch
- -- accidental linking of modules compiled in different ways. We
- -- omit "dyn" from this way, because we want to be able to load
- -- both dynamic and non-dynamic modules into a dynamic GHC.
- way = mkBuildTag (filter want_way (ways dflags))
- want_way w = not (wayRTSOnly w) && wayName w /= WayDyn
-
- main_mod = mainModIs dflags
-
- plain_init_lbl = mkPlainModuleInitLabel this_mod
- real_init_lbl = mkModuleInitLabel this_mod way
- plain_main_init_lbl = mkPlainModuleInitLabel rOOT_MAIN
-
- jump_to_init = stmtC (CmmJump (mkLblExpr real_init_lbl) [])
-
- mod_reg_val = CmmLoad (mkLblExpr moduleRegdLabel) bWord
-
- -- Main refers to GHC.TopHandler.runIO, so make sure we call the
- -- init function for GHC.TopHandler.
- extra_imported_mods
- | this_mod == main_mod = [gHC_TOP_HANDLER]
- | otherwise = []
-
- mod_init_code = do
- { -- Set mod_reg to 1 to record that we've been here
- stmtC (CmmStore (mkLblExpr moduleRegdLabel) (CmmLit (mkIntCLit 1)))
-
; whenC (opt_SccProfilingOn) $ do
initCostCentres cost_centre_info
- ; whenC (opt_Hpc) $
- initHpc this_mod hpc_info
-
- ; mapCs (registerModuleImport way)
- (imported_mods++extra_imported_mods)
-
- }
-
- -- The return-code pops the work stack by
- -- incrementing Sp, and then jumpd to the popped item
- ret_code = stmtsC [ CmmAssign spReg (cmmRegOffW spReg 1)
- , CmmJump (CmmLoad (cmmRegOffW spReg (-1)) bWord) [] ]
-
-
- rec_descent_init = if opt_SccProfilingOn || isHpcUsed hpc_info
- then jump_to_init
- else ret_code
-
------------------------
-registerModuleImport :: String -> Module -> Code
-registerModuleImport way mod
- | mod == gHC_PRIM
- = nopC
- | otherwise -- Push the init procedure onto the work stack
- = stmtsC [ CmmAssign spReg (cmmRegOffW spReg (-1))
- , CmmStore (CmmReg spReg) (mkLblExpr (mkModuleInitLabel mod way)) ]
+ -- For backwards compatibility: user code may refer to this
+ -- label for calling hs_add_root().
+ ; emitData Data $ [ CmmDataLabel (mkPlainModuleInitLabel this_mod) ]
+
+ ; whenC (this_mod == mainModIs dflags) $
+ emitSimpleProc (mkPlainModuleInitLabel rOOT_MAIN) $ return ()
+ }
\end{code}
| otherwise
= do { mapM_ emitCostCentreDecl local_CCs
; mapM_ emitCostCentreStackDecl singleton_CCSs
- ; mapM_ emitRegisterCC local_CCs
- ; mapM_ emitRegisterCCS singleton_CCSs
- }
+ }
\end{code}
%************************************************************************
import StgCmmTicky
import MkGraph
-import CmmDecl
import CmmExpr
-import CmmUtils
+import CmmDecl
import CLabel
import PprCmm
import StgSyn
-import PrelNames
import DynFlags
-import StaticFlags
import HscTypes
import CostCentre
codeGen :: DynFlags
-> Module
-> [TyCon]
- -> [Module] -- Directly-imported modules
- -> CollectedCCs -- (Local/global) cost-centres needing declaring/registering.
+ -> CollectedCCs -- (Local/global) cost-centres needing declaring/registering.
-> [(StgBinding,[(Id,[Id])])] -- Bindings to convert, with SRTs
-> HpcInfo
-> IO [Cmm] -- Output
-codeGen dflags this_mod data_tycons imported_mods
+codeGen dflags this_mod data_tycons
cost_centre_info stg_binds hpc_info
= do { showPass dflags "New CodeGen"
- ; let way = buildTag dflags
- main_mod = mainModIs dflags
-- Why?
-- ; mapM_ (\x -> seq x (return ())) data_tycons
; code_stuff <- initC dflags this_mod $ do
{ cmm_binds <- mapM (getCmm . cgTopBinding dflags) stg_binds
; cmm_tycons <- mapM cgTyCon data_tycons
- ; cmm_init <- getCmm (mkModuleInit way cost_centre_info
- this_mod main_mod
- imported_mods hpc_info)
- ; return (cmm_binds ++ concat cmm_tycons ++ [cmm_init])
+ ; cmm_init <- getCmm (mkModuleInit cost_centre_info
+ this_mod hpc_info)
+ ; return (cmm_init : cmm_binds ++ concat cmm_tycons)
}
-- Put datatype_stuff after code_stuff, because the
-- datatype closure table (for enumeration types) to
-- possible for object splitting to split up the
-- pieces later.
+ -- Note [codegen-split-init] the cmm_init block must
+ -- come FIRST. This is because when -split-objs is on
+ -- we need to combine this block with its
+ -- initialisation routines; see Note
+ -- [pipeline-split-init].
+
; dumpIfSet_dyn dflags Opt_D_dump_cmmz "New Cmm" (pprCmms code_stuff)
; return code_stuff }
-}
mkModuleInit
- :: String -- the "way"
- -> CollectedCCs -- cost centre info
+ :: CollectedCCs -- cost centre info
-> Module
- -> Module -- name of the Main module
- -> [Module]
- -> HpcInfo
+ -> HpcInfo
-> FCode ()
-mkModuleInit way cost_centre_info this_mod main_mod imported_mods hpc_info
- = do { -- Allocate the static boolean that records if this
- -- module has been registered already
- emitData Data [CmmDataLabel moduleRegdLabel,
- CmmStaticLit zeroCLit]
-
- ; init_hpc <- initHpc this_mod hpc_info
- ; init_prof <- initCostCentres cost_centre_info
-
- -- We emit a recursive descent module search for all modules
- -- and *choose* to chase it in :Main, below.
- -- In this way, Hpc enabled modules can interact seamlessly with
- -- not Hpc enabled moduled, provided Main is compiled with Hpc.
-
- ; updfr_sz <- getUpdFrameOff
- ; tail <- getCode (pushUpdateFrame imports
- (do updfr_sz' <- getUpdFrameOff
- emit $ mkReturn (ret_e updfr_sz') [] (pop_ret_loc updfr_sz')))
- ; emitSimpleProc real_init_lbl $ (withFreshLabel "ret_block" $ \retId -> catAGraphs
- [ check_already_done retId updfr_sz
- , init_prof
- , init_hpc
- , tail])
- -- Make the "plain" procedure jump to the "real" init procedure
- ; emitSimpleProc plain_init_lbl (jump_to_init updfr_sz)
-
- -- When compiling the module in which the 'main' function lives,
- -- (that is, this_mod == main_mod)
- -- we inject an extra stg_init procedure for stg_init_ZCMain, for the
- -- RTS to invoke. We must consult the -main-is flag in case the
- -- user specified a different function to Main.main
-
- -- Notice that the recursive descent is optional, depending on what options
- -- are enabled.
-
-
- ; whenC (this_mod == main_mod)
- (emitSimpleProc plain_main_init_lbl (rec_descent_init updfr_sz))
- }
- where
- plain_init_lbl = mkPlainModuleInitLabel this_mod
- real_init_lbl = mkModuleInitLabel this_mod way
- plain_main_init_lbl = mkPlainModuleInitLabel rOOT_MAIN
-
- jump_to_init updfr_sz = mkJump (mkLblExpr real_init_lbl) [] updfr_sz
-
-
- -- Main refers to GHC.TopHandler.runIO, so make sure we call the
- -- init function for GHC.TopHandler.
- extra_imported_mods
- | this_mod == main_mod = [gHC_TOP_HANDLER]
- | otherwise = []
- all_imported_mods = imported_mods ++ extra_imported_mods
- imports = map (\mod -> mkLblExpr (mkModuleInitLabel mod way))
- (filter (gHC_PRIM /=) all_imported_mods)
-
- mod_reg_val = CmmLoad (mkLblExpr moduleRegdLabel) bWord
- check_already_done retId updfr_sz
- = mkCmmIfThenElse (cmmNeWord (CmmLit zeroCLit) mod_reg_val)
- (mkLabel retId <*> mkReturn (ret_e updfr_sz) [] (pop_ret_loc updfr_sz)) mkNop
- <*> -- Set mod_reg to 1 to record that we've been here
- mkStore (mkLblExpr moduleRegdLabel) (CmmLit (mkIntCLit 1))
-
- -- The return-code pops the work stack by
- -- incrementing Sp, and then jumps to the popped item
- ret_e updfr_sz = CmmLoad (CmmStackSlot (CallArea Old) updfr_sz) gcWord
- ret_code updfr_sz = mkJump (ret_e updfr_sz) [] (pop_ret_loc updfr_sz)
- -- mkAssign spReg (cmmRegOffW spReg 1) <*>
- -- mkJump (CmmLoad (cmmRegOffW spReg (-1)) bWord) [] updfr_sz
-
- pop_ret_loc updfr_sz = updfr_sz - widthInBytes (typeWidth bWord)
-
- rec_descent_init updfr_sz =
- if opt_SccProfilingOn || isHpcUsed hpc_info
- then jump_to_init updfr_sz
- else ret_code updfr_sz
+
+mkModuleInit cost_centre_info this_mod hpc_info
+ = do { initHpc this_mod hpc_info
+ ; initCostCentres cost_centre_info
+ -- For backwards compatibility: user code may refer to this
+ -- label for calling hs_add_root().
+ ; emitData Data $ [ CmmDataLabel (mkPlainModuleInitLabel this_mod) ]
+ }
---------------------------------------------------------------
-- Generating static stuff for algebraic data types
(map toVarArg fv_details)
-- RETURN
- ; return $ (regIdInfo bndr lf_info tmp, init) }
+ ; regIdInfo bndr lf_info tmp init }
-- Use with care; if used inappropriately, it could break invariants.
stripNV :: NonVoid a -> a
; (tmp, init) <- allocDynClosure closure_info use_cc blame_cc payload_w_offsets
-- RETURN
- ; returnFC $ (regIdInfo bndr lf_info tmp, init) }
+ ; regIdInfo bndr lf_info tmp init }
mkClosureLFInfo :: Id -- The binder
-> TopLevelFlag -- True of top level
{- Note [Data constructor dynamic tags]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The family size of a data type (the number of constructors)
-can be either:
+The family size of a data type (the number of constructors
+or the arity of a function) can be either:
* small, if the family size < 2**tag_bits
* big, otherwise.
Small families can have the constructor tag in the tag bits.
-Big families only use the tag value 1 to represent evaluatedness. -}
+Big families only use the tag value 1 to represent evaluatedness.
+We don't have very many tag bits: for example, we have 2 bits on
+x86-32 and 3 bits on x86-64. -}
isSmallFamily :: Int -> Bool
isSmallFamily fam_size = fam_size <= mAX_PTR_TAG
= do { let (cl_info, args_w_offsets) = layOutDynConstr con (addArgReps args)
-- No void args in args_w_offsets
; (tmp, init) <- allocDynClosure cl_info use_cc blame_cc args_w_offsets
- ; return (regIdInfo binder lf_info tmp, init) }
+ ; regIdInfo binder lf_info tmp init }
where
lf_info = mkConLFInfo con
import BlockId
import CmmExpr
import CmmUtils
+import MkGraph (CmmAGraph, mkAssign, (<*>))
import FastString
import Id
import VarEnv
litIdInfo id lf_info lit = --mkCgIdInfo id lf_info (CmmLit lit)
mkCgIdInfo id lf_info (addDynTag (CmmLit lit) (lfDynTag lf_info))
-regIdInfo :: Id -> LambdaFormInfo -> LocalReg -> CgIdInfo
-regIdInfo id lf_info reg =
- mkCgIdInfo id lf_info (addDynTag (CmmReg (CmmLocal reg)) (lfDynTag lf_info))
+-- Because the register may be spilled to the stack in untagged form, we
+-- modify the initialization code 'init' to immediately tag the
+-- register, and store a plain register in the CgIdInfo. We allocate
+-- a new register in order to keep single-assignment and help out the
+-- inliner. -- EZY
+regIdInfo :: Id -> LambdaFormInfo -> LocalReg -> CmmAGraph -> FCode (CgIdInfo, CmmAGraph)
+regIdInfo id lf_info reg init = do
+ reg' <- newTemp (localRegType reg)
+ let init' = init <*> mkAssign (CmmLocal reg') (addDynTag (CmmReg (CmmLocal reg)) (lfDynTag lf_info))
+ return (mkCgIdInfo id lf_info (CmmReg (CmmLocal reg')), init')
idInfoToAmode :: CgIdInfo -> CmmExpr
-- Returns a CmmExpr for the *tagged* pointer
module StgCmmHpc ( initHpc, mkTickBox ) where
-import StgCmmUtils
import StgCmmMonad
-import StgCmmForeign
import MkGraph
import CmmDecl
import CLabel
import Module
import CmmUtils
-import FastString
import HscTypes
-import Data.Char
import StaticFlags
-import BasicTypes
mkTickBox :: Module -> Int -> CmmAGraph
mkTickBox mod n
(CmmLit $ CmmLabel $ mkHpcTicksLabel $ mod)
n
-initHpc :: Module -> HpcInfo -> FCode CmmAGraph
+initHpc :: Module -> HpcInfo -> FCode ()
-- Emit top-level tables for HPC and return code to initialise
initHpc _ (NoHpcInfo {})
- = return mkNop
-initHpc this_mod (HpcInfo tickCount hashNo)
- = getCode $ whenC opt_Hpc $
- do { emitData ReadOnlyData
- [ CmmDataLabel mkHpcModuleNameLabel
- , CmmString $ map (fromIntegral . ord)
- (full_name_str)
- ++ [0]
- ]
- ; emitData Data $ [ CmmDataLabel (mkHpcTicksLabel this_mod)
+ = return ()
+initHpc this_mod (HpcInfo tickCount _hashNo)
+ = whenC opt_Hpc $
+ do { emitData Data $ [ CmmDataLabel (mkHpcTicksLabel this_mod)
] ++
[ CmmStaticLit (CmmInt 0 W64)
| _ <- take tickCount [0::Int ..]
]
-
- ; id <- newTemp bWord -- TODO FIXME NOW
- ; emitCCall
- [(id,NoHint)]
- (CmmLit $ CmmLabel $ mkForeignLabel mod_alloc Nothing ForeignLabelInThisPackage IsFunction)
- [ (mkLblExpr mkHpcModuleNameLabel,AddrHint)
- , (CmmLit $ mkIntCLit tickCount,NoHint)
- , (CmmLit $ mkIntCLit hashNo,NoHint)
- , (CmmLit $ CmmLabel $ mkHpcTicksLabel $ this_mod,AddrHint)
- ]
}
- where
- mod_alloc = mkFastString "hs_hpc_module"
- module_name_str = moduleNameString (Module.moduleName this_mod)
- full_name_str = if modulePackageId this_mod == mainPackageId
- then module_name_str
- else packageIdString (modulePackageId this_mod) ++ "/" ++
- module_name_str
-
-
-
-- Initialising Cost Centres & CCSs
---------------------------------------------------------------
-initCostCentres :: CollectedCCs -> FCode CmmAGraph
--- Emit the declarations, and return code to register them
+initCostCentres :: CollectedCCs -> FCode ()
+-- Emit the declarations
initCostCentres (local_CCs, ___extern_CCs, singleton_CCSs)
- = getCode $ whenC opt_SccProfilingOn $
+ = whenC opt_SccProfilingOn $
do { mapM_ emitCostCentreDecl local_CCs
- ; mapM_ emitCostCentreStackDecl singleton_CCSs
- ; emit $ catAGraphs $ map mkRegisterCC local_CCs
- ; emit $ catAGraphs $ map mkRegisterCCS singleton_CCSs }
+ ; mapM_ emitCostCentreStackDecl singleton_CCSs }
emitCostCentreDecl :: CostCentre -> FCode ()
(ws,ms) = SIZEOF_CostCentreStack `divMod` wORD_SIZE
-- ---------------------------------------------------------------------------
--- Registering CCs and CCSs
-
--- (cc)->link = CC_LIST;
--- CC_LIST = (cc);
--- (cc)->ccID = CC_ID++;
-
-mkRegisterCC :: CostCentre -> CmmAGraph
-mkRegisterCC cc
- = withTemp cInt $ \tmp ->
- catAGraphs [
- mkStore (cmmOffsetB cc_lit oFFSET_CostCentre_link)
- (CmmLoad cC_LIST bWord),
- mkStore cC_LIST cc_lit,
- mkAssign (CmmLocal tmp) (CmmLoad cC_ID cInt),
- mkStore (cmmOffsetB cc_lit oFFSET_CostCentre_ccID) (CmmReg (CmmLocal tmp)),
- mkStore cC_ID (cmmRegOffB (CmmLocal tmp) 1)
- ]
- where
- cc_lit = CmmLit (CmmLabel (mkCCLabel cc))
-
--- (ccs)->prevStack = CCS_LIST;
--- CCS_LIST = (ccs);
--- (ccs)->ccsID = CCS_ID++;
-
-mkRegisterCCS :: CostCentreStack -> CmmAGraph
-mkRegisterCCS ccs
- = withTemp cInt $ \ tmp ->
- catAGraphs [
- mkStore (cmmOffsetB ccs_lit oFFSET_CostCentreStack_prevStack)
- (CmmLoad cCS_LIST bWord),
- mkStore cCS_LIST ccs_lit,
- mkAssign (CmmLocal tmp) (CmmLoad cCS_ID cInt),
- mkStore (cmmOffsetB ccs_lit oFFSET_CostCentreStack_ccsID) (CmmReg (CmmLocal tmp)),
- mkStore cCS_ID (cmmRegOffB (CmmLocal tmp) 1)
- ]
- where
- ccs_lit = CmmLit (CmmLabel (mkCCSLabel ccs))
-
-
-cC_LIST, cC_ID :: CmmExpr
-cC_LIST = CmmLit (CmmLabel (mkCmmDataLabel rtsPackageId (fsLit "CC_LIST")))
-cC_ID = CmmLit (CmmLabel (mkCmmDataLabel rtsPackageId (fsLit "CC_ID")))
-
-cCS_LIST, cCS_ID :: CmmExpr
-cCS_LIST = CmmLit (CmmLabel (mkCmmDataLabel rtsPackageId (fsLit "CCS_LIST")))
-cCS_ID = CmmLit (CmmLabel (mkCmmDataLabel rtsPackageId (fsLit "CCS_ID")))
-
--- ---------------------------------------------------------------------------
-- Set the current cost centre stack
emitSetCCC :: CostCentre -> FCode ()
import CoreMonad ( endPass, CoreToDo(..) )
import CoreSyn
import CoreSubst
+import OccurAnal ( occurAnalyseExpr )
import Type
import Coercion
import TyCon
partial applications. But it's easier to let them through.
+Note [Dead code in CorePrep]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Imagine that we got an input program like this:
+
+ f :: Show b => Int -> (Int, b -> Maybe Int -> Int)
+ f x = (g True (Just x) + g () (Just x), g)
+ where
+ g :: Show a => a -> Maybe Int -> Int
+ g _ Nothing = x
+ g y (Just z) = if z > 100 then g y (Just (z + length (show y))) else g y unknown
+
+After specialisation and SpecConstr, we would get something like this:
+
+ f :: Show b => Int -> (Int, b -> Maybe Int -> Int)
+ f x = (g$Bool_True_Just x + g$Unit_Unit_Just x, g)
+ where
+ {-# RULES g $dBool = g$Bool
+ g $dUnit = g$Unit #-}
+ g = ...
+ {-# RULES forall x. g$Bool True (Just x) = g$Bool_True_Just x #-}
+ g$Bool = ...
+ {-# RULES forall x. g$Unit () (Just x) = g$Unit_Unit_Just x #-}
+ g$Unit = ...
+ g$Bool_True_Just = ...
+ g$Unit_Unit_Just = ...
+
+Note that the g$Bool and g$Unit functions are actually dead code: they are only kept
+alive by the occurrence analyser because they are referred to by the rules of g,
+which is being kept alive by the fact that it is used (unspecialised) in the returned pair.
+
+However, at the CorePrep stage there is no way that the rules for g will ever fire,
+and it really seems like a shame to produce an output program that goes to the trouble
+of allocating a closure for the unreachable g$Bool and g$Unit functions.
+
+The way we fix this is to:
+ * In cloneBndr, drop all unfoldings/rules
+ * In deFloatTop, run the occurrence analyser on each top-level RHS to drop
+ the dead local bindings
+
+The reason we don't just OccAnal the whole output of CorePrep is that the tidier
+ensures that all top-level binders are GlobalIds, so they don't show up in the free
+variables any longer. So if you run the occurrence analyser on the output of CoreTidy
+(or later) you e.g. turn this program:
+
+ Rec {
+ f = ... f ...
+ }
+
+Into this one:
+
+ f = ... f ...
+
+(Since f is not considered to be free in its own RHS.)
+
+
%************************************************************************
%* *
The main code
deFloatTop (Floats _ floats)
= foldrOL get [] floats
where
- get (FloatLet b) bs = b:bs
+ get (FloatLet b) bs = occurAnalyseRHSs b : bs
get b _ = pprPanic "corePrepPgm" (ppr b)
+
+ -- See Note [Dead code in CorePrep]
+ occurAnalyseRHSs (NonRec x e) = NonRec x (occurAnalyseExpr e)
+ occurAnalyseRHSs (Rec xes) = Rec [(x, occurAnalyseExpr e) | (x, e) <- xes]
-------------------------------------------
canFloatFromNoCaf :: Floats -> CpeRhs -> Maybe (Floats, CpeRhs)
cloneBndr env bndr
| isLocalId bndr
= do bndr' <- setVarUnique bndr <$> getUniqueM
- return (extendCorePrepEnv env bndr bndr', bndr')
+
+ -- We are going to OccAnal soon, so drop (now-useless) rules/unfoldings
+ -- so that we can drop more stuff as dead code.
+ -- See also Note [Dead code in CorePrep]
+ let bndr'' = bndr' `setIdUnfolding` noUnfolding
+ `setIdSpecialisation` emptySpecInfo
+ return (extendCorePrepEnv env bndr bndr'', bndr'')
| otherwise -- Top level things, which we don't want
-- to clone, have become GlobalIds by now
-- * Expression and bindings size
coreBindsSize, exprSize,
+ CoreStats(..), coreBindsStats,
-- * Hashing
hashExpr,
exprSize :: CoreExpr -> Int
-- ^ A measure of the size of the expressions, strictly greater than 0
-- It also forces the expression pretty drastically as a side effect
+-- Counts *leaves*, not internal nodes. Types and coercions are not counted.
exprSize (Var v) = v `seq` 1
exprSize (Lit lit) = lit `seq` 1
exprSize (App f a) = exprSize f + exprSize a
altSize (c,bs,e) = c `seq` varsSize bs + exprSize e
\end{code}
+\begin{code}
+data CoreStats = CS { cs_tm, cs_ty, cs_co :: Int }
+
+plusCS :: CoreStats -> CoreStats -> CoreStats
+plusCS (CS { cs_tm = p1, cs_ty = q1, cs_co = r1 })
+ (CS { cs_tm = p2, cs_ty = q2, cs_co = r2 })
+ = CS { cs_tm = p1+p2, cs_ty = q1+q2, cs_co = r1+r2 }
+
+zeroCS, oneTM :: CoreStats
+zeroCS = CS { cs_tm = 0, cs_ty = 0, cs_co = 0 }
+oneTM = zeroCS { cs_tm = 1 }
+
+sumCS :: (a -> CoreStats) -> [a] -> CoreStats
+sumCS f = foldr (plusCS . f) zeroCS
+
+coreBindsStats :: [CoreBind] -> CoreStats
+coreBindsStats = sumCS bindStats
+
+bindStats :: CoreBind -> CoreStats
+bindStats (NonRec v r) = bindingStats v r
+bindStats (Rec prs) = sumCS (\(v,r) -> bindingStats v r) prs
+
+bindingStats :: Var -> CoreExpr -> CoreStats
+bindingStats v r = bndrStats v `plusCS` exprStats r
+
+bndrStats :: Var -> CoreStats
+bndrStats v = oneTM `plusCS` tyStats (varType v)
+
+exprStats :: CoreExpr -> CoreStats
+exprStats (Var {}) = oneTM
+exprStats (Lit {}) = oneTM
+exprStats (App f (Type t))= tyCoStats (exprType f) t
+exprStats (App f a) = exprStats f `plusCS` exprStats a
+exprStats (Lam b e) = bndrStats b `plusCS` exprStats e
+exprStats (Let b e) = bindStats b `plusCS` exprStats e
+exprStats (Case e b _ as) = exprStats e `plusCS` bndrStats b `plusCS` sumCS altStats as
+exprStats (Cast e co) = coStats co `plusCS` exprStats e
+exprStats (Note _ e) = exprStats e
+exprStats (Type ty) = zeroCS { cs_ty = typeSize ty }
+ -- Ugh (might be a co)
+
+altStats :: CoreAlt -> CoreStats
+altStats (_, bs, r) = sumCS bndrStats bs `plusCS` exprStats r
+
+tyCoStats :: Type -> Type -> CoreStats
+tyCoStats fun_ty arg
+ = case splitForAllTy_maybe fun_ty of
+ Just (tv,_) | isCoVar tv -> coStats arg
+ _ -> tyStats arg
+
+tyStats :: Type -> CoreStats
+tyStats ty = zeroCS { cs_ty = typeSize ty }
+
+coStats :: Coercion -> CoreStats
+coStats co = zeroCS { cs_co = typeSize co }
+\end{code}
%************************************************************************
%* *
\section[Coverage]{@coverage@: the main function}
\begin{code}
-module Coverage (addCoverageTicksToBinds) where
+module Coverage (addCoverageTicksToBinds, hpcInitCode) where
import HsSyn
import Module
import TyCon
import MonadUtils
import Maybes
+import CLabel
+import Util
import Data.Array
import System.Directory ( createDirectoryIfMissing )
mixHash file tm tabstop entries = fromIntegral $ hashString
(show $ Mix file tm 0 tabstop entries)
\end{code}
+
+%************************************************************************
+%* *
+%* initialisation
+%* *
+%************************************************************************
+
+Each module compiled with -fhpc declares an initialisation function of
+the form `hpc_init_<module>()`, which is emitted into the _stub.c file
+and annotated with __attribute__((constructor)) so that it gets
+executed at startup time.
+
+The function's purpose is to call hs_hpc_module to register this
+module with the RTS, and it looks something like this:
+
+static void hpc_init_Main(void) __attribute__((constructor));
+static void hpc_init_Main(void)
+{extern StgWord64 _hpc_tickboxes_Main_hpc[];
+ hs_hpc_module("Main",8,1150288664,_hpc_tickboxes_Main_hpc);}
+
+\begin{code}
+hpcInitCode :: Module -> HpcInfo -> SDoc
+hpcInitCode _ (NoHpcInfo {}) = empty
+hpcInitCode this_mod (HpcInfo tickCount hashNo)
+ = vcat
+ [ text "static void hpc_init_" <> ppr this_mod
+ <> text "(void) __attribute__((constructor));"
+ , text "static void hpc_init_" <> ppr this_mod <> text "(void)"
+ , braces (vcat [
+ ptext (sLit "extern StgWord64 ") <> tickboxes <>
+ ptext (sLit "[]") <> semi,
+ ptext (sLit "hs_hpc_module") <>
+ parens (hcat (punctuate comma [
+ doubleQuotes full_name_str,
+ int tickCount, -- really StgWord32
+ int hashNo, -- really StgWord32
+ tickboxes
+ ])) <> semi
+ ])
+ ]
+ where
+ tickboxes = pprCLabel (mkHpcTicksLabel $ this_mod)
+
+ module_name = hcat (map (text.charToC) $
+ bytesFS (moduleNameFS (Module.moduleName this_mod)))
+ package_name = hcat (map (text.charToC) $
+ bytesFS (packageIdFS (modulePackageId this_mod)))
+ full_name_str
+ | modulePackageId this_mod == mainPackageId
+ = module_name
+ | otherwise
+ = package_name <> char '/' <> module_name
+\end{code}
; ds_vects <- mapM dsVect vects
; hetmet_brak <- if dopt Opt_F_coqpass dflags then dsLookupGlobalId hetmet_brak_name else return undefined
; hetmet_esc <- if dopt Opt_F_coqpass dflags then dsLookupGlobalId hetmet_esc_name else return undefined
+ ; let hpc_init
+ | opt_Hpc = hpcInitCode mod ds_hpc_info
+ | otherwise = empty
; return ( ds_ev_binds
, foreign_prs `appOL` core_prs `appOL` spec_prs
, spec_rules ++ ds_rules, ds_vects
- , ds_fords, ds_hpc_info, modBreaks, hetmet_brak, hetmet_esc) }
+ , ds_fords `appendStubC` hpc_init
+ , ds_hpc_info, modBreaks, hetmet_brak, hetmet_esc) }
; case mb_res of {
Nothing -> return (msgs, Nothing) ;
varStrictTypeName,
-- Type
forallTName, varTName, conTName, appTName,
- tupleTName, arrowTName, listTName, sigTName,
+ tupleTName, unboxedTupleTName, arrowTName, listTName, sigTName,
-- TyVarBndr
plainTVName, kindedTVName,
-- Kind
quoteDecName = qqFun (fsLit "quoteDec") quoteDecKey
quoteTypeName = qqFun (fsLit "quoteType") quoteTypeKey
--- TyConUniques available: 100-129
+-- TyConUniques available: 200-299
-- Check in PrelNames if you want to change this
expTyConKey, matchTyConKey, clauseTyConKey, qTyConKey, expQTyConKey,
fieldExpTyConKey, fieldPatTyConKey, nameTyConKey, patQTyConKey,
fieldPatQTyConKey, fieldExpQTyConKey, funDepTyConKey, predTyConKey,
predQTyConKey, decsQTyConKey :: Unique
-expTyConKey = mkPreludeTyConUnique 100
-matchTyConKey = mkPreludeTyConUnique 101
-clauseTyConKey = mkPreludeTyConUnique 102
-qTyConKey = mkPreludeTyConUnique 103
-expQTyConKey = mkPreludeTyConUnique 104
-decQTyConKey = mkPreludeTyConUnique 105
-patTyConKey = mkPreludeTyConUnique 106
-matchQTyConKey = mkPreludeTyConUnique 107
-clauseQTyConKey = mkPreludeTyConUnique 108
-stmtQTyConKey = mkPreludeTyConUnique 109
-conQTyConKey = mkPreludeTyConUnique 110
-typeQTyConKey = mkPreludeTyConUnique 111
-typeTyConKey = mkPreludeTyConUnique 112
-decTyConKey = mkPreludeTyConUnique 113
-varStrictTypeQTyConKey = mkPreludeTyConUnique 114
-strictTypeQTyConKey = mkPreludeTyConUnique 115
-fieldExpTyConKey = mkPreludeTyConUnique 116
-fieldPatTyConKey = mkPreludeTyConUnique 117
-nameTyConKey = mkPreludeTyConUnique 118
-patQTyConKey = mkPreludeTyConUnique 119
-fieldPatQTyConKey = mkPreludeTyConUnique 120
-fieldExpQTyConKey = mkPreludeTyConUnique 121
-funDepTyConKey = mkPreludeTyConUnique 122
-predTyConKey = mkPreludeTyConUnique 123
-predQTyConKey = mkPreludeTyConUnique 124
-tyVarBndrTyConKey = mkPreludeTyConUnique 125
-decsQTyConKey = mkPreludeTyConUnique 126
+expTyConKey = mkPreludeTyConUnique 200
+matchTyConKey = mkPreludeTyConUnique 201
+clauseTyConKey = mkPreludeTyConUnique 202
+qTyConKey = mkPreludeTyConUnique 203
+expQTyConKey = mkPreludeTyConUnique 204
+decQTyConKey = mkPreludeTyConUnique 205
+patTyConKey = mkPreludeTyConUnique 206
+matchQTyConKey = mkPreludeTyConUnique 207
+clauseQTyConKey = mkPreludeTyConUnique 208
+stmtQTyConKey = mkPreludeTyConUnique 209
+conQTyConKey = mkPreludeTyConUnique 210
+typeQTyConKey = mkPreludeTyConUnique 211
+typeTyConKey = mkPreludeTyConUnique 212
+decTyConKey = mkPreludeTyConUnique 213
+varStrictTypeQTyConKey = mkPreludeTyConUnique 214
+strictTypeQTyConKey = mkPreludeTyConUnique 215
+fieldExpTyConKey = mkPreludeTyConUnique 216
+fieldPatTyConKey = mkPreludeTyConUnique 217
+nameTyConKey = mkPreludeTyConUnique 218
+patQTyConKey = mkPreludeTyConUnique 219
+fieldPatQTyConKey = mkPreludeTyConUnique 220
+fieldExpQTyConKey = mkPreludeTyConUnique 221
+funDepTyConKey = mkPreludeTyConUnique 222
+predTyConKey = mkPreludeTyConUnique 223
+predQTyConKey = mkPreludeTyConUnique 224
+tyVarBndrTyConKey = mkPreludeTyConUnique 225
+decsQTyConKey = mkPreludeTyConUnique 226
-- IdUniques available: 200-399
-- If you want to change this, make sure you check in PrelNames
-- data Lit = ...
charLIdKey, stringLIdKey, integerLIdKey, intPrimLIdKey, wordPrimLIdKey,
floatPrimLIdKey, doublePrimLIdKey, rationalLIdKey :: Unique
-charLIdKey = mkPreludeMiscIdUnique 210
-stringLIdKey = mkPreludeMiscIdUnique 211
-integerLIdKey = mkPreludeMiscIdUnique 212
-intPrimLIdKey = mkPreludeMiscIdUnique 213
-wordPrimLIdKey = mkPreludeMiscIdUnique 214
-floatPrimLIdKey = mkPreludeMiscIdUnique 215
-doublePrimLIdKey = mkPreludeMiscIdUnique 216
-rationalLIdKey = mkPreludeMiscIdUnique 217
+charLIdKey = mkPreludeMiscIdUnique 220
+stringLIdKey = mkPreludeMiscIdUnique 221
+integerLIdKey = mkPreludeMiscIdUnique 222
+intPrimLIdKey = mkPreludeMiscIdUnique 223
+wordPrimLIdKey = mkPreludeMiscIdUnique 224
+floatPrimLIdKey = mkPreludeMiscIdUnique 225
+doublePrimLIdKey = mkPreludeMiscIdUnique 226
+rationalLIdKey = mkPreludeMiscIdUnique 227
liftStringIdKey :: Unique
-liftStringIdKey = mkPreludeMiscIdUnique 218
+liftStringIdKey = mkPreludeMiscIdUnique 228
-- data Pat = ...
litPIdKey, varPIdKey, tupPIdKey, unboxedTupPIdKey, conPIdKey, infixPIdKey, tildePIdKey, bangPIdKey,
asPIdKey, wildPIdKey, recPIdKey, listPIdKey, sigPIdKey, viewPIdKey :: Unique
-litPIdKey = mkPreludeMiscIdUnique 220
-varPIdKey = mkPreludeMiscIdUnique 221
-tupPIdKey = mkPreludeMiscIdUnique 222
-unboxedTupPIdKey = mkPreludeMiscIdUnique 362
-conPIdKey = mkPreludeMiscIdUnique 223
-infixPIdKey = mkPreludeMiscIdUnique 312
-tildePIdKey = mkPreludeMiscIdUnique 224
-bangPIdKey = mkPreludeMiscIdUnique 359
-asPIdKey = mkPreludeMiscIdUnique 225
-wildPIdKey = mkPreludeMiscIdUnique 226
-recPIdKey = mkPreludeMiscIdUnique 227
-listPIdKey = mkPreludeMiscIdUnique 228
-sigPIdKey = mkPreludeMiscIdUnique 229
-viewPIdKey = mkPreludeMiscIdUnique 360
+litPIdKey = mkPreludeMiscIdUnique 240
+varPIdKey = mkPreludeMiscIdUnique 241
+tupPIdKey = mkPreludeMiscIdUnique 242
+unboxedTupPIdKey = mkPreludeMiscIdUnique 243
+conPIdKey = mkPreludeMiscIdUnique 244
+infixPIdKey = mkPreludeMiscIdUnique 245
+tildePIdKey = mkPreludeMiscIdUnique 246
+bangPIdKey = mkPreludeMiscIdUnique 247
+asPIdKey = mkPreludeMiscIdUnique 248
+wildPIdKey = mkPreludeMiscIdUnique 249
+recPIdKey = mkPreludeMiscIdUnique 250
+listPIdKey = mkPreludeMiscIdUnique 251
+sigPIdKey = mkPreludeMiscIdUnique 252
+viewPIdKey = mkPreludeMiscIdUnique 253
-- type FieldPat = ...
fieldPatIdKey :: Unique
-fieldPatIdKey = mkPreludeMiscIdUnique 230
+fieldPatIdKey = mkPreludeMiscIdUnique 260
-- data Match = ...
matchIdKey :: Unique
-matchIdKey = mkPreludeMiscIdUnique 231
+matchIdKey = mkPreludeMiscIdUnique 261
-- data Clause = ...
clauseIdKey :: Unique
-clauseIdKey = mkPreludeMiscIdUnique 232
+clauseIdKey = mkPreludeMiscIdUnique 262
-- data Exp = ...
letEIdKey, caseEIdKey, doEIdKey, compEIdKey,
fromEIdKey, fromThenEIdKey, fromToEIdKey, fromThenToEIdKey,
listEIdKey, sigEIdKey, recConEIdKey, recUpdEIdKey :: Unique
-varEIdKey = mkPreludeMiscIdUnique 240
-conEIdKey = mkPreludeMiscIdUnique 241
-litEIdKey = mkPreludeMiscIdUnique 242
-appEIdKey = mkPreludeMiscIdUnique 243
-infixEIdKey = mkPreludeMiscIdUnique 244
-infixAppIdKey = mkPreludeMiscIdUnique 245
-sectionLIdKey = mkPreludeMiscIdUnique 246
-sectionRIdKey = mkPreludeMiscIdUnique 247
-lamEIdKey = mkPreludeMiscIdUnique 248
-tupEIdKey = mkPreludeMiscIdUnique 249
-unboxedTupEIdKey = mkPreludeMiscIdUnique 263
-condEIdKey = mkPreludeMiscIdUnique 250
-letEIdKey = mkPreludeMiscIdUnique 251
-caseEIdKey = mkPreludeMiscIdUnique 252
-doEIdKey = mkPreludeMiscIdUnique 253
-compEIdKey = mkPreludeMiscIdUnique 254
-fromEIdKey = mkPreludeMiscIdUnique 255
-fromThenEIdKey = mkPreludeMiscIdUnique 256
-fromToEIdKey = mkPreludeMiscIdUnique 257
-fromThenToEIdKey = mkPreludeMiscIdUnique 258
-listEIdKey = mkPreludeMiscIdUnique 259
-sigEIdKey = mkPreludeMiscIdUnique 260
-recConEIdKey = mkPreludeMiscIdUnique 261
-recUpdEIdKey = mkPreludeMiscIdUnique 262
+varEIdKey = mkPreludeMiscIdUnique 270
+conEIdKey = mkPreludeMiscIdUnique 271
+litEIdKey = mkPreludeMiscIdUnique 272
+appEIdKey = mkPreludeMiscIdUnique 273
+infixEIdKey = mkPreludeMiscIdUnique 274
+infixAppIdKey = mkPreludeMiscIdUnique 275
+sectionLIdKey = mkPreludeMiscIdUnique 276
+sectionRIdKey = mkPreludeMiscIdUnique 277
+lamEIdKey = mkPreludeMiscIdUnique 278
+tupEIdKey = mkPreludeMiscIdUnique 279
+unboxedTupEIdKey = mkPreludeMiscIdUnique 280
+condEIdKey = mkPreludeMiscIdUnique 281
+letEIdKey = mkPreludeMiscIdUnique 282
+caseEIdKey = mkPreludeMiscIdUnique 283
+doEIdKey = mkPreludeMiscIdUnique 284
+compEIdKey = mkPreludeMiscIdUnique 285
+fromEIdKey = mkPreludeMiscIdUnique 286
+fromThenEIdKey = mkPreludeMiscIdUnique 287
+fromToEIdKey = mkPreludeMiscIdUnique 288
+fromThenToEIdKey = mkPreludeMiscIdUnique 289
+listEIdKey = mkPreludeMiscIdUnique 290
+sigEIdKey = mkPreludeMiscIdUnique 291
+recConEIdKey = mkPreludeMiscIdUnique 292
+recUpdEIdKey = mkPreludeMiscIdUnique 293
-- type FieldExp = ...
fieldExpIdKey :: Unique
-fieldExpIdKey = mkPreludeMiscIdUnique 265
+fieldExpIdKey = mkPreludeMiscIdUnique 310
-- data Body = ...
guardedBIdKey, normalBIdKey :: Unique
-guardedBIdKey = mkPreludeMiscIdUnique 266
-normalBIdKey = mkPreludeMiscIdUnique 267
+guardedBIdKey = mkPreludeMiscIdUnique 311
+normalBIdKey = mkPreludeMiscIdUnique 312
-- data Guard = ...
normalGEIdKey, patGEIdKey :: Unique
-normalGEIdKey = mkPreludeMiscIdUnique 310
-patGEIdKey = mkPreludeMiscIdUnique 311
+normalGEIdKey = mkPreludeMiscIdUnique 313
+patGEIdKey = mkPreludeMiscIdUnique 314
-- data Stmt = ...
bindSIdKey, letSIdKey, noBindSIdKey, parSIdKey :: Unique
-bindSIdKey = mkPreludeMiscIdUnique 268
-letSIdKey = mkPreludeMiscIdUnique 269
-noBindSIdKey = mkPreludeMiscIdUnique 270
-parSIdKey = mkPreludeMiscIdUnique 271
+bindSIdKey = mkPreludeMiscIdUnique 320
+letSIdKey = mkPreludeMiscIdUnique 321
+noBindSIdKey = mkPreludeMiscIdUnique 322
+parSIdKey = mkPreludeMiscIdUnique 323
-- data Dec = ...
funDIdKey, valDIdKey, dataDIdKey, newtypeDIdKey, tySynDIdKey,
classDIdKey, instanceDIdKey, sigDIdKey, forImpDIdKey, pragInlDIdKey,
pragSpecDIdKey, pragSpecInlDIdKey, familyNoKindDIdKey, familyKindDIdKey,
dataInstDIdKey, newtypeInstDIdKey, tySynInstDIdKey :: Unique
-funDIdKey = mkPreludeMiscIdUnique 272
-valDIdKey = mkPreludeMiscIdUnique 273
-dataDIdKey = mkPreludeMiscIdUnique 274
-newtypeDIdKey = mkPreludeMiscIdUnique 275
-tySynDIdKey = mkPreludeMiscIdUnique 276
-classDIdKey = mkPreludeMiscIdUnique 277
-instanceDIdKey = mkPreludeMiscIdUnique 278
-sigDIdKey = mkPreludeMiscIdUnique 279
-forImpDIdKey = mkPreludeMiscIdUnique 297
-pragInlDIdKey = mkPreludeMiscIdUnique 348
-pragSpecDIdKey = mkPreludeMiscIdUnique 349
-pragSpecInlDIdKey = mkPreludeMiscIdUnique 352
-familyNoKindDIdKey= mkPreludeMiscIdUnique 340
-familyKindDIdKey = mkPreludeMiscIdUnique 353
-dataInstDIdKey = mkPreludeMiscIdUnique 341
-newtypeInstDIdKey = mkPreludeMiscIdUnique 342
-tySynInstDIdKey = mkPreludeMiscIdUnique 343
+funDIdKey = mkPreludeMiscIdUnique 330
+valDIdKey = mkPreludeMiscIdUnique 331
+dataDIdKey = mkPreludeMiscIdUnique 332
+newtypeDIdKey = mkPreludeMiscIdUnique 333
+tySynDIdKey = mkPreludeMiscIdUnique 334
+classDIdKey = mkPreludeMiscIdUnique 335
+instanceDIdKey = mkPreludeMiscIdUnique 336
+sigDIdKey = mkPreludeMiscIdUnique 337
+forImpDIdKey = mkPreludeMiscIdUnique 338
+pragInlDIdKey = mkPreludeMiscIdUnique 339
+pragSpecDIdKey = mkPreludeMiscIdUnique 340
+pragSpecInlDIdKey = mkPreludeMiscIdUnique 341
+familyNoKindDIdKey = mkPreludeMiscIdUnique 342
+familyKindDIdKey = mkPreludeMiscIdUnique 343
+dataInstDIdKey = mkPreludeMiscIdUnique 344
+newtypeInstDIdKey = mkPreludeMiscIdUnique 345
+tySynInstDIdKey = mkPreludeMiscIdUnique 346
-- type Cxt = ...
cxtIdKey :: Unique
-cxtIdKey = mkPreludeMiscIdUnique 280
+cxtIdKey = mkPreludeMiscIdUnique 360
-- data Pred = ...
classPIdKey, equalPIdKey :: Unique
-classPIdKey = mkPreludeMiscIdUnique 346
-equalPIdKey = mkPreludeMiscIdUnique 347
+classPIdKey = mkPreludeMiscIdUnique 361
+equalPIdKey = mkPreludeMiscIdUnique 362
-- data Strict = ...
isStrictKey, notStrictKey :: Unique
-isStrictKey = mkPreludeMiscIdUnique 281
-notStrictKey = mkPreludeMiscIdUnique 282
+isStrictKey = mkPreludeMiscIdUnique 363
+notStrictKey = mkPreludeMiscIdUnique 364
-- data Con = ...
normalCIdKey, recCIdKey, infixCIdKey, forallCIdKey :: Unique
-normalCIdKey = mkPreludeMiscIdUnique 283
-recCIdKey = mkPreludeMiscIdUnique 284
-infixCIdKey = mkPreludeMiscIdUnique 285
-forallCIdKey = mkPreludeMiscIdUnique 288
+normalCIdKey = mkPreludeMiscIdUnique 370
+recCIdKey = mkPreludeMiscIdUnique 371
+infixCIdKey = mkPreludeMiscIdUnique 372
+forallCIdKey = mkPreludeMiscIdUnique 373
-- type StrictType = ...
strictTKey :: Unique
-strictTKey = mkPreludeMiscIdUnique 286
+strictTKey = mkPreludeMiscIdUnique 374
-- type VarStrictType = ...
varStrictTKey :: Unique
-varStrictTKey = mkPreludeMiscIdUnique 287
+varStrictTKey = mkPreludeMiscIdUnique 375
-- data Type = ...
forallTIdKey, varTIdKey, conTIdKey, tupleTIdKey, unboxedTupleTIdKey, arrowTIdKey,
listTIdKey, appTIdKey, sigTIdKey :: Unique
-forallTIdKey = mkPreludeMiscIdUnique 290
-varTIdKey = mkPreludeMiscIdUnique 291
-conTIdKey = mkPreludeMiscIdUnique 292
-tupleTIdKey = mkPreludeMiscIdUnique 294
-unboxedTupleTIdKey = mkPreludeMiscIdUnique 361
-arrowTIdKey = mkPreludeMiscIdUnique 295
-listTIdKey = mkPreludeMiscIdUnique 296
-appTIdKey = mkPreludeMiscIdUnique 293
-sigTIdKey = mkPreludeMiscIdUnique 358
+forallTIdKey = mkPreludeMiscIdUnique 380
+varTIdKey = mkPreludeMiscIdUnique 381
+conTIdKey = mkPreludeMiscIdUnique 382
+tupleTIdKey = mkPreludeMiscIdUnique 383
+unboxedTupleTIdKey = mkPreludeMiscIdUnique 384
+arrowTIdKey = mkPreludeMiscIdUnique 385
+listTIdKey = mkPreludeMiscIdUnique 386
+appTIdKey = mkPreludeMiscIdUnique 387
+sigTIdKey = mkPreludeMiscIdUnique 388
-- data TyVarBndr = ...
plainTVIdKey, kindedTVIdKey :: Unique
-plainTVIdKey = mkPreludeMiscIdUnique 354
-kindedTVIdKey = mkPreludeMiscIdUnique 355
+plainTVIdKey = mkPreludeMiscIdUnique 390
+kindedTVIdKey = mkPreludeMiscIdUnique 391
-- data Kind = ...
starKIdKey, arrowKIdKey :: Unique
-starKIdKey = mkPreludeMiscIdUnique 356
-arrowKIdKey = mkPreludeMiscIdUnique 357
+starKIdKey = mkPreludeMiscIdUnique 392
+arrowKIdKey = mkPreludeMiscIdUnique 393
-- data Callconv = ...
cCallIdKey, stdCallIdKey :: Unique
-cCallIdKey = mkPreludeMiscIdUnique 300
-stdCallIdKey = mkPreludeMiscIdUnique 301
+cCallIdKey = mkPreludeMiscIdUnique 394
+stdCallIdKey = mkPreludeMiscIdUnique 395
-- data Safety = ...
unsafeIdKey, safeIdKey, threadsafeIdKey, interruptibleIdKey :: Unique
-unsafeIdKey = mkPreludeMiscIdUnique 305
-safeIdKey = mkPreludeMiscIdUnique 306
-threadsafeIdKey = mkPreludeMiscIdUnique 307
-interruptibleIdKey = mkPreludeMiscIdUnique 308
+unsafeIdKey = mkPreludeMiscIdUnique 400
+safeIdKey = mkPreludeMiscIdUnique 401
+threadsafeIdKey = mkPreludeMiscIdUnique 402
+interruptibleIdKey = mkPreludeMiscIdUnique 403
-- data InlineSpec =
inlineSpecNoPhaseIdKey, inlineSpecPhaseIdKey :: Unique
-inlineSpecNoPhaseIdKey = mkPreludeMiscIdUnique 350
-inlineSpecPhaseIdKey = mkPreludeMiscIdUnique 351
+inlineSpecNoPhaseIdKey = mkPreludeMiscIdUnique 404
+inlineSpecPhaseIdKey = mkPreludeMiscIdUnique 405
-- data FunDep = ...
funDepIdKey :: Unique
-funDepIdKey = mkPreludeMiscIdUnique 320
+funDepIdKey = mkPreludeMiscIdUnique 406
-- data FamFlavour = ...
typeFamIdKey, dataFamIdKey :: Unique
-typeFamIdKey = mkPreludeMiscIdUnique 344
-dataFamIdKey = mkPreludeMiscIdUnique 345
+typeFamIdKey = mkPreludeMiscIdUnique 407
+dataFamIdKey = mkPreludeMiscIdUnique 408
-- quasiquoting
quoteExpKey, quotePatKey, quoteDecKey, quoteTypeKey :: Unique
-quoteExpKey = mkPreludeMiscIdUnique 321
-quotePatKey = mkPreludeMiscIdUnique 322
-quoteDecKey = mkPreludeMiscIdUnique 323
-quoteTypeKey = mkPreludeMiscIdUnique 324
+quoteExpKey = mkPreludeMiscIdUnique 410
+quotePatKey = mkPreludeMiscIdUnique 411
+quoteDecKey = mkPreludeMiscIdUnique 412
+quoteTypeKey = mkPreludeMiscIdUnique 413
TysPrim
TysWiredIn
CostCentre
+ ProfInit
SCCfinal
RnBinds
RnEnv
@echo 'cMKDLL = "$(BLD_DLL)"' >> $@
@echo 'cLdIsGNULd :: String' >> $@
@echo 'cLdIsGNULd = "$(LdIsGNULd)"' >> $@
+ @echo 'cLdHasBuildId :: String' >> $@
+ @echo 'cLdHasBuildId = "$(LdHasBuildId)"' >> $@
@echo 'cLD_X :: String' >> $@
@echo 'cLD_X = "$(LD_X)"' >> $@
@echo 'cGHC_DRIVER_DIR :: String' >> $@
@echo 'cGHC_UNLIT_PGM = "$(GHC_UNLIT_PGM)"' >> $@
@echo 'cGHC_UNLIT_DIR :: String' >> $@
@echo 'cGHC_UNLIT_DIR = "$(GHC_UNLIT_DIR)"' >> $@
- @echo 'cGHC_MANGLER_PGM :: String' >> $@
- @echo 'cGHC_MANGLER_PGM = "$(GHC_MANGLER_PGM)"' >> $@
- @echo 'cGHC_MANGLER_DIR :: String' >> $@
- @echo 'cGHC_MANGLER_DIR = "$(GHC_MANGLER_DIR)"' >> $@
@echo 'cGHC_SPLIT_PGM :: String' >> $@
@echo 'cGHC_SPLIT_PGM = "$(GHC_SPLIT_PGM)"' >> $@
@echo 'cGHC_SPLIT_DIR :: String' >> $@
compiler_stage2_HC_OPTS += $(GhcStage2HcOpts)
compiler_stage3_HC_OPTS += $(GhcStage3HcOpts)
+ifeq "$(GhcStage1DefaultNewCodegen)" "YES"
+compiler_stage1_HC_OPTS += -DGHC_DEFAULT_NEW_CODEGEN
+endif
+
+ifeq "$(GhcStage2DefaultNewCodegen)" "YES"
+compiler_stage2_HC_OPTS += -DGHC_DEFAULT_NEW_CODEGEN
+endif
+
+ifeq "$(GhcStage3DefaultNewCodegen)" "YES"
+compiler_stage3_HC_OPTS += -DGHC_DEFAULT_NEW_CODEGEN
+endif
+
ifneq "$(BINDIST)" "YES"
compiler_stage2_TAGS_HC_OPTS = -package ghc
where
(modWords, occWord) = ASSERT (length rest1 > 0) (parseModOcc [] (tail rest1))
parseModOcc :: [[Word8]] -> [Word8] -> ([[Word8]], [Word8])
- parseModOcc acc str
+ -- We only look for dots if str could start with a module name,
+ -- i.e. if it starts with an upper case character.
+ -- Otherwise we might think that "X.:->" is the module name in
+ -- "X.:->.+", whereas actually "X" is the module name and
+ -- ":->.+" is a constructor name.
+ parseModOcc acc str@(c : _)
+ | isUpper $ chr $ fromIntegral c
= case break (== dot) str of
(top, []) -> (acc, top)
- (top, _:bot) -> parseModOcc (top : acc) bot
-
+ (top, _ : bot) -> parseModOcc (top : acc) bot
+ parseModOcc acc str = (acc, str)
+
-- | Get the 'HValue' associated with the given name.
--
-- May cause loading the module that contains the name.
-Subproject commit 6c949de6b044bda942fd0553e3eb9c0386a94e44
+Subproject commit b18f84ae40af08b3df0214593f4e4eb0665cdf7d
collectSigTysFromPats, collectSigTysFromPat,
hsTyClDeclBinders, hsTyClDeclsBinders,
- hsForeignDeclsBinders, hsGroupBinders
+ hsForeignDeclsBinders, hsGroupBinders,
+
+ -- Collecting implicit binders
+ lStmtsImplicits, hsValBindsImplicits, lPatImplicits
) where
import HsDecls
import BasicTypes
import SrcLoc
import FastString
+import Outputable
import Util
import Bag
+
+import Data.Either
\end{code}
%************************************************************************
%* *
+ Collecting binders the user did not write
+%* *
+%************************************************************************
+
+The job of this family of functions is to run through binding sites and find the set of all Names
+that were defined "implicitly", without being explicitly written by the user.
+
+The main purpose is to find names introduced by record wildcards so that we can avoid
+warning the user when they don't use those names (#4404)
+
+\begin{code}
+lStmtsImplicits :: [LStmtLR Name idR] -> NameSet
+lStmtsImplicits = hs_lstmts
+ where
+ hs_lstmts :: [LStmtLR Name idR] -> NameSet
+ hs_lstmts = foldr (\stmt rest -> unionNameSets (hs_stmt (unLoc stmt)) rest) emptyNameSet
+
+ hs_stmt (BindStmt pat _ _ _) = lPatImplicits pat
+ hs_stmt (LetStmt binds) = hs_local_binds binds
+ hs_stmt (ExprStmt _ _ _) = emptyNameSet
+ hs_stmt (ParStmt xs) = hs_lstmts $ concatMap fst xs
+
+ hs_stmt (TransformStmt stmts _ _ _) = hs_lstmts stmts
+ hs_stmt (GroupStmt stmts _ _ _) = hs_lstmts stmts
+ hs_stmt (RecStmt { recS_stmts = ss }) = hs_lstmts ss
+
+ hs_local_binds (HsValBinds val_binds) = hsValBindsImplicits val_binds
+ hs_local_binds (HsIPBinds _) = emptyNameSet
+ hs_local_binds EmptyLocalBinds = emptyNameSet
+
+hsValBindsImplicits :: HsValBindsLR Name idR -> NameSet
+hsValBindsImplicits (ValBindsOut binds _)
+ = unionManyNameSets [foldBag unionNameSets (hs_bind . unLoc) emptyNameSet hs_binds | (_rec, hs_binds) <- binds]
+ where
+ hs_bind (PatBind { pat_lhs = lpat }) = lPatImplicits lpat
+ hs_bind _ = emptyNameSet
+hsValBindsImplicits (ValBindsIn {}) = pprPanic "hsValBindsImplicits: ValBindsIn" empty
+
+lPatImplicits :: LPat Name -> NameSet
+lPatImplicits = hs_lpat
+ where
+ hs_lpat (L _ pat) = hs_pat pat
+
+ hs_lpats = foldr (\pat rest -> hs_lpat pat `unionNameSets` rest) emptyNameSet
+
+ hs_pat (LazyPat pat) = hs_lpat pat
+ hs_pat (BangPat pat) = hs_lpat pat
+ hs_pat (AsPat _ pat) = hs_lpat pat
+ hs_pat (ViewPat _ pat _) = hs_lpat pat
+ hs_pat (ParPat pat) = hs_lpat pat
+ hs_pat (ListPat pats _) = hs_lpats pats
+ hs_pat (PArrPat pats _) = hs_lpats pats
+ hs_pat (TuplePat pats _ _) = hs_lpats pats
+
+ hs_pat (SigPatIn pat _) = hs_lpat pat
+ hs_pat (SigPatOut pat _) = hs_lpat pat
+ hs_pat (CoPat _ pat _) = hs_pat pat
+
+ hs_pat (ConPatIn _ ps) = details ps
+ hs_pat (ConPatOut {pat_args=ps}) = details ps
+
+ hs_pat _ = emptyNameSet
+
+ details (PrefixCon ps) = hs_lpats ps
+ details (RecCon fs) = hs_lpats explicit `unionNameSets` mkNameSet (collectPatsBinders implicit)
+ where (explicit, implicit) = partitionEithers [if pat_explicit then Left pat else Right pat
+ | (i, fld) <- [0..] `zip` rec_flds fs
+ , let pat = hsRecFieldArg fld
+ pat_explicit = maybe True (i<) (rec_dotdot fs)]
+ details (InfixCon p1 p2) = hs_lpat p1 `unionNameSets` hs_lpat p2
+\end{code}
+
+
+%************************************************************************
+%* *
Collecting type signatures from patterns
%* *
%************************************************************************
newInfoSec = B.pack "\n\t.text"
newLine = B.pack "\n"
spInst = B.pack ", %esp\n"
-jmpInst = B.pack "jmp"
+jmpInst = B.pack "\n\tjmp"
-infoLen, spFix :: Int
+infoLen, spFix, labelStart :: Int
infoLen = B.length infoSec
spFix = 4
+labelStart = B.length jmpInst + 1
-- Search Predicates
eolPred, dollarPred, commaPred :: Char -> Bool
fixupStack f f' =
let -- fixup add ops
(a, c) = B.breakSubstring jmpInst f
- (l, b) = B.break eolPred c
+ -- we matched on a '\n' so go past it
+ (l', b) = B.break eolPred $ B.tail c
+ l = (B.head c) `B.cons` l'
(a', n) = B.breakEnd dollarPred a
(n', x) = B.break commaPred n
num = B.pack $ show $ readInt n' + spFix
then f' `B.append` f
-- We need to avoid processing jumps to labels, they are of the form:
-- jmp\tL..., jmp\t_f..., jmpl\t_f..., jmpl\t*%eax...
- else if B.index c 4 == 'L'
+ else if B.index c labelStart == 'L'
then fixupStack b $ f' `B.append` a `B.append` l
else fixupStack b $ f' `B.append` a' `B.append` num `B.append`
x `B.append` l
-- | read an int or error
readInt :: B.ByteString -> Int
readInt str | B.all isDigit str = (read . B.unpack) str
- | otherwise = error $ "LLvmMangler Cannot read" ++ show str
+ | otherwise = error $ "LLvmMangler Cannot read " ++ show str
++ "as it's not an Int"
import HscTypes
import DynFlags
import Config
+import SysTools
import ErrUtils ( dumpIfSet_dyn, showPass, ghcExit )
import Outputable
-> ForeignStubs
-> [PackageId]
-> [RawCmm] -- Compiled C--
- -> IO (Bool{-stub_h_exists-}, Bool{-stub_c_exists-})
+ -> IO (Bool{-stub_h_exists-}, Maybe FilePath{-stub_c_exists-})
codeOutput dflags this_mod location foreign_stubs pkg_deps flat_abstractC
=
\begin{code}
outputForeignStubs :: DynFlags -> Module -> ModLocation -> ForeignStubs
-> IO (Bool, -- Header file created
- Bool) -- C file created
+ Maybe FilePath) -- C file created
outputForeignStubs dflags mod location stubs
- = case stubs of
- NoStubs -> do
+ = do
+ let stub_h = mkStubPaths dflags (moduleName mod) location
+ stub_c <- newTempName dflags "c"
+
+ case stubs of
+ NoStubs -> do
-- When compiling External Core files, may need to use stub
-- files from a previous compilation
- stub_c_exists <- doesFileExist stub_c
- stub_h_exists <- doesFileExist stub_h
- return (stub_h_exists, stub_c_exists)
+ stub_h_exists <- doesFileExist stub_h
+ return (stub_h_exists, Nothing)
- ForeignStubs h_code c_code -> do
- let
+ ForeignStubs h_code c_code -> do
+ let
stub_c_output_d = pprCode CStyle c_code
stub_c_output_w = showSDoc stub_c_output_d
stub_h_output_w = showSDoc stub_h_output_d
-- in
- createDirectoryHierarchy (takeDirectory stub_c)
+ createDirectoryHierarchy (takeDirectory stub_h)
dumpIfSet_dyn dflags Opt_D_dump_foreign
"Foreign export header file" stub_h_output_d
-- isn't really HC code, so we need to define IN_STG_CODE==0 to
-- avoid the register variables etc. being enabled.
- return (stub_h_file_exists, stub_c_file_exists)
- where
- (stub_c, stub_h, _) = mkStubPaths dflags (moduleName mod) location
-
+ return (stub_h_file_exists, if stub_c_file_exists
+ then Just stub_c
+ else Nothing )
+ where
cplusplus_hdr = "#ifdef __cplusplus\nextern \"C\" {\n#endif\n"
cplusplus_ftr = "#ifdef __cplusplus\n}\n#endif\n"
| Hsc HscSource
| Ccpp
| Cc
+ | Cobjc
| HCc -- Haskellised C (as opposed to vanilla C) compilation
- | Mangle -- assembly mangling, now done by a separate script.
| SplitMangle -- after mangler if splitting
| SplitAs
| As
| LlvmMangle -- Fix up TNTC by processing assembly produced by LLVM
| CmmCpp -- pre-process Cmm source
| Cmm -- parse & compile Cmm code
+ | MergeStub -- merge in the stub object file
-- The final phase is a pseudo-phase that tells the pipeline to stop.
-- There is no runPhase case for it.
eqPhase (Hsc _) (Hsc _) = True
eqPhase Ccpp Ccpp = True
eqPhase Cc Cc = True
+eqPhase Cobjc Cobjc = True
eqPhase HCc HCc = True
-eqPhase Mangle Mangle = True
eqPhase SplitMangle SplitMangle = True
eqPhase SplitAs SplitAs = True
eqPhase As As = True
eqPhase LlvmMangle LlvmMangle = True
eqPhase CmmCpp CmmCpp = True
eqPhase Cmm Cmm = True
+eqPhase MergeStub MergeStub = True
eqPhase StopLn StopLn = True
eqPhase _ _ = False
after_x = nextPhase x
nextPhase :: Phase -> Phase
--- A conservative approximation the next phase, used in happensBefore
+-- A conservative approximation to the next phase, used in happensBefore
nextPhase (Unlit sf) = Cpp sf
nextPhase (Cpp sf) = HsPp sf
nextPhase (HsPp sf) = Hsc sf
nextPhase (Hsc _) = HCc
-nextPhase HCc = Mangle
-nextPhase Mangle = SplitMangle
nextPhase SplitMangle = As
nextPhase As = SplitAs
nextPhase LlvmOpt = LlvmLlc
nextPhase LlvmLlc = As
#endif
nextPhase LlvmMangle = As
-nextPhase SplitAs = StopLn
+nextPhase SplitAs = MergeStub
nextPhase Ccpp = As
nextPhase Cc = As
+nextPhase Cobjc = As
nextPhase CmmCpp = Cmm
nextPhase Cmm = HCc
+nextPhase HCc = As
+nextPhase MergeStub = StopLn
nextPhase StopLn = panic "nextPhase: nothing after StopLn"
-- the first compilation phase for a given file is determined
startPhase "c" = Cc
startPhase "cpp" = Ccpp
startPhase "C" = Cc
+startPhase "m" = Cobjc
startPhase "cc" = Ccpp
startPhase "cxx" = Ccpp
-startPhase "raw_s" = Mangle
startPhase "split_s" = SplitMangle
startPhase "s" = As
startPhase "S" = As
-- output filename. That could be fixed, but watch out.
phaseInputExt HCc = "hc"
phaseInputExt Ccpp = "cpp"
+phaseInputExt Cobjc = "m"
phaseInputExt Cc = "c"
-phaseInputExt Mangle = "raw_s"
phaseInputExt SplitMangle = "split_s" -- not really generated
phaseInputExt As = "s"
phaseInputExt LlvmOpt = "ll"
phaseInputExt SplitAs = "split_s" -- not really generated
phaseInputExt CmmCpp = "cmm"
phaseInputExt Cmm = "cmmcpp"
+phaseInputExt MergeStub = "o"
phaseInputExt StopLn = "o"
haskellish_src_suffixes, haskellish_suffixes, cish_suffixes,
haskellish_src_suffixes = haskellish_user_src_suffixes ++
[ "hspp", "hscpp", "hcr", "cmm", "cmmcpp" ]
haskellish_suffixes = haskellish_src_suffixes ++ ["hc", "raw_s"]
-cish_suffixes = [ "c", "cpp", "C", "cc", "cxx", "s", "S", "ll", "bc" ]
+cish_suffixes = [ "c", "cpp", "C", "cc", "cxx", "s", "S", "ll", "bc", "m" ]
extcoreish_suffixes = [ "hcr" ]
-- Will not be deleted as temp files:
haskellish_user_src_suffixes = [ "hs", "lhs", "hs-boot", "lhs-boot" ]
{-# OPTIONS -fno-cse #-}
+{-# LANGUAGE NamedFieldPuns #-}
-- -fno-cse is needed for GLOBAL_VAR's to behave properly
-----------------------------------------------------------------------------
import Data.List ( isSuffixOf )
import Data.Maybe
import System.Environment
+import Data.Char
-- ---------------------------------------------------------------------------
-- Pre-process
preprocess hsc_env (filename, mb_phase) =
ASSERT2(isJust mb_phase || isHaskellSrcFilename filename, text filename)
runPipeline anyHsc hsc_env (filename, mb_phase)
- Nothing Temporary Nothing{-no ModLocation-}
+ Nothing Temporary Nothing{-no ModLocation-} Nothing{-no stub-}
-- ---------------------------------------------------------------------------
hsc_env = hsc_env0 {hsc_dflags = dflags}
-- Figure out what lang we're generating
- let hsc_lang = hscMaybeAdjustTarget dflags StopLn src_flavour (hscTarget dflags)
+ let hsc_lang = hscTarget dflags
-- ... and what the next phase should be
let next_phase = hscNextPhase dflags src_flavour hsc_lang
-- ... and what file to generate the output into
source_unchanged = isJust maybe_old_linkable && not force_recomp
object_filename = ml_obj_file location
- let getStubLinkable False = return []
- getStubLinkable True
- = do stub_o <- compileStub hsc_env' this_mod location
- return [ DotO stub_o ]
-
- handleBatch HscNoRecomp
+ let handleBatch HscNoRecomp
= ASSERT (isJust maybe_old_linkable)
return maybe_old_linkable
return maybe_old_linkable
| otherwise
- = do stub_unlinked <- getStubLinkable hasStub
- (hs_unlinked, unlinked_time) <-
+ = do (hs_unlinked, unlinked_time) <-
case hsc_lang of
- HscNothing
- -> return ([], ms_hs_date summary)
+ HscNothing ->
+ return ([], ms_hs_date summary)
-- We're in --make mode: finish the compilation pipeline.
- _other
- -> do _ <- runPipeline StopLn hsc_env' (output_fn,Nothing)
+ _other -> do
+ maybe_stub_o <- case hasStub of
+ Nothing -> return Nothing
+ Just stub_c -> do
+ stub_o <- compileStub hsc_env' stub_c
+ return (Just stub_o)
+ _ <- runPipeline StopLn hsc_env' (output_fn,Nothing)
(Just basename)
Persistent
(Just location)
+ maybe_stub_o
-- The object filename comes from the ModLocation
- o_time <- getModificationTime object_filename
- return ([DotO object_filename], o_time)
- let linkable = LM unlinked_time this_mod
- (hs_unlinked ++ stub_unlinked)
+ o_time <- getModificationTime object_filename
+ return ([DotO object_filename], o_time)
+
+ let linkable = LM unlinked_time this_mod hs_unlinked
return (Just linkable)
handleInterpreted HscNoRecomp
= ASSERT (isHsBoot src_flavour)
return maybe_old_linkable
handleInterpreted (HscRecomp hasStub (Just (comp_bc, modBreaks)))
- = do stub_unlinked <- getStubLinkable hasStub
+ = do stub_o <- case hasStub of
+ Nothing -> return []
+ Just stub_c -> do
+ stub_o <- compileStub hsc_env' stub_c
+ return [DotO stub_o]
+
let hs_unlinked = [BCOs comp_bc modBreaks]
unlinked_time = ms_hs_date summary
-- Why do we use the timestamp of the source file here,
-- if the source is modified, then the linkable will
-- be out of date.
let linkable = LM unlinked_time this_mod
- (hs_unlinked ++ stub_unlinked)
+ (hs_unlinked ++ stub_o)
return (Just linkable)
let -- runCompiler :: Compiler result -> (result -> Maybe Linkable)
-- The _stub.c file is derived from the haskell source file, possibly taking
-- into account the -stubdir option.
--
--- Consequently, we derive the _stub.o filename from the haskell object
--- filename.
---
--- This isn't necessarily the same as the object filename we
--- would get if we just compiled the _stub.c file using the pipeline.
--- For example:
---
--- ghc src/A.hs -odir obj
---
--- results in obj/A.o, and src/A_stub.c. If we compile src/A_stub.c with
--- -odir obj, we would get obj/src/A_stub.o, which is wrong; we want
--- obj/A_stub.o.
+-- The object file created by compiling the _stub.c file is put into a
+-- temporary file, which will later be combined with the main .o file
+-- (see the MergeStub phase).
-compileStub :: HscEnv -> Module -> ModLocation -> IO FilePath
-compileStub hsc_env mod location = do
- -- compile the _stub.c file w/ gcc
- let (stub_c,_,stub_o) = mkStubPaths (hsc_dflags hsc_env)
- (moduleName mod) location
-
- _ <- runPipeline StopLn hsc_env (stub_c,Nothing) Nothing
- (SpecificFile stub_o) Nothing{-no ModLocation-}
+compileStub :: HscEnv -> FilePath -> IO FilePath
+compileStub hsc_env stub_c = do
+ (_, stub_o) <- runPipeline StopLn hsc_env (stub_c,Nothing) Nothing
+ Temporary Nothing{-no ModLocation-} Nothing
return stub_o
-
-- ---------------------------------------------------------------------------
-- Link
let (lib_errs,lib_times) = splitEithers e_lib_times
if not (null lib_errs) || any (t <) lib_times
then return True
- else return False
+ else checkLinkInfo dflags pkg_deps exe_file
+
+-- Returns 'False' if the executable was already linked with "the same
+-- options", in which case we can avoid relinking.
+checkLinkInfo :: DynFlags -> [PackageId] -> FilePath -> IO Bool
+checkLinkInfo dflags pkg_deps exe_file
+ | isWindowsTarget || isDarwinTarget
+ -- ToDo: Windows and OS X do not use the ELF binary format, so
+ -- readelf does not work there. We need to find another way to do
+ -- this.
+ = return False -- conservatively we should return True, but not
+ -- linking in this case was the behaviour for a long
+ -- time so we leave it as-is.
+ | otherwise
+ = do
+ link_info <- getLinkInfo dflags pkg_deps
+ debugTraceMsg dflags 3 $ text ("Link info: " ++ link_info)
+ m_exe_link_info <- readElfSection dflags ghcLinkInfoSectionName exe_file
+ debugTraceMsg dflags 3 $ text ("Exe link info: " ++ show m_exe_link_info)
+ return (Just link_info /= m_exe_link_info)
+
+ghcLinkInfoSectionName :: String
+ghcLinkInfoSectionName = ".debug-ghc-link-info"
+ -- if we use the ".debug" prefix, then strip will strip it by default
findHSLib :: [String] -> String -> IO (Maybe FilePath)
findHSLib dirs lib = do
( _, out_file) <- runPipeline stop_phase' hsc_env
(src, mb_phase) Nothing output
- Nothing{-no ModLocation-}
+ Nothing{-no ModLocation-} Nothing
return out_file
-> Maybe FilePath -- ^ original basename (if different from ^^^)
-> PipelineOutput -- ^ Output filename
-> Maybe ModLocation -- ^ A ModLocation, if this is a Haskell module
+ -> Maybe FilePath -- ^ stub object, if we have one
-> IO (DynFlags, FilePath) -- ^ (final flags, output filename)
-runPipeline stop_phase hsc_env0 (input_fn, mb_phase) mb_basename output maybe_loc
+runPipeline stop_phase hsc_env0 (input_fn, mb_phase)
+ mb_basename output maybe_loc maybe_stub_o
= do
let dflags0 = hsc_dflags hsc_env0
(input_basename, suffix) = splitExtension input_fn
let get_output_fn = getOutputFilename stop_phase output basename
-- Execute the pipeline...
- (dflags', output_fn, maybe_loc) <-
- pipeLoop hsc_env start_phase stop_phase input_fn
- basename suffix' get_output_fn maybe_loc
+ let env = PipeEnv{ stop_phase,
+ src_basename = basename,
+ src_suffix = suffix',
+ output_spec = output }
+
+ state = PipeState{ hsc_env, maybe_loc, maybe_stub_o = maybe_stub_o }
+
+ (state', output_fn) <- unP (pipeLoop start_phase input_fn) env state
+
+ let PipeState{ hsc_env=hsc_env', maybe_loc } = state'
+ dflags' = hsc_dflags hsc_env'
-- Sometimes, a compilation phase doesn't actually generate any output
-- (eg. the CPP phase when -fcpp is not turned on). If we end on this
copyWithHeader dflags msg line_prag output_fn final_fn
return (dflags', final_fn)
+-- -----------------------------------------------------------------------------
+-- The pipeline uses a monad to carry around various bits of information
+
+-- PipeEnv: invariant information passed down
+data PipeEnv = PipeEnv {
+ stop_phase :: Phase, -- ^ Stop just before this phase
+ src_basename :: String, -- ^ basename of original input source
+ src_suffix :: String, -- ^ its extension
+ output_spec :: PipelineOutput -- ^ says where to put the pipeline output
+ }
+
+-- PipeState: information that might change during a pipeline run
+data PipeState = PipeState {
+ hsc_env :: HscEnv,
+ -- ^ only the DynFlags change in the HscEnv. The DynFlags change
+ -- at various points, for example when we read the OPTIONS_GHC
+ -- pragmas in the Cpp phase.
+ maybe_loc :: Maybe ModLocation,
+ -- ^ the ModLocation. This is discovered during compilation,
+ -- in the Hsc phase where we read the module header.
+ maybe_stub_o :: Maybe FilePath
+ -- ^ the stub object. This is set by the Hsc phase if a stub
+ -- object was created. The stub object will be joined with
+ -- the main compilation object using "ld -r" at the end.
+ }
+
+getPipeEnv :: CompPipeline PipeEnv
+getPipeEnv = P $ \env state -> return (state, env)
+
+getPipeState :: CompPipeline PipeState
+getPipeState = P $ \_env state -> return (state, state)
+
+getDynFlags :: CompPipeline DynFlags
+getDynFlags = P $ \_env state -> return (state, hsc_dflags (hsc_env state))
+
+setDynFlags :: DynFlags -> CompPipeline ()
+setDynFlags dflags = P $ \_env state ->
+ return (state{hsc_env= (hsc_env state){ hsc_dflags = dflags }}, ())
+
+setModLocation :: ModLocation -> CompPipeline ()
+setModLocation loc = P $ \_env state ->
+ return (state{ maybe_loc = Just loc }, ())
+
+setStubO :: FilePath -> CompPipeline ()
+setStubO stub_o = P $ \_env state ->
+ return (state{ maybe_stub_o = Just stub_o }, ())
+
+newtype CompPipeline a = P { unP :: PipeEnv -> PipeState -> IO (PipeState, a) }
+
+instance Monad CompPipeline where
+ return a = P $ \_env state -> return (state, a)
+ P m >>= k = P $ \env state -> do (state',a) <- m env state
+ unP (k a) env state'
+
+io :: IO a -> CompPipeline a
+io m = P $ \_env state -> do a <- m; return (state, a)
+
+phaseOutputFilename :: Phase{-next phase-} -> CompPipeline FilePath
+phaseOutputFilename next_phase = do
+ PipeEnv{stop_phase, src_basename, output_spec} <- getPipeEnv
+ PipeState{maybe_loc, hsc_env} <- getPipeState
+ let dflags = hsc_dflags hsc_env
+ io $ getOutputFilename stop_phase output_spec
+ src_basename dflags next_phase maybe_loc
-
-pipeLoop :: HscEnv -> Phase -> Phase
- -> FilePath -> String -> Suffix
- -> (DynFlags -> Phase -> Maybe ModLocation -> IO FilePath)
- -> Maybe ModLocation
- -> IO (DynFlags, FilePath, Maybe ModLocation)
-
-pipeLoop hsc_env phase stop_phase
- input_fn orig_basename orig_suff
- orig_get_output_fn maybe_loc
-
- | phase `eqPhase` stop_phase -- All done
- = return (hsc_dflags hsc_env, input_fn, maybe_loc)
-
- | not (phase `happensBefore` stop_phase)
+-- ---------------------------------------------------------------------------
+-- outer pipeline loop
+
+-- | pipeLoop runs phases until we reach the stop phase
+pipeLoop :: Phase -> FilePath -> CompPipeline FilePath
+pipeLoop phase input_fn = do
+ PipeEnv{stop_phase} <- getPipeEnv
+ PipeState{hsc_env} <- getPipeState
+ case () of
+ _ | phase `eqPhase` stop_phase -- All done
+ -> return input_fn
+
+ | not (phase `happensBefore` stop_phase)
-- Something has gone wrong. We'll try to cover all the cases when
-- this could happen, so if we reach here it is a panic.
-- eg. it might happen if the -C flag is used on a source file that
-- has {-# OPTIONS -fasm #-}.
- = panic ("pipeLoop: at phase " ++ show phase ++
+ -> panic ("pipeLoop: at phase " ++ show phase ++
" but I wanted to stop at phase " ++ show stop_phase)
- | otherwise
- = do debugTraceMsg (hsc_dflags hsc_env) 4
+ | otherwise
+ -> do io $ debugTraceMsg (hsc_dflags hsc_env) 4
(ptext (sLit "Running phase") <+> ppr phase)
- (next_phase, dflags', maybe_loc, output_fn)
- <- runPhase phase stop_phase hsc_env orig_basename
- orig_suff input_fn orig_get_output_fn maybe_loc
- let hsc_env' = hsc_env {hsc_dflags = dflags'}
- pipeLoop hsc_env' next_phase stop_phase output_fn
- orig_basename orig_suff orig_get_output_fn maybe_loc
+ dflags <- getDynFlags
+ (next_phase, output_fn) <- runPhase phase input_fn dflags
+ pipeLoop next_phase output_fn
+
+-- -----------------------------------------------------------------------------
+-- In each phase, we need to know into what filename to generate the
+-- output. All the logic about which filenames we generate output
+-- into is embodied in the following function.
getOutputFilename
:: Phase -> PipelineOutput -> String
odir = objectDir dflags
osuf = objectSuf dflags
keep_hc = dopt Opt_KeepHcFiles dflags
- keep_raw_s = dopt Opt_KeepRawSFiles dflags
keep_s = dopt Opt_KeepSFiles dflags
keep_bc = dopt Opt_KeepLlvmFiles dflags
- myPhaseInputExt HCc = hcsuf
- myPhaseInputExt StopLn = osuf
- myPhaseInputExt other = phaseInputExt other
+ myPhaseInputExt HCc = hcsuf
+ myPhaseInputExt MergeStub = osuf
+ myPhaseInputExt StopLn = osuf
+ myPhaseInputExt other = phaseInputExt other
is_last_phase = next_phase `eqPhase` stop_phase
-- sometimes, we keep output from intermediate stages
keep_this_output =
case next_phase of
- StopLn -> True
- Mangle | keep_raw_s -> True
As | keep_s -> True
LlvmOpt | keep_bc -> True
HCc | keep_hc -> True
-- of a source file can change the latter stages of the pipeline from
-- taking the via-C route to using the native code generator.
--
-runPhase :: Phase -- ^ Do this phase first
- -> Phase -- ^ Stop just before this phase
- -> HscEnv
- -> String -- ^ basename of original input source
- -> String -- ^ its extension
- -> FilePath -- ^ name of file which contains the input to this phase.
- -> (DynFlags -> Phase -> Maybe ModLocation -> IO FilePath)
- -- ^ how to calculate the output filename
- -> Maybe ModLocation -- ^ the ModLocation, if we have one
- -> IO (Phase, -- next phase
- DynFlags, -- new dynamic flags
- Maybe ModLocation, -- the ModLocation, if we have one
- FilePath) -- output filename
+runPhase :: Phase -- ^ Run this phase
+ -> FilePath -- ^ name of the input file
+ -> DynFlags -- ^ for convenience, we pass the current dflags in
+ -> CompPipeline (Phase, -- next phase to run
+ FilePath) -- output filename
-- Invariant: the output filename always contains the output
-- Interesting case: Hsc when there is no recompilation to do
-- Then the output filename is still a .o file
+
-------------------------------------------------------------------------------
-- Unlit phase
-runPhase (Unlit sf) _stop hsc_env _basename _suff input_fn get_output_fn maybe_loc
+runPhase (Unlit sf) input_fn dflags
= do
- let dflags = hsc_dflags hsc_env
- output_fn <- get_output_fn dflags (Cpp sf) maybe_loc
+ output_fn <- phaseOutputFilename (Cpp sf)
let unlit_flags = getOpts dflags opt_L
flags = map SysTools.Option unlit_flags ++
, SysTools.FileOption "" output_fn
]
- SysTools.runUnlit dflags flags
+ io $ SysTools.runUnlit dflags flags
- return (Cpp sf, dflags, maybe_loc, output_fn)
+ return (Cpp sf, output_fn)
-------------------------------------------------------------------------------
-- Cpp phase : (a) gets OPTIONS out of file
-- (b) runs cpp if necessary
-runPhase (Cpp sf) _stop hsc_env _basename _suff input_fn get_output_fn maybe_loc
- = do let dflags0 = hsc_dflags hsc_env
- src_opts <- getOptionsFromFile dflags0 input_fn
+runPhase (Cpp sf) input_fn dflags0
+ = do
+ src_opts <- io $ getOptionsFromFile dflags0 input_fn
(dflags1, unhandled_flags, warns)
- <- parseDynamicNoPackageFlags dflags0 src_opts
- checkProcessArgsResult unhandled_flags
+ <- io $ parseDynamicNoPackageFlags dflags0 src_opts
+ setDynFlags dflags1
+ io $ checkProcessArgsResult unhandled_flags
if not (xopt Opt_Cpp dflags1) then do
-- we have to be careful to emit warnings only once.
- unless (dopt Opt_Pp dflags1) $ handleFlagWarnings dflags1 warns
+ unless (dopt Opt_Pp dflags1) $ io $ handleFlagWarnings dflags1 warns
-- no need to preprocess CPP, just pass input file along
-- to the next phase of the pipeline.
- return (HsPp sf, dflags1, maybe_loc, input_fn)
+ return (HsPp sf, input_fn)
else do
- output_fn <- get_output_fn dflags1 (HsPp sf) maybe_loc
- doCpp dflags1 True{-raw-} False{-no CC opts-} input_fn output_fn
+ output_fn <- phaseOutputFilename (HsPp sf)
+ io $ doCpp dflags1 True{-raw-} False{-no CC opts-} input_fn output_fn
-- re-read the pragmas now that we've preprocessed the file
-- See #2464,#3457
- src_opts <- getOptionsFromFile dflags0 output_fn
+ src_opts <- io $ getOptionsFromFile dflags0 output_fn
(dflags2, unhandled_flags, warns)
- <- parseDynamicNoPackageFlags dflags0 src_opts
- unless (dopt Opt_Pp dflags2) $ handleFlagWarnings dflags2 warns
+ <- io $ parseDynamicNoPackageFlags dflags0 src_opts
+ unless (dopt Opt_Pp dflags2) $ io $ handleFlagWarnings dflags2 warns
-- the HsPp pass below will emit warnings
- checkProcessArgsResult unhandled_flags
+ io $ checkProcessArgsResult unhandled_flags
- return (HsPp sf, dflags2, maybe_loc, output_fn)
+ setDynFlags dflags2
+
+ return (HsPp sf, output_fn)
-------------------------------------------------------------------------------
-- HsPp phase
-runPhase (HsPp sf) _stop hsc_env basename suff input_fn get_output_fn maybe_loc
- = do let dflags = hsc_dflags hsc_env
+runPhase (HsPp sf) input_fn dflags
+ = do
if not (dopt Opt_Pp dflags) then
-- no need to preprocess, just pass input file along
-- to the next phase of the pipeline.
- return (Hsc sf, dflags, maybe_loc, input_fn)
+ return (Hsc sf, input_fn)
else do
let hspp_opts = getOpts dflags opt_F
- let orig_fn = basename <.> suff
- output_fn <- get_output_fn dflags (Hsc sf) maybe_loc
- SysTools.runPp dflags
+ PipeEnv{src_basename, src_suffix} <- getPipeEnv
+ let orig_fn = src_basename <.> src_suffix
+ output_fn <- phaseOutputFilename (Hsc sf)
+ io $ SysTools.runPp dflags
( [ SysTools.Option orig_fn
, SysTools.Option input_fn
, SysTools.FileOption "" output_fn
)
-- re-read pragmas now that we've parsed the file (see #3674)
- src_opts <- getOptionsFromFile dflags output_fn
+ src_opts <- io $ getOptionsFromFile dflags output_fn
(dflags1, unhandled_flags, warns)
- <- parseDynamicNoPackageFlags dflags src_opts
- handleFlagWarnings dflags1 warns
- checkProcessArgsResult unhandled_flags
+ <- io $ parseDynamicNoPackageFlags dflags src_opts
+ setDynFlags dflags1
+ io $ handleFlagWarnings dflags1 warns
+ io $ checkProcessArgsResult unhandled_flags
- return (Hsc sf, dflags1, maybe_loc, output_fn)
+ return (Hsc sf, output_fn)
-----------------------------------------------------------------------------
-- Hsc phase
-- Compilation of a single module, in "legacy" mode (_not_ under
-- the direction of the compilation manager).
-runPhase (Hsc src_flavour) stop hsc_env basename suff input_fn get_output_fn _maybe_loc
+runPhase (Hsc src_flavour) input_fn dflags0
= do -- normal Hsc mode, not mkdependHS
- let dflags0 = hsc_dflags hsc_env
+
+ PipeEnv{ stop_phase=stop,
+ src_basename=basename,
+ src_suffix=suff } <- getPipeEnv
-- we add the current directory (i.e. the directory in which
-- the .hs files resides) to the include path, since this is
paths = includePaths dflags0
dflags = dflags0 { includePaths = current_dir : paths }
+ setDynFlags dflags
+
-- gather the imports and module name
- (hspp_buf,mod_name,imps,src_imps) <-
+ (hspp_buf,mod_name,imps,src_imps) <- io $
case src_flavour of
ExtCoreFile -> do -- no explicit imports in ExtCore input.
m <- getCoreModuleName input_fn
-- the .hi and .o filenames, and this is as good a way
-- as any to generate them, and better than most. (e.g. takes
-- into accout the -osuf flags)
- location1 <- mkHomeModLocation2 dflags mod_name basename suff
+ location1 <- io $ mkHomeModLocation2 dflags mod_name basename suff
-- Boot-ify it if necessary
let location2 | isHsBoot src_flavour = addBootSuffixLocn location1
o_file = ml_obj_file location4 -- The real object file
+ setModLocation location4
-- Figure out if the source has changed, for recompilation avoidance.
--
-- changed (which the compiler itself figures out).
-- Setting source_unchanged to False tells the compiler that M.o is out of
-- date wrt M.hs (or M.o doesn't exist) so we must recompile regardless.
- src_timestamp <- getModificationTime (basename <.> suff)
+ src_timestamp <- io $ getModificationTime (basename <.> suff)
let force_recomp = dopt Opt_ForceRecomp dflags
- hsc_lang = hscMaybeAdjustTarget dflags stop src_flavour (hscTarget dflags)
- source_unchanged <-
+ hsc_lang = hscTarget dflags
+ source_unchanged <- io $
if force_recomp || not (isStopLn stop)
-- Set source_unchanged to False unconditionally if
-- (a) recompilation checker is off, or
-- get the DynFlags
let next_phase = hscNextPhase dflags src_flavour hsc_lang
- output_fn <- get_output_fn dflags next_phase (Just location4)
+ output_fn <- phaseOutputFilename next_phase
let dflags' = dflags { hscTarget = hsc_lang,
hscOutName = output_fn,
extCoreName = basename ++ ".hcr" }
- let hsc_env' = hsc_env {hsc_dflags = dflags'}
+ setDynFlags dflags'
+ PipeState{hsc_env=hsc_env'} <- getPipeState
-- Tell the finder cache about this module
- mod <- addHomeModuleToFinder hsc_env' mod_name location4
+ mod <- io $ addHomeModuleToFinder hsc_env' mod_name location4
-- Make the ModSummary to hand to hscMain
let
ms_srcimps = src_imps }
-- run the compiler!
- result <- hscCompileOneShot hsc_env'
+ result <- io $ hscCompileOneShot hsc_env'
mod_summary source_unchanged
Nothing -- No iface
Nothing -- No "module i of n" progress info
case result of
HscNoRecomp
- -> do SysTools.touch dflags' "Touching object file" o_file
+ -> do io $ SysTools.touch dflags' "Touching object file" o_file
-- The .o file must have a later modification date
-- than the source file (else we wouldn't be in HscNoRecomp)
-- but we touch it anyway, to keep 'make' happy (we think).
- return (StopLn, dflags', Just location4, o_file)
+ return (StopLn, o_file)
(HscRecomp hasStub _)
- -> do when hasStub $
- do stub_o <- compileStub hsc_env' mod location4
- liftIO $ consIORef v_Ld_inputs stub_o
+ -> do case hasStub of
+ Nothing -> return ()
+ Just stub_c ->
+ do stub_o <- io $ compileStub hsc_env' stub_c
+ setStubO stub_o
-- In the case of hs-boot files, generate a dummy .o-boot
-- stamp file for the benefit of Make
when (isHsBoot src_flavour) $
- SysTools.touch dflags' "Touching object file" o_file
- return (next_phase, dflags', Just location4, output_fn)
+ io $ SysTools.touch dflags' "Touching object file" o_file
+ return (next_phase, output_fn)
-----------------------------------------------------------------------------
-- Cmm phase
-runPhase CmmCpp _stop hsc_env _basename _suff input_fn get_output_fn maybe_loc
+runPhase CmmCpp input_fn dflags
= do
- let dflags = hsc_dflags hsc_env
- output_fn <- get_output_fn dflags Cmm maybe_loc
- doCpp dflags False{-not raw-} True{-include CC opts-} input_fn output_fn
- return (Cmm, dflags, maybe_loc, output_fn)
+ output_fn <- phaseOutputFilename Cmm
+ io $ doCpp dflags False{-not raw-} True{-include CC opts-}
+ input_fn output_fn
+ return (Cmm, output_fn)
-runPhase Cmm stop hsc_env basename _ input_fn get_output_fn maybe_loc
+runPhase Cmm input_fn dflags
= do
- let dflags = hsc_dflags hsc_env
- let hsc_lang = hscMaybeAdjustTarget dflags stop HsSrcFile (hscTarget dflags)
+ PipeEnv{src_basename} <- getPipeEnv
+ let hsc_lang = hscTarget dflags
+
let next_phase = hscNextPhase dflags HsSrcFile hsc_lang
- output_fn <- get_output_fn dflags next_phase maybe_loc
+
+ output_fn <- phaseOutputFilename next_phase
let dflags' = dflags { hscTarget = hsc_lang,
hscOutName = output_fn,
- extCoreName = basename ++ ".hcr" }
- let hsc_env' = hsc_env {hsc_dflags = dflags'}
+ extCoreName = src_basename ++ ".hcr" }
- hscCompileCmmFile hsc_env' input_fn
+ setDynFlags dflags'
+ PipeState{hsc_env} <- getPipeState
+
+ io $ hscCompileCmmFile hsc_env input_fn
-- XXX: catch errors above and convert them into ghcError? Original
-- code was:
--
--when (not ok) $ ghcError (PhaseFailed "cmm" (ExitFailure 1))
- return (next_phase, dflags, maybe_loc, output_fn)
+ return (next_phase, output_fn)
-----------------------------------------------------------------------------
-- Cc phase
-- we don't support preprocessing .c files (with -E) now. Doing so introduces
-- way too many hacks, and I can't say I've ever used it anyway.
-runPhase cc_phase _stop hsc_env _basename _suff input_fn get_output_fn maybe_loc
- | cc_phase `eqPhase` Cc || cc_phase `eqPhase` Ccpp || cc_phase `eqPhase` HCc
- = do let dflags = hsc_dflags hsc_env
+runPhase cc_phase input_fn dflags
+ | cc_phase `eqPhase` Cc || cc_phase `eqPhase` Ccpp || cc_phase `eqPhase` HCc || cc_phase `eqPhase` Cobjc
+ = do
let cc_opts = getOpts dflags opt_c
hcc = cc_phase `eqPhase` HCc
let cmdline_include_paths = includePaths dflags
-- HC files have the dependent packages stamped into them
- pkgs <- if hcc then getHCFilePackages input_fn else return []
+ pkgs <- if hcc then io $ getHCFilePackages input_fn else return []
-- add package include paths even if we're just compiling .c
-- files; this is the Value Add(TM) that using ghc instead of
-- gcc gives you :)
- pkg_include_dirs <- getPackageIncludePath dflags pkgs
+ pkg_include_dirs <- io $ getPackageIncludePath dflags pkgs
let include_paths = foldr (\ x xs -> "-I" : x : xs) []
(cmdline_include_paths ++ pkg_include_dirs)
- let (md_c_flags, md_regd_c_flags) = machdepCCOpts dflags
- gcc_extra_viac_flags <- getExtraViaCOpts dflags
+ let md_c_flags = machdepCCOpts dflags
+ gcc_extra_viac_flags <- io $ getExtraViaCOpts dflags
let pic_c_flags = picCCOpts dflags
let verb = getVerbFlag dflags
-- cc-options are not passed when compiling .hc files. Our
-- hc code doesn't not #include any header files anyway, so these
-- options aren't necessary.
- pkg_extra_cc_opts <-
+ pkg_extra_cc_opts <- io $
if cc_phase `eqPhase` HCc
then return []
else getPackageExtraCcOpts dflags pkgs
#ifdef darwin_TARGET_OS
- pkg_framework_paths <- getPackageFrameworkPath dflags pkgs
+ pkg_framework_paths <- io $ getPackageFrameworkPath dflags pkgs
let cmdline_framework_paths = frameworkPaths dflags
let framework_paths = map ("-F"++)
(cmdline_framework_paths ++ pkg_framework_paths)
-- Decide next phase
- let mangle = dopt Opt_DoAsmMangling dflags
- next_phase
- | hcc && mangle = Mangle
- | otherwise = As
- output_fn <- get_output_fn dflags next_phase maybe_loc
+ let next_phase = As
+ output_fn <- phaseOutputFilename next_phase
let
more_hcc_opts =
-- very weakly typed, being derived from C--.
["-fno-strict-aliasing"]
- SysTools.runCc dflags (
+ let gcc_lang_opt | cc_phase `eqPhase` Ccpp = "c++"
+ | cc_phase `eqPhase` Cobjc = "objective-c"
+ | otherwise = "c"
+ io $ SysTools.runCc dflags (
-- force the C compiler to interpret this file as C when
-- compiling .hc files, by adding the -x c option.
-- Also useful for plain .c files, just in case GHC saw a
-- -x c option.
- [ SysTools.Option "-x", if cc_phase `eqPhase` Ccpp
- then SysTools.Option "c++"
- else SysTools.Option "c"] ++
- [ SysTools.FileOption "" input_fn
+ [ SysTools.Option "-x", SysTools.Option gcc_lang_opt
+ , SysTools.FileOption "" input_fn
, SysTools.Option "-o"
, SysTools.FileOption "" output_fn
]
-- This is a temporary hack.
++ ["-mcpu=v9"]
#endif
- ++ (if hcc && mangle
- then md_regd_c_flags
- else [])
- ++ (if hcc
- then if mangle
- then gcc_extra_viac_flags
- else filter (=="-fwrapv")
- gcc_extra_viac_flags
- -- still want -fwrapv even for unreg'd
- else [])
++ (if hcc
- then more_hcc_opts
+ then gcc_extra_viac_flags ++ more_hcc_opts
else [])
++ [ verb, "-S", "-Wimplicit", cc_opt ]
++ [ "-D__GLASGOW_HASKELL__="++cProjectVersionInt ]
++ pkg_extra_cc_opts
))
- return (next_phase, dflags, maybe_loc, output_fn)
+ return (next_phase, output_fn)
-- ToDo: postprocess the output from gcc
-----------------------------------------------------------------------------
--- Mangle phase
-
-runPhase Mangle _stop hsc_env _basename _suff input_fn get_output_fn maybe_loc
- = do let dflags = hsc_dflags hsc_env
- let mangler_opts = getOpts dflags opt_m
-
-#if i386_TARGET_ARCH
- machdep_opts <- return [ show (stolen_x86_regs dflags) ]
-#else
- machdep_opts <- return []
-#endif
-
- let split = dopt Opt_SplitObjs dflags
- next_phase
- | split = SplitMangle
- | otherwise = As
- output_fn <- get_output_fn dflags next_phase maybe_loc
-
- SysTools.runMangle dflags (map SysTools.Option mangler_opts
- ++ [ SysTools.FileOption "" input_fn
- , SysTools.FileOption "" output_fn
- ]
- ++ map SysTools.Option machdep_opts)
-
- return (next_phase, dflags, maybe_loc, output_fn)
-
------------------------------------------------------------------------------
-- Splitting phase
-runPhase SplitMangle _stop hsc_env _basename _suff input_fn _get_output_fn maybe_loc
+runPhase SplitMangle input_fn dflags
= do -- tmp_pfx is the prefix used for the split .s files
- -- We also use it as the file to contain the no. of split .s files (sigh)
- let dflags = hsc_dflags hsc_env
- split_s_prefix <- SysTools.newTempName dflags "split"
+
+ split_s_prefix <- io $ SysTools.newTempName dflags "split"
let n_files_fn = split_s_prefix
- SysTools.runSplit dflags
+ io $ SysTools.runSplit dflags
[ SysTools.FileOption "" input_fn
, SysTools.FileOption "" split_s_prefix
, SysTools.FileOption "" n_files_fn
]
-- Save the number of split files for future references
- s <- readFile n_files_fn
+ s <- io $ readFile n_files_fn
let n_files = read s :: Int
dflags' = dflags { splitInfo = Just (split_s_prefix, n_files) }
+ setDynFlags dflags'
+
-- Remember to delete all these files
- addFilesToClean dflags' [ split_s_prefix ++ "__" ++ show n ++ ".s"
- | n <- [1..n_files]]
+ io $ addFilesToClean dflags' [ split_s_prefix ++ "__" ++ show n ++ ".s"
+ | n <- [1..n_files]]
- return (SplitAs, dflags', maybe_loc, "**splitmangle**")
+ return (SplitAs, "**splitmangle**")
-- we don't use the filename
-----------------------------------------------------------------------------
-- As phase
-runPhase As _stop hsc_env _basename _suff input_fn get_output_fn maybe_loc
- = do let dflags = hsc_dflags hsc_env
+runPhase As input_fn dflags
+ = do
let as_opts = getOpts dflags opt_a
let cmdline_include_paths = includePaths dflags
- output_fn <- get_output_fn dflags StopLn maybe_loc
+ next_phase <- maybeMergeStub
+ output_fn <- phaseOutputFilename next_phase
-- we create directories for the object file, because it
-- might be a hierarchical module.
- createDirectoryHierarchy (takeDirectory output_fn)
+ io $ createDirectoryHierarchy (takeDirectory output_fn)
- let (md_c_flags, _) = machdepCCOpts dflags
- SysTools.runAs dflags
+ let md_c_flags = machdepCCOpts dflags
+ io $ SysTools.runAs dflags
(map SysTools.Option as_opts
++ [ SysTools.Option ("-I" ++ p) | p <- cmdline_include_paths ]
#ifdef sparc_TARGET_ARCH
]
++ map SysTools.Option md_c_flags)
- return (StopLn, dflags, maybe_loc, output_fn)
+ return (next_phase, output_fn)
-runPhase SplitAs _stop hsc_env _basename _suff _input_fn get_output_fn maybe_loc
+runPhase SplitAs _input_fn dflags
= do
- let dflags = hsc_dflags hsc_env
- output_fn <- get_output_fn dflags StopLn maybe_loc
+ -- we'll handle the stub_o file in this phase, so don't MergeStub,
+ -- just jump straight to StopLn afterwards.
+ let next_phase = StopLn
+ output_fn <- phaseOutputFilename next_phase
let base_o = dropExtension output_fn
osuf = objectSuf dflags
split_odir = base_o ++ "_" ++ osuf ++ "_split"
- createDirectoryHierarchy split_odir
+ io $ createDirectoryHierarchy split_odir
-- remove M_split/ *.o, because we're going to archive M_split/ *.o
-- later and we don't want to pick up any old objects.
- fs <- getDirectoryContents split_odir
- mapM_ removeFile $ map (split_odir </>) $ filter (osuf `isSuffixOf`) fs
+ fs <- io $ getDirectoryContents split_odir
+ io $ mapM_ removeFile $
+ map (split_odir </>) $ filter (osuf `isSuffixOf`) fs
let as_opts = getOpts dflags opt_a
Just x -> x
let split_s n = split_s_prefix ++ "__" ++ show n <.> "s"
+
+ split_obj :: Int -> FilePath
split_obj n = split_odir </>
takeFileName base_o ++ "__" ++ show n <.> osuf
- let (md_c_flags, _) = machdepCCOpts dflags
+ let md_c_flags = machdepCCOpts dflags
let assemble_file n
= SysTools.runAs dflags
(map SysTools.Option as_opts ++
]
++ map SysTools.Option md_c_flags)
- mapM_ assemble_file [1..n]
+ io $ mapM_ assemble_file [1..n]
+
+ -- Note [pipeline-split-init]
+ -- If we have a stub file, it may contain constructor
+ -- functions for initialisation of this module. We can't
+ -- simply leave the stub as a separate object file, because it
+ -- will never be linked in: nothing refers to it. We need to
+ -- ensure that if we ever refer to the data in this module
+ -- that needs initialisation, then we also pull in the
+ -- initialisation routine.
+ --
+ -- To that end, we make a DANGEROUS ASSUMPTION here: the data
+ -- that needs to be initialised is all in the FIRST split
+ -- object. See Note [codegen-split-init].
+
+ PipeState{maybe_stub_o} <- getPipeState
+ case maybe_stub_o of
+ Nothing -> return ()
+ Just stub_o -> io $ do
+ tmp_split_1 <- newTempName dflags osuf
+ let split_1 = split_obj 1
+ copyFile split_1 tmp_split_1
+ removeFile split_1
+ joinObjectFiles dflags [tmp_split_1, stub_o] split_1
-- join them into a single .o file
- joinObjectFiles dflags (map split_obj [1..n]) output_fn
+ io $ joinObjectFiles dflags (map split_obj [1..n]) output_fn
- return (StopLn, dflags, maybe_loc, output_fn)
+ return (next_phase, output_fn)
-----------------------------------------------------------------------------
-- LlvmOpt phase
-runPhase LlvmOpt _stop hsc_env _basename _suff input_fn get_output_fn maybe_loc
+runPhase LlvmOpt input_fn dflags
= do
- let dflags = hsc_dflags hsc_env
let lo_opts = getOpts dflags opt_lo
let opt_lvl = max 0 (min 2 $ optLevel dflags)
-- don't specify anything if user has specified commands. We do this for
then [SysTools.Option (llvmOpts !! opt_lvl)]
else []
- output_fn <- get_output_fn dflags LlvmLlc maybe_loc
+ output_fn <- phaseOutputFilename LlvmLlc
- SysTools.runLlvmOpt dflags
+ io $ SysTools.runLlvmOpt dflags
([ SysTools.FileOption "" input_fn,
SysTools.Option "-o",
SysTools.FileOption "" output_fn]
++ optFlag
++ map SysTools.Option lo_opts)
- return (LlvmLlc, dflags, maybe_loc, output_fn)
+ return (LlvmLlc, output_fn)
where
-- we always (unless -optlo specified) run Opt since we rely on it to
-- fix up some pretty big deficiencies in the code we generate
-----------------------------------------------------------------------------
-- LlvmLlc phase
-runPhase LlvmLlc _stop hsc_env _basename _suff input_fn get_output_fn maybe_loc
+runPhase LlvmLlc input_fn dflags
= do
- let dflags = hsc_dflags hsc_env
let lc_opts = getOpts dflags opt_lc
let opt_lvl = max 0 (min 2 $ optLevel dflags)
#if darwin_TARGET_OS
| not opt_Static = "dynamic-no-pic"
| otherwise = "static"
- output_fn <- get_output_fn dflags nphase maybe_loc
+ output_fn <- phaseOutputFilename nphase
- SysTools.runLlvmLlc dflags
+ io $ SysTools.runLlvmLlc dflags
([ SysTools.Option (llvmOpts !! opt_lvl),
SysTools.Option $ "-relocation-model=" ++ rmodel,
SysTools.FileOption "" input_fn,
SysTools.Option "-o", SysTools.FileOption "" output_fn]
++ map SysTools.Option lc_opts)
- return (nphase, dflags, maybe_loc, output_fn)
+ return (nphase, output_fn)
where
#if darwin_TARGET_OS
llvmOpts = ["-O1", "-O2", "-O2"]
-----------------------------------------------------------------------------
-- LlvmMangle phase
-runPhase LlvmMangle _stop hsc_env _basename _suff input_fn get_output_fn maybe_loc
+runPhase LlvmMangle input_fn _dflags
= do
- let dflags = hsc_dflags hsc_env
- output_fn <- get_output_fn dflags As maybe_loc
- llvmFixupAsm input_fn output_fn
- return (As, dflags, maybe_loc, output_fn)
+ output_fn <- phaseOutputFilename As
+ io $ llvmFixupAsm input_fn output_fn
+ return (As, output_fn)
+
+-----------------------------------------------------------------------------
+-- merge in stub objects
+runPhase MergeStub input_fn dflags
+ = do
+ PipeState{maybe_stub_o} <- getPipeState
+ output_fn <- phaseOutputFilename StopLn
+ case maybe_stub_o of
+ Nothing ->
+ panic "runPhase(MergeStub): no stub"
+ Just stub_o -> do
+ io $ joinObjectFiles dflags [input_fn, stub_o] output_fn
+ return (StopLn, output_fn)
-- warning suppression
-runPhase other _stop _dflags _basename _suff _input_fn _get_output_fn _maybe_loc =
+runPhase other _input_fn _dflags =
panic ("runPhase: don't know how to run phase " ++ show other)
+
+maybeMergeStub :: CompPipeline Phase
+maybeMergeStub
+ = do
+ PipeState{maybe_stub_o} <- getPipeState
+ if isJust maybe_stub_o then return MergeStub else return StopLn
+
-----------------------------------------------------------------------------
-- MoveBinary sort-of-phase
-- After having produced a binary, move it somewhere else and generate a
return True
| otherwise = return True
-mkExtraCObj :: DynFlags -> [String] -> IO FilePath
+mkExtraCObj :: DynFlags -> String -> IO FilePath
mkExtraCObj dflags xs
= do cFile <- newTempName dflags "c"
oFile <- newTempName dflags "o"
- writeFile cFile $ unlines xs
+ writeFile cFile xs
let rtsDetails = getPackageDetails (pkgState dflags) rtsPackageId
- (md_c_flags, _) = machdepCCOpts dflags
+ md_c_flags = machdepCCOpts dflags
SysTools.runCc dflags
([Option "-c",
FileOption "" cFile,
map Option md_c_flags)
return oFile
-mkRtsOptionsLevelObj :: DynFlags -> IO [FilePath]
-mkRtsOptionsLevelObj dflags
- = do let mkRtsEnabledObj val
- = do fn <- mkExtraCObj dflags
- ["#include \"Rts.h\"",
- "#include \"RtsOpts.h\"",
- "const rtsOptsEnabledEnum rtsOptsEnabled = "
- ++ val ++ ";"]
- return [fn]
- case rtsOptsEnabled dflags of
- RtsOptsNone -> mkRtsEnabledObj "rtsOptsNone"
- RtsOptsSafeOnly -> return [] -- The default
- RtsOptsAll -> mkRtsEnabledObj "rtsOptsAll"
+mkExtraObjToLinkIntoBinary :: DynFlags -> [PackageId] -> IO FilePath
+mkExtraObjToLinkIntoBinary dflags dep_packages = do
+ link_info <- getLinkInfo dflags dep_packages
+ mkExtraCObj dflags (showSDoc (vcat [rts_opts_enabled,
+ extra_rts_opts,
+ link_opts link_info]
+ <> char '\n')) -- final newline, to
+ -- keep gcc happy
+
+ where
+ mk_rts_opts_enabled val
+ = vcat [text "#include \"Rts.h\"",
+ text "#include \"RtsOpts.h\"",
+ text "const RtsOptsEnabledEnum rtsOptsEnabled = " <>
+ text val <> semi ]
+
+ rts_opts_enabled = case rtsOptsEnabled dflags of
+ RtsOptsNone -> mk_rts_opts_enabled "RtsOptsNone"
+ RtsOptsSafeOnly -> empty -- The default
+ RtsOptsAll -> mk_rts_opts_enabled "RtsOptsAll"
+
+ extra_rts_opts = case rtsOpts dflags of
+ Nothing -> empty
+ Just opts -> text "char *ghc_rts_opts = " <> text (show opts) <> semi
+
+ link_opts info
+ | isDarwinTarget = empty
+ | isWindowsTarget = empty
+ | otherwise = hcat [
+ text "__asm__(\"\\t.section ", text ghcLinkInfoSectionName,
+ text ",\\\"\\\",@note\\n",
+ text "\\t.ascii \\\"", info', text "\\\"\\n\");" ]
+ where
+ -- we need to escape twice: once because we're inside a C string,
+ -- and again because we're inside an asm string.
+ info' = text $ (escape.escape) info
+
+ escape :: String -> String
+ escape = concatMap (charToC.fromIntegral.ord)
+
+-- The "link info" is a string representing the parameters of the
+-- link. We save this information in the binary, and the next time we
+-- link, if nothing else has changed, we use the link info stored in
+-- the existing binary to decide whether to re-link or not.
+getLinkInfo :: DynFlags -> [PackageId] -> IO String
+getLinkInfo dflags dep_packages = do
+ package_link_opts <- getPackageLinkOpts dflags dep_packages
+#ifdef darwin_TARGET_OS
+ pkg_frameworks <- getPackageFrameworks dflags dep_packages
+#endif
+ extra_ld_inputs <- readIORef v_Ld_inputs
+ let
+ link_info = (package_link_opts,
+#ifdef darwin_TARGET_OS
+ pkg_frameworks,
+#endif
+ rtsOpts dflags,
+ rtsOptsEnabled dflags,
+ dopt Opt_NoHsMain dflags,
+ extra_ld_inputs,
+ getOpts dflags opt_l)
+ --
+ return (show link_info)
-- generates a Perl skript starting a parallel prg under PVM
mk_pvm_wrapper_script :: String -> String -> String -> String
let no_hs_main = dopt Opt_NoHsMain dflags
let main_lib | no_hs_main = []
| otherwise = [ "-lHSrtsmain" ]
- rtsEnabledObj <- mkRtsOptionsLevelObj dflags
- rtsOptsObj <- case rtsOpts dflags of
- Just opts ->
- do fn <- mkExtraCObj dflags
- -- We assume that the Haskell "show" does
- -- the right thing here
- ["char *ghc_rts_opts = " ++ show opts ++ ";"]
- return [fn]
- Nothing -> return []
+
+ extraLinkObj <- mkExtraObjToLinkIntoBinary dflags dep_packages
pkg_link_opts <- getPackageLinkOpts dflags dep_packages
rc_objs <- maybeCreateManifest dflags output_fn
- let (md_c_flags, _) = machdepCCOpts dflags
+ let md_c_flags = machdepCCOpts dflags
SysTools.runLink dflags (
[ SysTools.Option verb
, SysTools.Option "-o"
#endif
++ pkg_lib_path_opts
++ main_lib
- ++ rtsEnabledObj
- ++ rtsOptsObj
+ ++ [extraLinkObj]
++ pkg_link_opts
#ifdef darwin_TARGET_OS
++ pkg_framework_path_opts
-- probably _stub.o files
extra_ld_inputs <- readIORef v_Ld_inputs
- let (md_c_flags, _) = machdepCCOpts dflags
+ let md_c_flags = machdepCCOpts dflags
let extra_ld_opts = getOpts dflags opt_l
- rtsEnabledObj <- mkRtsOptionsLevelObj dflags
+ extraLinkObj <- mkExtraObjToLinkIntoBinary dflags dep_packages
#if defined(mingw32_HOST_OS)
-----------------------------------------------------------------------------
++ lib_path_opts
++ extra_ld_opts
++ pkg_lib_path_opts
- ++ rtsEnabledObj
+ ++ [extraLinkObj]
++ pkg_link_opts
))
#elif defined(darwin_TARGET_OS)
md_c_flags
++ o_files
++ [ "-undefined", "dynamic_lookup", "-single_module",
- "-Wl,-read_only_relocs,suppress", "-install_name", instName ]
+#if !defined(x86_64_TARGET_ARCH)
+ "-Wl,-read_only_relocs,suppress",
+#endif
+ "-install_name", instName ]
++ extra_ld_inputs
++ lib_path_opts
++ extra_ld_opts
++ pkg_lib_path_opts
- ++ rtsEnabledObj
+ ++ [extraLinkObj]
++ pkg_link_opts
))
#else
++ lib_path_opts
++ extra_ld_opts
++ pkg_lib_path_opts
- ++ rtsEnabledObj
+ ++ [extraLinkObj]
++ pkg_link_opts
))
#endif
| otherwise = (optc ++ md_c_flags)
where
optc = getOpts dflags opt_c
- (md_c_flags, _) = machdepCCOpts dflags
+ md_c_flags = machdepCCOpts dflags
let cpp_prog args | raw = SysTools.runCpp dflags args
| otherwise = SysTools.runCc dflags (SysTools.Option "-E" : args)
SysTools.Option "-nostdlib",
SysTools.Option "-nodefaultlibs",
SysTools.Option "-Wl,-r",
+ SysTools.Option ld_build_id,
SysTools.Option ld_x_flag,
SysTools.Option "-o",
SysTools.FileOption "" output_fn ]
++ map SysTools.Option md_c_flags
++ args)
+
ld_x_flag | null cLD_X = ""
| otherwise = "-Wl,-x"
- (md_c_flags, _) = machdepCCOpts dflags
+ -- suppress the generation of the .note.gnu.build-id section,
+ -- which we don't need and sometimes causes ld to emit a
+ -- warning:
+ ld_build_id | cLdHasBuildId == "YES" = "-Wl,--build-id=none"
+ | otherwise = ""
+
+ md_c_flags = machdepCCOpts dflags
if cLdIsGNULd == "YES"
then do
HscInterpreted -> StopLn
_other -> StopLn
-
-hscMaybeAdjustTarget :: DynFlags -> Phase -> HscSource -> HscTarget -> HscTarget
-hscMaybeAdjustTarget dflags stop _ current_hsc_lang
- = hsc_lang
- where
- keep_hc = dopt Opt_KeepHcFiles dflags
- hsc_lang
- -- don't change the lang if we're interpreting
- | current_hsc_lang == HscInterpreted = current_hsc_lang
-
- -- force -fvia-C if we are being asked for a .hc file
- | HCc <- stop = HscC
- | keep_hc = HscC
- -- otherwise, stick to the plan
- | otherwise = current_hsc_lang
-
-- debugging flags
= Opt_D_dump_cmm
+ | Opt_D_dump_raw_cmm
| Opt_D_dump_cmmz
| Opt_D_dump_cmmz_pretty
| Opt_D_dump_cps_cmm
| Opt_D_dump_asm_stats
| Opt_D_dump_asm_expanded
| Opt_D_dump_llvm
+ | Opt_D_dump_core_stats
| Opt_D_dump_cpranal
| Opt_D_dump_deriv
| Opt_D_dump_ds
| Opt_Pp
| Opt_ForceRecomp
| Opt_DryRun
- | Opt_DoAsmMangling
| Opt_ExcessPrecision
| Opt_EagerBlackHoling
| Opt_ReadUserPackageConf
| Opt_KeepHiDiffs
| Opt_KeepHcFiles
| Opt_KeepSFiles
- | Opt_KeepRawSFiles
| Opt_KeepTmpFiles
| Opt_KeepRawTokenStream
| Opt_KeepLlvmFiles
#ifndef OMIT_NATIVE_CODEGEN
targetPlatform :: Platform, -- ^ The platform we're compiling for. Used by the NCG.
#endif
- stolen_x86_regs :: Int,
cmdlineHcIncludes :: [String], -- ^ @\-\#includes@
importPaths :: [FilePath],
mainModIs :: Module,
pgm_P :: (String,[Option]),
pgm_F :: String,
pgm_c :: (String,[Option]),
- pgm_m :: (String,[Option]),
pgm_s :: (String,[Option]),
pgm_a :: (String,[Option]),
pgm_l :: (String,[Option]),
deriving Eq
data RtsOptsEnabled = RtsOptsNone | RtsOptsSafeOnly | RtsOptsAll
+ deriving (Show)
-- | Used by 'GHC.newSession' to partially initialize a new 'DynFlags' value
initDynFlags :: DynFlags -> IO DynFlags
#ifndef OMIT_NATIVE_CODEGEN
targetPlatform = defaultTargetPlatform,
#endif
- stolen_x86_regs = 4,
cmdlineHcIncludes = [],
importPaths = ["."],
mainModIs = mAIN,
pgm_P = panic "defaultDynFlags: No pgm_P",
pgm_F = panic "defaultDynFlags: No pgm_F",
pgm_c = panic "defaultDynFlags: No pgm_c",
- pgm_m = panic "defaultDynFlags: No pgm_m",
pgm_s = panic "defaultDynFlags: No pgm_s",
pgm_a = panic "defaultDynFlags: No pgm_a",
pgm_l = panic "defaultDynFlags: No pgm_l",
setHiDir f d = d{ hiDir = Just f}
setStubDir f d = d{ stubDir = Just f, includePaths = f : includePaths d }
-- -stubdir D adds an implicit -I D, so that gcc can find the _stub.h file
- -- \#included from the .hc file when compiling with -fvia-C.
+ -- \#included from the .hc file when compiling via C (i.e. unregisterised
+ -- builds).
setOutputDir f = setObjectDir f . setHiDir f . setStubDir f
setDylibInstallName f d = d{ dylibInstallName = Just f}
= runCmdLine (processArgs flag_spec args') dflags0
when (not (null errs)) $ ghcError $ errorsToGhcException errs
- -- Cannot use -fPIC with registerised -fvia-C, because the mangler
- -- isn't up to the job. We know that if hscTarget == HscC, then the
- -- user has explicitly used -fvia-C, because -fasm is the default,
- -- unless there is no NCG on this platform. The latter case is
- -- checked when the -fPIC flag is parsed.
- --
let (pic_warns, dflags2)
- | opt_PIC && hscTarget dflags1 == HscC && cGhcUnregisterised == "NO"
- = ([L noSrcSpan $ "Warning: -fvia-C is incompatible with -fPIC; ignoring -fvia-C"],
- dflags1{ hscTarget = HscAsm })
#if !(x86_64_TARGET_ARCH && linux_TARGET_OS)
| (not opt_Static || opt_PIC) && hscTarget dflags1 == HscLlvm
= ([L noSrcSpan $ "Warning: -fllvm is incompatible with -fPIC and -"
, Flag "pgmP" (hasArg setPgmP)
, Flag "pgmF" (hasArg (\f d -> d{ pgm_F = f}))
, Flag "pgmc" (hasArg (\f d -> d{ pgm_c = (f,[])}))
- , Flag "pgmm" (hasArg (\f d -> d{ pgm_m = (f,[])}))
+ , Flag "pgmm" (HasArg (\_ -> addWarn "The -pgmm flag does nothing; it will be removed in a future GHC release"))
, Flag "pgms" (hasArg (\f d -> d{ pgm_s = (f,[])}))
, Flag "pgma" (hasArg (\f d -> d{ pgm_a = (f,[])}))
, Flag "pgml" (hasArg (\f d -> d{ pgm_l = (f,[])}))
, Flag "keep-hc-files" (NoArg (setDynFlag Opt_KeepHcFiles))
, Flag "keep-s-file" (NoArg (setDynFlag Opt_KeepSFiles))
, Flag "keep-s-files" (NoArg (setDynFlag Opt_KeepSFiles))
- , Flag "keep-raw-s-file" (NoArg (setDynFlag Opt_KeepRawSFiles))
- , Flag "keep-raw-s-files" (NoArg (setDynFlag Opt_KeepRawSFiles))
+ , Flag "keep-raw-s-file" (NoArg (addWarn "The -keep-raw-s-file flag does nothing; it will be removed in a future GHC release"))
+ , Flag "keep-raw-s-files" (NoArg (addWarn "The -keep-raw-s-files flag does nothing; it will be removed in a future GHC release"))
, Flag "keep-llvm-file" (NoArg (setDynFlag Opt_KeepLlvmFiles))
, Flag "keep-llvm-files" (NoArg (setDynFlag Opt_KeepLlvmFiles))
-- This only makes sense as plural
, Flag "dstg-stats" (NoArg (setDynFlag Opt_StgStats))
, Flag "ddump-cmm" (setDumpFlag Opt_D_dump_cmm)
+ , Flag "ddump-raw-cmm" (setDumpFlag Opt_D_dump_raw_cmm)
, Flag "ddump-cmmz" (setDumpFlag Opt_D_dump_cmmz)
, Flag "ddump-cmmz-pretty" (setDumpFlag Opt_D_dump_cmmz_pretty)
+ , Flag "ddump-core-stats" (setDumpFlag Opt_D_dump_core_stats)
, Flag "ddump-cps-cmm" (setDumpFlag Opt_D_dump_cps_cmm)
, Flag "ddump-cvt-cmm" (setDumpFlag Opt_D_dump_cvt_cmm)
, Flag "ddump-asm" (setDumpFlag Opt_D_dump_asm)
------ Machine dependant (-m<blah>) stuff ---------------------------
- , Flag "monly-2-regs" (noArg (\s -> s{stolen_x86_regs = 2}))
- , Flag "monly-3-regs" (noArg (\s -> s{stolen_x86_regs = 3}))
- , Flag "monly-4-regs" (noArg (\s -> s{stolen_x86_regs = 4}))
+ , Flag "monly-2-regs" (NoArg (addWarn "The -monly-2-regs flag does nothing; it will be removed in a future GHC release"))
+ , Flag "monly-3-regs" (NoArg (addWarn "The -monly-3-regs flag does nothing; it will be removed in a future GHC release"))
+ , Flag "monly-4-regs" (NoArg (addWarn "The -monly-4-regs flag does nothing; it will be removed in a future GHC release"))
, Flag "msse2" (NoArg (setDynFlag Opt_SSE2))
------ Warning opts -------------------------------------------------
------ Compiler flags -----------------------------------------------
, Flag "fasm" (NoArg (setObjTarget HscAsm))
- , Flag "fvia-c" (NoArg (setObjTarget HscC >>
- (addWarn "The -fvia-c flag will be removed in a future GHC release")))
- , Flag "fvia-C" (NoArg (setObjTarget HscC >>
- (addWarn "The -fvia-C flag will be removed in a future GHC release")))
+ , Flag "fvia-c" (NoArg
+ (addWarn "The -fvia-c flag does nothing; it will be removed in a future GHC release"))
+ , Flag "fvia-C" (NoArg
+ (addWarn "The -fvia-C flag does nothing; it will be removed in a future GHC release"))
, Flag "fllvm" (NoArg (setObjTarget HscLlvm))
, Flag "fno-code" (NoArg (do upd $ \d -> d{ ghcLink=NoLink }
( "dicts-cheap", Opt_DictsCheap, nop ),
( "excess-precision", Opt_ExcessPrecision, nop ),
( "eager-blackholing", Opt_EagerBlackHoling, nop ),
- ( "asm-mangling", Opt_DoAsmMangling, nop ),
( "print-bind-result", Opt_PrintBindResult, nop ),
( "force-recomp", Opt_ForceRecomp, nop ),
( "hpc-no-auto", Opt_Hpc_No_Auto, nop ),
= [ Opt_AutoLinkPackages,
Opt_ReadUserPackageConf,
- Opt_DoAsmMangling,
-
Opt_SharedImplib,
+#if GHC_DEFAULT_NEW_CODEGEN
+ Opt_TryNewCodeGen,
+#endif
+
Opt_GenManifest,
Opt_EmbedManifest,
Opt_PrintBindContents,
, (Opt_ExistentialQuantification, turnOn, Opt_ExplicitForAll)
, (Opt_PolymorphicComponents, turnOn, Opt_ExplicitForAll)
, (Opt_FlexibleInstances, turnOn, Opt_TypeSynonymInstances)
+ , (Opt_FunctionalDependencies, turnOn, Opt_MultiParamTypeClasses)
, (Opt_ModalTypes, turnOn, Opt_RankNTypes)
, (Opt_ModalTypes, turnOn, Opt_ExplicitForAll)
| otherwise = dfs
-- Changes the target only if we're compiling object code. This is
--- used by -fasm and -fvia-C, which switch from one to the other, but
--- not from bytecode to object-code. The idea is that -fasm/-fvia-C
+-- used by -fasm and -fllvm, which switch from one to the other, but
+-- not from bytecode to object-code. The idea is that -fasm/-fllvm
-- can be safely used in an OPTIONS_GHC pragma.
setObjTarget :: HscTarget -> DynP ()
setObjTarget l = upd set
-- The options below are not dependent on the version of gcc, only the
-- platform.
-machdepCCOpts :: DynFlags -> ([String], -- flags for all C compilations
- [String]) -- for registerised HC compilations
-machdepCCOpts dflags = let (flagsAll, flagsRegHc) = machdepCCOpts' dflags
- in (cCcOpts ++ flagsAll, flagsRegHc)
+machdepCCOpts :: DynFlags -> [String] -- flags for all C compilations
+machdepCCOpts dflags = cCcOpts ++ machdepCCOpts'
-machdepCCOpts' :: DynFlags -> ([String], -- flags for all C compilations
- [String]) -- for registerised HC compilations
-machdepCCOpts' _dflags
+machdepCCOpts' :: [String] -- flags for all C compilations
+machdepCCOpts'
#if alpha_TARGET_ARCH
- = ( ["-w", "-mieee"
+ = ["-w", "-mieee"
#ifdef HAVE_THREADED_RTS_SUPPORT
, "-D_REENTRANT"
#endif
- ], [] )
+ ]
-- For now, to suppress the gcc warning "call-clobbered
-- register used for global register variable", we simply
-- disable all warnings altogether using the -w flag. Oh well.
#elif hppa_TARGET_ARCH
-- ___HPUX_SOURCE, not _HPUX_SOURCE, is #defined if -ansi!
-- (very nice, but too bad the HP /usr/include files don't agree.)
- = ( ["-D_HPUX_SOURCE"], [] )
-
-#elif m68k_TARGET_ARCH
- -- -fno-defer-pop : for the .hc files, we want all the pushing/
- -- popping of args to routines to be explicit; if we let things
- -- be deferred 'til after an STGJUMP, imminent death is certain!
- --
- -- -fomit-frame-pointer : *don't*
- -- It's better to have a6 completely tied up being a frame pointer
- -- rather than let GCC pick random things to do with it.
- -- (If we want to steal a6, then we would try to do things
- -- as on iX86, where we *do* steal the frame pointer [%ebp].)
- = ( [], ["-fno-defer-pop", "-fno-omit-frame-pointer"] )
+ = ["-D_HPUX_SOURCE"]
#elif i386_TARGET_ARCH
-- -fno-defer-pop : basically the same game as for m68k
--
-- -fomit-frame-pointer : *must* in .hc files; because we're stealing
-- the fp (%ebp) for our register maps.
- = let n_regs = stolen_x86_regs _dflags
- in
- (
- [ if opt_Static then "-DDONT_WANT_WIN32_DLL_SUPPORT" else ""
- ],
- [ "-fno-defer-pop",
- "-fomit-frame-pointer",
- -- we want -fno-builtin, because when gcc inlines
- -- built-in functions like memcpy() it tends to
- -- run out of registers, requiring -monly-n-regs
- "-fno-builtin",
- "-DSTOLEN_X86_REGS="++show n_regs ]
- )
-
-#elif ia64_TARGET_ARCH
- = ( [], ["-fomit-frame-pointer", "-G0"] )
-
-#elif x86_64_TARGET_ARCH
- = (
- [],
- ["-fomit-frame-pointer",
- "-fno-asynchronous-unwind-tables",
- -- the unwind tables are unnecessary for HC code,
- -- and get in the way of -split-objs. Another option
- -- would be to throw them away in the mangler, but this
- -- is easier.
- "-fno-builtin"
- -- calling builtins like strlen() using the FFI can
- -- cause gcc to run out of regs, so use the external
- -- version.
- ] )
-
-#elif sparc_TARGET_ARCH
- = ( [], ["-w"] )
- -- For now, to suppress the gcc warning "call-clobbered
- -- register used for global register variable", we simply
- -- disable all warnings altogether using the -w flag. Oh well.
+ = if opt_Static then ["-DDONT_WANT_WIN32_DLL_SUPPORT"] else []
-#elif powerpc_apple_darwin_TARGET
- -- -no-cpp-precomp:
- -- Disable Apple's precompiling preprocessor. It's a great thing
- -- for "normal" programs, but it doesn't support register variable
- -- declarations.
- = ( [], ["-no-cpp-precomp"] )
#else
- = ( [], [] )
+ = []
#endif
picCCOpts :: DynFlags -> [String]
import DynFlags
import Outputable
import UniqFM
-import Maybes ( expectJust )
+import Maybes ( expectJust )
import Exception ( evaluate )
import Distribution.Text
import Distribution.Package hiding (PackageId)
-import Data.IORef ( IORef, writeIORef, readIORef, atomicModifyIORef )
+import Data.IORef ( IORef, writeIORef, readIORef, atomicModifyIORef )
import System.Directory
import System.FilePath
import Control.Monad
-import System.Time ( ClockTime )
+import System.Time ( ClockTime )
import Data.List ( partition )
-type FileExt = String -- Filename extension
-type BaseName = String -- Basename of file
+type FileExt = String -- Filename extension
+type BaseName = String -- Basename of file
-- -----------------------------------------------------------------------------
-- The Finder
writeIORef fc_ref emptyUFM
flushModLocationCache this_pkg mlc_ref
where
- this_pkg = thisPackage (hsc_dflags hsc_env)
- fc_ref = hsc_FC hsc_env
- mlc_ref = hsc_MLC hsc_env
+ this_pkg = thisPackage (hsc_dflags hsc_env)
+ fc_ref = hsc_FC hsc_env
+ mlc_ref = hsc_MLC hsc_env
flushModLocationCache :: PackageId -> IORef ModLocationCache -> IO ()
flushModLocationCache this_pkg ref = do
_ <- evaluate =<< readIORef ref
return ()
where is_ext mod _ | modulePackageId mod /= this_pkg = True
- | otherwise = False
+ | otherwise = False
addToFinderCache :: IORef FinderCache -> ModuleName -> FindResult -> IO ()
addToFinderCache ref key val =
atomicModifyIORef ref $ \c -> (delModuleEnv c key, ())
lookupFinderCache :: IORef FinderCache -> ModuleName -> IO (Maybe FindResult)
-lookupFinderCache ref key = do
+lookupFinderCache ref key = do
c <- readIORef ref
return $! lookupUFM c key
findImportedModule :: HscEnv -> ModuleName -> Maybe FastString -> IO FindResult
findImportedModule hsc_env mod_name mb_pkg =
case mb_pkg of
- Nothing -> unqual_import
- Just pkg | pkg == fsLit "this" -> home_import -- "this" is special
- | otherwise -> pkg_import
+ Nothing -> unqual_import
+ Just pkg | pkg == fsLit "this" -> home_import -- "this" is special
+ | otherwise -> pkg_import
where
home_import = findHomeModule hsc_env mod_name
pkg_import = findExposedPackageModule hsc_env mod_name mb_pkg
- unqual_import = home_import
- `orIfNotFound`
- findExposedPackageModule hsc_env mod_name Nothing
+ unqual_import = home_import
+ `orIfNotFound`
+ findExposedPackageModule hsc_env mod_name Nothing
-- | Locate a specific 'Module'. The purpose of this function is to
-- create a 'ModLocation' for a given 'Module', that is to find out
-- where the files associated with this module live. It is used when
--- reading the interface for a module mentioned by another interface,
+-- reading the interface for a module mentioned by another interface,
-- for example (a "system import").
findExactModule :: HscEnv -> Module -> IO FindResult
findExactModule hsc_env mod =
- let dflags = hsc_dflags hsc_env in
- if modulePackageId mod == thisPackage dflags
- then findHomeModule hsc_env (moduleName mod)
- else findPackageModule hsc_env mod
+ let dflags = hsc_dflags hsc_env
+ in if modulePackageId mod == thisPackage dflags
+ then findHomeModule hsc_env (moduleName mod)
+ else findPackageModule hsc_env mod
-- -----------------------------------------------------------------------------
-- Helpers
homeSearchCache :: HscEnv -> ModuleName -> IO FindResult -> IO FindResult
homeSearchCache hsc_env mod_name do_this = do
m <- lookupFinderCache (hsc_FC hsc_env) mod_name
- case m of
+ case m of
Just result -> return result
Nothing -> do
- result <- do_this
- addToFinderCache (hsc_FC hsc_env) mod_name result
- case result of
- Found loc mod -> addToModLocationCache (hsc_MLC hsc_env) mod loc
- _other -> return ()
- return result
+ result <- do_this
+ addToFinderCache (hsc_FC hsc_env) mod_name result
+ case result of
+ Found loc mod -> addToModLocationCache (hsc_MLC hsc_env) mod loc
+ _other -> return ()
+ return result
findExposedPackageModule :: HscEnv -> ModuleName -> Maybe FastString
-> IO FindResult
Just loc -> return (Found loc mod)
Nothing -> do
result <- do_this
- case result of
- Found loc mod -> addToModLocationCache (hsc_MLC hsc_env) mod loc
- _other -> return ()
- return result
+ case result of
+ Found loc mod -> addToModLocationCache (hsc_MLC hsc_env) mod loc
+ _other -> return ()
+ return result
where
mlc = hsc_MLC hsc_env
removeFromModLocationCache (hsc_MLC hsc_env) (mkModule this_pkg mod)
-- -----------------------------------------------------------------------------
--- The internal workers
+-- The internal workers
-- | Search for a module in the home package only.
findHomeModule :: HscEnv -> ModuleName -> IO FindResult
hisuf = hiSuf dflags
mod = mkModule (thisPackage dflags) mod_name
- source_exts =
+ source_exts =
[ ("hs", mkHomeModLocationSearched dflags mod_name "hs")
, ("lhs", mkHomeModLocationSearched dflags mod_name "lhs")
]
-
- hi_exts = [ (hisuf, mkHiOnlyModLocation dflags hisuf)
- , (addBootSuffix hisuf, mkHiOnlyModLocation dflags hisuf)
- ]
-
- -- In compilation manager modes, we look for source files in the home
- -- package because we can compile these automatically. In one-shot
- -- compilation mode we look for .hi and .hi-boot files only.
+
+ hi_exts = [ (hisuf, mkHiOnlyModLocation dflags hisuf)
+ , (addBootSuffix hisuf, mkHiOnlyModLocation dflags hisuf)
+ ]
+
+ -- In compilation manager modes, we look for source files in the home
+ -- package because we can compile these automatically. In one-shot
+ -- compilation mode we look for .hi and .hi-boot files only.
exts | isOneShot (ghcMode dflags) = hi_exts
- | otherwise = source_exts
+ | otherwise = source_exts
in
-- special case for GHC.Prim; we won't find it in the filesystem.
-- This is important only when compiling the base package (where GHC.Prim
-- is a home module).
- if mod == gHC_PRIM
+ if mod == gHC_PRIM
then return (Found (error "GHC.Prim ModLocation") mod)
- else
-
- searchPathExts home_path mod exts
+ else searchPathExts home_path mod exts
-- | Search for a module in external packages only.
findPackageModule :: HscEnv -> Module -> IO FindResult
findPackageModule hsc_env mod = do
let
- dflags = hsc_dflags hsc_env
- pkg_id = modulePackageId mod
- pkg_map = pkgIdMap (pkgState dflags)
+ dflags = hsc_dflags hsc_env
+ pkg_id = modulePackageId mod
+ pkg_map = pkgIdMap (pkgState dflags)
--
case lookupPackage pkg_map pkg_id of
Nothing -> return (NoPackage pkg_id)
Just pkg_conf -> findPackageModule_ hsc_env mod pkg_conf
-
+
findPackageModule_ :: HscEnv -> Module -> PackageConfig -> IO FindResult
-findPackageModule_ hsc_env mod pkg_conf =
+findPackageModule_ hsc_env mod pkg_conf =
modLocationCache hsc_env mod $
-- special case for GHC.Prim; we won't find it in the filesystem.
- if mod == gHC_PRIM
+ if mod == gHC_PRIM
then return (Found (error "GHC.Prim ModLocation") mod)
- else
+ else
let
dflags = hsc_dflags hsc_env
tag = buildTag dflags
- -- hi-suffix for packages depends on the build tag.
+ -- hi-suffix for packages depends on the build tag.
package_hisuf | null tag = "hi"
- | otherwise = tag ++ "_hi"
+ | otherwise = tag ++ "_hi"
mk_hi_loc = mkHiOnlyModLocation dflags package_hisuf
-- General path searching
searchPathExts
- :: [FilePath] -- paths to search
- -> Module -- module name
+ :: [FilePath] -- paths to search
+ -> Module -- module name
-> [ (
- FileExt, -- suffix
- FilePath -> BaseName -> IO ModLocation -- action
+ FileExt, -- suffix
+ FilePath -> BaseName -> IO ModLocation -- action
)
- ]
+ ]
-> IO FindResult
-searchPathExts paths mod exts
+searchPathExts paths mod exts
= do result <- search to_search
{-
- hPutStrLn stderr (showSDoc $
- vcat [text "Search" <+> ppr mod <+> sep (map (text. fst) exts)
- , nest 2 (vcat (map text paths))
- , case result of
- Succeeded (loc, p) -> text "Found" <+> ppr loc
- Failed fs -> text "not found"])
--}
- return result
+ hPutStrLn stderr (showSDoc $
+ vcat [text "Search" <+> ppr mod <+> sep (map (text. fst) exts)
+ , nest 2 (vcat (map text paths))
+ , case result of
+ Succeeded (loc, p) -> text "Found" <+> ppr loc
+ Failed fs -> text "not found"])
+-}
+ return result
where
basename = moduleNameSlashes (moduleName mod)
to_search :: [(FilePath, IO ModLocation)]
to_search = [ (file, fn path basename)
- | path <- paths,
- (ext,fn) <- exts,
- let base | path == "." = basename
- | otherwise = path </> basename
- file = base <.> ext
- ]
+ | path <- paths,
+ (ext,fn) <- exts,
+ let base | path == "." = basename
+ | otherwise = path </> basename
+ file = base <.> ext
+ ]
search [] = return (NotFound { fr_paths = map fst to_search
, fr_pkg = Just (modulePackageId mod)
search ((file, mk_result) : rest) = do
b <- doesFileExist file
- if b
- then do { loc <- mk_result; return (Found loc mod) }
- else search rest
+ if b
+ then do { loc <- mk_result; return (Found loc mod) }
+ else search rest
mkHomeModLocationSearched :: DynFlags -> ModuleName -> FileExt
- -> FilePath -> BaseName -> IO ModLocation
+ -> FilePath -> BaseName -> IO ModLocation
mkHomeModLocationSearched dflags mod suff path basename = do
mkHomeModLocation2 dflags mod (path </> basename) suff
-- (b) and (c): The filename of the source file, minus its extension
--
-- ext
--- The filename extension of the source file (usually "hs" or "lhs").
+-- The filename extension of the source file (usually "hs" or "lhs").
mkHomeModLocation :: DynFlags -> ModuleName -> FilePath -> IO ModLocation
mkHomeModLocation dflags mod src_filename = do
mkHomeModLocation2 dflags mod basename extension
mkHomeModLocation2 :: DynFlags
- -> ModuleName
- -> FilePath -- Of source module, without suffix
- -> String -- Suffix
- -> IO ModLocation
+ -> ModuleName
+ -> FilePath -- Of source module, without suffix
+ -> String -- Suffix
+ -> IO ModLocation
mkHomeModLocation2 dflags mod src_basename ext = do
let mod_basename = moduleNameSlashes mod
hi_fn <- mkHiPath dflags src_basename mod_basename
return (ModLocation{ ml_hs_file = Just (src_basename <.> ext),
- ml_hi_file = hi_fn,
- ml_obj_file = obj_fn })
+ ml_hi_file = hi_fn,
+ ml_obj_file = obj_fn })
mkHiOnlyModLocation :: DynFlags -> Suffix -> FilePath -> String
- -> IO ModLocation
+ -> IO ModLocation
mkHiOnlyModLocation dflags hisuf path basename
= do let full_basename = path </> basename
obj_fn <- mkObjPath dflags full_basename basename
return ModLocation{ ml_hs_file = Nothing,
- ml_hi_file = full_basename <.> hisuf,
- -- Remove the .hi-boot suffix from
- -- hi_file, if it had one. We always
- -- want the name of the real .hi file
- -- in the ml_hi_file field.
- ml_obj_file = obj_fn
+ ml_hi_file = full_basename <.> hisuf,
+ -- Remove the .hi-boot suffix from
+ -- hi_file, if it had one. We always
+ -- want the name of the real .hi file
+ -- in the ml_hi_file field.
+ ml_obj_file = obj_fn
}
-- | Constructs the filename of a .o file for a given source file.
-- Does /not/ check whether the .o file exists
mkObjPath
:: DynFlags
- -> FilePath -- the filename of the source file, minus the extension
- -> String -- the module name with dots replaced by slashes
+ -> FilePath -- the filename of the source file, minus the extension
+ -> String -- the module name with dots replaced by slashes
-> IO FilePath
mkObjPath dflags basename mod_basename
= do let
- odir = objectDir dflags
- osuf = objectSuf dflags
-
- obj_basename | Just dir <- odir = dir </> mod_basename
- | otherwise = basename
+ odir = objectDir dflags
+ osuf = objectSuf dflags
+
+ obj_basename | Just dir <- odir = dir </> mod_basename
+ | otherwise = basename
return (obj_basename <.> osuf)
-- Does /not/ check whether the .hi file exists
mkHiPath
:: DynFlags
- -> FilePath -- the filename of the source file, minus the extension
- -> String -- the module name with dots replaced by slashes
+ -> FilePath -- the filename of the source file, minus the extension
+ -> String -- the module name with dots replaced by slashes
-> IO FilePath
mkHiPath dflags basename mod_basename
= do let
- hidir = hiDir dflags
- hisuf = hiSuf dflags
+ hidir = hiDir dflags
+ hisuf = hiSuf dflags
- hi_basename | Just dir <- hidir = dir </> mod_basename
- | otherwise = basename
+ hi_basename | Just dir <- hidir = dir </> mod_basename
+ | otherwise = basename
return (hi_basename <.> hisuf)
:: DynFlags
-> ModuleName
-> ModLocation
- -> (FilePath,FilePath,FilePath)
+ -> FilePath
mkStubPaths dflags mod location
= let
stubdir = stubDir dflags
mod_basename = moduleNameSlashes mod
- src_basename = dropExtension $ expectJust "mkStubPaths"
+ src_basename = dropExtension $ expectJust "mkStubPaths"
(ml_hs_file location)
stub_basename0
| otherwise = src_basename
stub_basename = stub_basename0 ++ "_stub"
-
- obj = ml_obj_file location
- osuf = objectSuf dflags
- stub_obj_base = dropTail (length osuf + 1) obj ++ "_stub"
- -- NB. not takeFileName, see #3093
in
- (stub_basename <.> "c",
- stub_basename <.> "h",
- stub_obj_base <.> objectSuf dflags)
+ stub_basename <.> "h"
-- -----------------------------------------------------------------------------
--- findLinkable isn't related to the other stuff in here,
+-- findLinkable isn't related to the other stuff in here,
-- but there's no other obvious place for it
findObjectLinkableMaybe :: Module -> ModLocation -> IO (Maybe Linkable)
findObjectLinkableMaybe mod locn
= do let obj_fn = ml_obj_file locn
- maybe_obj_time <- modificationTimeIfExists obj_fn
- case maybe_obj_time of
- Nothing -> return Nothing
- Just obj_time -> liftM Just (findObjectLinkable mod obj_fn obj_time)
+ maybe_obj_time <- modificationTimeIfExists obj_fn
+ case maybe_obj_time of
+ Nothing -> return Nothing
+ Just obj_time -> liftM Just (findObjectLinkable mod obj_fn obj_time)
-- Make an object linkable when we know the object file exists, and we know
-- its modification time.
findObjectLinkable :: Module -> FilePath -> ClockTime -> IO Linkable
-findObjectLinkable mod obj_fn obj_time = do
- let stub_fn = (dropExtension obj_fn ++ "_stub") <.> "o"
- stub_exist <- doesFileExist stub_fn
- if stub_exist
- then return (LM obj_time mod [DotO obj_fn, DotO stub_fn])
- else return (LM obj_time mod [DotO obj_fn])
+findObjectLinkable mod obj_fn obj_time = return (LM obj_time mod [DotO obj_fn])
+ -- We used to look for _stub.o files here, but that was a bug (#706)
+ -- Now GHC merges the stub.o into the main .o (#3687)
-- -----------------------------------------------------------------------------
-- Error messages
cantFindErr _ multiple_found _ mod_name (FoundMultiple pkgs)
= hang (ptext multiple_found <+> quotes (ppr mod_name) <> colon) 2 (
sep [ptext (sLit "it was found in multiple packages:"),
- hsep (map (text.packageIdString) pkgs)]
+ hsep (map (text.packageIdString) pkgs)]
)
cantFindErr cannot_find _ dflags mod_name find_result
= ptext cannot_find <+> quotes (ppr mod_name)
more_info
= case find_result of
- NoPackage pkg
- -> ptext (sLit "no package matching") <+> quotes (ppr pkg) <+>
- ptext (sLit "was found")
+ NoPackage pkg
+ -> ptext (sLit "no package matching") <+> quotes (ppr pkg) <+>
+ ptext (sLit "was found")
NotFound { fr_paths = files, fr_pkg = mb_pkg
, fr_mods_hidden = mod_hiddens, fr_pkgs_hidden = pkg_hiddens
, fr_suggestions = suggest }
- | Just pkg <- mb_pkg, pkg /= thisPackage dflags
- -> not_found_in_package pkg files
+ | Just pkg <- mb_pkg, pkg /= thisPackage dflags
+ -> not_found_in_package pkg files
| not (null suggest)
-> pp_suggestions suggest $$ tried_these files
| null files && null mod_hiddens && null pkg_hiddens
-> ptext (sLit "It is not a module in the current program, or in any known package.")
- | otherwise
- -> vcat (map pkg_hidden pkg_hiddens) $$
+ | otherwise
+ -> vcat (map pkg_hidden pkg_hiddens) $$
vcat (map mod_hidden mod_hiddens) $$
tried_these files
tried_these files
| null files = empty
| verbosity dflags < 3 =
- ptext (sLit "Use -v to see a list of the files searched for.")
+ ptext (sLit "Use -v to see a list of the files searched for.")
| otherwise =
hang (ptext (sLit "Locations searched:")) 2 $ vcat (map text files)
-
+
pkg_hidden pkg =
ptext (sLit "It is a member of the hidden package") <+> quotes (ppr pkg)
<> dot $$ cabal_pkg_hidden_hint pkg
-- | Type environment for types declared in this module
cm_types :: !TypeEnv,
-- | Declarations
- cm_binds :: [CoreBind],
- -- | Imports
- cm_imports :: ![Module]
+ cm_binds :: [CoreBind]
}
instance Outputable CoreModule where
gutsToCoreModule :: Either (CgGuts, ModDetails) ModGuts -> CoreModule
gutsToCoreModule (Left (cg, md)) = CoreModule {
cm_module = cg_module cg, cm_types = md_types md,
- cm_imports = cg_dir_imps cg, cm_binds = cg_binds cg
+ cm_binds = cg_binds cg
}
gutsToCoreModule (Right mg) = CoreModule {
cm_module = mg_module mg, cm_types = mg_types mg,
- cm_imports = moduleEnvKeys (mg_dir_imps mg), cm_binds = mg_binds mg
+ cm_binds = mg_binds mg
}
-- %************************************************************************
--- -----------------------------------------------------------------------------\r
---\r
--- (c) The University of Glasgow, 2005\r
---\r
--- This module deals with --make\r
--- -----------------------------------------------------------------------------\r
-\r
-module GhcMake( \r
- depanal, \r
- load, LoadHowMuch(..),\r
-\r
- topSortModuleGraph, \r
-\r
- noModError, cyclicModuleErr\r
- ) where\r
-\r
-#include "HsVersions.h"\r
-\r
-#ifdef GHCI\r
-import qualified Linker ( unload )\r
-#endif\r
-\r
-import DriverPipeline\r
-import DriverPhases\r
-import GhcMonad\r
-import Module\r
-import HscTypes\r
-import ErrUtils\r
-import DynFlags\r
-import HsSyn hiding ((<.>))\r
-import Finder\r
-import HeaderInfo\r
-import TcIface ( typecheckIface )\r
-import TcRnMonad ( initIfaceCheck )\r
-import RdrName ( RdrName )\r
-\r
-import Exception ( evaluate, tryIO )\r
-import Panic\r
-import SysTools\r
-import BasicTypes\r
-import SrcLoc\r
-import Util\r
-import Digraph\r
-import Bag ( listToBag )\r
-import Maybes ( expectJust, mapCatMaybes )\r
-import StringBuffer\r
-import FastString\r
-import Outputable\r
-import UniqFM\r
-\r
-import qualified Data.Map as Map\r
-import qualified FiniteMap as Map( insertListWith)\r
-\r
-import System.Directory ( doesFileExist, getModificationTime )\r
-import System.IO ( fixIO )\r
-import System.IO.Error ( isDoesNotExistError )\r
-import System.Time ( ClockTime )\r
-import System.FilePath\r
-import Control.Monad\r
-import Data.Maybe\r
-import Data.List\r
-import qualified Data.List as List\r
-\r
--- -----------------------------------------------------------------------------\r
--- Loading the program\r
-\r
--- | Perform a dependency analysis starting from the current targets\r
--- and update the session with the new module graph.\r
---\r
--- Dependency analysis entails parsing the @import@ directives and may\r
--- therefore require running certain preprocessors.\r
---\r
--- Note that each 'ModSummary' in the module graph caches its 'DynFlags'.\r
--- These 'DynFlags' are determined by the /current/ session 'DynFlags' and the\r
--- @OPTIONS@ and @LANGUAGE@ pragmas of the parsed module. Thus if you want to\r
--- changes to the 'DynFlags' to take effect you need to call this function\r
--- again.\r
---\r
-depanal :: GhcMonad m =>\r
- [ModuleName] -- ^ excluded modules\r
- -> Bool -- ^ allow duplicate roots\r
- -> m ModuleGraph\r
-depanal excluded_mods allow_dup_roots = do\r
- hsc_env <- getSession\r
- let\r
- dflags = hsc_dflags hsc_env\r
- targets = hsc_targets hsc_env\r
- old_graph = hsc_mod_graph hsc_env\r
- \r
- liftIO $ showPass dflags "Chasing dependencies"\r
- liftIO $ debugTraceMsg dflags 2 (hcat [\r
- text "Chasing modules from: ",\r
- hcat (punctuate comma (map pprTarget targets))])\r
-\r
- mod_graph <- liftIO $ downsweep hsc_env old_graph excluded_mods allow_dup_roots\r
- modifySession $ \_ -> hsc_env { hsc_mod_graph = mod_graph }\r
- return mod_graph\r
-\r
--- | Describes which modules of the module graph need to be loaded.\r
-data LoadHowMuch\r
- = LoadAllTargets\r
- -- ^ Load all targets and its dependencies.\r
- | LoadUpTo ModuleName\r
- -- ^ Load only the given module and its dependencies.\r
- | LoadDependenciesOf ModuleName\r
- -- ^ Load only the dependencies of the given module, but not the module\r
- -- itself.\r
-\r
--- | Try to load the program. See 'LoadHowMuch' for the different modes.\r
---\r
--- This function implements the core of GHC's @--make@ mode. It preprocesses,\r
--- compiles and loads the specified modules, avoiding re-compilation wherever\r
--- possible. Depending on the target (see 'DynFlags.hscTarget') compilating\r
--- and loading may result in files being created on disk.\r
---\r
--- Calls the 'reportModuleCompilationResult' callback after each compiling\r
--- each module, whether successful or not.\r
---\r
--- Throw a 'SourceError' if errors are encountered before the actual\r
--- compilation starts (e.g., during dependency analysis). All other errors\r
--- are reported using the callback.\r
---\r
-load :: GhcMonad m => LoadHowMuch -> m SuccessFlag\r
-load how_much = do\r
- mod_graph <- depanal [] False\r
- load2 how_much mod_graph\r
-\r
-load2 :: GhcMonad m => LoadHowMuch -> [ModSummary]\r
- -> m SuccessFlag\r
-load2 how_much mod_graph = do\r
- guessOutputFile\r
- hsc_env <- getSession\r
-\r
- let hpt1 = hsc_HPT hsc_env\r
- let dflags = hsc_dflags hsc_env\r
-\r
- -- The "bad" boot modules are the ones for which we have\r
- -- B.hs-boot in the module graph, but no B.hs\r
- -- The downsweep should have ensured this does not happen\r
- -- (see msDeps)\r
- let all_home_mods = [ms_mod_name s \r
- | s <- mod_graph, not (isBootSummary s)]\r
- bad_boot_mods = [s | s <- mod_graph, isBootSummary s,\r
- not (ms_mod_name s `elem` all_home_mods)]\r
- ASSERT( null bad_boot_mods ) return ()\r
-\r
- -- check that the module given in HowMuch actually exists, otherwise\r
- -- topSortModuleGraph will bomb later.\r
- let checkHowMuch (LoadUpTo m) = checkMod m\r
- checkHowMuch (LoadDependenciesOf m) = checkMod m\r
- checkHowMuch _ = id\r
-\r
- checkMod m and_then\r
- | m `elem` all_home_mods = and_then\r
- | otherwise = do \r
- liftIO $ errorMsg dflags (text "no such module:" <+>\r
- quotes (ppr m))\r
- return Failed\r
-\r
- checkHowMuch how_much $ do\r
-\r
- -- mg2_with_srcimps drops the hi-boot nodes, returning a \r
- -- graph with cycles. Among other things, it is used for\r
- -- backing out partially complete cycles following a failed\r
- -- upsweep, and for removing from hpt all the modules\r
- -- not in strict downwards closure, during calls to compile.\r
- let mg2_with_srcimps :: [SCC ModSummary]\r
- mg2_with_srcimps = topSortModuleGraph True mod_graph Nothing\r
-\r
- -- If we can determine that any of the {-# SOURCE #-} imports\r
- -- are definitely unnecessary, then emit a warning.\r
- warnUnnecessarySourceImports mg2_with_srcimps\r
-\r
- let\r
- -- check the stability property for each module.\r
- stable_mods@(stable_obj,stable_bco)\r
- = checkStability hpt1 mg2_with_srcimps all_home_mods\r
-\r
- -- prune bits of the HPT which are definitely redundant now,\r
- -- to save space.\r
- pruned_hpt = pruneHomePackageTable hpt1 \r
- (flattenSCCs mg2_with_srcimps)\r
- stable_mods\r
-\r
- _ <- liftIO $ evaluate pruned_hpt\r
-\r
- -- before we unload anything, make sure we don't leave an old\r
- -- interactive context around pointing to dead bindings. Also,\r
- -- write the pruned HPT to allow the old HPT to be GC'd.\r
- modifySession $ \_ -> hsc_env{ hsc_IC = emptyInteractiveContext,\r
- hsc_HPT = pruned_hpt }\r
-\r
- liftIO $ debugTraceMsg dflags 2 (text "Stable obj:" <+> ppr stable_obj $$\r
- text "Stable BCO:" <+> ppr stable_bco)\r
-\r
- -- Unload any modules which are going to be re-linked this time around.\r
- let stable_linkables = [ linkable\r
- | m <- stable_obj++stable_bco,\r
- Just hmi <- [lookupUFM pruned_hpt m],\r
- Just linkable <- [hm_linkable hmi] ]\r
- liftIO $ unload hsc_env stable_linkables\r
-\r
- -- We could at this point detect cycles which aren't broken by\r
- -- a source-import, and complain immediately, but it seems better\r
- -- to let upsweep_mods do this, so at least some useful work gets\r
- -- done before the upsweep is abandoned.\r
- --hPutStrLn stderr "after tsort:\n"\r
- --hPutStrLn stderr (showSDoc (vcat (map ppr mg2)))\r
-\r
- -- Now do the upsweep, calling compile for each module in\r
- -- turn. Final result is version 3 of everything.\r
-\r
- -- Topologically sort the module graph, this time including hi-boot\r
- -- nodes, and possibly just including the portion of the graph\r
- -- reachable from the module specified in the 2nd argument to load.\r
- -- This graph should be cycle-free.\r
- -- If we're restricting the upsweep to a portion of the graph, we\r
- -- also want to retain everything that is still stable.\r
- let full_mg :: [SCC ModSummary]\r
- full_mg = topSortModuleGraph False mod_graph Nothing\r
-\r
- maybe_top_mod = case how_much of\r
- LoadUpTo m -> Just m\r
- LoadDependenciesOf m -> Just m\r
- _ -> Nothing\r
-\r
- partial_mg0 :: [SCC ModSummary]\r
- partial_mg0 = topSortModuleGraph False mod_graph maybe_top_mod\r
-\r
- -- LoadDependenciesOf m: we want the upsweep to stop just\r
- -- short of the specified module (unless the specified module\r
- -- is stable).\r
- partial_mg\r
- | LoadDependenciesOf _mod <- how_much\r
- = ASSERT( case last partial_mg0 of \r
- AcyclicSCC ms -> ms_mod_name ms == _mod; _ -> False )\r
- List.init partial_mg0\r
- | otherwise\r
- = partial_mg0\r
- \r
- stable_mg = \r
- [ AcyclicSCC ms\r
- | AcyclicSCC ms <- full_mg,\r
- ms_mod_name ms `elem` stable_obj++stable_bco,\r
- ms_mod_name ms `notElem` [ ms_mod_name ms' | \r
- AcyclicSCC ms' <- partial_mg ] ]\r
-\r
- mg = stable_mg ++ partial_mg\r
-\r
- -- clean up between compilations\r
- let cleanup = cleanTempFilesExcept dflags\r
- (ppFilesFromSummaries (flattenSCCs mg2_with_srcimps))\r
-\r
- liftIO $ debugTraceMsg dflags 2 (hang (text "Ready for upsweep")\r
- 2 (ppr mg))\r
-\r
- setSession hsc_env{ hsc_HPT = emptyHomePackageTable }\r
- (upsweep_ok, modsUpswept)\r
- <- upsweep pruned_hpt stable_mods cleanup mg\r
-\r
- -- Make modsDone be the summaries for each home module now\r
- -- available; this should equal the domain of hpt3.\r
- -- Get in in a roughly top .. bottom order (hence reverse).\r
-\r
- let modsDone = reverse modsUpswept\r
-\r
- -- Try and do linking in some form, depending on whether the\r
- -- upsweep was completely or only partially successful.\r
-\r
- if succeeded upsweep_ok\r
-\r
- then \r
- -- Easy; just relink it all.\r
- do liftIO $ debugTraceMsg dflags 2 (text "Upsweep completely successful.")\r
-\r
- -- Clean up after ourselves\r
- liftIO $ cleanTempFilesExcept dflags (ppFilesFromSummaries modsDone)\r
-\r
- -- Issue a warning for the confusing case where the user\r
- -- said '-o foo' but we're not going to do any linking.\r
- -- We attempt linking if either (a) one of the modules is\r
- -- called Main, or (b) the user said -no-hs-main, indicating\r
- -- that main() is going to come from somewhere else.\r
- --\r
- let ofile = outputFile dflags\r
- let no_hs_main = dopt Opt_NoHsMain dflags\r
- let \r
- main_mod = mainModIs dflags\r
- a_root_is_Main = any ((==main_mod).ms_mod) mod_graph\r
- do_linking = a_root_is_Main || no_hs_main || ghcLink dflags == LinkDynLib\r
-\r
- when (ghcLink dflags == LinkBinary \r
- && isJust ofile && not do_linking) $\r
- liftIO $ debugTraceMsg dflags 1 $\r
- text ("Warning: output was redirected with -o, " ++\r
- "but no output will be generated\n" ++\r
- "because there is no " ++ \r
- moduleNameString (moduleName main_mod) ++ " module.")\r
-\r
- -- link everything together\r
- hsc_env1 <- getSession\r
- linkresult <- liftIO $ link (ghcLink dflags) dflags do_linking (hsc_HPT hsc_env1)\r
-\r
- loadFinish Succeeded linkresult\r
-\r
- else \r
- -- Tricky. We need to back out the effects of compiling any\r
- -- half-done cycles, both so as to clean up the top level envs\r
- -- and to avoid telling the interactive linker to link them.\r
- do liftIO $ debugTraceMsg dflags 2 (text "Upsweep partially successful.")\r
-\r
- let modsDone_names\r
- = map ms_mod modsDone\r
- let mods_to_zap_names \r
- = findPartiallyCompletedCycles modsDone_names \r
- mg2_with_srcimps\r
- let mods_to_keep\r
- = filter ((`notElem` mods_to_zap_names).ms_mod) \r
- modsDone\r
-\r
- hsc_env1 <- getSession\r
- let hpt4 = retainInTopLevelEnvs (map ms_mod_name mods_to_keep) \r
- (hsc_HPT hsc_env1)\r
-\r
- -- Clean up after ourselves\r
- liftIO $ cleanTempFilesExcept dflags (ppFilesFromSummaries mods_to_keep)\r
-\r
- -- there should be no Nothings where linkables should be, now\r
- ASSERT(all (isJust.hm_linkable) \r
- (eltsUFM (hsc_HPT hsc_env))) do\r
- \r
- -- Link everything together\r
- linkresult <- liftIO $ link (ghcLink dflags) dflags False hpt4\r
-\r
- modifySession $ \hsc_env -> hsc_env{ hsc_HPT = hpt4 }\r
- loadFinish Failed linkresult\r
-\r
--- Finish up after a load.\r
-\r
--- If the link failed, unload everything and return.\r
-loadFinish :: GhcMonad m =>\r
- SuccessFlag -> SuccessFlag\r
- -> m SuccessFlag\r
-loadFinish _all_ok Failed\r
- = do hsc_env <- getSession\r
- liftIO $ unload hsc_env []\r
- modifySession discardProg\r
- return Failed\r
-\r
--- Empty the interactive context and set the module context to the topmost\r
--- newly loaded module, or the Prelude if none were loaded.\r
-loadFinish all_ok Succeeded\r
- = do modifySession $ \hsc_env -> hsc_env{ hsc_IC = emptyInteractiveContext }\r
- return all_ok\r
-\r
-\r
--- Forget the current program, but retain the persistent info in HscEnv\r
-discardProg :: HscEnv -> HscEnv\r
-discardProg hsc_env\r
- = hsc_env { hsc_mod_graph = emptyMG, \r
- hsc_IC = emptyInteractiveContext,\r
- hsc_HPT = emptyHomePackageTable }\r
-\r
--- used to fish out the preprocess output files for the purposes of\r
--- cleaning up. The preprocessed file *might* be the same as the\r
--- source file, but that doesn't do any harm.\r
-ppFilesFromSummaries :: [ModSummary] -> [FilePath]\r
-ppFilesFromSummaries summaries = map ms_hspp_file summaries\r
-\r
--- | If there is no -o option, guess the name of target executable\r
--- by using top-level source file name as a base.\r
-guessOutputFile :: GhcMonad m => m ()\r
-guessOutputFile = modifySession $ \env ->\r
- let dflags = hsc_dflags env\r
- mod_graph = hsc_mod_graph env\r
- mainModuleSrcPath :: Maybe String\r
- mainModuleSrcPath = do\r
- let isMain = (== mainModIs dflags) . ms_mod\r
- [ms] <- return (filter isMain mod_graph)\r
- ml_hs_file (ms_location ms)\r
- name = fmap dropExtension mainModuleSrcPath\r
-\r
-#if defined(mingw32_HOST_OS)\r
- -- we must add the .exe extention unconditionally here, otherwise\r
- -- when name has an extension of its own, the .exe extension will\r
- -- not be added by DriverPipeline.exeFileName. See #2248\r
- name_exe = fmap (<.> "exe") name\r
-#else\r
- name_exe = name\r
-#endif\r
- in\r
- case outputFile dflags of\r
- Just _ -> env\r
- Nothing -> env { hsc_dflags = dflags { outputFile = name_exe } }\r
-\r
--- -----------------------------------------------------------------------------\r
-\r
--- | Prune the HomePackageTable\r
---\r
--- Before doing an upsweep, we can throw away:\r
---\r
--- - For non-stable modules:\r
--- - all ModDetails, all linked code\r
--- - all unlinked code that is out of date with respect to\r
--- the source file\r
---\r
--- This is VERY IMPORTANT otherwise we'll end up requiring 2x the\r
--- space at the end of the upsweep, because the topmost ModDetails of the\r
--- old HPT holds on to the entire type environment from the previous\r
--- compilation.\r
-\r
-pruneHomePackageTable\r
- :: HomePackageTable\r
- -> [ModSummary]\r
- -> ([ModuleName],[ModuleName])\r
- -> HomePackageTable\r
-\r
-pruneHomePackageTable hpt summ (stable_obj, stable_bco)\r
- = mapUFM prune hpt\r
- where prune hmi\r
- | is_stable modl = hmi'\r
- | otherwise = hmi'{ hm_details = emptyModDetails }\r
- where\r
- modl = moduleName (mi_module (hm_iface hmi))\r
- hmi' | Just l <- hm_linkable hmi, linkableTime l < ms_hs_date ms\r
- = hmi{ hm_linkable = Nothing }\r
- | otherwise\r
- = hmi\r
- where ms = expectJust "prune" (lookupUFM ms_map modl)\r
-\r
- ms_map = listToUFM [(ms_mod_name ms, ms) | ms <- summ]\r
-\r
- is_stable m = m `elem` stable_obj || m `elem` stable_bco\r
-\r
--- -----------------------------------------------------------------------------\r
-\r
--- Return (names of) all those in modsDone who are part of a cycle\r
--- as defined by theGraph.\r
-findPartiallyCompletedCycles :: [Module] -> [SCC ModSummary] -> [Module]\r
-findPartiallyCompletedCycles modsDone theGraph\r
- = chew theGraph\r
- where\r
- chew [] = []\r
- chew ((AcyclicSCC _):rest) = chew rest -- acyclic? not interesting.\r
- chew ((CyclicSCC vs):rest)\r
- = let names_in_this_cycle = nub (map ms_mod vs)\r
- mods_in_this_cycle \r
- = nub ([done | done <- modsDone, \r
- done `elem` names_in_this_cycle])\r
- chewed_rest = chew rest\r
- in \r
- if notNull mods_in_this_cycle\r
- && length mods_in_this_cycle < length names_in_this_cycle\r
- then mods_in_this_cycle ++ chewed_rest\r
- else chewed_rest\r
-\r
-\r
--- ---------------------------------------------------------------------------\r
--- Unloading\r
-\r
-unload :: HscEnv -> [Linkable] -> IO ()\r
-unload hsc_env stable_linkables -- Unload everthing *except* 'stable_linkables'\r
- = case ghcLink (hsc_dflags hsc_env) of\r
-#ifdef GHCI\r
- LinkInMemory -> Linker.unload (hsc_dflags hsc_env) stable_linkables\r
-#else\r
- LinkInMemory -> panic "unload: no interpreter"\r
- -- urgh. avoid warnings:\r
- hsc_env stable_linkables\r
-#endif\r
- _other -> return ()\r
-\r
--- -----------------------------------------------------------------------------\r
-\r
-{- |\r
-\r
- Stability tells us which modules definitely do not need to be recompiled.\r
- There are two main reasons for having stability:\r
- \r
- - avoid doing a complete upsweep of the module graph in GHCi when\r
- modules near the bottom of the tree have not changed.\r
-\r
- - to tell GHCi when it can load object code: we can only load object code\r
- for a module when we also load object code fo all of the imports of the\r
- module. So we need to know that we will definitely not be recompiling\r
- any of these modules, and we can use the object code.\r
-\r
- The stability check is as follows. Both stableObject and\r
- stableBCO are used during the upsweep phase later.\r
-\r
-@\r
- stable m = stableObject m || stableBCO m\r
-\r
- stableObject m = \r
- all stableObject (imports m)\r
- && old linkable does not exist, or is == on-disk .o\r
- && date(on-disk .o) > date(.hs)\r
-\r
- stableBCO m =\r
- all stable (imports m)\r
- && date(BCO) > date(.hs)\r
-@\r
-\r
- These properties embody the following ideas:\r
-\r
- - if a module is stable, then:\r
-\r
- - if it has been compiled in a previous pass (present in HPT)\r
- then it does not need to be compiled or re-linked.\r
-\r
- - if it has not been compiled in a previous pass,\r
- then we only need to read its .hi file from disk and\r
- link it to produce a 'ModDetails'.\r
-\r
- - if a modules is not stable, we will definitely be at least\r
- re-linking, and possibly re-compiling it during the 'upsweep'.\r
- All non-stable modules can (and should) therefore be unlinked\r
- before the 'upsweep'.\r
-\r
- - Note that objects are only considered stable if they only depend\r
- on other objects. We can't link object code against byte code.\r
--}\r
-\r
-checkStability\r
- :: HomePackageTable -- HPT from last compilation\r
- -> [SCC ModSummary] -- current module graph (cyclic)\r
- -> [ModuleName] -- all home modules\r
- -> ([ModuleName], -- stableObject\r
- [ModuleName]) -- stableBCO\r
-\r
-checkStability hpt sccs all_home_mods = foldl checkSCC ([],[]) sccs\r
- where\r
- checkSCC (stable_obj, stable_bco) scc0\r
- | stableObjects = (scc_mods ++ stable_obj, stable_bco)\r
- | stableBCOs = (stable_obj, scc_mods ++ stable_bco)\r
- | otherwise = (stable_obj, stable_bco)\r
- where\r
- scc = flattenSCC scc0\r
- scc_mods = map ms_mod_name scc\r
- home_module m = m `elem` all_home_mods && m `notElem` scc_mods\r
-\r
- scc_allimps = nub (filter home_module (concatMap ms_home_allimps scc))\r
- -- all imports outside the current SCC, but in the home pkg\r
- \r
- stable_obj_imps = map (`elem` stable_obj) scc_allimps\r
- stable_bco_imps = map (`elem` stable_bco) scc_allimps\r
-\r
- stableObjects = \r
- and stable_obj_imps\r
- && all object_ok scc\r
-\r
- stableBCOs = \r
- and (zipWith (||) stable_obj_imps stable_bco_imps)\r
- && all bco_ok scc\r
-\r
- object_ok ms\r
- | Just t <- ms_obj_date ms = t >= ms_hs_date ms \r
- && same_as_prev t\r
- | otherwise = False\r
- where\r
- same_as_prev t = case lookupUFM hpt (ms_mod_name ms) of\r
- Just hmi | Just l <- hm_linkable hmi\r
- -> isObjectLinkable l && t == linkableTime l\r
- _other -> True\r
- -- why '>=' rather than '>' above? If the filesystem stores\r
- -- times to the nearset second, we may occasionally find that\r
- -- the object & source have the same modification time, \r
- -- especially if the source was automatically generated\r
- -- and compiled. Using >= is slightly unsafe, but it matches\r
- -- make's behaviour.\r
-\r
- bco_ok ms\r
- = case lookupUFM hpt (ms_mod_name ms) of\r
- Just hmi | Just l <- hm_linkable hmi ->\r
- not (isObjectLinkable l) && \r
- linkableTime l >= ms_hs_date ms\r
- _other -> False\r
-\r
--- -----------------------------------------------------------------------------\r
-\r
--- | The upsweep\r
---\r
--- This is where we compile each module in the module graph, in a pass\r
--- from the bottom to the top of the graph.\r
---\r
--- There better had not be any cyclic groups here -- we check for them.\r
-\r
-upsweep\r
- :: GhcMonad m\r
- => HomePackageTable -- ^ HPT from last time round (pruned)\r
- -> ([ModuleName],[ModuleName]) -- ^ stable modules (see checkStability)\r
- -> IO () -- ^ How to clean up unwanted tmp files\r
- -> [SCC ModSummary] -- ^ Mods to do (the worklist)\r
- -> m (SuccessFlag,\r
- [ModSummary])\r
- -- ^ Returns:\r
- --\r
- -- 1. A flag whether the complete upsweep was successful.\r
- -- 2. The 'HscEnv' in the monad has an updated HPT\r
- -- 3. A list of modules which succeeded loading.\r
-\r
-upsweep old_hpt stable_mods cleanup sccs = do\r
- (res, done) <- upsweep' old_hpt [] sccs 1 (length sccs)\r
- return (res, reverse done)\r
- where\r
-\r
- upsweep' _old_hpt done\r
- [] _ _\r
- = return (Succeeded, done)\r
-\r
- upsweep' _old_hpt done\r
- (CyclicSCC ms:_) _ _\r
- = do dflags <- getSessionDynFlags\r
- liftIO $ fatalErrorMsg dflags (cyclicModuleErr ms)\r
- return (Failed, done)\r
-\r
- upsweep' old_hpt done\r
- (AcyclicSCC mod:mods) mod_index nmods\r
- = do -- putStrLn ("UPSWEEP_MOD: hpt = " ++ \r
- -- show (map (moduleUserString.moduleName.mi_module.hm_iface) \r
- -- (moduleEnvElts (hsc_HPT hsc_env)))\r
- let logger _mod = defaultWarnErrLogger\r
-\r
- hsc_env <- getSession\r
- mb_mod_info\r
- <- handleSourceError\r
- (\err -> do logger mod (Just err); return Nothing) $ do\r
- mod_info <- liftIO $ upsweep_mod hsc_env old_hpt stable_mods\r
- mod mod_index nmods\r
- logger mod Nothing -- log warnings\r
- return (Just mod_info)\r
-\r
- liftIO cleanup -- Remove unwanted tmp files between compilations\r
-\r
- case mb_mod_info of\r
- Nothing -> return (Failed, done)\r
- Just mod_info -> do\r
- let this_mod = ms_mod_name mod\r
-\r
- -- Add new info to hsc_env\r
- hpt1 = addToUFM (hsc_HPT hsc_env) this_mod mod_info\r
- hsc_env1 = hsc_env { hsc_HPT = hpt1 }\r
-\r
- -- Space-saving: delete the old HPT entry\r
- -- for mod BUT if mod is a hs-boot\r
- -- node, don't delete it. For the\r
- -- interface, the HPT entry is probaby for the\r
- -- main Haskell source file. Deleting it\r
- -- would force the real module to be recompiled\r
- -- every time.\r
- old_hpt1 | isBootSummary mod = old_hpt\r
- | otherwise = delFromUFM old_hpt this_mod\r
-\r
- done' = mod:done\r
-\r
- -- fixup our HomePackageTable after we've finished compiling\r
- -- a mutually-recursive loop. See reTypecheckLoop, below.\r
- hsc_env2 <- liftIO $ reTypecheckLoop hsc_env1 mod done'\r
- setSession hsc_env2\r
-\r
- upsweep' old_hpt1 done' mods (mod_index+1) nmods\r
-\r
--- | Compile a single module. Always produce a Linkable for it if\r
--- successful. If no compilation happened, return the old Linkable.\r
-upsweep_mod :: HscEnv\r
- -> HomePackageTable\r
- -> ([ModuleName],[ModuleName])\r
- -> ModSummary\r
- -> Int -- index of module\r
- -> Int -- total number of modules\r
- -> IO HomeModInfo\r
-\r
-upsweep_mod hsc_env old_hpt (stable_obj, stable_bco) summary mod_index nmods\r
- = let \r
- this_mod_name = ms_mod_name summary\r
- this_mod = ms_mod summary\r
- mb_obj_date = ms_obj_date summary\r
- obj_fn = ml_obj_file (ms_location summary)\r
- hs_date = ms_hs_date summary\r
-\r
- is_stable_obj = this_mod_name `elem` stable_obj\r
- is_stable_bco = this_mod_name `elem` stable_bco\r
-\r
- old_hmi = lookupUFM old_hpt this_mod_name\r
-\r
- -- We're using the dflags for this module now, obtained by\r
- -- applying any options in its LANGUAGE & OPTIONS_GHC pragmas.\r
- dflags = ms_hspp_opts summary\r
- prevailing_target = hscTarget (hsc_dflags hsc_env)\r
- local_target = hscTarget dflags\r
-\r
- -- If OPTIONS_GHC contains -fasm or -fvia-C, be careful that\r
- -- we don't do anything dodgy: these should only work to change\r
- -- from -fvia-C to -fasm and vice-versa, otherwise we could \r
- -- end up trying to link object code to byte code.\r
- target = if prevailing_target /= local_target\r
- && (not (isObjectTarget prevailing_target)\r
- || not (isObjectTarget local_target))\r
- then prevailing_target\r
- else local_target \r
-\r
- -- store the corrected hscTarget into the summary\r
- summary' = summary{ ms_hspp_opts = dflags { hscTarget = target } }\r
-\r
- -- The old interface is ok if\r
- -- a) we're compiling a source file, and the old HPT\r
- -- entry is for a source file\r
- -- b) we're compiling a hs-boot file\r
- -- Case (b) allows an hs-boot file to get the interface of its\r
- -- real source file on the second iteration of the compilation\r
- -- manager, but that does no harm. Otherwise the hs-boot file\r
- -- will always be recompiled\r
- \r
- mb_old_iface \r
- = case old_hmi of\r
- Nothing -> Nothing\r
- Just hm_info | isBootSummary summary -> Just iface\r
- | not (mi_boot iface) -> Just iface\r
- | otherwise -> Nothing\r
- where \r
- iface = hm_iface hm_info\r
-\r
- compile_it :: Maybe Linkable -> IO HomeModInfo\r
- compile_it mb_linkable = \r
- compile hsc_env summary' mod_index nmods \r
- mb_old_iface mb_linkable\r
-\r
- compile_it_discard_iface :: Maybe Linkable -> IO HomeModInfo\r
- compile_it_discard_iface mb_linkable =\r
- compile hsc_env summary' mod_index nmods\r
- Nothing mb_linkable\r
-\r
- -- With the HscNothing target we create empty linkables to avoid\r
- -- recompilation. We have to detect these to recompile anyway if\r
- -- the target changed since the last compile.\r
- is_fake_linkable\r
- | Just hmi <- old_hmi, Just l <- hm_linkable hmi =\r
- null (linkableUnlinked l)\r
- | otherwise =\r
- -- we have no linkable, so it cannot be fake\r
- False\r
-\r
- implies False _ = True\r
- implies True x = x\r
-\r
- in\r
- case () of\r
- _\r
- -- Regardless of whether we're generating object code or\r
- -- byte code, we can always use an existing object file\r
- -- if it is *stable* (see checkStability).\r
- | is_stable_obj, Just hmi <- old_hmi -> do\r
- liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5\r
- (text "skipping stable obj mod:" <+> ppr this_mod_name)\r
- return hmi\r
- -- object is stable, and we have an entry in the\r
- -- old HPT: nothing to do\r
-\r
- | is_stable_obj, isNothing old_hmi -> do\r
- liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5\r
- (text "compiling stable on-disk mod:" <+> ppr this_mod_name)\r
- linkable <- liftIO $ findObjectLinkable this_mod obj_fn\r
- (expectJust "upsweep1" mb_obj_date)\r
- compile_it (Just linkable)\r
- -- object is stable, but we need to load the interface\r
- -- off disk to make a HMI.\r
-\r
- | not (isObjectTarget target), is_stable_bco,\r
- (target /= HscNothing) `implies` not is_fake_linkable ->\r
- ASSERT(isJust old_hmi) -- must be in the old_hpt\r
- let Just hmi = old_hmi in do\r
- liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5\r
- (text "skipping stable BCO mod:" <+> ppr this_mod_name)\r
- return hmi\r
- -- BCO is stable: nothing to do\r
-\r
- | not (isObjectTarget target),\r
- Just hmi <- old_hmi,\r
- Just l <- hm_linkable hmi,\r
- not (isObjectLinkable l),\r
- (target /= HscNothing) `implies` not is_fake_linkable,\r
- linkableTime l >= ms_hs_date summary -> do\r
- liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5\r
- (text "compiling non-stable BCO mod:" <+> ppr this_mod_name)\r
- compile_it (Just l)\r
- -- we have an old BCO that is up to date with respect\r
- -- to the source: do a recompilation check as normal.\r
-\r
- -- When generating object code, if there's an up-to-date\r
- -- object file on the disk, then we can use it.\r
- -- However, if the object file is new (compared to any\r
- -- linkable we had from a previous compilation), then we\r
- -- must discard any in-memory interface, because this\r
- -- means the user has compiled the source file\r
- -- separately and generated a new interface, that we must\r
- -- read from the disk.\r
- --\r
- | isObjectTarget target,\r
- Just obj_date <- mb_obj_date,\r
- obj_date >= hs_date -> do\r
- case old_hmi of\r
- Just hmi\r
- | Just l <- hm_linkable hmi,\r
- isObjectLinkable l && linkableTime l == obj_date -> do\r
- liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5\r
- (text "compiling mod with new on-disk obj:" <+> ppr this_mod_name)\r
- compile_it (Just l)\r
- _otherwise -> do\r
- liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5\r
- (text "compiling mod with new on-disk obj2:" <+> ppr this_mod_name)\r
- linkable <- liftIO $ findObjectLinkable this_mod obj_fn obj_date\r
- compile_it_discard_iface (Just linkable)\r
-\r
- _otherwise -> do\r
- liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5\r
- (text "compiling mod:" <+> ppr this_mod_name)\r
- compile_it Nothing\r
-\r
-\r
-\r
--- Filter modules in the HPT\r
-retainInTopLevelEnvs :: [ModuleName] -> HomePackageTable -> HomePackageTable\r
-retainInTopLevelEnvs keep_these hpt\r
- = listToUFM [ (mod, expectJust "retain" mb_mod_info)\r
- | mod <- keep_these\r
- , let mb_mod_info = lookupUFM hpt mod\r
- , isJust mb_mod_info ]\r
-\r
--- ---------------------------------------------------------------------------\r
--- Typecheck module loops\r
-\r
-{-\r
-See bug #930. This code fixes a long-standing bug in --make. The\r
-problem is that when compiling the modules *inside* a loop, a data\r
-type that is only defined at the top of the loop looks opaque; but\r
-after the loop is done, the structure of the data type becomes\r
-apparent.\r
-\r
-The difficulty is then that two different bits of code have\r
-different notions of what the data type looks like.\r
-\r
-The idea is that after we compile a module which also has an .hs-boot\r
-file, we re-generate the ModDetails for each of the modules that\r
-depends on the .hs-boot file, so that everyone points to the proper\r
-TyCons, Ids etc. defined by the real module, not the boot module.\r
-Fortunately re-generating a ModDetails from a ModIface is easy: the\r
-function TcIface.typecheckIface does exactly that.\r
-\r
-Picking the modules to re-typecheck is slightly tricky. Starting from\r
-the module graph consisting of the modules that have already been\r
-compiled, we reverse the edges (so they point from the imported module\r
-to the importing module), and depth-first-search from the .hs-boot\r
-node. This gives us all the modules that depend transitively on the\r
-.hs-boot module, and those are exactly the modules that we need to\r
-re-typecheck.\r
-\r
-Following this fix, GHC can compile itself with --make -O2.\r
--}\r
-\r
-reTypecheckLoop :: HscEnv -> ModSummary -> ModuleGraph -> IO HscEnv\r
-reTypecheckLoop hsc_env ms graph\r
- | not (isBootSummary ms) && \r
- any (\m -> ms_mod m == this_mod && isBootSummary m) graph\r
- = do\r
- let mss = reachableBackwards (ms_mod_name ms) graph\r
- non_boot = filter (not.isBootSummary) mss\r
- debugTraceMsg (hsc_dflags hsc_env) 2 $\r
- text "Re-typechecking loop: " <> ppr (map ms_mod_name non_boot)\r
- typecheckLoop hsc_env (map ms_mod_name non_boot)\r
- | otherwise\r
- = return hsc_env\r
- where\r
- this_mod = ms_mod ms\r
-\r
-typecheckLoop :: HscEnv -> [ModuleName] -> IO HscEnv\r
-typecheckLoop hsc_env mods = do\r
- new_hpt <-\r
- fixIO $ \new_hpt -> do\r
- let new_hsc_env = hsc_env{ hsc_HPT = new_hpt }\r
- mds <- initIfaceCheck new_hsc_env $ \r
- mapM (typecheckIface . hm_iface) hmis\r
- let new_hpt = addListToUFM old_hpt \r
- (zip mods [ hmi{ hm_details = details }\r
- | (hmi,details) <- zip hmis mds ])\r
- return new_hpt\r
- return hsc_env{ hsc_HPT = new_hpt }\r
- where\r
- old_hpt = hsc_HPT hsc_env\r
- hmis = map (expectJust "typecheckLoop" . lookupUFM old_hpt) mods\r
-\r
-reachableBackwards :: ModuleName -> [ModSummary] -> [ModSummary]\r
-reachableBackwards mod summaries\r
- = [ ms | (ms,_,_) <- reachableG (transposeG graph) root ]\r
- where -- the rest just sets up the graph:\r
- (graph, lookup_node) = moduleGraphNodes False summaries\r
- root = expectJust "reachableBackwards" (lookup_node HsBootFile mod)\r
-\r
--- ---------------------------------------------------------------------------\r
--- Topological sort of the module graph\r
-\r
-type SummaryNode = (ModSummary, Int, [Int])\r
-\r
-topSortModuleGraph\r
- :: Bool\r
- -- ^ Drop hi-boot nodes? (see below)\r
- -> [ModSummary]\r
- -> Maybe ModuleName\r
- -- ^ Root module name. If @Nothing@, use the full graph.\r
- -> [SCC ModSummary]\r
--- ^ Calculate SCCs of the module graph, possibly dropping the hi-boot nodes\r
--- The resulting list of strongly-connected-components is in topologically\r
--- sorted order, starting with the module(s) at the bottom of the\r
--- dependency graph (ie compile them first) and ending with the ones at\r
--- the top.\r
---\r
--- Drop hi-boot nodes (first boolean arg)? \r
---\r
--- - @False@: treat the hi-boot summaries as nodes of the graph,\r
--- so the graph must be acyclic\r
---\r
--- - @True@: eliminate the hi-boot nodes, and instead pretend\r
--- the a source-import of Foo is an import of Foo\r
--- The resulting graph has no hi-boot nodes, but can be cyclic\r
-\r
-topSortModuleGraph drop_hs_boot_nodes summaries mb_root_mod\r
- = map (fmap summaryNodeSummary) $ stronglyConnCompG initial_graph\r
- where\r
- (graph, lookup_node) = moduleGraphNodes drop_hs_boot_nodes summaries\r
- \r
- initial_graph = case mb_root_mod of\r
- Nothing -> graph\r
- Just root_mod ->\r
- -- restrict the graph to just those modules reachable from\r
- -- the specified module. We do this by building a graph with\r
- -- the full set of nodes, and determining the reachable set from\r
- -- the specified node.\r
- let root | Just node <- lookup_node HsSrcFile root_mod, graph `hasVertexG` node = node\r
- | otherwise = ghcError (ProgramError "module does not exist")\r
- in graphFromEdgedVertices (seq root (reachableG graph root))\r
-\r
-summaryNodeKey :: SummaryNode -> Int\r
-summaryNodeKey (_, k, _) = k\r
-\r
-summaryNodeSummary :: SummaryNode -> ModSummary\r
-summaryNodeSummary (s, _, _) = s\r
-\r
-moduleGraphNodes :: Bool -> [ModSummary]\r
- -> (Graph SummaryNode, HscSource -> ModuleName -> Maybe SummaryNode)\r
-moduleGraphNodes drop_hs_boot_nodes summaries = (graphFromEdgedVertices nodes, lookup_node)\r
- where\r
- numbered_summaries = zip summaries [1..]\r
-\r
- lookup_node :: HscSource -> ModuleName -> Maybe SummaryNode\r
- lookup_node hs_src mod = Map.lookup (mod, hs_src) node_map\r
-\r
- lookup_key :: HscSource -> ModuleName -> Maybe Int\r
- lookup_key hs_src mod = fmap summaryNodeKey (lookup_node hs_src mod)\r
-\r
- node_map :: NodeMap SummaryNode\r
- node_map = Map.fromList [ ((moduleName (ms_mod s), ms_hsc_src s), node)\r
- | node@(s, _, _) <- nodes ]\r
-\r
- -- We use integers as the keys for the SCC algorithm\r
- nodes :: [SummaryNode]\r
- nodes = [ (s, key, out_keys)\r
- | (s, key) <- numbered_summaries\r
- -- Drop the hi-boot ones if told to do so\r
- , not (isBootSummary s && drop_hs_boot_nodes)\r
- , let out_keys = out_edge_keys hs_boot_key (map unLoc (ms_home_srcimps s)) ++\r
- out_edge_keys HsSrcFile (map unLoc (ms_home_imps s)) ++\r
- (-- see [boot-edges] below\r
- if drop_hs_boot_nodes || ms_hsc_src s == HsBootFile \r
- then [] \r
- else case lookup_key HsBootFile (ms_mod_name s) of\r
- Nothing -> []\r
- Just k -> [k]) ]\r
-\r
- -- [boot-edges] if this is a .hs and there is an equivalent\r
- -- .hs-boot, add a link from the former to the latter. This\r
- -- has the effect of detecting bogus cases where the .hs-boot\r
- -- depends on the .hs, by introducing a cycle. Additionally,\r
- -- it ensures that we will always process the .hs-boot before\r
- -- the .hs, and so the HomePackageTable will always have the\r
- -- most up to date information.\r
-\r
- -- Drop hs-boot nodes by using HsSrcFile as the key\r
- hs_boot_key | drop_hs_boot_nodes = HsSrcFile\r
- | otherwise = HsBootFile\r
-\r
- out_edge_keys :: HscSource -> [ModuleName] -> [Int]\r
- out_edge_keys hi_boot ms = mapCatMaybes (lookup_key hi_boot) ms\r
- -- If we want keep_hi_boot_nodes, then we do lookup_key with\r
- -- the IsBootInterface parameter True; else False\r
-\r
-\r
-type NodeKey = (ModuleName, HscSource) -- The nodes of the graph are \r
-type NodeMap a = Map.Map NodeKey a -- keyed by (mod, src_file_type) pairs\r
-\r
-msKey :: ModSummary -> NodeKey\r
-msKey (ModSummary { ms_mod = mod, ms_hsc_src = boot }) = (moduleName mod,boot)\r
-\r
-mkNodeMap :: [ModSummary] -> NodeMap ModSummary\r
-mkNodeMap summaries = Map.fromList [ (msKey s, s) | s <- summaries]\r
- \r
-nodeMapElts :: NodeMap a -> [a]\r
-nodeMapElts = Map.elems\r
-\r
--- | If there are {-# SOURCE #-} imports between strongly connected\r
--- components in the topological sort, then those imports can\r
--- definitely be replaced by ordinary non-SOURCE imports: if SOURCE\r
--- were necessary, then the edge would be part of a cycle.\r
-warnUnnecessarySourceImports :: GhcMonad m => [SCC ModSummary] -> m ()\r
-warnUnnecessarySourceImports sccs = do\r
- logWarnings (listToBag (concatMap (check.flattenSCC) sccs))\r
- where check ms =\r
- let mods_in_this_cycle = map ms_mod_name ms in\r
- [ warn i | m <- ms, i <- ms_home_srcimps m,\r
- unLoc i `notElem` mods_in_this_cycle ]\r
-\r
- warn :: Located ModuleName -> WarnMsg\r
- warn (L loc mod) = \r
- mkPlainErrMsg loc\r
- (ptext (sLit "Warning: {-# SOURCE #-} unnecessary in import of ")\r
- <+> quotes (ppr mod))\r
-\r
------------------------------------------------------------------------------\r
--- Downsweep (dependency analysis)\r
-\r
--- Chase downwards from the specified root set, returning summaries\r
--- for all home modules encountered. Only follow source-import\r
--- links.\r
-\r
--- We pass in the previous collection of summaries, which is used as a\r
--- cache to avoid recalculating a module summary if the source is\r
--- unchanged.\r
---\r
--- The returned list of [ModSummary] nodes has one node for each home-package\r
--- module, plus one for any hs-boot files. The imports of these nodes \r
--- are all there, including the imports of non-home-package modules.\r
-\r
-downsweep :: HscEnv\r
- -> [ModSummary] -- Old summaries\r
- -> [ModuleName] -- Ignore dependencies on these; treat\r
- -- them as if they were package modules\r
- -> Bool -- True <=> allow multiple targets to have \r
- -- the same module name; this is \r
- -- very useful for ghc -M\r
- -> IO [ModSummary]\r
- -- The elts of [ModSummary] all have distinct\r
- -- (Modules, IsBoot) identifiers, unless the Bool is true\r
- -- in which case there can be repeats\r
-downsweep hsc_env old_summaries excl_mods allow_dup_roots\r
- = do\r
- rootSummaries <- mapM getRootSummary roots\r
- let root_map = mkRootMap rootSummaries\r
- checkDuplicates root_map\r
- summs <- loop (concatMap msDeps rootSummaries) root_map\r
- return summs\r
- where\r
- roots = hsc_targets hsc_env\r
-\r
- old_summary_map :: NodeMap ModSummary\r
- old_summary_map = mkNodeMap old_summaries\r
-\r
- getRootSummary :: Target -> IO ModSummary\r
- getRootSummary (Target (TargetFile file mb_phase) obj_allowed maybe_buf)\r
- = do exists <- liftIO $ doesFileExist file\r
- if exists \r
- then summariseFile hsc_env old_summaries file mb_phase \r
- obj_allowed maybe_buf\r
- else throwOneError $ mkPlainErrMsg noSrcSpan $\r
- text "can't find file:" <+> text file\r
- getRootSummary (Target (TargetModule modl) obj_allowed maybe_buf)\r
- = do maybe_summary <- summariseModule hsc_env old_summary_map False \r
- (L rootLoc modl) obj_allowed \r
- maybe_buf excl_mods\r
- case maybe_summary of\r
- Nothing -> packageModErr modl\r
- Just s -> return s\r
-\r
- rootLoc = mkGeneralSrcSpan (fsLit "<command line>")\r
-\r
- -- In a root module, the filename is allowed to diverge from the module\r
- -- name, so we have to check that there aren't multiple root files\r
- -- defining the same module (otherwise the duplicates will be silently\r
- -- ignored, leading to confusing behaviour).\r
- checkDuplicates :: NodeMap [ModSummary] -> IO ()\r
- checkDuplicates root_map \r
- | allow_dup_roots = return ()\r
- | null dup_roots = return ()\r
- | otherwise = liftIO $ multiRootsErr (head dup_roots)\r
- where\r
- dup_roots :: [[ModSummary]] -- Each at least of length 2\r
- dup_roots = filterOut isSingleton (nodeMapElts root_map)\r
-\r
- loop :: [(Located ModuleName,IsBootInterface)]\r
- -- Work list: process these modules\r
- -> NodeMap [ModSummary]\r
- -- Visited set; the range is a list because\r
- -- the roots can have the same module names\r
- -- if allow_dup_roots is True\r
- -> IO [ModSummary]\r
- -- The result includes the worklist, except\r
- -- for those mentioned in the visited set\r
- loop [] done = return (concat (nodeMapElts done))\r
- loop ((wanted_mod, is_boot) : ss) done \r
- | Just summs <- Map.lookup key done\r
- = if isSingleton summs then\r
- loop ss done\r
- else\r
- do { multiRootsErr summs; return [] }\r
- | otherwise\r
- = do mb_s <- summariseModule hsc_env old_summary_map \r
- is_boot wanted_mod True\r
- Nothing excl_mods\r
- case mb_s of\r
- Nothing -> loop ss done\r
- Just s -> loop (msDeps s ++ ss) (Map.insert key [s] done)\r
- where\r
- key = (unLoc wanted_mod, if is_boot then HsBootFile else HsSrcFile)\r
-\r
--- XXX Does the (++) here need to be flipped?\r
-mkRootMap :: [ModSummary] -> NodeMap [ModSummary]\r
-mkRootMap summaries = Map.insertListWith (flip (++))\r
- [ (msKey s, [s]) | s <- summaries ]\r
- Map.empty\r
-\r
-msDeps :: ModSummary -> [(Located ModuleName, IsBootInterface)]\r
--- (msDeps s) returns the dependencies of the ModSummary s.\r
--- A wrinkle is that for a {-# SOURCE #-} import we return\r
--- *both* the hs-boot file\r
--- *and* the source file\r
--- as "dependencies". That ensures that the list of all relevant\r
--- modules always contains B.hs if it contains B.hs-boot.\r
--- Remember, this pass isn't doing the topological sort. It's\r
--- just gathering the list of all relevant ModSummaries\r
-msDeps s = \r
- concat [ [(m,True), (m,False)] | m <- ms_home_srcimps s ] \r
- ++ [ (m,False) | m <- ms_home_imps s ] \r
-\r
-home_imps :: [Located (ImportDecl RdrName)] -> [Located ModuleName]\r
-home_imps imps = [ ideclName i | L _ i <- imps, isLocal (ideclPkgQual i) ]\r
- where isLocal Nothing = True\r
- isLocal (Just pkg) | pkg == fsLit "this" = True -- "this" is special\r
- isLocal _ = False\r
-\r
-ms_home_allimps :: ModSummary -> [ModuleName]\r
-ms_home_allimps ms = map unLoc (ms_home_srcimps ms ++ ms_home_imps ms)\r
-\r
-ms_home_srcimps :: ModSummary -> [Located ModuleName]\r
-ms_home_srcimps = home_imps . ms_srcimps\r
-\r
-ms_home_imps :: ModSummary -> [Located ModuleName]\r
-ms_home_imps = home_imps . ms_imps\r
-\r
------------------------------------------------------------------------------\r
--- Summarising modules\r
-\r
--- We have two types of summarisation:\r
---\r
--- * Summarise a file. This is used for the root module(s) passed to\r
--- cmLoadModules. The file is read, and used to determine the root\r
--- module name. The module name may differ from the filename.\r
---\r
--- * Summarise a module. We are given a module name, and must provide\r
--- a summary. The finder is used to locate the file in which the module\r
--- resides.\r
-\r
-summariseFile\r
- :: HscEnv\r
- -> [ModSummary] -- old summaries\r
- -> FilePath -- source file name\r
- -> Maybe Phase -- start phase\r
- -> Bool -- object code allowed?\r
- -> Maybe (StringBuffer,ClockTime)\r
- -> IO ModSummary\r
-\r
-summariseFile hsc_env old_summaries file mb_phase obj_allowed maybe_buf\r
- -- we can use a cached summary if one is available and the\r
- -- source file hasn't changed, But we have to look up the summary\r
- -- by source file, rather than module name as we do in summarise.\r
- | Just old_summary <- findSummaryBySourceFile old_summaries file\r
- = do\r
- let location = ms_location old_summary\r
-\r
- -- return the cached summary if the source didn't change\r
- src_timestamp <- case maybe_buf of\r
- Just (_,t) -> return t\r
- Nothing -> liftIO $ getModificationTime file\r
- -- The file exists; we checked in getRootSummary above.\r
- -- If it gets removed subsequently, then this \r
- -- getModificationTime may fail, but that's the right\r
- -- behaviour.\r
-\r
- if ms_hs_date old_summary == src_timestamp \r
- then do -- update the object-file timestamp\r
- obj_timestamp <-\r
- if isObjectTarget (hscTarget (hsc_dflags hsc_env)) \r
- || obj_allowed -- bug #1205\r
- then liftIO $ getObjTimestamp location False\r
- else return Nothing\r
- return old_summary{ ms_obj_date = obj_timestamp }\r
- else\r
- new_summary\r
-\r
- | otherwise\r
- = new_summary\r
- where\r
- new_summary = do\r
- let dflags = hsc_dflags hsc_env\r
-\r
- (dflags', hspp_fn, buf)\r
- <- preprocessFile hsc_env file mb_phase maybe_buf\r
-\r
- (srcimps,the_imps, L _ mod_name) <- getImports dflags' buf hspp_fn file\r
-\r
- -- Make a ModLocation for this file\r
- location <- liftIO $ mkHomeModLocation dflags mod_name file\r
-\r
- -- Tell the Finder cache where it is, so that subsequent calls\r
- -- to findModule will find it, even if it's not on any search path\r
- mod <- liftIO $ addHomeModuleToFinder hsc_env mod_name location\r
-\r
- src_timestamp <- case maybe_buf of\r
- Just (_,t) -> return t\r
- Nothing -> liftIO $ getModificationTime file\r
- -- getMofificationTime may fail\r
-\r
- -- when the user asks to load a source file by name, we only\r
- -- use an object file if -fobject-code is on. See #1205.\r
- obj_timestamp <-\r
- if isObjectTarget (hscTarget (hsc_dflags hsc_env)) \r
- || obj_allowed -- bug #1205\r
- then liftIO $ modificationTimeIfExists (ml_obj_file location)\r
- else return Nothing\r
-\r
- return (ModSummary { ms_mod = mod, ms_hsc_src = HsSrcFile,\r
- ms_location = location,\r
- ms_hspp_file = hspp_fn,\r
- ms_hspp_opts = dflags',\r
- ms_hspp_buf = Just buf,\r
- ms_srcimps = srcimps, ms_imps = the_imps,\r
- ms_hs_date = src_timestamp,\r
- ms_obj_date = obj_timestamp })\r
-\r
-findSummaryBySourceFile :: [ModSummary] -> FilePath -> Maybe ModSummary\r
-findSummaryBySourceFile summaries file\r
- = case [ ms | ms <- summaries, HsSrcFile <- [ms_hsc_src ms],\r
- expectJust "findSummaryBySourceFile" (ml_hs_file (ms_location ms)) == file ] of\r
- [] -> Nothing\r
- (x:_) -> Just x\r
-\r
--- Summarise a module, and pick up source and timestamp.\r
-summariseModule\r
- :: HscEnv\r
- -> NodeMap ModSummary -- Map of old summaries\r
- -> IsBootInterface -- True <=> a {-# SOURCE #-} import\r
- -> Located ModuleName -- Imported module to be summarised\r
- -> Bool -- object code allowed?\r
- -> Maybe (StringBuffer, ClockTime)\r
- -> [ModuleName] -- Modules to exclude\r
- -> IO (Maybe ModSummary) -- Its new summary\r
-\r
-summariseModule hsc_env old_summary_map is_boot (L loc wanted_mod) \r
- obj_allowed maybe_buf excl_mods\r
- | wanted_mod `elem` excl_mods\r
- = return Nothing\r
-\r
- | Just old_summary <- Map.lookup (wanted_mod, hsc_src) old_summary_map\r
- = do -- Find its new timestamp; all the \r
- -- ModSummaries in the old map have valid ml_hs_files\r
- let location = ms_location old_summary\r
- src_fn = expectJust "summariseModule" (ml_hs_file location)\r
-\r
- -- check the modification time on the source file, and\r
- -- return the cached summary if it hasn't changed. If the\r
- -- file has disappeared, we need to call the Finder again.\r
- case maybe_buf of\r
- Just (_,t) -> check_timestamp old_summary location src_fn t\r
- Nothing -> do\r
- m <- tryIO (getModificationTime src_fn)\r
- case m of\r
- Right t -> check_timestamp old_summary location src_fn t\r
- Left e | isDoesNotExistError e -> find_it\r
- | otherwise -> ioError e\r
-\r
- | otherwise = find_it\r
- where\r
- dflags = hsc_dflags hsc_env\r
-\r
- hsc_src = if is_boot then HsBootFile else HsSrcFile\r
-\r
- check_timestamp old_summary location src_fn src_timestamp\r
- | ms_hs_date old_summary == src_timestamp = do\r
- -- update the object-file timestamp\r
- obj_timestamp <- \r
- if isObjectTarget (hscTarget (hsc_dflags hsc_env))\r
- || obj_allowed -- bug #1205\r
- then getObjTimestamp location is_boot\r
- else return Nothing\r
- return (Just old_summary{ ms_obj_date = obj_timestamp })\r
- | otherwise = \r
- -- source changed: re-summarise.\r
- new_summary location (ms_mod old_summary) src_fn src_timestamp\r
-\r
- find_it = do\r
- -- Don't use the Finder's cache this time. If the module was\r
- -- previously a package module, it may have now appeared on the\r
- -- search path, so we want to consider it to be a home module. If\r
- -- the module was previously a home module, it may have moved.\r
- uncacheModule hsc_env wanted_mod\r
- found <- findImportedModule hsc_env wanted_mod Nothing\r
- case found of\r
- Found location mod \r
- | isJust (ml_hs_file location) ->\r
- -- Home package\r
- just_found location mod\r
- | otherwise -> \r
- -- Drop external-pkg\r
- ASSERT(modulePackageId mod /= thisPackage dflags)\r
- return Nothing\r
- \r
- err -> noModError dflags loc wanted_mod err\r
- -- Not found\r
-\r
- just_found location mod = do\r
- -- Adjust location to point to the hs-boot source file, \r
- -- hi file, object file, when is_boot says so\r
- let location' | is_boot = addBootSuffixLocn location\r
- | otherwise = location\r
- src_fn = expectJust "summarise2" (ml_hs_file location')\r
-\r
- -- Check that it exists\r
- -- It might have been deleted since the Finder last found it\r
- maybe_t <- modificationTimeIfExists src_fn\r
- case maybe_t of\r
- Nothing -> noHsFileErr loc src_fn\r
- Just t -> new_summary location' mod src_fn t\r
-\r
-\r
- new_summary location mod src_fn src_timestamp\r
- = do\r
- -- Preprocess the source file and get its imports\r
- -- The dflags' contains the OPTIONS pragmas\r
- (dflags', hspp_fn, buf) <- preprocessFile hsc_env src_fn Nothing maybe_buf\r
- (srcimps, the_imps, L mod_loc mod_name) <- getImports dflags' buf hspp_fn src_fn\r
-\r
- when (mod_name /= wanted_mod) $\r
- throwOneError $ mkPlainErrMsg mod_loc $ \r
- text "File name does not match module name:" \r
- $$ text "Saw:" <+> quotes (ppr mod_name)\r
- $$ text "Expected:" <+> quotes (ppr wanted_mod)\r
-\r
- -- Find the object timestamp, and return the summary\r
- obj_timestamp <-\r
- if isObjectTarget (hscTarget (hsc_dflags hsc_env))\r
- || obj_allowed -- bug #1205\r
- then getObjTimestamp location is_boot\r
- else return Nothing\r
-\r
- return (Just (ModSummary { ms_mod = mod,\r
- ms_hsc_src = hsc_src,\r
- ms_location = location,\r
- ms_hspp_file = hspp_fn,\r
- ms_hspp_opts = dflags',\r
- ms_hspp_buf = Just buf,\r
- ms_srcimps = srcimps,\r
- ms_imps = the_imps,\r
- ms_hs_date = src_timestamp,\r
- ms_obj_date = obj_timestamp }))\r
-\r
-\r
-getObjTimestamp :: ModLocation -> Bool -> IO (Maybe ClockTime)\r
-getObjTimestamp location is_boot\r
- = if is_boot then return Nothing\r
- else modificationTimeIfExists (ml_obj_file location)\r
-\r
-\r
-preprocessFile :: HscEnv\r
- -> FilePath\r
- -> Maybe Phase -- ^ Starting phase\r
- -> Maybe (StringBuffer,ClockTime)\r
- -> IO (DynFlags, FilePath, StringBuffer)\r
-preprocessFile hsc_env src_fn mb_phase Nothing\r
- = do\r
- (dflags', hspp_fn) <- preprocess hsc_env (src_fn, mb_phase)\r
- buf <- hGetStringBuffer hspp_fn\r
- return (dflags', hspp_fn, buf)\r
-\r
-preprocessFile hsc_env src_fn mb_phase (Just (buf, _time))\r
- = do\r
- let dflags = hsc_dflags hsc_env\r
- -- case we bypass the preprocessing stage?\r
- let \r
- local_opts = getOptions dflags buf src_fn\r
- --\r
- (dflags', leftovers, warns)\r
- <- parseDynamicNoPackageFlags dflags local_opts\r
- checkProcessArgsResult leftovers\r
- handleFlagWarnings dflags' warns\r
-\r
- let\r
- needs_preprocessing\r
- | Just (Unlit _) <- mb_phase = True\r
- | Nothing <- mb_phase, Unlit _ <- startPhase src_fn = True\r
- -- note: local_opts is only required if there's no Unlit phase\r
- | xopt Opt_Cpp dflags' = True\r
- | dopt Opt_Pp dflags' = True\r
- | otherwise = False\r
-\r
- when needs_preprocessing $\r
- ghcError (ProgramError "buffer needs preprocesing; interactive check disabled")\r
-\r
- return (dflags', src_fn, buf)\r
-\r
-\r
------------------------------------------------------------------------------\r
--- Error messages\r
------------------------------------------------------------------------------\r
-\r
-noModError :: DynFlags -> SrcSpan -> ModuleName -> FindResult -> IO ab\r
--- ToDo: we don't have a proper line number for this error\r
-noModError dflags loc wanted_mod err\r
- = throwOneError $ mkPlainErrMsg loc $ cannotFindModule dflags wanted_mod err\r
- \r
-noHsFileErr :: SrcSpan -> String -> IO a\r
-noHsFileErr loc path\r
- = throwOneError $ mkPlainErrMsg loc $ text "Can't find" <+> text path\r
- \r
-packageModErr :: ModuleName -> IO a\r
-packageModErr mod\r
- = throwOneError $ mkPlainErrMsg noSrcSpan $\r
- text "module" <+> quotes (ppr mod) <+> text "is a package module"\r
-\r
-multiRootsErr :: [ModSummary] -> IO ()\r
-multiRootsErr [] = panic "multiRootsErr"\r
-multiRootsErr summs@(summ1:_)\r
- = throwOneError $ mkPlainErrMsg noSrcSpan $\r
- text "module" <+> quotes (ppr mod) <+> \r
- text "is defined in multiple files:" <+>\r
- sep (map text files)\r
- where\r
- mod = ms_mod summ1\r
- files = map (expectJust "checkDup" . ml_hs_file . ms_location) summs\r
-\r
-cyclicModuleErr :: [ModSummary] -> SDoc\r
-cyclicModuleErr ms\r
- = hang (ptext (sLit "Module imports form a cycle for modules:"))\r
- 2 (vcat (map show_one ms))\r
- where\r
- mods_in_cycle = map ms_mod_name ms\r
- imp_modname = unLoc . ideclName . unLoc\r
- just_in_cycle = filter ((`elem` mods_in_cycle) . imp_modname)\r
-\r
- show_one ms = \r
- vcat [ show_mod (ms_hsc_src ms) (ms_mod_name ms) <+>\r
- maybe empty (parens . text) (ml_hs_file (ms_location ms)),\r
- nest 2 $ ptext (sLit "imports:") <+> vcat [\r
- pp_imps HsBootFile (just_in_cycle $ ms_srcimps ms),\r
- pp_imps HsSrcFile (just_in_cycle $ ms_imps ms) ]\r
- ]\r
- show_mod hsc_src mod = ppr mod <> text (hscSourceString hsc_src)\r
- pp_imps src imps = fsep (map (show_mod src . unLoc . ideclName . unLoc) imps)\r
+-- -----------------------------------------------------------------------------
+--
+-- (c) The University of Glasgow, 2011
+--
+-- This module implements multi-module compilation, and is used
+-- by --make and GHCi.
+--
+-- -----------------------------------------------------------------------------
+
+module GhcMake(
+ depanal,
+ load, LoadHowMuch(..),
+
+ topSortModuleGraph,
+
+ noModError, cyclicModuleErr
+ ) where
+
+#include "HsVersions.h"
+
+#ifdef GHCI
+import qualified Linker ( unload )
+#endif
+
+import DriverPipeline
+import DriverPhases
+import GhcMonad
+import Module
+import HscTypes
+import ErrUtils
+import DynFlags
+import HsSyn hiding ((<.>))
+import Finder
+import HeaderInfo
+import TcIface ( typecheckIface )
+import TcRnMonad ( initIfaceCheck )
+import RdrName ( RdrName )
+
+import Exception ( evaluate, tryIO )
+import Panic
+import SysTools
+import BasicTypes
+import SrcLoc
+import Util
+import Digraph
+import Bag ( listToBag )
+import Maybes ( expectJust, mapCatMaybes )
+import StringBuffer
+import FastString
+import Outputable
+import UniqFM
+
+import qualified Data.Map as Map
+import qualified FiniteMap as Map( insertListWith)
+
+import System.Directory ( doesFileExist, getModificationTime )
+import System.IO ( fixIO )
+import System.IO.Error ( isDoesNotExistError )
+import System.Time ( ClockTime )
+import System.FilePath
+import Control.Monad
+import Data.Maybe
+import Data.List
+import qualified Data.List as List
+
+-- -----------------------------------------------------------------------------
+-- Loading the program
+
+-- | Perform a dependency analysis starting from the current targets
+-- and update the session with the new module graph.
+--
+-- Dependency analysis entails parsing the @import@ directives and may
+-- therefore require running certain preprocessors.
+--
+-- Note that each 'ModSummary' in the module graph caches its 'DynFlags'.
+-- These 'DynFlags' are determined by the /current/ session 'DynFlags' and the
+-- @OPTIONS@ and @LANGUAGE@ pragmas of the parsed module. Thus if you want
+-- changes to the 'DynFlags' to take effect you need to call this function
+-- again.
+--
+depanal :: GhcMonad m =>
+ [ModuleName] -- ^ excluded modules
+ -> Bool -- ^ allow duplicate roots
+ -> m ModuleGraph
+depanal excluded_mods allow_dup_roots = do
+ hsc_env <- getSession
+ let
+ dflags = hsc_dflags hsc_env
+ targets = hsc_targets hsc_env
+ old_graph = hsc_mod_graph hsc_env
+
+ liftIO $ showPass dflags "Chasing dependencies"
+ liftIO $ debugTraceMsg dflags 2 (hcat [
+ text "Chasing modules from: ",
+ hcat (punctuate comma (map pprTarget targets))])
+
+ mod_graph <- liftIO $ downsweep hsc_env old_graph excluded_mods allow_dup_roots
+ modifySession $ \_ -> hsc_env { hsc_mod_graph = mod_graph }
+ return mod_graph
+
+-- | Describes which modules of the module graph need to be loaded.
+data LoadHowMuch
+ = LoadAllTargets
+ -- ^ Load all targets and their dependencies.
+ | LoadUpTo ModuleName
+ -- ^ Load only the given module and its dependencies.
+ | LoadDependenciesOf ModuleName
+ -- ^ Load only the dependencies of the given module, but not the module
+ -- itself.
+
+-- | Try to load the program. See 'LoadHowMuch' for the different modes.
+--
+-- This function implements the core of GHC's @--make@ mode. It preprocesses,
+-- compiles and loads the specified modules, avoiding re-compilation wherever
+-- possible. Depending on the target (see 'DynFlags.hscTarget') compilating
+-- and loading may result in files being created on disk.
+--
+-- Calls the 'reportModuleCompilationResult' callback after compiling
+-- each module, whether successful or not.
+--
+-- Throw a 'SourceError' if errors are encountered before the actual
+-- compilation starts (e.g., during dependency analysis). All other errors
+-- are reported using the callback.
+--
+load :: GhcMonad m => LoadHowMuch -> m SuccessFlag
+load how_much = do
+ mod_graph <- depanal [] False
+ load2 how_much mod_graph
+
+load2 :: GhcMonad m => LoadHowMuch -> [ModSummary]
+ -> m SuccessFlag
+load2 how_much mod_graph = do
+ guessOutputFile
+ hsc_env <- getSession
+
+ let hpt1 = hsc_HPT hsc_env
+ let dflags = hsc_dflags hsc_env
+
+ -- The "bad" boot modules are the ones for which we have
+ -- B.hs-boot in the module graph, but no B.hs
+ -- The downsweep should have ensured this does not happen
+ -- (see msDeps)
+ let all_home_mods = [ms_mod_name s
+ | s <- mod_graph, not (isBootSummary s)]
+ bad_boot_mods = [s | s <- mod_graph, isBootSummary s,
+ not (ms_mod_name s `elem` all_home_mods)]
+ ASSERT( null bad_boot_mods ) return ()
+
+ -- check that the module given in HowMuch actually exists, otherwise
+ -- topSortModuleGraph will bomb later.
+ let checkHowMuch (LoadUpTo m) = checkMod m
+ checkHowMuch (LoadDependenciesOf m) = checkMod m
+ checkHowMuch _ = id
+
+ checkMod m and_then
+ | m `elem` all_home_mods = and_then
+ | otherwise = do
+ liftIO $ errorMsg dflags (text "no such module:" <+>
+ quotes (ppr m))
+ return Failed
+
+ checkHowMuch how_much $ do
+
+ -- mg2_with_srcimps drops the hi-boot nodes, returning a
+ -- graph with cycles. Among other things, it is used for
+ -- backing out partially complete cycles following a failed
+ -- upsweep, and for removing from hpt all the modules
+ -- not in strict downwards closure, during calls to compile.
+ let mg2_with_srcimps :: [SCC ModSummary]
+ mg2_with_srcimps = topSortModuleGraph True mod_graph Nothing
+
+ -- If we can determine that any of the {-# SOURCE #-} imports
+ -- are definitely unnecessary, then emit a warning.
+ warnUnnecessarySourceImports mg2_with_srcimps
+
+ let
+ -- check the stability property for each module.
+ stable_mods@(stable_obj,stable_bco)
+ = checkStability hpt1 mg2_with_srcimps all_home_mods
+
+ -- prune bits of the HPT which are definitely redundant now,
+ -- to save space.
+ pruned_hpt = pruneHomePackageTable hpt1
+ (flattenSCCs mg2_with_srcimps)
+ stable_mods
+
+ _ <- liftIO $ evaluate pruned_hpt
+
+ -- before we unload anything, make sure we don't leave an old
+ -- interactive context around pointing to dead bindings. Also,
+ -- write the pruned HPT to allow the old HPT to be GC'd.
+ modifySession $ \_ -> hsc_env{ hsc_IC = emptyInteractiveContext,
+ hsc_HPT = pruned_hpt }
+
+ liftIO $ debugTraceMsg dflags 2 (text "Stable obj:" <+> ppr stable_obj $$
+ text "Stable BCO:" <+> ppr stable_bco)
+
+ -- Unload any modules which are going to be re-linked this time around.
+ let stable_linkables = [ linkable
+ | m <- stable_obj++stable_bco,
+ Just hmi <- [lookupUFM pruned_hpt m],
+ Just linkable <- [hm_linkable hmi] ]
+ liftIO $ unload hsc_env stable_linkables
+
+ -- We could at this point detect cycles which aren't broken by
+ -- a source-import, and complain immediately, but it seems better
+ -- to let upsweep_mods do this, so at least some useful work gets
+ -- done before the upsweep is abandoned.
+ --hPutStrLn stderr "after tsort:\n"
+ --hPutStrLn stderr (showSDoc (vcat (map ppr mg2)))
+
+ -- Now do the upsweep, calling compile for each module in
+ -- turn. Final result is version 3 of everything.
+
+ -- Topologically sort the module graph, this time including hi-boot
+ -- nodes, and possibly just including the portion of the graph
+ -- reachable from the module specified in the 2nd argument to load.
+ -- This graph should be cycle-free.
+ -- If we're restricting the upsweep to a portion of the graph, we
+ -- also want to retain everything that is still stable.
+ let full_mg :: [SCC ModSummary]
+ full_mg = topSortModuleGraph False mod_graph Nothing
+
+ maybe_top_mod = case how_much of
+ LoadUpTo m -> Just m
+ LoadDependenciesOf m -> Just m
+ _ -> Nothing
+
+ partial_mg0 :: [SCC ModSummary]
+ partial_mg0 = topSortModuleGraph False mod_graph maybe_top_mod
+
+ -- LoadDependenciesOf m: we want the upsweep to stop just
+ -- short of the specified module (unless the specified module
+ -- is stable).
+ partial_mg
+ | LoadDependenciesOf _mod <- how_much
+ = ASSERT( case last partial_mg0 of
+ AcyclicSCC ms -> ms_mod_name ms == _mod; _ -> False )
+ List.init partial_mg0
+ | otherwise
+ = partial_mg0
+
+ stable_mg =
+ [ AcyclicSCC ms
+ | AcyclicSCC ms <- full_mg,
+ ms_mod_name ms `elem` stable_obj++stable_bco,
+ ms_mod_name ms `notElem` [ ms_mod_name ms' |
+ AcyclicSCC ms' <- partial_mg ] ]
+
+ mg = stable_mg ++ partial_mg
+
+ -- clean up between compilations
+ let cleanup hsc_env = intermediateCleanTempFiles dflags
+ (flattenSCCs mg2_with_srcimps)
+ hsc_env
+
+ liftIO $ debugTraceMsg dflags 2 (hang (text "Ready for upsweep")
+ 2 (ppr mg))
+
+ setSession hsc_env{ hsc_HPT = emptyHomePackageTable }
+ (upsweep_ok, modsUpswept)
+ <- upsweep pruned_hpt stable_mods cleanup mg
+
+ -- Make modsDone be the summaries for each home module now
+ -- available; this should equal the domain of hpt3.
+ -- Get it in a roughly top .. bottom order (hence reverse).
+
+ let modsDone = reverse modsUpswept
+
+ -- Try and do linking in some form, depending on whether the
+ -- upsweep was completely or only partially successful.
+
+ if succeeded upsweep_ok
+
+ then
+ -- Easy; just relink it all.
+ do liftIO $ debugTraceMsg dflags 2 (text "Upsweep completely successful.")
+
+ -- Clean up after ourselves
+ hsc_env1 <- getSession
+ liftIO $ intermediateCleanTempFiles dflags modsDone hsc_env1
+
+ -- Issue a warning for the confusing case where the user
+ -- said '-o foo' but we're not going to do any linking.
+ -- We attempt linking if either (a) one of the modules is
+ -- called Main, or (b) the user said -no-hs-main, indicating
+ -- that main() is going to come from somewhere else.
+ --
+ let ofile = outputFile dflags
+ let no_hs_main = dopt Opt_NoHsMain dflags
+ let
+ main_mod = mainModIs dflags
+ a_root_is_Main = any ((==main_mod).ms_mod) mod_graph
+ do_linking = a_root_is_Main || no_hs_main || ghcLink dflags == LinkDynLib
+
+ when (ghcLink dflags == LinkBinary
+ && isJust ofile && not do_linking) $
+ liftIO $ debugTraceMsg dflags 1 $
+ text ("Warning: output was redirected with -o, " ++
+ "but no output will be generated\n" ++
+ "because there is no " ++
+ moduleNameString (moduleName main_mod) ++ " module.")
+
+ -- link everything together
+ linkresult <- liftIO $ link (ghcLink dflags) dflags do_linking (hsc_HPT hsc_env1)
+
+ loadFinish Succeeded linkresult
+
+ else
+ -- Tricky. We need to back out the effects of compiling any
+ -- half-done cycles, both so as to clean up the top level envs
+ -- and to avoid telling the interactive linker to link them.
+ do liftIO $ debugTraceMsg dflags 2 (text "Upsweep partially successful.")
+
+ let modsDone_names
+ = map ms_mod modsDone
+ let mods_to_zap_names
+ = findPartiallyCompletedCycles modsDone_names
+ mg2_with_srcimps
+ let mods_to_keep
+ = filter ((`notElem` mods_to_zap_names).ms_mod)
+ modsDone
+
+ hsc_env1 <- getSession
+ let hpt4 = retainInTopLevelEnvs (map ms_mod_name mods_to_keep)
+ (hsc_HPT hsc_env1)
+
+ -- Clean up after ourselves
+ liftIO $ intermediateCleanTempFiles dflags mods_to_keep hsc_env1
+
+ -- there should be no Nothings where linkables should be, now
+ ASSERT(all (isJust.hm_linkable)
+ (eltsUFM (hsc_HPT hsc_env))) do
+
+ -- Link everything together
+ linkresult <- liftIO $ link (ghcLink dflags) dflags False hpt4
+
+ modifySession $ \hsc_env -> hsc_env{ hsc_HPT = hpt4 }
+ loadFinish Failed linkresult
+
+-- Finish up after a load.
+
+-- If the link failed, unload everything and return.
+loadFinish :: GhcMonad m =>
+ SuccessFlag -> SuccessFlag
+ -> m SuccessFlag
+loadFinish _all_ok Failed
+ = do hsc_env <- getSession
+ liftIO $ unload hsc_env []
+ modifySession discardProg
+ return Failed
+
+-- Empty the interactive context and set the module context to the topmost
+-- newly loaded module, or the Prelude if none were loaded.
+loadFinish all_ok Succeeded
+ = do modifySession $ \hsc_env -> hsc_env{ hsc_IC = emptyInteractiveContext }
+ return all_ok
+
+
+-- Forget the current program, but retain the persistent info in HscEnv
+discardProg :: HscEnv -> HscEnv
+discardProg hsc_env
+ = hsc_env { hsc_mod_graph = emptyMG,
+ hsc_IC = emptyInteractiveContext,
+ hsc_HPT = emptyHomePackageTable }
+
+intermediateCleanTempFiles :: DynFlags -> [ModSummary] -> HscEnv -> IO ()
+intermediateCleanTempFiles dflags summaries hsc_env
+ = cleanTempFilesExcept dflags except
+ where
+ except =
+ -- Save preprocessed files. The preprocessed file *might* be
+ -- the same as the source file, but that doesn't do any
+ -- harm.
+ map ms_hspp_file summaries ++
+ -- Save object files for loaded modules. The point of this
+ -- is that we might have generated and compiled a stub C
+ -- file, and in the case of GHCi the object file will be a
+ -- temporary file which we must not remove because we need
+ -- to load/link it later.
+ hptObjs (hsc_HPT hsc_env)
+
+-- | If there is no -o option, guess the name of target executable
+-- by using top-level source file name as a base.
+guessOutputFile :: GhcMonad m => m ()
+guessOutputFile = modifySession $ \env ->
+ let dflags = hsc_dflags env
+ mod_graph = hsc_mod_graph env
+ mainModuleSrcPath :: Maybe String
+ mainModuleSrcPath = do
+ let isMain = (== mainModIs dflags) . ms_mod
+ [ms] <- return (filter isMain mod_graph)
+ ml_hs_file (ms_location ms)
+ name = fmap dropExtension mainModuleSrcPath
+
+#if defined(mingw32_HOST_OS)
+ -- we must add the .exe extension unconditionally here, otherwise
+ -- when name has an extension of its own, the .exe extension will
+ -- not be added by DriverPipeline.exeFileName. See #2248
+ name_exe = fmap (<.> "exe") name
+#else
+ name_exe = name
+#endif
+ in
+ case outputFile dflags of
+ Just _ -> env
+ Nothing -> env { hsc_dflags = dflags { outputFile = name_exe } }
+
+-- -----------------------------------------------------------------------------
+
+-- | Prune the HomePackageTable
+--
+-- Before doing an upsweep, we can throw away:
+--
+-- - For non-stable modules:
+-- - all ModDetails, all linked code
+-- - all unlinked code that is out of date with respect to
+-- the source file
+--
+-- This is VERY IMPORTANT otherwise we'll end up requiring 2x the
+-- space at the end of the upsweep, because the topmost ModDetails of the
+-- old HPT holds on to the entire type environment from the previous
+-- compilation.
+
+pruneHomePackageTable
+ :: HomePackageTable
+ -> [ModSummary]
+ -> ([ModuleName],[ModuleName])
+ -> HomePackageTable
+
+pruneHomePackageTable hpt summ (stable_obj, stable_bco)
+ = mapUFM prune hpt
+ where prune hmi
+ | is_stable modl = hmi'
+ | otherwise = hmi'{ hm_details = emptyModDetails }
+ where
+ modl = moduleName (mi_module (hm_iface hmi))
+ hmi' | Just l <- hm_linkable hmi, linkableTime l < ms_hs_date ms
+ = hmi{ hm_linkable = Nothing }
+ | otherwise
+ = hmi
+ where ms = expectJust "prune" (lookupUFM ms_map modl)
+
+ ms_map = listToUFM [(ms_mod_name ms, ms) | ms <- summ]
+
+ is_stable m = m `elem` stable_obj || m `elem` stable_bco
+
+-- -----------------------------------------------------------------------------
+
+-- Return (names of) all those in modsDone who are part of a cycle
+-- as defined by theGraph.
+findPartiallyCompletedCycles :: [Module] -> [SCC ModSummary] -> [Module]
+findPartiallyCompletedCycles modsDone theGraph
+ = chew theGraph
+ where
+ chew [] = []
+ chew ((AcyclicSCC _):rest) = chew rest -- acyclic? not interesting.
+ chew ((CyclicSCC vs):rest)
+ = let names_in_this_cycle = nub (map ms_mod vs)
+ mods_in_this_cycle
+ = nub ([done | done <- modsDone,
+ done `elem` names_in_this_cycle])
+ chewed_rest = chew rest
+ in
+ if notNull mods_in_this_cycle
+ && length mods_in_this_cycle < length names_in_this_cycle
+ then mods_in_this_cycle ++ chewed_rest
+ else chewed_rest
+
+
+-- ---------------------------------------------------------------------------
+-- Unloading
+
+unload :: HscEnv -> [Linkable] -> IO ()
+unload hsc_env stable_linkables -- Unload everything *except* 'stable_linkables'
+ = case ghcLink (hsc_dflags hsc_env) of
+#ifdef GHCI
+ LinkInMemory -> Linker.unload (hsc_dflags hsc_env) stable_linkables
+#else
+ LinkInMemory -> panic "unload: no interpreter"
+ -- urgh. avoid warnings:
+ hsc_env stable_linkables
+#endif
+ _other -> return ()
+
+-- -----------------------------------------------------------------------------
+
+{- |
+
+ Stability tells us which modules definitely do not need to be recompiled.
+ There are two main reasons for having stability:
+
+ - avoid doing a complete upsweep of the module graph in GHCi when
+ modules near the bottom of the tree have not changed.
+
+ - to tell GHCi when it can load object code: we can only load object code
+ for a module when we also load object code for all of the imports of the
+ module. So we need to know that we will definitely not be recompiling
+ any of these modules, and we can use the object code.
+
+ The stability check is as follows. Both stableObject and
+ stableBCO are used during the upsweep phase later.
+
+@
+ stable m = stableObject m || stableBCO m
+
+ stableObject m =
+ all stableObject (imports m)
+ && old linkable does not exist, or is == on-disk .o
+ && date(on-disk .o) > date(.hs)
+
+ stableBCO m =
+ all stable (imports m)
+ && date(BCO) > date(.hs)
+@
+
+ These properties embody the following ideas:
+
+ - if a module is stable, then:
+
+ - if it has been compiled in a previous pass (present in HPT)
+ then it does not need to be compiled or re-linked.
+
+ - if it has not been compiled in a previous pass,
+ then we only need to read its .hi file from disk and
+ link it to produce a 'ModDetails'.
+
+ - if a module is not stable, we will definitely be at least
+ re-linking, and possibly re-compiling it during the 'upsweep'.
+ All non-stable modules can (and should) therefore be unlinked
+ before the 'upsweep'.
+
+ - Note that objects are only considered stable if they only depend
+ on other objects. We can't link object code against byte code.
+-}
+
+checkStability
+ :: HomePackageTable -- HPT from last compilation
+ -> [SCC ModSummary] -- current module graph (cyclic)
+ -> [ModuleName] -- all home modules
+ -> ([ModuleName], -- stableObject
+ [ModuleName]) -- stableBCO
+
+checkStability hpt sccs all_home_mods = foldl checkSCC ([],[]) sccs
+ where
+ checkSCC (stable_obj, stable_bco) scc0
+ | stableObjects = (scc_mods ++ stable_obj, stable_bco)
+ | stableBCOs = (stable_obj, scc_mods ++ stable_bco)
+ | otherwise = (stable_obj, stable_bco)
+ where
+ scc = flattenSCC scc0
+ scc_mods = map ms_mod_name scc
+ home_module m = m `elem` all_home_mods && m `notElem` scc_mods
+
+ scc_allimps = nub (filter home_module (concatMap ms_home_allimps scc))
+ -- all imports outside the current SCC, but in the home pkg
+
+ stable_obj_imps = map (`elem` stable_obj) scc_allimps
+ stable_bco_imps = map (`elem` stable_bco) scc_allimps
+
+ stableObjects =
+ and stable_obj_imps
+ && all object_ok scc
+
+ stableBCOs =
+ and (zipWith (||) stable_obj_imps stable_bco_imps)
+ && all bco_ok scc
+
+ object_ok ms
+ | Just t <- ms_obj_date ms = t >= ms_hs_date ms
+ && same_as_prev t
+ | otherwise = False
+ where
+ same_as_prev t = case lookupUFM hpt (ms_mod_name ms) of
+ Just hmi | Just l <- hm_linkable hmi
+ -> isObjectLinkable l && t == linkableTime l
+ _other -> True
+ -- why '>=' rather than '>' above? If the filesystem stores
+ -- times to the nearest second, we may occasionally find that
+ -- the object & source have the same modification time,
+ -- especially if the source was automatically generated
+ -- and compiled. Using >= is slightly unsafe, but it matches
+ -- make's behaviour.
+
+ bco_ok ms
+ = case lookupUFM hpt (ms_mod_name ms) of
+ Just hmi | Just l <- hm_linkable hmi ->
+ not (isObjectLinkable l) &&
+ linkableTime l >= ms_hs_date ms
+ _other -> False
+
+-- -----------------------------------------------------------------------------
+
+-- | The upsweep
+--
+-- This is where we compile each module in the module graph, in a pass
+-- from the bottom to the top of the graph.
+--
+-- There had better not be any cyclic groups here -- we check for them.
+
+upsweep
+ :: GhcMonad m
+ => HomePackageTable -- ^ HPT from last time round (pruned)
+ -> ([ModuleName],[ModuleName]) -- ^ stable modules (see checkStability)
+ -> (HscEnv -> IO ()) -- ^ How to clean up unwanted tmp files
+ -> [SCC ModSummary] -- ^ Mods to do (the worklist)
+ -> m (SuccessFlag,
+ [ModSummary])
+ -- ^ Returns:
+ --
+ -- 1. A flag whether the complete upsweep was successful.
+ -- 2. The 'HscEnv' in the monad has an updated HPT
+ -- 3. A list of modules which succeeded loading.
+
+upsweep old_hpt stable_mods cleanup sccs = do
+ (res, done) <- upsweep' old_hpt [] sccs 1 (length sccs)
+ return (res, reverse done)
+ where
+
+ upsweep' _old_hpt done
+ [] _ _
+ = return (Succeeded, done)
+
+ upsweep' _old_hpt done
+ (CyclicSCC ms:_) _ _
+ = do dflags <- getSessionDynFlags
+ liftIO $ fatalErrorMsg dflags (cyclicModuleErr ms)
+ return (Failed, done)
+
+ upsweep' old_hpt done
+ (AcyclicSCC mod:mods) mod_index nmods
+ = do -- putStrLn ("UPSWEEP_MOD: hpt = " ++
+ -- show (map (moduleUserString.moduleName.mi_module.hm_iface)
+ -- (moduleEnvElts (hsc_HPT hsc_env)))
+ let logger _mod = defaultWarnErrLogger
+
+ hsc_env <- getSession
+
+ -- Remove unwanted tmp files between compilations
+ liftIO (cleanup hsc_env)
+
+ mb_mod_info
+ <- handleSourceError
+ (\err -> do logger mod (Just err); return Nothing) $ do
+ mod_info <- liftIO $ upsweep_mod hsc_env old_hpt stable_mods
+ mod mod_index nmods
+ logger mod Nothing -- log warnings
+ return (Just mod_info)
+
+ case mb_mod_info of
+ Nothing -> return (Failed, done)
+ Just mod_info -> do
+ let this_mod = ms_mod_name mod
+
+ -- Add new info to hsc_env
+ hpt1 = addToUFM (hsc_HPT hsc_env) this_mod mod_info
+ hsc_env1 = hsc_env { hsc_HPT = hpt1 }
+
+ -- Space-saving: delete the old HPT entry
+ -- for mod BUT if mod is a hs-boot
+ -- node, don't delete it. For the
+ -- interface, the HPT entry is probably for the
+ -- main Haskell source file. Deleting it
+ -- would force the real module to be recompiled
+ -- every time.
+ old_hpt1 | isBootSummary mod = old_hpt
+ | otherwise = delFromUFM old_hpt this_mod
+
+ done' = mod:done
+
+ -- fixup our HomePackageTable after we've finished compiling
+ -- a mutually-recursive loop. See reTypecheckLoop, below.
+ hsc_env2 <- liftIO $ reTypecheckLoop hsc_env1 mod done'
+ setSession hsc_env2
+
+ upsweep' old_hpt1 done' mods (mod_index+1) nmods
+
+-- | Compile a single module. Always produce a Linkable for it if
+-- successful. If no compilation happened, return the old Linkable.
+upsweep_mod :: HscEnv
+ -> HomePackageTable
+ -> ([ModuleName],[ModuleName])
+ -> ModSummary
+ -> Int -- index of module
+ -> Int -- total number of modules
+ -> IO HomeModInfo
+
+upsweep_mod hsc_env old_hpt (stable_obj, stable_bco) summary mod_index nmods
+ = let
+ this_mod_name = ms_mod_name summary
+ this_mod = ms_mod summary
+ mb_obj_date = ms_obj_date summary
+ obj_fn = ml_obj_file (ms_location summary)
+ hs_date = ms_hs_date summary
+
+ is_stable_obj = this_mod_name `elem` stable_obj
+ is_stable_bco = this_mod_name `elem` stable_bco
+
+ old_hmi = lookupUFM old_hpt this_mod_name
+
+ -- We're using the dflags for this module now, obtained by
+ -- applying any options in its LANGUAGE & OPTIONS_GHC pragmas.
+ dflags = ms_hspp_opts summary
+ prevailing_target = hscTarget (hsc_dflags hsc_env)
+ local_target = hscTarget dflags
+
+ -- If OPTIONS_GHC contains -fasm or -fvia-C, be careful that
+ -- we don't do anything dodgy: these should only work to change
+ -- from -fvia-C to -fasm and vice-versa, otherwise we could
+ -- end up trying to link object code to byte code.
+ target = if prevailing_target /= local_target
+ && (not (isObjectTarget prevailing_target)
+ || not (isObjectTarget local_target))
+ then prevailing_target
+ else local_target
+
+ -- store the corrected hscTarget into the summary
+ summary' = summary{ ms_hspp_opts = dflags { hscTarget = target } }
+
+ -- The old interface is ok if
+ -- a) we're compiling a source file, and the old HPT
+ -- entry is for a source file
+ -- b) we're compiling a hs-boot file
+ -- Case (b) allows an hs-boot file to get the interface of its
+ -- real source file on the second iteration of the compilation
+ -- manager, but that does no harm. Otherwise the hs-boot file
+ -- will always be recompiled
+
+ mb_old_iface
+ = case old_hmi of
+ Nothing -> Nothing
+ Just hm_info | isBootSummary summary -> Just iface
+ | not (mi_boot iface) -> Just iface
+ | otherwise -> Nothing
+ where
+ iface = hm_iface hm_info
+
+ compile_it :: Maybe Linkable -> IO HomeModInfo
+ compile_it mb_linkable =
+ compile hsc_env summary' mod_index nmods
+ mb_old_iface mb_linkable
+
+ compile_it_discard_iface :: Maybe Linkable -> IO HomeModInfo
+ compile_it_discard_iface mb_linkable =
+ compile hsc_env summary' mod_index nmods
+ Nothing mb_linkable
+
+ -- With the HscNothing target we create empty linkables to avoid
+ -- recompilation. We have to detect these to recompile anyway if
+ -- the target changed since the last compile.
+ is_fake_linkable
+ | Just hmi <- old_hmi, Just l <- hm_linkable hmi =
+ null (linkableUnlinked l)
+ | otherwise =
+ -- we have no linkable, so it cannot be fake
+ False
+
+ implies False _ = True
+ implies True x = x
+
+ in
+ case () of
+ _
+ -- Regardless of whether we're generating object code or
+ -- byte code, we can always use an existing object file
+ -- if it is *stable* (see checkStability).
+ | is_stable_obj, Just hmi <- old_hmi -> do
+ liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5
+ (text "skipping stable obj mod:" <+> ppr this_mod_name)
+ return hmi
+ -- object is stable, and we have an entry in the
+ -- old HPT: nothing to do
+
+ | is_stable_obj, isNothing old_hmi -> do
+ liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5
+ (text "compiling stable on-disk mod:" <+> ppr this_mod_name)
+ linkable <- liftIO $ findObjectLinkable this_mod obj_fn
+ (expectJust "upsweep1" mb_obj_date)
+ compile_it (Just linkable)
+ -- object is stable, but we need to load the interface
+ -- off disk to make a HMI.
+
+ | not (isObjectTarget target), is_stable_bco,
+ (target /= HscNothing) `implies` not is_fake_linkable ->
+ ASSERT(isJust old_hmi) -- must be in the old_hpt
+ let Just hmi = old_hmi in do
+ liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5
+ (text "skipping stable BCO mod:" <+> ppr this_mod_name)
+ return hmi
+ -- BCO is stable: nothing to do
+
+ | not (isObjectTarget target),
+ Just hmi <- old_hmi,
+ Just l <- hm_linkable hmi,
+ not (isObjectLinkable l),
+ (target /= HscNothing) `implies` not is_fake_linkable,
+ linkableTime l >= ms_hs_date summary -> do
+ liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5
+ (text "compiling non-stable BCO mod:" <+> ppr this_mod_name)
+ compile_it (Just l)
+ -- we have an old BCO that is up to date with respect
+ -- to the source: do a recompilation check as normal.
+
+ -- When generating object code, if there's an up-to-date
+ -- object file on the disk, then we can use it.
+ -- However, if the object file is new (compared to any
+ -- linkable we had from a previous compilation), then we
+ -- must discard any in-memory interface, because this
+ -- means the user has compiled the source file
+ -- separately and generated a new interface, that we must
+ -- read from the disk.
+ --
+ | isObjectTarget target,
+ Just obj_date <- mb_obj_date,
+ obj_date >= hs_date -> do
+ case old_hmi of
+ Just hmi
+ | Just l <- hm_linkable hmi,
+ isObjectLinkable l && linkableTime l == obj_date -> do
+ liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5
+ (text "compiling mod with new on-disk obj:" <+> ppr this_mod_name)
+ compile_it (Just l)
+ _otherwise -> do
+ liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5
+ (text "compiling mod with new on-disk obj2:" <+> ppr this_mod_name)
+ linkable <- liftIO $ findObjectLinkable this_mod obj_fn obj_date
+ compile_it_discard_iface (Just linkable)
+
+ _otherwise -> do
+ liftIO $ debugTraceMsg (hsc_dflags hsc_env) 5
+ (text "compiling mod:" <+> ppr this_mod_name)
+ compile_it Nothing
+
+
+
+-- Filter modules in the HPT
+retainInTopLevelEnvs :: [ModuleName] -> HomePackageTable -> HomePackageTable
+retainInTopLevelEnvs keep_these hpt
+ = listToUFM [ (mod, expectJust "retain" mb_mod_info)
+ | mod <- keep_these
+ , let mb_mod_info = lookupUFM hpt mod
+ , isJust mb_mod_info ]
+
+-- ---------------------------------------------------------------------------
+-- Typecheck module loops
+
+{-
+See bug #930. This code fixes a long-standing bug in --make. The
+problem is that when compiling the modules *inside* a loop, a data
+type that is only defined at the top of the loop looks opaque; but
+after the loop is done, the structure of the data type becomes
+apparent.
+
+The difficulty is then that two different bits of code have
+different notions of what the data type looks like.
+
+The idea is that after we compile a module which also has an .hs-boot
+file, we re-generate the ModDetails for each of the modules that
+depends on the .hs-boot file, so that everyone points to the proper
+TyCons, Ids etc. defined by the real module, not the boot module.
+Fortunately re-generating a ModDetails from a ModIface is easy: the
+function TcIface.typecheckIface does exactly that.
+
+Picking the modules to re-typecheck is slightly tricky. Starting from
+the module graph consisting of the modules that have already been
+compiled, we reverse the edges (so they point from the imported module
+to the importing module), and depth-first-search from the .hs-boot
+node. This gives us all the modules that depend transitively on the
+.hs-boot module, and those are exactly the modules that we need to
+re-typecheck.
+
+Following this fix, GHC can compile itself with --make -O2.
+-}
+
+reTypecheckLoop :: HscEnv -> ModSummary -> ModuleGraph -> IO HscEnv
+reTypecheckLoop hsc_env ms graph
+ | not (isBootSummary ms) &&
+ any (\m -> ms_mod m == this_mod && isBootSummary m) graph
+ = do
+ let mss = reachableBackwards (ms_mod_name ms) graph
+ non_boot = filter (not.isBootSummary) mss
+ debugTraceMsg (hsc_dflags hsc_env) 2 $
+ text "Re-typechecking loop: " <> ppr (map ms_mod_name non_boot)
+ typecheckLoop hsc_env (map ms_mod_name non_boot)
+ | otherwise
+ = return hsc_env
+ where
+ this_mod = ms_mod ms
+
+typecheckLoop :: HscEnv -> [ModuleName] -> IO HscEnv
+typecheckLoop hsc_env mods = do
+ new_hpt <-
+ fixIO $ \new_hpt -> do
+ let new_hsc_env = hsc_env{ hsc_HPT = new_hpt }
+ mds <- initIfaceCheck new_hsc_env $
+ mapM (typecheckIface . hm_iface) hmis
+ let new_hpt = addListToUFM old_hpt
+ (zip mods [ hmi{ hm_details = details }
+ | (hmi,details) <- zip hmis mds ])
+ return new_hpt
+ return hsc_env{ hsc_HPT = new_hpt }
+ where
+ old_hpt = hsc_HPT hsc_env
+ hmis = map (expectJust "typecheckLoop" . lookupUFM old_hpt) mods
+
+reachableBackwards :: ModuleName -> [ModSummary] -> [ModSummary]
+reachableBackwards mod summaries
+ = [ ms | (ms,_,_) <- reachableG (transposeG graph) root ]
+ where -- the rest just sets up the graph:
+ (graph, lookup_node) = moduleGraphNodes False summaries
+ root = expectJust "reachableBackwards" (lookup_node HsBootFile mod)
+
+-- ---------------------------------------------------------------------------
+-- Topological sort of the module graph
+
+type SummaryNode = (ModSummary, Int, [Int])
+
+topSortModuleGraph
+ :: Bool
+ -- ^ Drop hi-boot nodes? (see below)
+ -> [ModSummary]
+ -> Maybe ModuleName
+ -- ^ Root module name. If @Nothing@, use the full graph.
+ -> [SCC ModSummary]
+-- ^ Calculate SCCs of the module graph, possibly dropping the hi-boot nodes
+-- The resulting list of strongly-connected-components is in topologically
+-- sorted order, starting with the module(s) at the bottom of the
+-- dependency graph (ie compile them first) and ending with the ones at
+-- the top.
+--
+-- Drop hi-boot nodes (first boolean arg)?
+--
+-- - @False@: treat the hi-boot summaries as nodes of the graph,
+-- so the graph must be acyclic
+--
+-- - @True@: eliminate the hi-boot nodes, and instead pretend
+-- that a source-import of Foo is an import of Foo
+-- The resulting graph has no hi-boot nodes, but can be cyclic
+
+topSortModuleGraph drop_hs_boot_nodes summaries mb_root_mod
+ = map (fmap summaryNodeSummary) $ stronglyConnCompG initial_graph
+ where
+ (graph, lookup_node) = moduleGraphNodes drop_hs_boot_nodes summaries
+
+ initial_graph = case mb_root_mod of
+ Nothing -> graph
+ Just root_mod ->
+ -- restrict the graph to just those modules reachable from
+ -- the specified module. We do this by building a graph with
+ -- the full set of nodes, and determining the reachable set from
+ -- the specified node.
+ let root | Just node <- lookup_node HsSrcFile root_mod, graph `hasVertexG` node = node
+ | otherwise = ghcError (ProgramError "module does not exist")
+ in graphFromEdgedVertices (seq root (reachableG graph root))
+
+summaryNodeKey :: SummaryNode -> Int
+summaryNodeKey (_, k, _) = k
+
+summaryNodeSummary :: SummaryNode -> ModSummary
+summaryNodeSummary (s, _, _) = s
+
+-- | Build the module dependency graph from a set of summaries.
+-- Nodes are keyed by Int for the SCC algorithm; the second component
+-- of the result maps a (source-file kind, module name) pair back to
+-- its node, if any. When the Bool is True, hs-boot nodes are dropped
+-- and {-# SOURCE #-} edges are resolved against the ordinary .hs
+-- nodes instead (see hs_boot_key below).
+moduleGraphNodes :: Bool -> [ModSummary]
+ -> (Graph SummaryNode, HscSource -> ModuleName -> Maybe SummaryNode)
+moduleGraphNodes drop_hs_boot_nodes summaries = (graphFromEdgedVertices nodes, lookup_node)
+ where
+ numbered_summaries = zip summaries [1..]
+
+ lookup_node :: HscSource -> ModuleName -> Maybe SummaryNode
+ lookup_node hs_src mod = Map.lookup (mod, hs_src) node_map
+
+ lookup_key :: HscSource -> ModuleName -> Maybe Int
+ lookup_key hs_src mod = fmap summaryNodeKey (lookup_node hs_src mod)
+
+ node_map :: NodeMap SummaryNode
+ node_map = Map.fromList [ ((moduleName (ms_mod s), ms_hsc_src s), node)
+ | node@(s, _, _) <- nodes ]
+
+ -- We use integers as the keys for the SCC algorithm
+ nodes :: [SummaryNode]
+ nodes = [ (s, key, out_keys)
+ | (s, key) <- numbered_summaries
+ -- Drop the hi-boot ones if told to do so
+ , not (isBootSummary s && drop_hs_boot_nodes)
+ , let out_keys = out_edge_keys hs_boot_key (map unLoc (ms_home_srcimps s)) ++
+ out_edge_keys HsSrcFile (map unLoc (ms_home_imps s)) ++
+ (-- see [boot-edges] below
+ if drop_hs_boot_nodes || ms_hsc_src s == HsBootFile
+ then []
+ else case lookup_key HsBootFile (ms_mod_name s) of
+ Nothing -> []
+ Just k -> [k]) ]
+
+ -- [boot-edges] if this is a .hs and there is an equivalent
+ -- .hs-boot, add a link from the former to the latter. This
+ -- has the effect of detecting bogus cases where the .hs-boot
+ -- depends on the .hs, by introducing a cycle. Additionally,
+ -- it ensures that we will always process the .hs-boot before
+ -- the .hs, and so the HomePackageTable will always have the
+ -- most up to date information.
+
+ -- Drop hs-boot nodes by using HsSrcFile as the key
+ hs_boot_key | drop_hs_boot_nodes = HsSrcFile
+ | otherwise = HsBootFile
+
+ out_edge_keys :: HscSource -> [ModuleName] -> [Int]
+ out_edge_keys hi_boot ms = mapCatMaybes (lookup_key hi_boot) ms
+ -- If we want keep_hi_boot_nodes, then we do lookup_key with
+ -- the IsBootInterface parameter True; else False
+
+
+type NodeKey = (ModuleName, HscSource) -- The nodes of the graph are
+type NodeMap a = Map.Map NodeKey a -- keyed by (mod, src_file_type) pairs
+
+-- | The (module name, source kind) key of a summary.
+msKey :: ModSummary -> NodeKey
+msKey (ModSummary { ms_mod = mod, ms_hsc_src = boot }) = (moduleName mod,boot)
+
+-- | Index a list of summaries by their 'msKey'.
+mkNodeMap :: [ModSummary] -> NodeMap ModSummary
+mkNodeMap summaries = Map.fromList [ (msKey s, s) | s <- summaries]
+
+-- | All values of the map, discarding the keys.
+nodeMapElts :: NodeMap a -> [a]
+nodeMapElts = Map.elems
+
+-- | If there are {-# SOURCE #-} imports between strongly connected
+-- components in the topological sort, then those imports can
+-- definitely be replaced by ordinary non-SOURCE imports: if SOURCE
+-- were necessary, then the edge would be part of a cycle.
+warnUnnecessarySourceImports :: GhcMonad m => [SCC ModSummary] -> m ()
+warnUnnecessarySourceImports sccs = do
+ logWarnings (listToBag (concatMap (check.flattenSCC) sccs))
+ where
+ -- Warn about every {-# SOURCE #-} import whose target module is
+ -- not in the importer's own cycle (so SOURCE was unnecessary).
+ check ms =
+ let mods_in_this_cycle = map ms_mod_name ms in
+ [ warn i | m <- ms, i <- ms_home_srcimps m,
+ unLoc i `notElem` mods_in_this_cycle ]
+
+ warn :: Located ModuleName -> WarnMsg
+ warn (L loc mod) =
+ mkPlainErrMsg loc
+ (ptext (sLit "Warning: {-# SOURCE #-} unnecessary in import of ")
+ <+> quotes (ppr mod))
+
+-----------------------------------------------------------------------------
+-- Downsweep (dependency analysis)
+
+-- Chase downwards from the specified root set, returning summaries
+-- for all home modules encountered. Only follow source-import
+-- links.
+
+-- We pass in the previous collection of summaries, which is used as a
+-- cache to avoid recalculating a module summary if the source is
+-- unchanged.
+--
+-- The returned list of [ModSummary] nodes has one node for each home-package
+-- module, plus one for any hs-boot files. The imports of these nodes
+-- are all there, including the imports of non-home-package modules.
+
+downsweep :: HscEnv
+ -> [ModSummary] -- Old summaries
+ -> [ModuleName] -- Ignore dependencies on these; treat
+ -- them as if they were package modules
+ -> Bool -- True <=> allow multiple targets to have
+ -- the same module name; this is
+ -- very useful for ghc -M
+ -> IO [ModSummary]
+ -- The elts of [ModSummary] all have distinct
+ -- (Modules, IsBoot) identifiers, unless the Bool is true
+ -- in which case there can be repeats
+downsweep hsc_env old_summaries excl_mods allow_dup_roots
+ = do
+ rootSummaries <- mapM getRootSummary roots
+ let root_map = mkRootMap rootSummaries
+ checkDuplicates root_map
+ summs <- loop (concatMap msDeps rootSummaries) root_map
+ return summs
+ where
+ roots = hsc_targets hsc_env
+
+ old_summary_map :: NodeMap ModSummary
+ old_summary_map = mkNodeMap old_summaries
+
+ -- Summarise one command-line target: either a file on disk or a
+ -- module name to be located by the Finder.
+ getRootSummary :: Target -> IO ModSummary
+ getRootSummary (Target (TargetFile file mb_phase) obj_allowed maybe_buf)
+ = do exists <- liftIO $ doesFileExist file
+ if exists
+ then summariseFile hsc_env old_summaries file mb_phase
+ obj_allowed maybe_buf
+ else throwOneError $ mkPlainErrMsg noSrcSpan $
+ text "can't find file:" <+> text file
+ getRootSummary (Target (TargetModule modl) obj_allowed maybe_buf)
+ = do maybe_summary <- summariseModule hsc_env old_summary_map False
+ (L rootLoc modl) obj_allowed
+ maybe_buf excl_mods
+ case maybe_summary of
+ Nothing -> packageModErr modl
+ Just s -> return s
+
+ rootLoc = mkGeneralSrcSpan (fsLit "<command line>")
+
+ -- In a root module, the filename is allowed to diverge from the module
+ -- name, so we have to check that there aren't multiple root files
+ -- defining the same module (otherwise the duplicates will be silently
+ -- ignored, leading to confusing behaviour).
+ checkDuplicates :: NodeMap [ModSummary] -> IO ()
+ checkDuplicates root_map
+ | allow_dup_roots = return ()
+ | null dup_roots = return ()
+ | otherwise = liftIO $ multiRootsErr (head dup_roots)
+ where
+ dup_roots :: [[ModSummary]] -- Each at least of length 2
+ dup_roots = filterOut isSingleton (nodeMapElts root_map)
+
+ loop :: [(Located ModuleName,IsBootInterface)]
+ -- Work list: process these modules
+ -> NodeMap [ModSummary]
+ -- Visited set; the range is a list because
+ -- the roots can have the same module names
+ -- if allow_dup_roots is True
+ -> IO [ModSummary]
+ -- The result includes the worklist, except
+ -- for those mentioned in the visited set
+ loop [] done = return (concat (nodeMapElts done))
+ loop ((wanted_mod, is_boot) : ss) done
+ | Just summs <- Map.lookup key done
+ = if isSingleton summs then
+ loop ss done
+ else
+ do { multiRootsErr summs; return [] }
+ | otherwise
+ = do mb_s <- summariseModule hsc_env old_summary_map
+ is_boot wanted_mod True
+ Nothing excl_mods
+ case mb_s of
+ Nothing -> loop ss done
+ Just s -> loop (msDeps s ++ ss) (Map.insert key [s] done)
+ where
+ -- Visited-set key for this work item; boot imports are
+ -- tracked separately from ordinary ones.
+ key = (unLoc wanted_mod, if is_boot then HsBootFile else HsSrcFile)
+
+-- Build the initial visited set for downsweep: each root summary keyed
+-- by (module name, source kind). Values are lists because several
+-- roots may share a module name when allow_dup_roots is on.
+-- XXX Does the (++) here need to be flipped?
+-- NOTE(review): (flip (++)) appends an existing entry after the newly
+-- inserted one; only the relative order of duplicate roots is at stake.
+mkRootMap :: [ModSummary] -> NodeMap [ModSummary]
+mkRootMap summaries = Map.insertListWith (flip (++))
+ [ (msKey s, [s]) | s <- summaries ]
+ Map.empty
+
+msDeps :: ModSummary -> [(Located ModuleName, IsBootInterface)]
+-- (msDeps s) returns the dependencies of the ModSummary s.
+-- A wrinkle is that for a {-# SOURCE #-} import we return
+-- *both* the hs-boot file
+-- *and* the source file
+-- as "dependencies". That ensures that the list of all relevant
+-- modules always contains B.hs if it contains B.hs-boot.
+-- Remember, this pass isn't doing the topological sort. It's
+-- just gathering the list of all relevant ModSummaries
+msDeps s =
+ -- True <=> the hs-boot dependency, False <=> the ordinary one
+ concat [ [(m,True), (m,False)] | m <- ms_home_srcimps s ]
+ ++ [ (m,False) | m <- ms_home_imps s ]
+
+-- | Select the imports that (may) refer to home-package modules:
+-- those with no package qualifier, or the special qualifier "this".
+home_imps :: [Located (ImportDecl RdrName)] -> [Located ModuleName]
+home_imps imps = [ ideclName i | L _ i <- imps, isLocal (ideclPkgQual i) ]
+ where isLocal Nothing = True
+ isLocal (Just pkg) | pkg == fsLit "this" = True -- "this" is special
+ isLocal _ = False
+
+-- | All home-package imports, both {-# SOURCE #-} and ordinary.
+ms_home_allimps :: ModSummary -> [ModuleName]
+ms_home_allimps ms = map unLoc (ms_home_srcimps ms ++ ms_home_imps ms)
+
+-- | Home-package {-# SOURCE #-} imports of a summary.
+ms_home_srcimps :: ModSummary -> [Located ModuleName]
+ms_home_srcimps = home_imps . ms_srcimps
+
+-- | Home-package ordinary imports of a summary.
+ms_home_imps :: ModSummary -> [Located ModuleName]
+ms_home_imps = home_imps . ms_imps
+
+-----------------------------------------------------------------------------
+-- Summarising modules
+
+-- We have two types of summarisation:
+--
+-- * Summarise a file. This is used for the root module(s) passed to
+-- cmLoadModules. The file is read, and used to determine the root
+-- module name. The module name may differ from the filename.
+--
+-- * Summarise a module. We are given a module name, and must provide
+-- a summary. The finder is used to locate the file in which the module
+-- resides.
+
+-- | Summarise a root module given by file name. The module name is
+-- read from the (preprocessed) source, so it may differ from the
+-- file name.
+summariseFile
+ :: HscEnv
+ -> [ModSummary] -- old summaries
+ -> FilePath -- source file name
+ -> Maybe Phase -- start phase
+ -> Bool -- object code allowed?
+ -> Maybe (StringBuffer,ClockTime)
+ -> IO ModSummary
+
+summariseFile hsc_env old_summaries file mb_phase obj_allowed maybe_buf
+ -- we can use a cached summary if one is available and the
+ -- source file hasn't changed, but we have to look up the summary
+ -- by source file, rather than module name as we do in summarise.
+ | Just old_summary <- findSummaryBySourceFile old_summaries file
+ = do
+ let location = ms_location old_summary
+
+ -- return the cached summary if the source didn't change
+ src_timestamp <- case maybe_buf of
+ Just (_,t) -> return t
+ Nothing -> liftIO $ getModificationTime file
+ -- The file exists; we checked in getRootSummary above.
+ -- If it gets removed subsequently, then this
+ -- getModificationTime may fail, but that's the right
+ -- behaviour.
+
+ if ms_hs_date old_summary == src_timestamp
+ then do -- update the object-file timestamp
+ obj_timestamp <-
+ if isObjectTarget (hscTarget (hsc_dflags hsc_env))
+ || obj_allowed -- bug #1205
+ then liftIO $ getObjTimestamp location False
+ else return Nothing
+ return old_summary{ ms_obj_date = obj_timestamp }
+ else
+ new_summary
+
+ | otherwise
+ = new_summary
+ where
+ -- Source is new or changed: preprocess it, read its imports and
+ -- module name, and build a fresh ModSummary.
+ new_summary = do
+ let dflags = hsc_dflags hsc_env
+
+ (dflags', hspp_fn, buf)
+ <- preprocessFile hsc_env file mb_phase maybe_buf
+
+ (srcimps,the_imps, L _ mod_name) <- getImports dflags' buf hspp_fn file
+
+ -- Make a ModLocation for this file
+ location <- liftIO $ mkHomeModLocation dflags mod_name file
+
+ -- Tell the Finder cache where it is, so that subsequent calls
+ -- to findModule will find it, even if it's not on any search path
+ mod <- liftIO $ addHomeModuleToFinder hsc_env mod_name location
+
+ src_timestamp <- case maybe_buf of
+ Just (_,t) -> return t
+ Nothing -> liftIO $ getModificationTime file
+ -- getModificationTime may fail
+
+ -- when the user asks to load a source file by name, we only
+ -- use an object file if -fobject-code is on. See #1205.
+ obj_timestamp <-
+ if isObjectTarget (hscTarget (hsc_dflags hsc_env))
+ || obj_allowed -- bug #1205
+ then liftIO $ modificationTimeIfExists (ml_obj_file location)
+ else return Nothing
+
+ return (ModSummary { ms_mod = mod, ms_hsc_src = HsSrcFile,
+ ms_location = location,
+ ms_hspp_file = hspp_fn,
+ ms_hspp_opts = dflags',
+ ms_hspp_buf = Just buf,
+ ms_srcimps = srcimps, ms_imps = the_imps,
+ ms_hs_date = src_timestamp,
+ ms_obj_date = obj_timestamp })
+
+-- | Look up a cached summary by its source-file path. Only ordinary
+-- (HsSrcFile) summaries are considered; returns the first match.
+findSummaryBySourceFile :: [ModSummary] -> FilePath -> Maybe ModSummary
+findSummaryBySourceFile summaries file
+ = case [ ms | ms <- summaries, HsSrcFile <- [ms_hsc_src ms],
+ expectJust "findSummaryBySourceFile" (ml_hs_file (ms_location ms)) == file ] of
+ [] -> Nothing
+ (x:_) -> Just x
+
+-- Summarise a module, and pick up source and timestamp.
+summariseModule
+ :: HscEnv
+ -> NodeMap ModSummary -- Map of old summaries
+ -> IsBootInterface -- True <=> a {-# SOURCE #-} import
+ -> Located ModuleName -- Imported module to be summarised
+ -> Bool -- object code allowed?
+ -> Maybe (StringBuffer, ClockTime)
+ -> [ModuleName] -- Modules to exclude
+ -> IO (Maybe ModSummary) -- Its new summary
+
+summariseModule hsc_env old_summary_map is_boot (L loc wanted_mod)
+ obj_allowed maybe_buf excl_mods
+ | wanted_mod `elem` excl_mods
+ = return Nothing
+
+ | Just old_summary <- Map.lookup (wanted_mod, hsc_src) old_summary_map
+ = do -- Find its new timestamp; all the
+ -- ModSummaries in the old map have valid ml_hs_files
+ let location = ms_location old_summary
+ src_fn = expectJust "summariseModule" (ml_hs_file location)
+
+ -- check the modification time on the source file, and
+ -- return the cached summary if it hasn't changed. If the
+ -- file has disappeared, we need to call the Finder again.
+ case maybe_buf of
+ Just (_,t) -> check_timestamp old_summary location src_fn t
+ Nothing -> do
+ m <- tryIO (getModificationTime src_fn)
+ case m of
+ Right t -> check_timestamp old_summary location src_fn t
+ Left e | isDoesNotExistError e -> find_it
+ | otherwise -> ioError e
+
+ | otherwise = find_it
+ where
+ dflags = hsc_dflags hsc_env
+
+ hsc_src = if is_boot then HsBootFile else HsSrcFile
+
+ -- Reuse the cached summary when the source timestamp matches;
+ -- otherwise re-summarise from the same location.
+ check_timestamp old_summary location src_fn src_timestamp
+ | ms_hs_date old_summary == src_timestamp = do
+ -- update the object-file timestamp
+ obj_timestamp <-
+ if isObjectTarget (hscTarget (hsc_dflags hsc_env))
+ || obj_allowed -- bug #1205
+ then getObjTimestamp location is_boot
+ else return Nothing
+ return (Just old_summary{ ms_obj_date = obj_timestamp })
+ | otherwise =
+ -- source changed: re-summarise.
+ new_summary location (ms_mod old_summary) src_fn src_timestamp
+
+ find_it = do
+ -- Don't use the Finder's cache this time. If the module was
+ -- previously a package module, it may have now appeared on the
+ -- search path, so we want to consider it to be a home module. If
+ -- the module was previously a home module, it may have moved.
+ uncacheModule hsc_env wanted_mod
+ found <- findImportedModule hsc_env wanted_mod Nothing
+ case found of
+ Found location mod
+ | isJust (ml_hs_file location) ->
+ -- Home package
+ just_found location mod
+ | otherwise ->
+ -- Drop external-pkg
+ ASSERT(modulePackageId mod /= thisPackage dflags)
+ return Nothing
+
+ err -> noModError dflags loc wanted_mod err
+ -- Not found
+
+ just_found location mod = do
+ -- Adjust location to point to the hs-boot source file,
+ -- hi file, object file, when is_boot says so
+ let location' | is_boot = addBootSuffixLocn location
+ | otherwise = location
+ src_fn = expectJust "summarise2" (ml_hs_file location')
+
+ -- Check that it exists
+ -- It might have been deleted since the Finder last found it
+ maybe_t <- modificationTimeIfExists src_fn
+ case maybe_t of
+ Nothing -> noHsFileErr loc src_fn
+ Just t -> new_summary location' mod src_fn t
+
+
+ -- Preprocess the source, read its imports, and build a fresh
+ -- ModSummary; errors if the declared module name differs from
+ -- the one we were asked to import.
+ new_summary location mod src_fn src_timestamp
+ = do
+ -- Preprocess the source file and get its imports
+ -- The dflags' contains the OPTIONS pragmas
+ (dflags', hspp_fn, buf) <- preprocessFile hsc_env src_fn Nothing maybe_buf
+ (srcimps, the_imps, L mod_loc mod_name) <- getImports dflags' buf hspp_fn src_fn
+
+ when (mod_name /= wanted_mod) $
+ throwOneError $ mkPlainErrMsg mod_loc $
+ text "File name does not match module name:"
+ $$ text "Saw:" <+> quotes (ppr mod_name)
+ $$ text "Expected:" <+> quotes (ppr wanted_mod)
+
+ -- Find the object timestamp, and return the summary
+ obj_timestamp <-
+ if isObjectTarget (hscTarget (hsc_dflags hsc_env))
+ || obj_allowed -- bug #1205
+ then getObjTimestamp location is_boot
+ else return Nothing
+
+ return (Just (ModSummary { ms_mod = mod,
+ ms_hsc_src = hsc_src,
+ ms_location = location,
+ ms_hspp_file = hspp_fn,
+ ms_hspp_opts = dflags',
+ ms_hspp_buf = Just buf,
+ ms_srcimps = srcimps,
+ ms_imps = the_imps,
+ ms_hs_date = src_timestamp,
+ ms_obj_date = obj_timestamp }))
+
+
+-- | Timestamp of the object file for a location, if it exists.
+-- Boot interfaces have no object file, so return Nothing for them.
+getObjTimestamp :: ModLocation -> Bool -> IO (Maybe ClockTime)
+getObjTimestamp location is_boot
+ = if is_boot then return Nothing
+ else modificationTimeIfExists (ml_obj_file location)
+
+
+-- | Preprocess a source file and return the resulting DynFlags (which
+-- incorporate any OPTIONS pragmas found in the file), the name of the
+-- preprocessed file, and a buffer with its contents.
+--
+-- When a buffer is already supplied (e.g. by an interactive front
+-- end) we cannot run the external preprocessors, so we check whether
+-- any would be needed and raise an error if so.
+preprocessFile :: HscEnv
+               -> FilePath
+               -> Maybe Phase -- ^ Starting phase
+               -> Maybe (StringBuffer,ClockTime)
+               -> IO (DynFlags, FilePath, StringBuffer)
+preprocessFile hsc_env src_fn mb_phase Nothing
+  = do
+        (dflags', hspp_fn) <- preprocess hsc_env (src_fn, mb_phase)
+        buf <- hGetStringBuffer hspp_fn
+        return (dflags', hspp_fn, buf)
+
+preprocessFile hsc_env src_fn mb_phase (Just (buf, _time))
+  = do
+        let dflags = hsc_dflags hsc_env
+        -- We already have a buffer: in which case, can we bypass the
+        -- preprocessing stage altogether?
+        let local_opts = getOptions dflags buf src_fn
+
+        (dflags', leftovers, warns)
+            <- parseDynamicNoPackageFlags dflags local_opts
+        checkProcessArgsResult leftovers
+        handleFlagWarnings dflags' warns
+
+        let needs_preprocessing
+                | Just (Unlit _) <- mb_phase = True
+                | Nothing <- mb_phase, Unlit _ <- startPhase src_fn = True
+                  -- note: local_opts is only required if there's no Unlit phase
+                | xopt Opt_Cpp dflags' = True
+                | dopt Opt_Pp dflags' = True
+                | otherwise = False
+
+        when needs_preprocessing $
+            -- fixed typo: "preprocesing" -> "preprocessing"
+            ghcError (ProgramError "buffer needs preprocessing; interactive check disabled")
+
+        return (dflags', src_fn, buf)
+
+
+-----------------------------------------------------------------------------
+-- Error messages
+-----------------------------------------------------------------------------
+
+-- | Error when a module could not be found at all; the 'FindResult'
+-- explains why. Return type made polymorphic as @IO a@ for
+-- consistency with 'noHsFileErr' and 'packageModErr' (the stray
+-- type variable @ab@ was a typo).
+noModError :: DynFlags -> SrcSpan -> ModuleName -> FindResult -> IO a
+-- ToDo: we don't have a proper line number for this error
+noModError dflags loc wanted_mod err
+  = throwOneError $ mkPlainErrMsg loc $ cannotFindModule dflags wanted_mod err
+
+-- | Error when the Finder located a module but its source file no
+-- longer exists on disk.
+noHsFileErr :: SrcSpan -> String -> IO a
+noHsFileErr loc path
+ = throwOneError $ mkPlainErrMsg loc $ text "Can't find" <+> text path
+
+-- | Error when a command-line target turned out to be a package
+-- module rather than a home module.
+packageModErr :: ModuleName -> IO a
+packageModErr mod
+ = throwOneError $ mkPlainErrMsg noSrcSpan $
+ text "module" <+> quotes (ppr mod) <+> text "is a package module"
+
+-- | Error when several root files define the same module. The list
+-- must be non-empty (each entry of dup_roots has length >= 2).
+multiRootsErr :: [ModSummary] -> IO ()
+multiRootsErr [] = panic "multiRootsErr"
+multiRootsErr summs@(summ1:_)
+ = throwOneError $ mkPlainErrMsg noSrcSpan $
+ text "module" <+> quotes (ppr mod) <+>
+ text "is defined in multiple files:" <+>
+ sep (map text files)
+ where
+ mod = ms_mod summ1
+ files = map (expectJust "checkDup" . ml_hs_file . ms_location) summs
+
+-- | Report an import cycle: list every module in the cycle together
+-- with just those of its imports that stay inside the cycle.
+cyclicModuleErr :: [ModSummary] -> SDoc
+cyclicModuleErr ms
+ = hang (ptext (sLit "Module imports form a cycle for modules:"))
+ 2 (vcat (map show_one ms))
+ where
+ mods_in_cycle = map ms_mod_name ms
+ imp_modname = unLoc . ideclName . unLoc
+ just_in_cycle = filter ((`elem` mods_in_cycle) . imp_modname)
+
+ -- NB: the 'ms' parameter here shadows the outer list 'ms'
+ show_one ms =
+ vcat [ show_mod (ms_hsc_src ms) (ms_mod_name ms) <+>
+ maybe empty (parens . text) (ml_hs_file (ms_location ms)),
+ nest 2 $ ptext (sLit "imports:") <+> vcat [
+ pp_imps HsBootFile (just_in_cycle $ ms_srcimps ms),
+ pp_imps HsSrcFile (just_in_cycle $ ms_imps ms) ]
+ ]
+ show_mod hsc_src mod = ppr mod <> text (hscSourceString hsc_src)
+ pp_imps src imps = fsep (map (show_mod src . unLoc . ideclName . unLoc) imps)
, hscParseIdentifier
, hscTcRcLookupName
, hscTcRnGetInfo
- , hscRnImportDecls
#ifdef GHCI
+ , hscRnImportDecls
, hscGetModuleExports
, hscTcRnLookupRdrName
, hscStmt, hscStmtWithLocation
import TcRnDriver
import TcIface ( typecheckIface )
import TcRnMonad
-import RnNames ( rnImports )
import IfaceEnv ( initNameCache )
import LoadIface ( ifaceStats, initExternalPackageState )
import PrelInfo ( wiredInThings, basicKnownKeyNames )
import qualified StgCmm ( codeGen )
import StgSyn
import CostCentre
-import TyCon ( TyCon, isDataTyCon )
+import ProfInit
+import TyCon ( TyCon, isDataTyCon )
import Name ( Name, NamedThing(..) )
import SimplStg ( stg2stg )
import CodeGen ( codeGen )
hscGetModuleExports :: HscEnv -> Module -> IO (Maybe [AvailInfo])
hscGetModuleExports hsc_env mdl =
runHsc hsc_env $ ioMsgMaybe' $ getModuleExports hsc_env mdl
-#endif
-- -----------------------------------------------------------------------------
-- | Rename some import declarations
-> [LImportDecl RdrName]
-> IO GlobalRdrEnv
-hscRnImportDecls hsc_env this_mod import_decls = runHsc hsc_env $ do
- (_, r, _, _) <-
- ioMsgMaybe $ initTc hsc_env HsSrcFile False this_mod $
- rnImports import_decls
- return r
+-- It is important that we use tcRnImports instead of calling rnImports directly
+-- because tcRnImports will force-load any orphan modules necessary, making extra
+-- instances/family instances visible (GHC #4832)
+hscRnImportDecls hsc_env this_mod import_decls
+ = runHsc hsc_env $ ioMsgMaybe $ initTc hsc_env HsSrcFile False this_mod $
+ fmap tcg_rdr_env $ tcRnImports hsc_env this_mod import_decls
+
+#endif
-- -----------------------------------------------------------------------------
-- | parse a file, returning the abstract syntax
data HscStatus' a
= HscNoRecomp
| HscRecomp
- Bool -- Has stub files. This is a hack. We can't compile C files here
+ (Maybe FilePath)
+ -- Has stub files. This is a hack. We can't compile C files here
-- since it's done in DriverPipeline. For now we just return True
-- if we want the caller to compile them for us.
a
, hscBackend = \ tc_result mod_summary mb_old_hash -> do
dflags <- getDynFlags
case hscTarget dflags of
- HscNothing -> return (HscRecomp False ())
+ HscNothing -> return (HscRecomp Nothing ())
_otherw -> genericHscBackend hscOneShotCompiler
tc_result mod_summary mb_old_hash
, hscGenBootOutput = \tc_result mod_summary mb_old_iface -> do
(iface, changed, _) <- hscSimpleIface tc_result mb_old_iface
hscWriteIface iface changed mod_summary
- return (HscRecomp False ())
+ return (HscRecomp Nothing ())
, hscGenOutput = \guts0 mod_summary mb_old_iface -> do
guts <- hscSimplify' guts0
, hscGenBootOutput = \tc_result mod_summary mb_old_iface -> do
(iface, changed, details) <- hscSimpleIface tc_result mb_old_iface
hscWriteIface iface changed mod_summary
- return (HscRecomp False (), iface, details)
+ return (HscRecomp Nothing (), iface, details)
, hscGenOutput = \guts0 mod_summary mb_old_iface -> do
guts <- hscSimplify' guts0
, hscGenBootOutput = \tc_result _mod_summary mb_old_iface -> do
(iface, _changed, details) <- hscSimpleIface tc_result mb_old_iface
- return (HscRecomp False Nothing, iface, details)
+ return (HscRecomp Nothing Nothing, iface, details)
, hscGenOutput = \guts0 mod_summary mb_old_iface -> do
guts <- hscSimplify' guts0
, hscBackend = \tc_result _mod_summary mb_old_iface -> do
handleWarnings
(iface, _changed, details) <- hscSimpleIface tc_result mb_old_iface
- return (HscRecomp False (), iface, details)
+ return (HscRecomp Nothing (), iface, details)
, hscGenBootOutput = \_ _ _ ->
panic "hscCompileNothing: hscGenBootOutput should not be called"
-- | Compile to hard-code.
hscGenHardCode :: CgGuts -> ModSummary
- -> Hsc Bool -- ^ @True@ <=> stub.c exists
+ -> Hsc (Maybe FilePath) -- ^ @Just f@ <=> _stub.c is f
hscGenHardCode cgguts mod_summary
= do
hsc_env <- getHscEnv
cg_module = this_mod,
cg_binds = core_binds,
cg_tycons = tycons,
- cg_dir_imps = dir_imps,
- cg_foreign = foreign_stubs,
+ cg_foreign = foreign_stubs0,
cg_dep_pkgs = dependencies,
cg_hpc_info = hpc_info } = cgguts
dflags = hsc_dflags hsc_env
<- {-# SCC "CoreToStg" #-}
myCoreToStg dflags this_mod prepd_binds
+ let prof_init = profilingInitCode this_mod cost_centre_info
+ foreign_stubs = foreign_stubs0 `appendStubC` prof_init
+
------------------ Code generation ------------------
cmms <- if dopt Opt_TryNewCodeGen dflags
then do cmms <- tryNewCodeGen hsc_env this_mod data_tycons
- dir_imps cost_centre_info
+ cost_centre_info
stg_binds hpc_info
return cmms
else {-# SCC "CodeGen" #-}
codeGen dflags this_mod data_tycons
- dir_imps cost_centre_info
+ cost_centre_info
stg_binds hpc_info
--- Optionally run experimental Cmm transformations ---
-- unless certain dflags are on, the identity function
------------------ Code output -----------------------
rawcmms <- cmmToRawCmm cmms
- dumpIfSet_dyn dflags Opt_D_dump_cmmz "Raw Cmm" (ppr rawcmms)
+ dumpIfSet_dyn dflags Opt_D_dump_raw_cmm "Raw Cmm" (ppr rawcmms)
(_stub_h_exists, stub_c_exists)
<- codeOutput dflags this_mod location foreign_stubs
dependencies rawcmms
-------------------- Stuff for new code gen ---------------------
-tryNewCodeGen :: HscEnv -> Module -> [TyCon] -> [Module]
+tryNewCodeGen :: HscEnv -> Module -> [TyCon]
-> CollectedCCs
-> [(StgBinding,[(Id,[Id])])]
-> HpcInfo
-> IO [Cmm]
-tryNewCodeGen hsc_env this_mod data_tycons imported_mods
+tryNewCodeGen hsc_env this_mod data_tycons
cost_centre_info stg_binds hpc_info =
do { let dflags = hsc_dflags hsc_env
- ; prog <- StgCmm.codeGen dflags this_mod data_tycons imported_mods
+ ; prog <- StgCmm.codeGen dflags this_mod data_tycons
cost_centre_info stg_binds hpc_info
; dumpIfSet_dyn dflags Opt_D_dump_cmmz "Cmm produced by new codegen"
(pprCmms prog)
-- * Information about modules
ModDetails(..), emptyModDetails,
- ModGuts(..), CgGuts(..), ForeignStubs(..),
+ ModGuts(..), CgGuts(..), ForeignStubs(..), appendStubC,
ImportedMods,
ModSummary(..), ms_mod_name, showModMsg, isBootSummary,
-- * State relating to modules in this package
HomePackageTable, HomeModInfo(..), emptyHomePackageTable,
- hptInstances, hptRules, hptVectInfo,
-
+ hptInstances, hptRules, hptVectInfo,
+ hptObjs,
+
-- * State relating to known packages
ExternalPackageState(..), EpsStats(..), addEpsInStats,
PackageTypeEnv, PackageIfaceTable, emptyPackageIfaceTable,
Warnings(..), WarningTxt(..), plusWarns,
-- * Linker stuff
- Linkable(..), isObjectLinkable,
+ Linkable(..), isObjectLinkable, linkableObjs,
Unlinked(..), CompiledByteCode,
isObject, nameOfObject, isInterpretable, byteCodeOfObject,
-- And get its dfuns
, thing <- things ]
+
+-- | All object files belonging to linkables in the home package table.
+hptObjs :: HomePackageTable -> [FilePath]
+-- concatMap is the idiomatic form of concat (map ...)
+hptObjs hpt = concatMap (maybe [] linkableObjs . hm_linkable) (eltsUFM hpt)
\end{code}
%************************************************************************
-- data constructor workers; reason: we we regard them
-- as part of the code-gen of tycons
- cg_dir_imps :: ![Module],
- -- ^ Directly-imported modules; used to generate
- -- initialisation code
-
- cg_foreign :: !ForeignStubs, -- ^ Foreign export stubs
+ cg_foreign :: !ForeignStubs, -- ^ Foreign export stubs
cg_dep_pkgs :: ![PackageId], -- ^ Dependent packages, used to
-- generate #includes for C code gen
cg_hpc_info :: !HpcInfo, -- ^ Program coverage tick box information
--
-- 2) C stubs to use when calling
-- "foreign exported" functions
+
+-- | Append a chunk of C code to the C side of the foreign stubs,
+-- creating the stubs (with an empty header part) if there were none.
+appendStubC :: ForeignStubs -> SDoc -> ForeignStubs
+appendStubC NoStubs c_code = ForeignStubs empty c_code
+appendStubC (ForeignStubs h c) c_code = ForeignStubs h (c $$ c_code)
\end{code}
\begin{code}
-- compiling a module in HscNothing mode, and this choice
-- happens to work well with checkStability in module GHC.
+-- | The object-file paths (DotO entries) contained in a Linkable.
+linkableObjs :: Linkable -> [FilePath]
+linkableObjs l = [ f | DotO f <- linkableUnlinked l ]
+
instance Outputable Linkable where
ppr (LM when_made mod unlinkeds)
= (text "LinkableM" <+> parens (text (show when_made)) <+> ppr mod)
"fsimple-list-literals",
"fruntime-types",
"fno-pre-inlining",
+ "fno-opt-coercion",
"fexcess-precision",
"static",
"fhardwire-lib-paths",
opt_PprCols,
opt_PprCaseAsLet,
opt_PprStyle_Debug, opt_TraceLevel,
- opt_NoDebugOutput,
+ opt_NoDebugOutput,
-- Suppressing boring aspects of core dumps
opt_SuppressAll,
opt_CprOff,
opt_SimplNoPreInlining,
opt_SimplExcessPrecision,
+ opt_NoOptCoercion,
opt_MaxWorkerArgs,
-- Unfolding control
opt_NoDebugOutput :: Bool
opt_NoDebugOutput = lookUp (fsLit "-dno-debug-output")
-
-- profiling opts
opt_SccProfilingOn :: Bool
opt_SccProfilingOn = lookUp (fsLit "-fscc-profiling")
opt_SimplExcessPrecision :: Bool
opt_SimplExcessPrecision = lookUp (fsLit "-fexcess-precision")
+-- | True <=> -fno-opt-coercion was given on the command line.
+-- Presumably used to switch off the coercion optimiser -- confirm at
+-- the use site, which is not visible here.
+opt_NoOptCoercion :: Bool
+opt_NoOptCoercion = lookUp (fsLit "-fno-opt-coercion")
+
-- Unfolding control
-- See Note [Discounts and thresholds] in CoreUnfold
-----------------------------------------------------------------------------
\begin{code}
+{-# OPTIONS -fno-warn-unused-do-bind #-}
module SysTools (
-- Initialisation
initSysTools,
-- Interface to system tools
runUnlit, runCpp, runCc, -- [Option] -> IO ()
runPp, -- [Option] -> IO ()
- runMangle, runSplit, -- [Option] -> IO ()
+ runSplit, -- [Option] -> IO ()
runAs, runLink, -- [Option] -> IO ()
runMkDLL,
runWindres,
runLlvmOpt,
runLlvmLlc,
+ readElfSection,
touch, -- String -> String -> IO ()
copy,
import Data.Char
import Data.List
import qualified Data.Map as Map
+import Text.ParserCombinators.ReadP hiding (char)
+import qualified Text.ParserCombinators.ReadP as R
#ifndef mingw32_HOST_OS
import qualified System.Posix.Internals
-- architecture-specific stuff is done when building Config.hs
unlit_path = installed cGHC_UNLIT_PGM
- -- split and mangle are Perl scripts
+ -- split is a Perl script
split_script = installed cGHC_SPLIT_PGM
- mangle_script = installed cGHC_MANGLER_PGM
windres_path = installed_mingw_bin "windres"
| isWindowsHost = installed cGHC_TOUCHY_PGM
| otherwise = "touch"
-- On Win32 we don't want to rely on #!/bin/perl, so we prepend
- -- a call to Perl to get the invocation of split and mangle.
+ -- a call to Perl to get the invocation of split.
-- On Unix, scripts are invoked using the '#!' method. Binary
-- installations of GHC on Unix place the correct line on the
-- front of the script at installation time, so we don't want
(split_prog, split_args)
| isWindowsHost = (perl_path, [Option split_script])
| otherwise = (split_script, [])
- (mangle_prog, mangle_args)
- | isWindowsHost = (perl_path, [Option mangle_script])
- | otherwise = (mangle_script, [])
(mkdll_prog, mkdll_args)
| not isWindowsHost
= panic "Can't build DLLs on a non-Win32 system"
pgm_P = cpp_path,
pgm_F = "",
pgm_c = (gcc_prog,[]),
- pgm_m = (mangle_prog,mangle_args),
pgm_s = (split_prog,split_args),
pgm_a = (as_prog,[]),
pgm_l = (ld_prog,[]),
= (path, '\"' : head b_dirs ++ "\";" ++ paths)
mangle_path other = other
-runMangle :: DynFlags -> [Option] -> IO ()
-runMangle dflags args = do
- let (p,args0) = pgm_m dflags
- runSomething dflags "Mangler" p (args0++args)
-
runSplit :: DynFlags -> [Option] -> IO ()
runSplit dflags args = do
let (p,args0) = pgm_s dflags
getExtraViaCOpts dflags = do
f <- readFile (topDir dflags </> "extra-gcc-opts")
return (words f)
+
+-- | Read the contents of the named section in an ELF object as a
+-- String. Shells out to the external @readelf@ program (assumed to be
+-- on the PATH); returns Nothing if readelf fails or no parseable
+-- entry is found.
+readElfSection :: DynFlags -> String -> FilePath -> IO (Maybe String)
+readElfSection _dflags section exe = do
+ let
+ prog = "readelf"
+ args = [Option "-p", Option section, FileOption "" exe]
+ --
+ r <- readProcessWithExitCode prog (filter notNull (map showOpt args)) ""
+ case r of
+ (ExitSuccess, out, _err) -> return (doFilter (lines out))
+ _ -> return Nothing
+ where
+ -- readelf -p prints section strings as lines of the form
+ -- "[ 0] <string>"; we take the first entry (index 0).
+ doFilter [] = Nothing
+ doFilter (s:r) = case readP_to_S parse s of
+ [(p,"")] -> Just p
+ _r -> doFilter r
+ where parse = do
+ skipSpaces; R.char '['; skipSpaces; string "0]"; skipSpaces;
+ munch (const True)
\end{code}
%************************************************************************
$ do let ref = filesToClean dflags
files <- readIORef ref
let (to_keep, to_delete) = partition (`elem` dont_delete) files
- removeTmpFiles dflags to_delete
writeIORef ref to_keep
+ removeTmpFiles dflags to_delete
-- find a temporary name that doesn't already exist.
import Util
import FastString
+import Control.Monad ( when )
import Data.List ( sortBy )
import Data.IORef ( IORef, readIORef, writeIORef )
\end{code}
mg_binds = binds,
mg_rules = imp_rules,
mg_vect_info = vect_info,
- mg_dir_imps = dir_imps,
- mg_anns = anns,
+ mg_anns = anns,
mg_deps = deps,
mg_foreign = foreign_stubs,
mg_hpc_info = hpc_info,
(ptext (sLit "rules"))
(pprRulesForUser tidy_rules)
- ; let dir_imp_mods = moduleEnvKeys dir_imps
-
- ; return (CgGuts { cg_module = mod,
- cg_tycons = alg_tycons,
- cg_binds = all_tidy_binds,
- cg_dir_imps = dir_imp_mods,
- cg_foreign = foreign_stubs,
+ -- Print one-line size info
+ ; let cs = coreBindsStats tidy_binds
+ ; when (dopt Opt_D_dump_core_stats dflags)
+ (printDump (ptext (sLit "Tidy size (terms,types,coercions)")
+ <+> ppr (moduleName mod) <> colon
+ <+> int (cs_tm cs)
+ <+> int (cs_ty cs)
+ <+> int (cs_co cs) ))
+
+ ; return (CgGuts { cg_module = mod,
+ cg_tycons = alg_tycons,
+ cg_binds = all_tidy_binds,
+ cg_foreign = foreign_stubs,
cg_dep_pkgs = dep_pkgs deps,
cg_hpc_info = hpc_info,
cg_modBreaks = modBreaks },
(c) Update the current assignment
- (d) If the intstruction is a branch:
+ (d) If the instruction is a branch:
if the destination block already has a register assignment,
Generate a new block with fixup code and redirect the
jump to the new block.
-- register does not already have an assignment,
-- and the source register is assigned to a register, not to a spill slot,
-- then we can eliminate the instruction.
- -- (we can't eliminitate it if the source register is on the stack, because
+ -- (we can't eliminate it if the source register is on the stack, because
-- we do not want to use one spill slot for different virtual registers)
case takeRegRegMoveInstr instr of
Just (src,dst) | src `elementOfUniqSet` (liveDieRead live),
saveClobberedTemps
- :: Instruction instr
+ :: (Outputable instr, Instruction instr)
=> [RealReg] -- real registers clobbered by this instruction
-> [Reg] -- registers which are no longer live after this insn
-> RegM [instr] -- return: instructions to spill any temps that will
--- | Mark all these regal regs as allocated,
+-- | Mark all these real regs as allocated,
-- and kick out their vreg assignments.
--
clobberRegs :: [RealReg] -> RegM ()
-- -----------------------------------------------------------------------------
-- allocateRegsAndSpill
+-- Why are we performing a spill?
+-- Passed to allocRegsAndSpill_spill so it knows whether the old
+-- memory copy of the register is still meaningful.
+data SpillLoc = ReadMem StackSlot -- reading from register only in memory
+ | WriteNew -- writing to a new variable
+ | WriteMem -- writing to register only in memory
+-- Note that ReadNew is not valid, since you don't want to be reading
+-- from an uninitialized register. We also don't need the location of
+-- the register in memory, since that will be invalidated by the write.
+-- Technically, we could coalesce WriteNew and WriteMem into a single
+-- entry as well. -- EZY
+
-- This function does several things:
-- For each temporary referred to by this instruction,
-- we allocate a real register (spilling another temporary if necessary).
-- the list of free registers and free stack slots.
allocateRegsAndSpill
- :: Instruction instr
+ :: (Outputable instr, Instruction instr)
=> Bool -- True <=> reading (load up spilled regs)
-> [VirtualReg] -- don't push these out
-> [instr] -- spill insns
allocateRegsAndSpill reading keep spills alloc (r:rs)
= do assig <- getAssigR
+ let doSpill = allocRegsAndSpill_spill reading keep spills alloc r rs assig
case lookupUFM assig r of
-- case (1a): already in a register
Just (InReg my_reg) ->
allocateRegsAndSpill reading keep spills (my_reg:alloc) rs
-- case (1b): already in a register (and memory)
- -- NB1. if we're writing this register, update its assignemnt to be
+ -- NB1. if we're writing this register, update its assignment to be
-- InReg, because the memory value is no longer valid.
-- NB2. This is why we must process written registers here, even if they
-- are also read by the same instruction.
allocateRegsAndSpill reading keep spills (my_reg:alloc) rs
-- Not already in a register, so we need to find a free one...
- loc -> allocRegsAndSpill_spill reading keep spills alloc r rs loc assig
+ Just (InMem slot) | reading -> doSpill (ReadMem slot)
+ | otherwise -> doSpill WriteMem
+ Nothing | reading ->
+ -- pprPanic "allocateRegsAndSpill: Cannot read from uninitialized register" (ppr r)
+ -- ToDo: This case should be a panic, but we
+ -- sometimes see an unreachable basic block which
+ -- triggers this because the register allocator
+ -- will start with an empty assignment.
+ doSpill WriteNew
+
+ | otherwise -> doSpill WriteNew
-allocRegsAndSpill_spill reading keep spills alloc r rs loc assig
+-- reading is redundant with reason, but we keep it around because it's
+-- convenient and it maintains the recursive structure of the allocator. -- EZY
+allocRegsAndSpill_spill reading keep spills alloc r rs assig spill_loc
= do
freeRegs <- getFreeRegsR
let freeRegs_thisClass = getFreeRegs (classOfVirtualReg r) freeRegs
-- case (2): we have a free register
(my_reg : _) ->
- do spills' <- loadTemp reading r loc my_reg spills
-
- let new_loc
- -- if the tmp was in a slot, then now its in a reg as well
- | Just (InMem slot) <- loc
- , reading
- = InBoth my_reg slot
+ do spills' <- loadTemp r spill_loc my_reg spills
- -- tmp has been loaded into a reg
- | otherwise
- = InReg my_reg
-
- setAssigR (addToUFM assig r $! new_loc)
+ setAssigR (addToUFM assig r $! newLocation spill_loc my_reg)
setFreeRegsR $ allocateReg my_reg freeRegs
allocateRegsAndSpill reading keep spills' (my_reg : alloc) rs
-- we have a temporary that is in both register and mem,
-- just free up its register for use.
| (temp, my_reg, slot) : _ <- candidates_inBoth
- = do spills' <- loadTemp reading r loc my_reg spills
+ = do spills' <- loadTemp r spill_loc my_reg spills
let assig1 = addToUFM assig temp (InMem slot)
- let assig2 = addToUFM assig1 r (InReg my_reg)
+ let assig2 = addToUFM assig1 r $! newLocation spill_loc my_reg
setAssigR assig2
allocateRegsAndSpill reading keep spills' (my_reg:alloc) rs
-- update the register assignment
let assig1 = addToUFM assig temp_to_push_out (InMem slot)
- let assig2 = addToUFM assig1 r (InReg my_reg)
+ let assig2 = addToUFM assig1 r $! newLocation spill_loc my_reg
setAssigR assig2
-- if need be, load up a spilled temp into the reg we've just freed up.
- spills' <- loadTemp reading r loc my_reg spills
+ spills' <- loadTemp r spill_loc my_reg spills
allocateRegsAndSpill reading keep
(spill_store ++ spills')
result
--- | Load up a spilled temporary if we need to.
+-- | Calculate a new location after a register has been loaded.
+newLocation :: SpillLoc -> RealReg -> Loc
+-- if the tmp was read from a slot, then now it's in a reg as well
+newLocation (ReadMem slot) my_reg = InBoth my_reg slot
+-- writes will always result in only the register being available
+newLocation _ my_reg = InReg my_reg
+
+-- | Load up a spilled temporary if we need to (read from memory).
loadTemp
- :: Instruction instr
- => Bool
- -> VirtualReg -- the temp being loaded
- -> Maybe Loc -- the current location of this temp
+ :: (Outputable instr, Instruction instr)
+ => VirtualReg -- the temp being loaded
+ -> SpillLoc -- the current location of this temp
-> RealReg -- the hreg to load the temp into
-> [instr]
-> RegM [instr]
-loadTemp True vreg (Just (InMem slot)) hreg spills
+loadTemp vreg (ReadMem slot) hreg spills
= do
insn <- loadR (RegReal hreg) slot
recordSpill (SpillLoad $ getUnique vreg)
return $ {- COMMENT (fsLit "spill load") : -} insn : spills
-loadTemp _ _ _ _ spills =
+loadTemp _ _ _ spills =
return spills
size | not use_sse2 && isFloatSize sz = FF80
| otherwise = sz
--
- return (Fixed sz (getRegisterReg use_sse2 reg) nilOL)
+ return (Fixed size (getRegisterReg use_sse2 reg) nilOL)
getRegister tree@(CmmRegOff _ _)
| sse2 -> coerceFP2FP W64 x
| otherwise -> conversionNop FF80 x
- MO_FF_Conv W64 W32
- | sse2 -> coerceFP2FP W32 x
- | otherwise -> conversionNop FF80 x
+ MO_FF_Conv W64 W32 -> coerceFP2FP W32 x
MO_FS_Conv from to -> coerceFP2Int from to x
MO_SF_Conv from to -> coerceInt2FP from to x
| otherwise
#endif
= concatOL push_codes
+
+ -- Deallocate parameters after call for ccall;
+ -- but not for stdcall (callee does it)
+ --
+ -- We have to pop any stack padding we added
+ -- on Darwin even if we are doing stdcall, though (#5052)
+ pop_size | cconv /= StdCallConv = tot_arg_size
+ | otherwise
+#if darwin_TARGET_OS
+ = arg_pad_size
+#else
+ = 0
+#endif
+
call = callinsns `appOL`
toOL (
- -- Deallocate parameters after call for ccall;
- -- but not for stdcall (callee does it)
- (if cconv == StdCallConv || tot_arg_size==0 then [] else
- [ADD II32 (OpImm (ImmInt tot_arg_size)) (OpReg esp)])
+ (if pop_size==0 then [] else
+ [ADD II32 (OpImm (ImmInt pop_size)) (OpReg esp)])
++
[DELTA (delta + tot_arg_size)]
)
--------------------------------------------------------------------------------
coerceFP2FP :: Width -> CmmExpr -> NatM Register
coerceFP2FP to x = do
+ use_sse2 <- sse2Enabled
(x_reg, x_code) <- getSomeReg x
let
- opc = case to of W32 -> CVTSD2SS; W64 -> CVTSS2SD
+ opc | use_sse2 = case to of W32 -> CVTSD2SS; W64 -> CVTSS2SD
+ | otherwise = GDTOF
code dst = x_code `snocOL` opc x_reg dst
-- in
- return (Any (floatSize to) code)
+ return (Any (if use_sse2 then floatSize to else FF80) code)
--------------------------------------------------------------------------------
| GITOF Reg Reg -- src(intreg), dst(fpreg)
| GITOD Reg Reg -- src(intreg), dst(fpreg)
+ | GDTOF Reg Reg -- src(fpreg), dst(fpreg)
+
| GADD Size Reg Reg Reg -- src1, src2, dst
| GDIV Size Reg Reg Reg -- src1, src2, dst
| GSUB Size Reg Reg Reg -- src1, src2, dst
GITOF src dst -> mkRU [src] [dst]
GITOD src dst -> mkRU [src] [dst]
+ GDTOF src dst -> mkRU [src] [dst]
+
GADD _ s1 s2 dst -> mkRU [s1,s2] [dst]
GSUB _ s1 s2 dst -> mkRU [s1,s2] [dst]
GMUL _ s1 s2 dst -> mkRU [s1,s2] [dst]
GITOF src dst -> GITOF (env src) (env dst)
GITOD src dst -> GITOD (env src) (env dst)
+ GDTOF src dst -> GDTOF (env src) (env dst)
+
GADD sz s1 s2 dst -> GADD sz (env s1) (env s2) (env dst)
GSUB sz s1 s2 dst -> GSUB sz (env s1) (env s2) (env dst)
GMUL sz s1 s2 dst -> GMUL sz (env s1) (env s2) (env dst)
where p insn r = case insn of
CALL _ _ -> GFREE : insn : r
JMP _ -> GFREE : insn : r
+ JXX_GBL _ _ -> GFREE : insn : r
_ -> insn : r
-- if you ever add a new FP insn to the fake x86 FP insn set,
GLD1{} -> True
GFTOI{} -> True
GDTOI{} -> True
- GITOF{} -> True
- GITOD{} -> True
+ GITOF{} -> True
+ GITOD{} -> True
+ GDTOF{} -> True
GADD{} -> True
GDIV{} -> True
GSUB{} -> True
text " ; fildl (%esp) ; ",
gpop dst 1, text " ; addl $4,%esp"])
+pprInstr g@(GDTOF src dst)
+ = pprG g (vcat [gtab <> gpush src 0,
+ gtab <> text "subl $4,%esp ; fstps (%esp) ; flds (%esp) ; addl $4,%esp ;",
+ gtab <> gpop dst 1])
+
{- Gruesome swamp follows. If you're unfortunate enough to have ventured
this far into the jungle AND you give a Rat's Ass (tm) what's going
on, here's the deal. Generate code to do a floating point comparison
pprGInstr (GITOF src dst) = pprSizeSizeRegReg (sLit "gitof") II32 FF32 src dst
pprGInstr (GITOD src dst) = pprSizeSizeRegReg (sLit "gitod") II32 FF64 src dst
+pprGInstr (GDTOF src dst) = pprSizeSizeRegReg (sLit "gdtof") FF64 FF32 src dst
pprGInstr (GCMP co src dst) = pprCondRegReg (sLit "gcmp_") FF64 co src dst
pprGInstr (GABS sz src dst) = pprSizeRegReg (sLit "gabs") sz src dst
hetMetCodeTypeTyConKey = mkPreludeTyConUnique 135
---------------- Template Haskell -------------------
--- USES TyConUniques 100-129
+-- USES TyConUniques 200-299
-----------------------------------------------------
unitTyConKey :: Unique
hetmet_guest_char_literal_key = mkPreludeMiscIdUnique 136
---------------- Template Haskell -------------------
--- USES IdUniques 200-399
+-- USES IdUniques 200-499
-----------------------------------------------------
\end{code}
import Constants
import Data.Bits as Bits
-import Data.Word ( Word )
+import Data.Int ( Int64 )
+import Data.Word ( Word, Word64 )
\end{code}
primop_rule CharEqOp = relop (==) ++ litEq op_name True
primop_rule CharNeOp = relop (/=) ++ litEq op_name False
- primop_rule IntGtOp = relop (>)
- primop_rule IntGeOp = relop (>=)
- primop_rule IntLeOp = relop (<=)
- primop_rule IntLtOp = relop (<)
+ primop_rule IntGtOp = relop (>) ++ boundsCmp op_name Gt
+ primop_rule IntGeOp = relop (>=) ++ boundsCmp op_name Ge
+ primop_rule IntLeOp = relop (<=) ++ boundsCmp op_name Le
+ primop_rule IntLtOp = relop (<) ++ boundsCmp op_name Lt
- primop_rule CharGtOp = relop (>)
- primop_rule CharGeOp = relop (>=)
- primop_rule CharLeOp = relop (<=)
- primop_rule CharLtOp = relop (<)
+ primop_rule CharGtOp = relop (>) ++ boundsCmp op_name Gt
+ primop_rule CharGeOp = relop (>=) ++ boundsCmp op_name Ge
+ primop_rule CharLeOp = relop (<=) ++ boundsCmp op_name Le
+ primop_rule CharLtOp = relop (<) ++ boundsCmp op_name Lt
primop_rule FloatGtOp = relop (>)
primop_rule FloatGeOp = relop (>=)
primop_rule DoubleEqOp = relop (==)
primop_rule DoubleNeOp = relop (/=)
- primop_rule WordGtOp = relop (>)
- primop_rule WordGeOp = relop (>=)
- primop_rule WordLeOp = relop (<=)
- primop_rule WordLtOp = relop (<)
+ primop_rule WordGtOp = relop (>) ++ boundsCmp op_name Gt
+ primop_rule WordGeOp = relop (>=) ++ boundsCmp op_name Ge
+ primop_rule WordLeOp = relop (<=) ++ boundsCmp op_name Le
+ primop_rule WordLtOp = relop (<) ++ boundsCmp op_name Lt
primop_rule WordEqOp = relop (==)
primop_rule WordNeOp = relop (/=)
val_if_neq | is_eq = falseVal
| otherwise = trueVal
+
+-- | Check if there is comparison with minBound or maxBound, that is
+-- always true or false. For instance, an Int cannot be smaller than its
+-- minBound, so we can replace such comparison with False.
+boundsCmp :: Name -> Comparison -> [CoreRule]
+boundsCmp op_name op = [ rule ]
+ where
+ rule = BuiltinRule
+ { ru_name = occNameFS (nameOccName op_name)
+ `appendFS` (fsLit "min/maxBound")
+ , ru_fn = op_name
+ , ru_nargs = 2
+ , ru_try = rule_fn
+ }
+ rule_fn _ [a, b] = mkRuleFn op a b
+ rule_fn _ _ = Nothing
+
+data Comparison = Gt | Ge | Lt | Le
+
+mkRuleFn :: Comparison -> CoreExpr -> CoreExpr -> Maybe CoreExpr
+mkRuleFn Gt (Lit lit) _ | isMinBound lit = Just falseVal
+mkRuleFn Le (Lit lit) _ | isMinBound lit = Just trueVal
+mkRuleFn Ge _ (Lit lit) | isMinBound lit = Just trueVal
+mkRuleFn Lt _ (Lit lit) | isMinBound lit = Just falseVal
+mkRuleFn Ge (Lit lit) _ | isMaxBound lit = Just trueVal
+mkRuleFn Lt (Lit lit) _ | isMaxBound lit = Just falseVal
+mkRuleFn Gt _ (Lit lit) | isMaxBound lit = Just falseVal
+mkRuleFn Le _ (Lit lit) | isMaxBound lit = Just trueVal
+mkRuleFn _ _ _ = Nothing
+
+isMinBound :: Literal -> Bool
+isMinBound (MachChar c) = c == minBound
+isMinBound (MachInt i) = i == toInteger (minBound :: Int)
+isMinBound (MachInt64 i) = i == toInteger (minBound :: Int64)
+isMinBound (MachWord i) = i == toInteger (minBound :: Word)
+isMinBound (MachWord64 i) = i == toInteger (minBound :: Word64)
+isMinBound _ = False
+
+isMaxBound :: Literal -> Bool
+isMaxBound (MachChar c) = c == maxBound
+isMaxBound (MachInt i) = i == toInteger (maxBound :: Int)
+isMaxBound (MachInt64 i) = i == toInteger (maxBound :: Int64)
+isMaxBound (MachWord i) = i == toInteger (maxBound :: Word)
+isMaxBound (MachWord64 i) = i == toInteger (maxBound :: Word64)
+isMaxBound _ = False
+
+
-- Note that we *don't* warn the user about overflow. It's not done at
-- runtime either, and compilation of completely harmless things like
-- ((124076834 :: Word32) + (2147483647 :: Word32))
out_of_line = True
has_side_effects = True
+primop CasMutVarOp "casMutVar#" GenPrimOp
+ MutVar# s a -> a -> a -> State# s -> (# State# s, Int#, a #)
+ with
+ out_of_line = True
+ has_side_effects = True
+
------------------------------------------------------------------------
section "Exceptions"
------------------------------------------------------------------------
--- /dev/null
+-- -----------------------------------------------------------------------------
+--
+-- (c) The University of Glasgow, 2011
+--
+-- Generate code to initialise cost centres
+--
+-- -----------------------------------------------------------------------------
+
+module ProfInit (profilingInitCode) where
+
+import CLabel
+import CostCentre
+import Outputable
+import StaticFlags
+import FastString
+import Module
+
+-- -----------------------------------------------------------------------------
+-- Initialising cost centres
+
+-- We must produce declarations for the cost-centres defined in this
+-- module;
+
+profilingInitCode :: Module -> CollectedCCs -> SDoc
+profilingInitCode this_mod (local_CCs, ___extern_CCs, singleton_CCSs)
+ | not opt_SccProfilingOn = empty
+ | otherwise
+ = vcat
+ [ text "static void prof_init_" <> ppr this_mod
+ <> text "(void) __attribute__((constructor));"
+ , text "static void prof_init_" <> ppr this_mod <> text "(void)"
+ , braces (vcat (
+ map emitRegisterCC local_CCs ++
+ map emitRegisterCCS singleton_CCSs
+ ))
+ ]
+ where
+ emitRegisterCC cc =
+ ptext (sLit "extern CostCentre ") <> cc_lbl <> ptext (sLit "[];") $$
+ ptext (sLit "REGISTER_CC(") <> cc_lbl <> char ')' <> semi
+ where cc_lbl = ppr (mkCCLabel cc)
+ emitRegisterCCS ccs =
+ ptext (sLit "extern CostCentreStack ") <> ccs_lbl <> ptext (sLit "[];") $$
+ ptext (sLit "REGISTER_CCS(") <> ccs_lbl <> char ')' <> semi
+ where ccs_lbl = ppr (mkCCSLabel ccs)
-- let x = x in 3
-- should report 'x' unused
; let real_uses = findUses dus result_fvs
- ; warnUnusedLocalBinds bound_names real_uses
+ -- Insert fake uses for variables introduced implicitly by wildcards (#4404)
+ implicit_uses = hsValBindsImplicits binds'
+ ; warnUnusedLocalBinds bound_names (real_uses `unionNameSets` implicit_uses)
; let
-- The variables "used" in the val binds are:
-- ...bring them and their fixities into scope
; let bound_names = collectLStmtsBinders (map fst new_lhs_and_fv)
+ -- Fake uses of variables introduced implicitly (warning suppression, see #4404)
+ implicit_uses = lStmtsImplicits (map fst new_lhs_and_fv)
; bindLocalNamesFV bound_names $
addLocalFixities fix_env bound_names $ do
-- (C) do the right-hand-sides and thing-inside
{ segs <- rn_rec_stmts bound_names new_lhs_and_fv
; (res, fvs) <- cont segs
- ; warnUnusedLocalBinds bound_names fvs
+ ; warnUnusedLocalBinds bound_names (fvs `unionNameSets` implicit_uses)
; return (res, fvs) }}
-- get all the fixity decls in any Let stmt
-- (Opt_WarnMissingImportList also checks for T(..) items
-- but that is done in checkDodgyImport below)
case imp_details of
- Just (False, _) -> return ()
+ Just (False, _) -> return () -- Explicit import list
_ | implicit_prelude -> return ()
+ | qual_only -> return ()
| otherwise -> ifDOptM Opt_WarnMissingImportList $
addWarn (missingImportListWarn imp_mod_name)
= ifDOptM Opt_WarnDodgyImports (addWarn (dodgyImportWarn n))
-- NB. use the RdrName for reporting the warning
| IEThingAll {} <- ieRdr
+ , not (is_qual decl_spec)
= ifDOptM Opt_WarnMissingImportList $
addWarn (missingImportListItem ieRdr)
checkDodgyImport _
simpl_gently = CoreDoSimplify max_iter
(base_mode { sm_phase = InitialPhase
, sm_names = ["Gentle"]
- , sm_rules = True -- Note [RULEs enabled in SimplGently]
+ , sm_rules = rules_on -- Note [RULEs enabled in SimplGently]
, sm_inline = False
, sm_case_case = False })
-- Don't do case-of-case transformations.
But watch out: list fusion can prevent floating. So use phase control
to switch off those rules until after floating.
-Currently (Oct10) I think that sm_rules is always True, so we
-could remove it.
-
%************************************************************************
%* *
where
(body_usg', tagged_bndr) = tagBinder body_usg bndr
rhs_usg = unitVarEnv rhs_var NoOccInfo -- We don't need exact info
- rhs = mkCoerceI co (Var rhs_var)
+ rhs = mkCoerceI co (Var (zapIdOccInfo rhs_var)) -- See Note [Zap case binders in proxy bindings]
\end{code}
%************************************************************************
\begin{code}
-data ProxyEnv
- = PE (IdEnv (Id, [(Id,CoercionI)])) VarSet
- -- Main env, and its free variables (of both range and domain)
+data ProxyEnv -- See Note [ProxyEnv]
+ = PE (IdEnv -- Domain = scrutinee variables
+ (Id, -- The scrutinee variable again
+ [(Id,CoercionI)])) -- The case binders that it maps to
+ VarSet -- Free variables of both range and domain
\end{code}
Note [ProxyEnv]
Notice that later bindings may mention earlier ones, and that
we need to go "both ways".
+Note [Zap case binders in proxy bindings]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+From the original
+ case x of cb(dead) { p -> ...x... }
+we will get
+ case x of cb(live) { p -> let x = cb in ...x... }
+
+Core Lint never expects to find an *occurrence* of an Id marked
+as Dead, so we must zap the OccInfo on cb before making the
+binding x = cb. See Trac #5028.
+
Historical note [no-case-of-case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We *used* to suppress the binder-swap in case expressions when
-- Localise the scrut_var before shadowing it; we're making a
-- new binding for it, and it might have an External Name, or
-- even be a GlobalId; Note [Binder swap on GlobalId scrutinees]
- -- Also we don't want any INLILNE or NOINLINE pragmas!
+ -- Also we don't want any INLINE or NOINLINE pragmas!
-----------
type ProxyBind = (Id, Id, CoercionI)
+ -- (scrut variable, case-binder variable, coercion)
getProxies :: OccEnv -> Id -> Bag ProxyBind
-- Return a bunch of bindings [...(xi,ei)...]
; us <- mkSplitUniqSupply 's'
; let (expr', _counts) = initSmpl dflags emptyRuleBase emptyFamInstEnvs us $
- simplExprGently simplEnvForGHCi expr
+ simplExprGently (simplEnvForGHCi dflags) expr
; Err.dumpIfSet_dyn dflags Opt_D_dump_simpl "Simplified expression"
(pprCoreExpr expr')
sm_eta_expand :: Bool -- Whether eta-expansion is enabled
\begin{code}
-simplEnvForGHCi :: SimplEnv
-simplEnvForGHCi = mkSimplEnv $
- SimplMode { sm_names = ["GHCi"]
- , sm_phase = InitialPhase
- , sm_rules = True, sm_inline = False
- , sm_eta_expand = False, sm_case_case = True }
+simplEnvForGHCi :: DynFlags -> SimplEnv
+simplEnvForGHCi dflags
+ = mkSimplEnv $ SimplMode { sm_names = ["GHCi"]
+ , sm_phase = InitialPhase
+ , sm_rules = rules_on
+ , sm_inline = False
+ , sm_eta_expand = eta_expand_on
+ , sm_case_case = True }
+ where
+ rules_on = dopt Opt_EnableRewriteRules dflags
+ eta_expand_on = dopt Opt_DoLambdaEtaExpansion dflags
-- Do not do any inlining, in case we expose some unboxed
-- tuple stuff that confuses the bytecode interpreter
-- See Note [Simplifying inside InlineRules]
updModeForInlineRules inline_rule_act current_mode
= current_mode { sm_phase = phaseFromActivation inline_rule_act
- , sm_rules = True
, sm_inline = True
, sm_eta_expand = False }
+ -- For sm_rules, just inherit; sm_rules might be "off"
+ -- because of -fno-enable-rewrite-rules
where
phaseFromActivation (ActiveAfter n) = Phase n
phaseFromActivation _ = InitialPhase
trace_dump dflags rule rule_rhs stuff
| not (dopt Opt_D_dump_rule_firings dflags)
, not (dopt Opt_D_dump_rule_rewrites dflags) = stuff
- | not (dopt Opt_D_dump_rule_rewrites dflags)
+ | not (dopt Opt_D_dump_rule_rewrites dflags)
= pprTrace "Rule fired:" (ftext (ru_name rule)) stuff
+
| otherwise
= pprTrace "Rule fired"
(vcat [text "Rule:" <+> ftext (ru_name rule),
tcPolyBinds top_lvl sig_fn prag_fn rec_group rec_tc bind_list
= setSrcSpan loc $
recoverM (recoveryCode binder_names sig_fn) $ do
- -- Set up main recoer; take advantage of any type sigs
+ -- Set up main recover; take advantage of any type sigs
{ traceTc "------------------------------------------------" empty
; traceTc "Bindings for" (ppr binder_names)
+ -- Instantiate the polytypes of any binders that have signatures
+ -- (as determined by sig_fn), returning a TcSigInfo for each
; tc_sig_fn <- tcInstSigs sig_fn binder_names
; dflags <- getDOpts
; return (binds, poly_ids) }
where
binder_names = collectHsBindListBinders bind_list
- loc = getLoc (head bind_list)
- -- TODO: location a bit awkward, but the mbinds have been
- -- dependency analysed and may no longer be adjacent
+ loc = foldr1 combineSrcSpans (map getLoc bind_list)
+ -- The mbinds have been dependency analysed and
+ -- may no longer be adjacent; so find the narrowest
+ -- span that includes them all
------------------
tcPolyNoGen
-- it binds a single variable,
-- it has a signature,
tcPolyCheck sig@(TcSigInfo { sig_id = id, sig_tvs = tvs, sig_scoped = scoped
- , sig_theta = theta, sig_tau = tau, sig_loc = loc })
+ , sig_theta = theta, sig_tau = tau })
prag_fn rec_tc bind_list
= do { ev_vars <- newEvVars theta
; let skol_info = SigSkol (FunSigCtxt (idName id)) (mkPhiTy theta tau)
; export <- mkExport prag_fn tvs theta mono_info
+ ; loc <- getSrcSpanM
; let (_, poly_id, _, _) = export
abs_bind = L loc $ AbsBinds
{ abs_tvs = tvs
-- dependencies based on type signatures
-> [LHsBind Name]
-> TcM (LHsBinds TcId, [TcId])
-tcPolyInfer top_lvl mono sig_fn prag_fn rec_tc bind_list
+tcPolyInfer top_lvl mono tc_sig_fn prag_fn rec_tc bind_list
= do { ((binds', mono_infos), wanted)
<- captureConstraints $
- tcMonoBinds sig_fn LetLclBndr rec_tc bind_list
+ tcMonoBinds tc_sig_fn LetLclBndr rec_tc bind_list
; unifyCtxts [sig | (_, Just sig, _) <- mono_infos]
\begin{code}
module TcCanonical(
- mkCanonical, mkCanonicals, mkCanonicalFEV, canWanteds, canGivens,
- canOccursCheck, canEq,
+ mkCanonical, mkCanonicals, mkCanonicalFEV, mkCanonicalFEVs, canWanteds, canGivens,
+ canOccursCheck, canEqToWorkList,
rewriteWithFunDeps
) where
%************************************************************************
\begin{code}
-canWanteds :: [WantedEvVar] -> TcS CanonicalCts
-canWanteds = fmap andCCans . mapM (\(EvVarX ev loc) -> mkCanonical (Wanted loc) ev)
+canWanteds :: [WantedEvVar] -> TcS WorkList
+canWanteds = fmap unionWorkLists . mapM (\(EvVarX ev loc) -> mkCanonical (Wanted loc) ev)
-canGivens :: GivenLoc -> [EvVar] -> TcS CanonicalCts
+canGivens :: GivenLoc -> [EvVar] -> TcS WorkList
canGivens loc givens = do { ccs <- mapM (mkCanonical (Given loc)) givens
- ; return (andCCans ccs) }
+ ; return (unionWorkLists ccs) }
-mkCanonicals :: CtFlavor -> [EvVar] -> TcS CanonicalCts
-mkCanonicals fl vs = fmap andCCans (mapM (mkCanonical fl) vs)
+mkCanonicals :: CtFlavor -> [EvVar] -> TcS WorkList
+mkCanonicals fl vs = fmap unionWorkLists (mapM (mkCanonical fl) vs)
-mkCanonicalFEV :: FlavoredEvVar -> TcS CanonicalCts
+mkCanonicalFEV :: FlavoredEvVar -> TcS WorkList
mkCanonicalFEV (EvVarX ev fl) = mkCanonical fl ev
-mkCanonical :: CtFlavor -> EvVar -> TcS CanonicalCts
+mkCanonicalFEVs :: Bag FlavoredEvVar -> TcS WorkList
+mkCanonicalFEVs = foldrBagM canon_one emptyWorkList
+ where -- Preserves order (shouldn't be important, but currently
+ -- is important for the vectoriser)
+ canon_one fev wl = do { wl' <- mkCanonicalFEV fev
+ ; return (unionWorkList wl' wl) }
+
+mkCanonical :: CtFlavor -> EvVar -> TcS WorkList
mkCanonical fl ev = case evVarPred ev of
- ClassP clas tys -> canClass fl ev clas tys
- IParam ip ty -> canIP fl ev ip ty
- EqPred ty1 ty2 -> canEq fl ev ty1 ty2
+ ClassP clas tys -> canClassToWorkList fl ev clas tys
+ IParam ip ty -> canIPToWorkList fl ev ip ty
+ EqPred ty1 ty2 -> canEqToWorkList fl ev ty1 ty2
-canClass :: CtFlavor -> EvVar -> Class -> [TcType] -> TcS CanonicalCts
-canClass fl v cn tys
+canClassToWorkList :: CtFlavor -> EvVar -> Class -> [TcType] -> TcS WorkList
+canClassToWorkList fl v cn tys
= do { (xis,cos,ccs) <- flattenMany fl tys -- cos :: xis ~ tys
; let no_flattening_happened = isEmptyCCan ccs
dict_co = mkTyConCoercion (classTyCon cn) cos
-- Add the superclasses of this one here, See Note [Adding superclasses].
-- But only if we are not simplifying the LHS of a rule.
; sctx <- getTcSContext
- ; sc_cts <- if simplEqsOnly sctx then return emptyCCan
+ ; sc_cts <- if simplEqsOnly sctx then return emptyWorkList
else newSCWorkFromFlavored v_new fl cn xis
- ; return (sc_cts `andCCan` ccs `extendCCans` CDictCan { cc_id = v_new
- , cc_flavor = fl
- , cc_class = cn
- , cc_tyargs = xis }) }
+ ; return (sc_cts `unionWorkList`
+ workListFromEqs ccs `unionWorkList`
+ workListFromNonEq CDictCan { cc_id = v_new
+ , cc_flavor = fl
+ , cc_class = cn
+ , cc_tyargs = xis }) }
\end{code}
Note [Adding superclasses]
\begin{code}
-newSCWorkFromFlavored :: EvVar -> CtFlavor -> Class -> [Xi] -> TcS CanonicalCts
+newSCWorkFromFlavored :: EvVar -> CtFlavor -> Class -> [Xi] -> TcS WorkList
-- Returns superclasses, see Note [Adding superclasses]
newSCWorkFromFlavored ev orig_flavor cls xis
| isDerived orig_flavor
- = return emptyCCan -- Deriveds don't yield more superclasses because we will
- -- add them transitively in the case of wanteds.
+ = return emptyWorkList -- Deriveds don't yield more superclasses because we will
+ -- add them transitively in the case of wanteds.
| isGiven orig_flavor
= do { let sc_theta = immSuperClasses cls xis
; mkCanonicals flavor sc_vars }
| isEmptyVarSet (tyVarsOfTypes xis)
- = return emptyCCan -- Wanteds with no variables yield no deriveds.
- -- See Note [Improvement from Ground Wanteds]
+ = return emptyWorkList -- Wanteds with no variables yield no deriveds.
+ -- See Note [Improvement from Ground Wanteds]
| otherwise -- Wanted case, just add those SC that can lead to improvement.
= do { let sc_rec_theta = transSuperClasses cls xis
-canIP :: CtFlavor -> EvVar -> IPName Name -> TcType -> TcS CanonicalCts
+canIPToWorkList :: CtFlavor -> EvVar -> IPName Name -> TcType -> TcS WorkList
-- See Note [Canonical implicit parameter constraints] to see why we don't
-- immediately canonicalize (flatten) IP constraints.
-canIP fl v nm ty
- = return $ singleCCan $ CIPCan { cc_id = v
- , cc_flavor = fl
- , cc_ip_nm = nm
- , cc_ip_ty = ty }
+canIPToWorkList fl v nm ty
+ = return $ workListFromNonEq (CIPCan { cc_id = v
+ , cc_flavor = fl
+ , cc_ip_nm = nm
+ , cc_ip_ty = ty })
-----------------
+canEqToWorkList :: CtFlavor -> EvVar -> Type -> Type -> TcS WorkList
+canEqToWorkList fl cv ty1 ty2 = do { cts <- canEq fl cv ty1 ty2
+ ; return $ workListFromEqs cts }
+
canEq :: CtFlavor -> EvVar -> Type -> Type -> TcS CanonicalCts
canEq fl cv ty1 ty2
| tcEqType ty1 ty2 -- Dealing with equality here avoids
\begin{code}
rewriteWithFunDeps :: [Equation]
-> [Xi] -> CtFlavor
- -> TcS (Maybe ([Xi], [Coercion], CanonicalCts))
+ -> TcS (Maybe ([Xi], [Coercion], WorkList))
rewriteWithFunDeps eqn_pred_locs xis fl
= do { fd_ev_poss <- mapM (instFunDepEqn fl) eqn_pred_locs
; let fd_ev_pos :: [(Int,FlavoredEvVar)]
fd_ev_pos = concat fd_ev_poss
(rewritten_xis, cos) = unzip (rewriteDictParams fd_ev_pos xis)
; fds <- mapM (\(_,fev) -> mkCanonicalFEV fev) fd_ev_pos
- ; let fd_work = unionManyBags fds
- ; if isEmptyBag fd_work
+ ; let fd_work = unionWorkLists fds
+ ; if isEmptyWorkList fd_work
then return Nothing
else return (Just (rewritten_xis, cos, fd_work)) }
type AtomicInert = CanonicalCt -- constraint pulled from InertSet
type WorkItem = CanonicalCt -- constraint pulled from WorkList
--- A mixture of Given, Wanted, and Derived constraints.
--- We split between equalities and the rest to process equalities first.
-type WorkList = CanonicalCts
-
-unionWorkLists :: WorkList -> WorkList -> WorkList
-unionWorkLists = andCCan
-
-isEmptyWorkList :: WorkList -> Bool
-isEmptyWorkList = isEmptyCCan
-
-emptyWorkList :: WorkList
-emptyWorkList = emptyCCan
-
-workListFromCCan :: CanonicalCt -> WorkList
-workListFromCCan = singleCCan
-
------------------------
data StopOrContinue
= Stop -- Work item is consumed
, sr_stop = ContinueWith work_item })
= do { itr <- stage depth work_item inerts
; traceTcS ("Stage result (" ++ name ++ ")") (ppr itr)
- ; let itr' = itr { sr_new_work = accum_work `unionWorkLists` sr_new_work itr }
+ ; let itr' = itr { sr_new_work = accum_work `unionWorkList` sr_new_work itr }
; run_pipeline stages itr' }
\end{code}
-> (ct,evVarPred ev)) ws)
, text "inert = " <+> ppr inert ]
- ; (flag, inert_ret) <- foldrBagM (tryPreSolveAndInteract sctx dyn_flags) (True,inert) ws
- -- use foldr to preserve the order
+ ; can_ws <- mkCanonicalFEVs ws
+
+ ; (flag, inert_ret)
+ <- foldrWorkListM (tryPreSolveAndInteract sctx dyn_flags) (True,inert) can_ws
; traceTcS "solveInteract, after clever canonicalization (and interaction):" $
vcat [ text "No interaction happened = " <+> ppr flag
; return (flag, inert_ret) }
-
tryPreSolveAndInteract :: SimplContext
-> DynFlags
- -> FlavoredEvVar
+ -> CanonicalCt
-> (Bool, InertSet)
-> TcS (Bool, InertSet)
-- Returns: True if it was able to discharge this constraint AND all previous ones
-tryPreSolveAndInteract sctx dyn_flags flavev@(EvVarX ev_var fl) (all_previous_discharged, inert)
+tryPreSolveAndInteract sctx dyn_flags ct (all_previous_discharged, inert)
= do { let inert_cts = get_inert_cts (evVarPred ev_var)
- ; this_one_discharged <- dischargeFromCCans inert_cts flavev
+ ; this_one_discharged <-
+ if isCFrozenErr ct then
+ return False
+ else
+ dischargeFromCCans inert_cts ev_var fl
; if this_one_discharged
then return (all_previous_discharged, inert)
else do
- { extra_cts <- mkCanonical fl ev_var
- ; inert_ret <- solveInteractWithDepth (ctxtStkDepth dyn_flags,0,[]) extra_cts inert
+ { inert_ret <- solveOneWithDepth (ctxtStkDepth dyn_flags,0,[]) ct inert
; return (False, inert_ret) } }
where
+ ev_var = cc_id ct
+ fl = cc_flavor ct
+
get_inert_cts (ClassP clas _)
| simplEqsOnly sctx = emptyCCan
| otherwise = fst (getRelevantCts clas (inert_dicts inert))
get_inert_cts (EqPred {})
= inert_eqs inert `unionBags` cCanMapToBag (inert_funeqs inert)
-dischargeFromCCans :: CanonicalCts -> FlavoredEvVar -> TcS Bool
+dischargeFromCCans :: CanonicalCts -> EvVar -> CtFlavor -> TcS Bool
-- See if this (pre-canonicalised) work-item is identical to a
-- one already in the inert set. Reasons:
-- a) Avoid creating superclass constraints for millions of incoming (Num a) constraints
-- b) Termination for improve_eqs in TcSimplify.simpl_loop
-dischargeFromCCans cans (EvVarX ev fl)
+dischargeFromCCans cans ev fl
= Bag.foldrBag discharge_ct (return False) cans
where
the_pred = evVarPred ev
, text "Max depth =" <+> ppr max_depth
, text "ws =" <+> ppr ws ]
- -- Solve equalities first
- ; let (eqs, non_eqs) = Bag.partitionBag isCTyEqCan ws
- ; is_from_eqs <- Bag.foldrBagM (solveOneWithDepth ctxt) inert eqs
- ; Bag.foldrBagM (solveOneWithDepth ctxt) is_from_eqs non_eqs }
- -- use foldr to preserve the order
+
+ ; foldrWorkListM (solveOneWithDepth ctxt) inert ws }
+ -- use foldr to preserve the order
------------------
-- Fully interact the given work item with an inert set, and return a
interactWithInertEqsStage :: SimplifierStage
interactWithInertEqsStage depth workItem inert
= Bag.foldrBagM (interactNext depth) initITR (inert_eqs inert)
- -- use foldr to preserve the order
+ -- use foldr to preserve the order
where
initITR = SR { sr_inerts = inert { inert_eqs = emptyCCan }
, sr_new_work = emptyWorkList
= text rule <+> keep_doc
<+> vcat [ ptext (sLit "Inert =") <+> ppr inert
, ptext (sLit "Work =") <+> ppr work_item
- , ppUnless (isEmptyBag new_work) $
+ , ppUnless (isEmptyWorkList new_work) $
ptext (sLit "New =") <+> ppr new_work ]
keep_doc = case inert_action of
KeepInert -> ptext (sLit "[keep]")
DropInert -> inerts
; return $ SR { sr_inerts = inerts_new
- , sr_new_work = sr_new_work it `unionWorkLists` new_work
+ , sr_new_work = sr_new_work it `unionWorkList` new_work
, sr_stop = stop } }
| otherwise
= return $ it { sr_inerts = (sr_inerts it) `updInertSet` inert }
-- and put it back into the work-list
-- Maybe rather than starting again, we could *replace* the
-- inert item, but its safe and simple to restart
- ; mkIRStopD "Cls/Cls fundep (solved)" (inert_w `consBag` fd_work) }
-
+ ; mkIRStopD "Cls/Cls fundep (solved)" $
+ workListFromNonEq inert_w `unionWorkList` fd_work }
| otherwise
-> do { setDictBind d2 (EvCast d1 dict_co)
; mkIRStopK "Cls/Cls fundep (solved)" fd_work }
Wanted {} -> setDictBind d2 (EvCast d2' dict_co)
Derived {} -> return ()
; let workItem' = workItem { cc_id = d2', cc_tyargs = rewritten_tys2 }
- ; mkIRStopK "Cls/Cls fundep (partial)" (workItem' `consBag` fd_work) }
+ ; mkIRStopK "Cls/Cls fundep (partial)" $
+ workListFromNonEq workItem' `unionWorkList` fd_work }
where
dict_co = mkTyConCoercion (classTyCon cls1) cos2
| wfl `canRewrite` ifl
, tv `elemVarSet` tyVarsOfTypes xis
= do { rewritten_dict <- rewriteDict (cv,tv,xi) (dv,ifl,cl,xis)
- ; mkIRContinue "Cls/Eq" workItem DropInert (workListFromCCan rewritten_dict) }
+ ; mkIRContinue "Cls/Eq" workItem DropInert (workListFromNonEq rewritten_dict) }
-- Class constraint and given equality: use the equality to rewrite
-- the class constraint.
| wfl `canRewrite` ifl
, tv `elemVarSet` tyVarsOfType ty
= do { rewritten_ip <- rewriteIP (cv,tv,xi) (ipid,ifl,nm,ty)
- ; mkIRContinue "IP/Eq" workItem DropInert (workListFromCCan rewritten_ip) }
+ ; mkIRContinue "IP/Eq" workItem DropInert (workListFromNonEq rewritten_ip) }
-- Two implicit parameter constraints. If the names are the same,
-- but their types are not, we generate a wanted type equality
| ifl `canRewrite` wfl
, tv `elemVarSet` tyVarsOfTypes (xi2:args) -- Rewrite RHS as well
= do { rewritten_funeq <- rewriteFunEq (cv1,tv,xi1) (cv2,wfl,tc,args,xi2)
- ; mkIRStopK "Eq/FunEq" (workListFromCCan rewritten_funeq) }
+ ; mkIRStopK "Eq/FunEq" (workListFromEq rewritten_funeq) }
-- Must Stop here, because we may no longer be inert after the rewritting.
-- Inert: function equality, work item: equality
| wfl `canRewrite` ifl
, tv `elemVarSet` tyVarsOfTypes (xi1:args) -- Rewrite RHS as well
= do { rewritten_funeq <- rewriteFunEq (cv2,tv,xi2) (cv1,ifl,tc,args,xi1)
- ; mkIRContinue "FunEq/Eq" workItem DropInert (workListFromCCan rewritten_funeq) }
+ ; mkIRContinue "FunEq/Eq" workItem DropInert (workListFromEq rewritten_funeq) }
-- One may think that we could (KeepTransformedInert rewritten_funeq)
-- but that is wrong, because it may end up not being inert with respect
-- to future inerts. Example:
| Just tv2' <- tcGetTyVar_maybe xi2'
, tv2 == tv2' -- In this case xi2[xi1/tv1] = tv2, so we have tv2~tv2
= do { when (isWanted gw) (setCoBind cv2 (mkSymCoercion co2'))
- ; return emptyCCan }
+ ; return emptyWorkList }
| otherwise
= do { cv2' <- newCoVar (mkTyVarTy tv2) xi2'
; case gw of
Given {} -> setCoBind cv2' $ mkCoVarCoercion cv2 `mkTransCoercion`
co2'
Derived {} -> return ()
- ; canEq gw cv2' (mkTyVarTy tv2) xi2' }
+ ; canEqToWorkList gw cv2' (mkTyVarTy tv2) xi2' }
where
xi2' = substTyWith [tv1] [xi1] xi2
co2' = substTyWith [tv1] [mkCoVarCoercion cv1] xi2 -- xi2 ~ xi2[xi1/tv1]
Derived {} -> return ()
- ; return (singleCCan $ CFrozenErr { cc_id = cv2', cc_flavor = fl2 }) }
+ ; return (workListFromNonEq $ CFrozenErr { cc_id = cv2', cc_flavor = fl2 }) }
where
(ty2a, ty2b) = coVarKind cv2 -- cv2 : ty2a ~ ty2b
ty2a' = substTyWith [tv1] [xi1] ty2a
; let workItem' = CDictCan { cc_id = dv', cc_flavor = fl,
cc_class = cls, cc_tyargs = xis' }
; return $
- SomeTopInt { tir_new_work = singleCCan workItem' `andCCan` fd_work
+ SomeTopInt { tir_new_work = workListFromNonEq workItem' `unionWorkList` fd_work
, tir_new_inert = Stop } } }
GenInst wtvs ev_term -- Solved
| otherwise
= do { meta_details <- readMutVar ref;
- ; WARN( not (isFlexi meta_details),
- hang (text "Double update of meta tyvar")
+ ; ASSERT2( isFlexi meta_details,
+ hang (text "Double update of meta tyvar")
2 (ppr tyvar $$ ppr meta_details) )
traceTc "writeMetaTyVar" (ppr tyvar <+> text ":=" <+> ppr ty)
#ifdef GHCI
tcRnStmt, tcRnExpr, tcRnType,
tcRnLookupRdrName,
- getModuleExports,
+ getModuleExports,
#endif
+ tcRnImports,
tcRnLookupName,
tcRnGetInfo,
tcRnModule,
isCDictCan_Maybe, isCIPCan_Maybe, isCFunEqCan_Maybe,
isCFrozenErr,
+ WorkList, unionWorkList, unionWorkLists, isEmptyWorkList, emptyWorkList,
+ workListFromEq, workListFromNonEq,
+ workListFromEqs, workListFromNonEqs, foldrWorkListM,
+
CanonicalCt(..), Xi, tyVarsOfCanonical, tyVarsOfCanonicals, tyVarsOfCDicts,
deCanonicalise, mkFrozenError,
isCFrozenErr :: CanonicalCt -> Bool
isCFrozenErr (CFrozenErr {}) = True
isCFrozenErr _ = False
+
+
+-- A mixture of Given, Wanted, and Derived constraints.
+-- We split between equalities and the rest to process equalities first.
+data WorkList = WorkList { weqs :: CanonicalCts
+ -- NB: weqs includes equalities /and/ family equalities
+ , wrest :: CanonicalCts }
+
+-- Union two work lists, keeping the equality and non-equality parts separate.
+unionWorkList :: WorkList -> WorkList -> WorkList
+unionWorkList wl1 wl2
+ = WorkList { weqs = weqs wl1 `andCCan` weqs wl2
+ , wrest = wrest wl1 `andCCan` wrest wl2 }
+
+-- Union a whole list of work lists.
+unionWorkLists :: [WorkList] -> WorkList
+unionWorkLists = foldr unionWorkList emptyWorkList
+
+-- True iff both the equality and non-equality parts are empty.
+isEmptyWorkList :: WorkList -> Bool
+isEmptyWorkList wl = isEmptyCCan (weqs wl) && isEmptyCCan (wrest wl)
+
+-- The empty work list.
+emptyWorkList :: WorkList
+emptyWorkList
+ = WorkList { weqs = emptyBag, wrest = emptyBag }
+
+-- A singleton work list holding one equality constraint.
+workListFromEq :: CanonicalCt -> WorkList
+workListFromEq = workListFromEqs . singleCCan
+
+-- A singleton work list holding one non-equality constraint.
+workListFromNonEq :: CanonicalCt -> WorkList
+workListFromNonEq = workListFromNonEqs . singleCCan
+
+-- Build a work list whose non-equality part is the given constraints.
+workListFromNonEqs :: CanonicalCts -> WorkList
+workListFromNonEqs cts
+ = WorkList { weqs = emptyCCan, wrest = cts }
+
+-- Build a work list whose equality part is the given constraints.
+workListFromEqs :: CanonicalCts -> WorkList
+workListFromEqs cts
+ = WorkList { weqs = cts, wrest = emptyCCan }
+
+-- Monadic right fold over a work list, visiting all the
+-- equalities before any of the other constraints.
+foldrWorkListM :: (Monad m) => (CanonicalCt -> r -> m r)
+ -> r -> WorkList -> m r
+-- Prioritizes equalities
+foldrWorkListM on_ct r (WorkList {weqs = eqs, wrest = rest })
+ = do { r1 <- foldrBagM on_ct r eqs
+ ; foldrBagM on_ct r1 rest }
+
+instance Outputable WorkList where
+ ppr wl = vcat [ text "WorkList (Equalities) = " <+> ppr (weqs wl)
+ , text "WorkList (Other) = " <+> ppr (wrest wl) ]
+
\end{code}
+
+
%************************************************************************
%* *
CtFlavor
import VarSet\r
import VarEnv\r
import PrelNames\r
+import StaticFlags ( opt_NoOptCoercion )\r
import Util\r
import Outputable\r
\end{code}\r
optCoercion :: TvSubst -> Coercion -> NormalCo\r
-- ^ optCoercion applies a substitution to a coercion, \r
-- *and* optimises it to reduce its size\r
-optCoercion env co = opt_co env False co\r
+optCoercion env co \r
+ | opt_NoOptCoercion = substTy env co\r
+ | otherwise = opt_co env False co\r
\r
type NormalCo = Coercion\r
-- Invariants: \r
* Translation of type instance decl:
type instance F [a] = Maybe a
- translates to
- A SynTyCon 'R:FList a', whose
+ translates to a "representation TyCon", 'R:FList', where
+ R:FList is a SynTyCon, whose
SynTyConRhs is (SynonymTyCon (Maybe a))
TyConParent is (FamInstTyCon F [a] co)
where co :: F [a] ~ R:FList a
- Notice that we introduce a gratuitous vanilla type synonym
+
+ It's very much as if the user had written
+ type instance F [a] = R:FList a
type R:FList a = Maybe a
- solely so that type and data families can be treated more
- uniformly, via a single FamInstTyCon descriptor
+ Indeed, in GHC's internal representation, the RHS of every
+ 'type instance' is simply an application of the representation
+ TyCon to the quantified variables.
+
+ The intermediate representation TyCon is a bit gratuitous, but
+ it means that:
+
+ each 'type instance' decl is in 1-1 correspondence
+ with its representation TyCon
+
+ So the result of typechecking a 'type instance' decl is just a
+ TyCon. In turn this means that type and data families can be
+ treated uniformly.
* In the future we might want to support
* closed type families (esp when we have proper kinds)
-- * Type free variables
tyVarsOfType, tyVarsOfTypes, tyVarsOfPred, tyVarsOfTheta,
- expandTypeSynonyms,
+ expandTypeSynonyms,
+ typeSize,
-- * Type comparison
coreEqType, coreEqType2,
%************************************************************************
%* *
+ Size
+%* *
+%************************************************************************
+
+\begin{code}
+-- A rough size measure on types: counts one for each type variable
+-- occurrence, forall, type-constructor application, implicit parameter,
+-- and class predicate.  Note that AppTy, FunTy and EqPred just sum
+-- their components without adding one for the node itself.
+typeSize :: Type -> Int
+typeSize (TyVarTy _) = 1
+typeSize (AppTy t1 t2) = typeSize t1 + typeSize t2
+typeSize (FunTy t1 t2) = typeSize t1 + typeSize t2
+typeSize (PredTy p) = predSize p
+typeSize (ForAllTy _ t) = 1 + typeSize t
+typeSize (TyConApp _ ts) = 1 + sum (map typeSize ts)
+
+-- Size measure on predicates, companion to typeSize above.
+predSize :: PredType -> Int
+predSize (IParam _ t) = 1 + typeSize t
+predSize (ClassP _ ts) = 1 + sum (map typeSize ts)
+predSize (EqPred t1 t2) = typeSize t1 + typeSize t2
+\end{code}
+
+
+%************************************************************************
+%* *
\subsection{Type families}
%* *
%************************************************************************
-- add back conflict edges from other nodes to this one
map_conflict
= foldUniqSet
- (adjustUFM (\n -> n { nodeConflicts = addOneToUniqSet (nodeConflicts n) k}))
+ (adjustUFM_C (\n -> n { nodeConflicts = addOneToUniqSet (nodeConflicts n) k}))
(graphMap graph)
(nodeConflicts node)
-- add back coalesce edges from other nodes to this one
map_coalesce
= foldUniqSet
- (adjustUFM (\n -> n { nodeCoalesce = addOneToUniqSet (nodeCoalesce n) k}))
+ (adjustUFM_C (\n -> n { nodeCoalesce = addOneToUniqSet (nodeCoalesce n) k}))
map_conflict
(nodeCoalesce node)
else node -- panic "GraphOps.freezeNode: edge to freeze wasn't in the coalesce set"
-- If the edge isn't actually in the coelesce set then just ignore it.
- fm2 = foldUniqSet (adjustUFM (freezeEdge k)) fm1
+ fm2 = foldUniqSet (adjustUFM_C (freezeEdge k)) fm1
$ nodeCoalesce node
in fm2
setColor u color
= graphMapModify
- $ adjustUFM
+ $ adjustUFM_C
(\n -> n { nodeColor = Just color })
u
map
k def
-{-# INLINE adjustUFM #-}
-adjustUFM
+-- Argument order different from UniqFM's adjustUFM
+{-# INLINE adjustUFM_C #-}
+adjustUFM_C
:: Uniquable k
=> (a -> a)
-> k -> UniqFM a -> UniqFM a
-adjustUFM f k map
+adjustUFM_C f k map
= case lookupUFM map k of
Nothing -> map
Just a -> addToUFM map k (f a)
addListToUFM,addListToUFM_C,
addToUFM_Directly,
addListToUFM_Directly,
+ adjustUFM,
+ adjustUFM_Directly,
delFromUFM,
delFromUFM_Directly,
delListFromUFM,
intersectUFM,
intersectUFM_C,
foldUFM, foldUFM_Directly,
- mapUFM,
+ mapUFM, mapUFM_Directly,
elemUFM, elemUFM_Directly,
filterUFM, filterUFM_Directly,
sizeUFM,
lookupUFM, lookupUFM_Directly,
lookupWithDefaultUFM, lookupWithDefaultUFM_Directly,
eltsUFM, keysUFM, splitUFM,
- ufmToList
+ ufmToList,
+ joinUFM
) where
import Unique ( Uniquable(..), Unique, getKey )
import Outputable
+import Compiler.Hoopl hiding (Unique)
+
import qualified Data.IntMap as M
\end{code}
-> UniqFM elt -> [(key,elt)]
-> UniqFM elt
+adjustUFM :: Uniquable key => (elt -> elt) -> UniqFM elt -> key -> UniqFM elt
+adjustUFM_Directly :: (elt -> elt) -> UniqFM elt -> Unique -> UniqFM elt
+
delFromUFM :: Uniquable key => UniqFM elt -> key -> UniqFM elt
delListFromUFM :: Uniquable key => UniqFM elt -> [key] -> UniqFM elt
delFromUFM_Directly :: UniqFM elt -> Unique -> UniqFM elt
foldUFM :: (elt -> a -> a) -> a -> UniqFM elt -> a
foldUFM_Directly:: (Unique -> elt -> a -> a) -> a -> UniqFM elt -> a
mapUFM :: (elt1 -> elt2) -> UniqFM elt1 -> UniqFM elt2
+mapUFM_Directly :: (Unique -> elt1 -> elt2) -> UniqFM elt1 -> UniqFM elt2
filterUFM :: (elt -> Bool) -> UniqFM elt -> UniqFM elt
filterUFM_Directly :: (Unique -> elt -> Bool) -> UniqFM elt -> UniqFM elt
UFM (M.insertWith (\_new old -> exi v old) (getKey $ getUnique k) (new v) m)
addListToUFM_C f = foldl (\m (k, v) -> addToUFM_C f m k v)
+adjustUFM f (UFM m) k = UFM (M.adjust f (getKey $ getUnique k) m)
+adjustUFM_Directly f (UFM m) u = UFM (M.adjust f (getKey u) m)
+
delFromUFM (UFM m) k = UFM (M.delete (getKey $ getUnique k) m)
delListFromUFM = foldl delFromUFM
delFromUFM_Directly (UFM m) u = UFM (M.delete (getKey u) m)
foldUFM k z (UFM m) = M.fold k z m
foldUFM_Directly k z (UFM m) = M.foldWithKey (k . getUnique) z m
mapUFM f (UFM m) = UFM (M.map f m)
+mapUFM_Directly f (UFM m) = UFM (M.mapWithKey (f . getUnique) m)
filterUFM p (UFM m) = UFM (M.filter p m)
filterUFM_Directly p (UFM m) = UFM (M.filterWithKey (p . getUnique) m)
eltsUFM (UFM m) = M.elems m
ufmToList (UFM m) = map (\(k, v) -> (getUnique k, v)) $ M.toList m
+-- Hoopl
+-- Lift a Hoopl join function on elements to a join function on
+-- UniqFMs: each fact in the new map is joined against the old map.
+-- Keys present only in the new map are added (SomeChange); keys
+-- present only in the old map are left as they are.
+joinUFM :: JoinFun v -> JoinFun (UniqFM v)
+joinUFM eltJoin l (OldFact old) (NewFact new) = foldUFM_Directly add (NoChange, old) new
+ where add k new_v (ch, joinmap) =
+ case lookupUFM_Directly joinmap k of
+ Nothing -> (SomeChange, addToUFM_Directly joinmap k new_v)
+ Just old_v -> case eltJoin l (OldFact old_v) (NewFact new_v) of
+ (SomeChange, v') -> (SomeChange, addToUFM_Directly joinmap k v')
+ (NoChange, _) -> (ch, joinmap)
+
\end{code}
%************************************************************************
Direction(..), reslash,
-- * Utils for defining Data instances
- abstractConstr, abstractDataType, mkNoRepType
+ abstractConstr, abstractDataType, mkNoRepType,
+
+ -- * Utils for printing C code
+ charToC
) where
#include "HsVersions.h"
import System.FilePath
import System.Time ( ClockTime )
-import Data.Char ( isUpper, isAlphaNum, isSpace, ord, isDigit )
+import Data.Char ( isUpper, isAlphaNum, isSpace, chr, ord, isDigit )
import Data.Ratio ( (%) )
import Data.Ord ( comparing )
import Data.Bits
abstractDataType n = mkDataType n [abstractConstr n]
\end{code}
+%************************************************************************
+%* *
+\subsection[Utils-C]{Utils for printing C code}
+%* *
+%************************************************************************
+
+\begin{code}
+-- Render one byte as it should appear inside a C string literal:
+-- double quote, single quote and backslash are backslash-escaped;
+-- other printable ASCII (' '..'~') passes through unchanged; anything
+-- else becomes a three-digit octal escape (\\ooo).
+charToC :: Word8 -> String
+charToC w =
+ case chr (fromIntegral w) of
+ '\"' -> "\\\""
+ '\'' -> "\\\'"
+ '\\' -> "\\\\"
+ c | c >= ' ' && c <= '~' -> [c]
+ | otherwise -> ['\\',
+ chr (ord '0' + ord c `div` 64),
+ chr (ord '0' + ord c `div` 8 `mod` 8),
+ chr (ord '0' + ord c `mod` 8)]
+\end{code}
-{-# OPTIONS -fno-warn-missing-signatures #-}
+{-# OPTIONS -fno-warn-missing-signatures -fno-warn-unused-do-bind #-}
module Vectorise ( vectorise )
where
--
vectTopBind :: CoreBind -> VM CoreBind
vectTopBind b@(NonRec var expr)
- = do
- (inline, _, expr') <- vectTopRhs [] var expr
- var' <- vectTopBinder var inline expr'
+ = do { -- Vectorise the right-hand side, create an appropriate top-level binding and add it to
+ -- the vectorisation map.
+ ; (inline, isScalar, expr') <- vectTopRhs [] var expr
+ ; var' <- vectTopBinder var inline expr'
+ ; when isScalar $
+ addGlobalScalar var
- -- Vectorising the body may create other top-level bindings.
- hs <- takeHoisted
-
- -- To get the same functionality as the original body we project
- -- out its vectorised version from the closure.
- cexpr <- tryConvert var var' expr
-
- return . Rec $ (var, cexpr) : (var', expr') : hs
+ -- We replace the original top-level binding by a value projected from the vectorised
+ -- closure and add any newly created hoisted top-level bindings.
+ ; cexpr <- tryConvert var var' expr
+ ; hs <- takeHoisted
+ ; return . Rec $ (var, cexpr) : (var', expr') : hs
+ }
`orElseV`
return b
-
vectTopBind b@(Rec bs)
- = do
- (vars', _, exprs')
- <- fixV $ \ ~(_, inlines, rhss) ->
- do vars' <- sequence [vectTopBinder var inline rhs
- | (var, ~(inline, rhs)) <- zipLazy vars (zip inlines rhss)]
- (inlines', areScalars', exprs')
- <- mapAndUnzip3M (uncurry $ vectTopRhs vars) bs
- if (and areScalars') || (length bs <= 1)
- then do
- return (vars', inlines', exprs')
- else do
- _ <- mapM deleteGlobalScalar vars
- (inlines'', _, exprs'') <- mapAndUnzip3M (uncurry $ vectTopRhs []) bs
- return (vars', inlines'', exprs'')
+ = let (vars, exprs) = unzip bs
+ in
+ do { (vars', _, exprs', hs) <- fixV $
+ \ ~(_, inlines, rhss, _) ->
+ do { -- Vectorise the right-hand sides, create an appropriate top-level bindings and
+ -- add them to the vectorisation map.
+ ; vars' <- sequence [vectTopBinder var inline rhs
+ | (var, ~(inline, rhs)) <- zipLazy vars (zip inlines rhss)]
+ ; (inlines, areScalars, exprs') <- mapAndUnzip3M (uncurry $ vectTopRhs vars) bs
+ ; hs <- takeHoisted
+ ; if and areScalars
+ then -- (1) Entire recursive group is scalar
+ -- => add all variables to the global set of scalars
+ do { mapM addGlobalScalar vars
+ ; return (vars', inlines, exprs', hs)
+ }
+ else -- (2) At least one binding is not scalar
+ -- => vectorise again with empty set of local scalars
+ do { (inlines, _, exprs') <- mapAndUnzip3M (uncurry $ vectTopRhs []) bs
+ ; hs <- takeHoisted
+ ; return (vars', inlines, exprs', hs)
+ }
+ }
- hs <- takeHoisted
- cexprs <- sequence $ zipWith3 tryConvert vars vars' exprs
- return . Rec $ zip vars cexprs ++ zip vars' exprs' ++ hs
+ -- Replace the original top-level bindings by a values projected from the vectorised
+ -- closures and add any newly created hoisted top-level bindings to the group.
+ ; cexprs <- sequence $ zipWith3 tryConvert vars vars' exprs
+ ; return . Rec $ zip vars cexprs ++ zip vars' exprs' ++ hs
+ }
`orElseV`
- return b
- where
- (vars, exprs) = unzip bs
+ return b
-- | Make the vectorised version of this top level binder, and add the mapping
-- between it and the original to the state. For some binder @foo@ the vectorised
where
rhs _globalScalar (Just (_, expr')) -- Case (1)
= return (inlineMe, False, expr')
- rhs True _vectDecl -- Case (2)
- = return (inlineMe, True, scalarRHS)
- -- FIXME: that True is not enough to register scalarness
- rhs False _vectDecl -- Case (3)
+ rhs True Nothing -- Case (2)
+ = do { expr' <- vectScalarFun True recFs expr
+ ; return (inlineMe, True, vectorised expr')
+ }
+ rhs False Nothing -- Case (3)
= do { let fvs = freeVars expr
; (inline, isScalar, vexpr) <- inBind var $
vectPolyExpr (isLoopBreaker $ idOccInfo var) recFs fvs
- ; if isScalar
- then addGlobalScalar var
- else deleteGlobalScalar var
; return (inline, isScalar, vectorised vexpr)
}
-
- -- For scalar right-hand sides, we know that the original binding will remain unaltered
- -- (hence, we can refer to it without risk of cycles) - cf, 'tryConvert'.
- scalarRHS = panic "Vectorise.scalarRHS: not implemented yet"
-- | Project out the vectorised version of a binding from some closure,
-- or return the original body if that doesn't work or the binding is scalar.
-- These are things the exist at top-level.
data GlobalEnv
= GlobalEnv {
- -- | Mapping from global variables to their vectorised versions.
+ -- | Mapping from global variables to their vectorised versions — aka the /vectorisation
+ -- map/.
global_vars :: VarEnv Var
-- | Mapping from global variables that have a vectorisation declaration to the right-hand
-- | Vectorisation of expressions.
-module Vectorise.Exp
- (vectPolyExpr)
-where
-import Vectorise.Utils
+module Vectorise.Exp (
+
+ -- Vectorise a polymorphic expression
+ vectPolyExpr,
+
+ -- Vectorise a scalar expression of functional type
+ vectScalarFun
+) where
+
+#include "HsVersions.h"
+
import Vectorise.Type.Type
import Vectorise.Var
import Vectorise.Vect
import Vectorise.Env
import Vectorise.Monad
import Vectorise.Builtins
+import Vectorise.Utils
import CoreSyn
import CoreUtils
-- | Vectorise an expression with an outer lambda abstraction.
--
-vectFnExpr :: Bool -- ^ When the RHS of a binding, whether that binding should be inlined.
- -> Bool -- ^ Whether the binding is a loop breaker.
- -> [Var]
- -> CoreExprWithFVs -- ^ Expression to vectorise. Must have an outer `AnnLam`.
+vectFnExpr :: Bool -- ^ If we process the RHS of a binding, whether that binding should
+ -- be inlined
+ -> Bool -- ^ Whether the binding is a loop breaker
+ -> [Var] -- ^ Names of functions in same recursive binding group
+ -> CoreExprWithFVs -- ^ Expression to vectorise; must have an outer `AnnLam`
-> VM (Inline, Bool, VExpr)
-vectFnExpr inline loop_breaker recFns e@(fvs, AnnLam bndr _)
- | isId bndr = onlyIfV True -- (isEmptyVarSet fvs) -- we check for free variables later. TODO: clean up
- (mark DontInline True . vectScalarLam bs recFns $ deAnnotate body)
- `orElseV` mark inlineMe False (vectLam inline loop_breaker fvs bs body)
- where
- (bs,body) = collectAnnValBinders e
+vectFnExpr inline loop_breaker recFns expr@(_fvs, AnnLam bndr _)
+ | isId bndr = mark DontInline True (vectScalarFun False recFns (deAnnotate expr))
+ `orElseV`
+ mark inlineMe False (vectLam inline loop_breaker expr)
vectFnExpr _ _ _ e = mark DontInline False $ vectExpr e
mark :: Inline -> Bool -> VM a -> VM (Inline, Bool, a)
mark b isScalarFn p = do { x <- p; return (b, isScalarFn, x) }
-
--- | Vectorise a function where are the args have scalar type,
--- that is Int, Float, Double etc.
-vectScalarLam
- :: [Var] -- ^ Bound variables of function
- -> [Var]
- -> CoreExpr -- ^ Function body.
- -> VM VExpr
-
-vectScalarLam args recFns body
- = do scalars' <- globalScalars
- let scalars = unionVarSet (mkVarSet recFns) scalars'
- onlyIfV (all is_prim_ty arg_tys
- && is_prim_ty res_ty
- && is_scalar (extendVarSetList scalars args) body
- && uses scalars body)
- $ do
- fn_var <- hoistExpr (fsLit "fn") (mkLams args body) DontInline
- zipf <- zipScalars arg_tys res_ty
- clo <- scalarClosure arg_tys res_ty (Var fn_var)
- (zipf `App` Var fn_var)
- clo_var <- hoistExpr (fsLit "clo") clo DontInline
- lclo <- liftPD (Var clo_var)
- return (Var clo_var, lclo)
+-- |Vectorise an expression of functional type, where all arguments and the result are of scalar
+-- type (i.e., 'Int', 'Float', 'Double' etc.) and which does not contain any subcomputations that
+-- involve parallel arrays. Such functionals do not require the full-blown vectorisation
+-- transformation; instead, they can be lifted by application of a member of the zipWith family
+-- (i.e., 'map', 'zipWith', 'zipWith3', etc.)
+--
+vectScalarFun :: Bool -- ^ Was the function marked as scalar by the user?
+ -> [Var] -- ^ Function names in same recursive binding group
+ -> CoreExpr -- ^ Expression to be vectorised
+ -> VM VExpr
+vectScalarFun forceScalar recFns expr
+ = do { gscalars <- globalScalars
+ ; let scalars = gscalars `extendVarSetList` recFns
+ (arg_tys, res_ty) = splitFunTys (exprType expr)
+ ; MASSERT( not $ null arg_tys )
+ ; onlyIfV (forceScalar -- user asserts the function is scalar
+ ||
+ all is_prim_ty arg_tys -- check whether the function is scalar
+ && is_prim_ty res_ty
+ && is_scalar scalars expr
+ && uses scalars expr)
+ $ mkScalarFun arg_tys res_ty expr
+ }
where
- arg_tys = map idType args
- res_ty = exprType body
-
+ -- FIXME: This is woefully insufficient!!! We need a scalar pragma for types!!!
is_prim_ty ty
| Just (tycon, []) <- splitTyConApp_maybe ty
= tycon == intTyCon
|| tycon == floatTyCon
|| tycon == doubleTyCon
-
| otherwise = False
-
- cantbe_parr_expr expr = not $ maybe_parr_ty $ exprType expr
-
- maybe_parr_ty ty = maybe_parr_ty' [] ty
-
- maybe_parr_ty' _ ty | Nothing <- splitTyConApp_maybe ty = False -- TODO: is this really what we want to do with polym. types?
- maybe_parr_ty' alreadySeen ty
- | isPArrTyCon tycon = True
- | isPrimTyCon tycon = False
- | isAbstractTyCon tycon = True
- | isFunTyCon tycon || isProductTyCon tycon || isTupleTyCon tycon = any (maybe_parr_ty' alreadySeen) args
- | isDataTyCon tycon = any (maybe_parr_ty' alreadySeen) args ||
- hasParrDataCon alreadySeen tycon
- | otherwise = True
- where
- Just (tycon, args) = splitTyConApp_maybe ty
-
-
- hasParrDataCon alreadySeen tycon
- | tycon `elem` alreadySeen = False
- | otherwise =
- any (maybe_parr_ty' $ tycon : alreadySeen) $ concat $ map dataConOrigArgTys $ tyConDataCons tycon
-
- -- checks to make sure expression can't contain a non-scalar subexpression. Might err on the side of caution whenever
- -- an external (non data constructor) variable is used, or anonymous data constructor
- is_scalar vs e@(Var v)
- | Just _ <- isDataConId_maybe v = cantbe_parr_expr e
- | otherwise = cantbe_parr_expr e && (v `elemVarSet` vs)
- is_scalar _ e@(Lit _) = cantbe_parr_expr e
-
- is_scalar vs e@(App e1 e2) = cantbe_parr_expr e &&
- is_scalar vs e1 && is_scalar vs e2
- is_scalar vs e@(Let (NonRec b letExpr) body)
- = cantbe_parr_expr e &&
- is_scalar vs letExpr && is_scalar (extendVarSet vs b) body
- is_scalar vs e@(Let (Rec bnds) body)
- = let vs' = extendVarSetList vs (map fst bnds)
- in cantbe_parr_expr e &&
- all (is_scalar vs') (map snd bnds) && is_scalar vs' body
- is_scalar vs e@(Case eC eId ty alts)
- = let vs' = extendVarSet vs eId
- in cantbe_parr_expr e &&
- is_prim_ty ty &&
- is_scalar vs' eC &&
- (all (is_scalar_alt vs') alts)
-
- is_scalar _ _ = False
-
- is_scalar_alt vs (_, bs, e)
- = is_scalar (extendVarSetList vs bs) e
+ -- Checks whether an expression contain a non-scalar subexpression.
+ --
+ -- Precondition: The variables in the first argument are scalar.
+ --
+ -- In case of a recursive binding group, we /assume/ that all bindings are scalar (by adding
+ -- them to the list of scalar variables) and then check them. If one of them turns out not to
+ -- be scalar, the entire group is regarded as not being scalar.
+ --
+ -- FIXME: Currently, doesn't regard external (non-data constructor) variables and anonymous
+ -- data constructors as scalar. Should be changed once scalar types are passed
+ -- through VectInfo.
+ --
+ is_scalar :: VarSet -> CoreExpr -> Bool
+ is_scalar scalars (Var v) = v `elemVarSet` scalars
+ is_scalar _scalars (Lit _) = True
+ is_scalar scalars e@(App e1 e2)
+ | maybe_parr_ty (exprType e) = False
+ | otherwise = is_scalar scalars e1 && is_scalar scalars e2
+ is_scalar scalars (Lam var body)
+ | maybe_parr_ty (varType var) = False
+ | otherwise = is_scalar (scalars `extendVarSet` var) body
+ is_scalar scalars (Let bind body) = bindsAreScalar && is_scalar scalars' body
+ where
+ (bindsAreScalar, scalars') = is_scalar_bind scalars bind
+ is_scalar scalars (Case e var ty alts)
+ | is_prim_ty ty = is_scalar scalars' e && all (is_scalar_alt scalars') alts
+ | otherwise = False
+ where
+ scalars' = scalars `extendVarSet` var
+ is_scalar scalars (Cast e _coe) = is_scalar scalars e
+ is_scalar scalars (Note _ e ) = is_scalar scalars e
+ is_scalar _scalars (Type _) = True
+
+ -- Result: (<is this binding group scalar>, scalars ++ variables bound in this group)
+ is_scalar_bind scalars (NonRec var e) = (is_scalar scalars e, scalars `extendVarSet` var)
+ is_scalar_bind scalars (Rec bnds) = (all (is_scalar scalars') es, scalars')
+ where
+ (vars, es) = unzip bnds
+ scalars' = scalars `extendVarSetList` vars
+
+ is_scalar_alt scalars (_, vars, e) = is_scalar (scalars `extendVarSetList ` vars) e
+
+ -- Checks whether the type might be a parallel array type. In particular, if the outermost
+ -- constructor is a type family, we conservatively assume that it may be a parallel array type.
+ maybe_parr_ty :: Type -> Bool
+ maybe_parr_ty ty
+ | Just ty' <- coreView ty = maybe_parr_ty ty'
+ | Just (tyCon, _) <- splitTyConApp_maybe ty = isPArrTyCon tyCon || isSynFamilyTyCon tyCon
+ maybe_parr_ty _ = False
+
+ -- FIXME: I'm not convinced that this reasoning is (always) sound. If the identity function
+ -- is called by some other function that is otherwise scalar, it would be very bad
+ -- that just this call to the identity makes it not be scalar.
-- A scalar function has to actually compute something. Without the check,
-- we would treat (\(x :: Int) -> x) as a scalar function and lift it to
-- (map (\x -> x)) which is very bad. Normal lifting transforms it to
-- (\n# x -> x) which is what we want.
- uses funs (Var v) = v `elemVarSet` funs
- uses funs (App e1 e2) = uses funs e1 || uses funs e2
+ uses funs (Var v) = v `elemVarSet` funs
+ uses funs (App e1 e2) = uses funs e1 || uses funs e2
+ uses funs (Lam b body) = uses (funs `extendVarSet` b) body
uses funs (Let (NonRec _b letExpr) body)
- = uses funs letExpr || uses funs body
+ = uses funs letExpr || uses funs body
uses funs (Case e _eId _ty alts)
- = uses funs e || any (uses_alt funs) alts
- uses _ _ = False
+ = uses funs e || any (uses_alt funs) alts
+ uses _ _ = False
- uses_alt funs (_, _bs, e)
- = uses funs e
+ uses_alt funs (_, _bs, e) = uses funs e
+
+-- Build the vectorised form of a scalar function: hoist the original
+-- expression to a top-level binding, pair it with the appropriate
+-- zipWith-family lifting ('zipScalars'), wrap both in a closure
+-- ('scalarClosure'), hoist the closure, and return the closure
+-- variable together with its lifted ('liftPD') version.
+mkScalarFun :: [Type] -> Type -> CoreExpr -> VM VExpr
+mkScalarFun arg_tys res_ty expr
+ = do { fn_var <- hoistExpr (fsLit "fn") expr DontInline
+ ; zipf <- zipScalars arg_tys res_ty
+ ; clo <- scalarClosure arg_tys res_ty (Var fn_var) (zipf `App` Var fn_var)
+ ; clo_var <- hoistExpr (fsLit "clo") clo DontInline
+ ; lclo <- liftPD (Var clo_var)
+ ; return (Var clo_var, lclo)
+ }
-- | Vectorise a lambda abstraction.
-vectLam
- :: Bool -- ^ When the RHS of a binding, whether that binding should be inlined.
- -> Bool -- ^ Whether the binding is a loop breaker.
- -> VarSet -- ^ The free variables in the body.
- -> [Var] -- ^ Binding variables.
- -> CoreExprWithFVs -- ^ Body of abstraction.
- -> VM VExpr
-
-vectLam inline loop_breaker fvs bs body
- = do tyvars <- localTyVars
+--
+vectLam :: Bool -- ^ When the RHS of a binding, whether that binding should be inlined.
+ -> Bool -- ^ Whether the binding is a loop breaker.
+ -> CoreExprWithFVs -- ^ Body of abstraction.
+ -> VM VExpr
+vectLam inline loop_breaker expr@(fvs, AnnLam _ _)
+ = do let (bs, body) = collectAnnValBinders expr
+
+ tyvars <- localTyVars
(vs, vvs) <- readLEnv $ \env ->
unzip [(var, vv) | var <- varSetElems fvs
, Just vv <- [lookupVarEnv (local_vars env) var]]
(LitAlt (mkMachInt 0), [], empty)])
| otherwise = return (ve, le)
+vectLam _ _ _ = panic "vectLam"
vectTyAppExpr :: CoreExprWithFVs -> [Type] -> VM VExpr
;;
esac
+# Testing if we shall enable shared libs support on Solaris.
+# Anything older than SunOS 5.11 aka Solaris 11 (Express) is broken.
+
+SOLARIS_BROKEN_SHLD=NO
+
+case $host in
+ i386-*-solaris2)
+ # here we go with the test
+ MINOR=`uname -r|cut -d '.' -f 2-`
+ if test "$MINOR" -lt "11"; then
+ SOLARIS_BROKEN_SHLD=YES
+ fi
+ ;;
+esac
+
# Sync this with cTargetArch in compiler/ghc.mk
checkArch() {
case $1 in
# Verify that the installed (bootstrap) GHC is capable of generating
# code for the requested build platform.
-if test "$build" != "$bootstrap_target"
+if test "$BuildPlatform" != "$bootstrap_target"
then
echo "This GHC (${WithGhc}) does not generate code for the build platform"
echo " GHC target platform : $bootstrap_target"
exit 1
fi
+echo "GHC build : $BuildPlatform"
+echo "GHC host : $HostPlatform"
+echo "GHC target : $TargetPlatform"
+
AC_SUBST(BuildPlatform)
AC_SUBST(HostPlatform)
AC_SUBST(TargetPlatform)
AC_SUBST(exeext)
AC_SUBST(soext)
+AC_SUBST(SOLARIS_BROKEN_SHLD)
+
AC_ARG_WITH(hc,
[AC_HELP_STRING([--with-hc=ARG],
[Use ARG as the path to the compiler for compiling ordinary
SplitObjsBroken=NO
if test "$TargetOS_CPP" = "darwin"
then
+ AC_MSG_CHECKING(XCode version)
XCodeVersion=`xcodebuild -version | grep Xcode | sed "s/Xcode //"`
- XCodeVersion1=`echo "$XCodeVersion" | sed 's/\..*//'`
- XCodeVersion2=`echo "$XCodeVersion" | sed 's/.*\.//'`
# Old XCode versions don't actually give the XCode version
if test "$XCodeVersion" = ""
then
+ AC_MSG_RESULT(not found (too old?))
SplitObjsBroken=YES
- fi
- # Split objects is broken (#4013) with XCode < 3.2
- if test "$XCodeVersion1" -lt 3
- then
- SplitObjsBroken=YES
- fi
- if test "$XCodeVersion1" -eq 3 && test "$XCodeVersion2" -lt 2
- then
- SplitObjsBroken=YES
+ else
+ AC_MSG_RESULT($XCodeVersion)
+ XCodeVersion1=`echo "$XCodeVersion" | sed 's/\..*//'`
+changequote(, )dnl
+ XCodeVersion2=`echo "$XCodeVersion" | sed 's/[^.]*\.\([^.]*\).*/\1/'`
+changequote([, ])dnl
+ AC_MSG_NOTICE(XCode version component 1: $XCodeVersion1)
+ AC_MSG_NOTICE(XCode version component 2: $XCodeVersion2)
+ # Split objects is broken (#4013) with XCode < 3.2
+ if test "$XCodeVersion1" -lt 3
+ then
+ SplitObjsBroken=YES
+ else
+ if test "$XCodeVersion1" -eq 3
+ then
+ if test "$XCodeVersion2" -lt 2
+ then
+ SplitObjsBroken=YES
+ fi
+ fi
+ fi
fi
fi
AC_SUBST([SplitObjsBroken])
dnl ** check for ld, whether it has an -x option, and if it is GNU ld
FP_PROG_LD_X
FP_PROG_LD_IS_GNU
+FP_PROG_LD_BUILD_ID
dnl ** check for Apple-style dead-stripping support
dnl (.subsections-via-symbols assembler directive)
+++ /dev/null
-#!/usr/bin/perl -w
-
-use strict;
-
-# Usage:
-#
-# ./darcs-all [-q] [-s] [-i] [-r repo]
-# [--nofib] [--testsuite] [--checked-out] cmd [darcs flags]
-#
-# Applies the darcs command "cmd" to each repository in the tree.
-#
-# e.g.
-# ./darcs-all -r http://darcs.haskell.org/ghc get
-# To get any repos which do not exist in the local tree
-#
-# ./darcs-all -r ~/ghc-validate push
-# To push all your repos to the ~/ghc-validate tree
-#
-# ./darcs-all pull -a
-# To pull everything from the default repos
-#
-# ./darc-all push --dry-run
-# To see what local patches you have relative to the main repos
-#
-# -------------- Flags -------------------
-# -q says to be quite, and -s to be silent.
-#
-# -i says to ignore darcs errors and move on to the next repository
-#
-# -r repo says to use repo as the location of package repositories
-#
-# --checked-out says that the remote repo is in checked-out layout, as
-# opposed to the layout used for the main repo. By default a repo on
-# the local filesystem is assumed to be checked-out, and repos accessed
-# via HTTP or SSH are assumed to be in the main repo layout; use
-# --checked-out to override the latter.
-#
-# --nofib, --testsuite also get the nofib and testsuite repos respectively
-#
-# The darcs get flag you are most likely to want is --complete. By
-# default we pass darcs the --partial flag.
-#
-# ------------ Which repos to use -------------
-# darcs-all uses the following algorithm to decide which remote repos to use
-#
-# It always computes the remote repos from a single base, $repo_base
-# How is $repo_base set?
-# If you say "-r repo", then that's $repo_base
-# othewise $repo_base is set thus:
-# look in _darcs/prefs/defaultrepo,
-# and remove the trailing 'ghc'
-#
-# Then darcs-all iterates over the package found in the file
-# ./packages, which has entries like:
-# libraries/array packages/array darcs
-# or, in general
-# <local-path> <remote-path> <vcs>
-#
-# If $repo_base looks like a local filesystem path, or if you give
-# the --checked-out flag, darcs-all works on repos of form
-# $repo_base/<local-path>
-# otherwise darcs-all works on repos of form
-# $repo_base/<remote-path>
-# This logic lets you say
-# both darcs-all -r http://darcs.haskell.org/ghc-6.12 pull
-# and darcs-all -r ../HEAD pull
-# The latter is called a "checked-out tree".
-
-# NB: darcs-all *ignores* the defaultrepo of all repos other than the
-# root one. So the remote repos must be laid out in one of the two
-# formats given by <local-path> and <remote-path> in the file 'packages'.
-
-
-$| = 1; # autoflush stdout after each print, to avoid output after die
-
-my $defaultrepo;
-
-my $verbose = 2;
-my $ignore_failure = 0;
-my $want_remote_repo = 0;
-my $checked_out_flag = 0;
-
-my %tags;
-
-my @packages;
-
-# Figure out where to get the other repositories from.
-sub getrepo {
- my $basedir = ".";
- my $repo = $defaultrepo || `cat $basedir/_darcs/prefs/defaultrepo`;
- chomp $repo;
-
- my $repo_base;
- my $checked_out_tree;
-
- if ($repo =~ /^...*:/) {
- # HTTP or SSH
- # Above regex says "at least two chars before the :", to avoid
- # catching Win32 drives ("C:\").
- $repo_base = $repo;
-
- # --checked-out is needed if you want to use a checked-out repo
- # over SSH or HTTP
- if ($checked_out_flag) {
- $checked_out_tree = 1;
- } else {
- $checked_out_tree = 0;
- }
-
- # Don't drop the last part of the path if specified with -r, as
- # it expects repos of the form:
- #
- # http://darcs.haskell.org
- #
- # rather than
- #
- # http://darcs.haskell.org/ghc
- #
- if (!$defaultrepo) {
- $repo_base =~ s#/[^/]+/?$##;
- }
- }
- elsif ($repo =~ /^\/|\.\.\/|.:(\/|\\)/) {
- # Local filesystem, either absolute or relative path
- # (assumes a checked-out tree):
- $repo_base = $repo;
- $checked_out_tree = 1;
- }
- else {
- die "Couldn't work out repo";
- }
-
- return $repo_base, $checked_out_tree;
-}
-
-sub message {
- if ($verbose >= 2) {
- print "@_\n";
- }
-}
-
-sub warning {
- if ($verbose >= 1) {
- print "warning: @_\n";
- }
-}
-
-sub darcs {
- message "== running darcs @_";
- system ("darcs", @_) == 0
- or $ignore_failure
- or die "darcs failed: $?";
-}
-
-sub parsePackages {
- my @repos;
- my $lineNum;
-
- my ($repo_base, $checked_out_tree) = getrepo();
-
- open IN, "< packages" or die "Can't open packages file";
- @repos = <IN>;
- close IN;
-
- @packages = ();
- $lineNum = 0;
- foreach (@repos) {
- chomp;
- $lineNum++;
- if (/^([^# ]+) +([^ ]+) +([^ ]+) +([^ ]+) +([^ ]+)$/) {
- my %line;
- $line{"localpath"} = $1;
- $line{"tag"} = $2;
- $line{"remotepath"} = $3;
- $line{"vcs"} = $4;
- $line{"upstream"} = $5;
- push @packages, \%line;
- }
- elsif (! /^(#.*)?$/) {
- die "Bad content on line $lineNum of packages file: $_";
- }
- }
-}
-
-sub darcsall {
- my $localpath;
- my $remotepath;
- my $path;
- my $tag;
- my @repos;
- my $command = $_[0];
- my $line;
-
- my ($repo_base, $checked_out_tree) = getrepo();
-
- for $line (@packages) {
- $localpath = $$line{"localpath"};
- $tag = $$line{"tag"};
- $remotepath = $$line{"remotepath"};
-
- if ($checked_out_tree) {
- $path = "$repo_base/$localpath";
- }
- else {
- $path = "$repo_base/$remotepath";
- }
-
- if (-d "$localpath/_darcs") {
- if ($want_remote_repo) {
- if ($command =~ /^opt/) {
- # Allows ./darcs-all optimize --relink
- darcs (@_, "--repodir", $localpath, "--sibling=$path");
- } else {
- darcs (@_, "--repodir", $localpath, $path);
- }
- } else {
- darcs (@_, "--repodir", $localpath);
- }
- }
- elsif ($tag eq "-") {
- message "== Required repo $localpath is missing! Skipping";
- }
- else {
- message "== $localpath repo not present; skipping";
- }
- }
-}
-
-sub darcsget {
- my $r_flags;
- my $localpath;
- my $remotepath;
- my $path;
- my $tag;
- my @repos;
- my $line;
-
- my ($repo_base, $checked_out_tree) = getrepo();
-
- if (! grep /(?:--complete|--partial|--lazy)/, @_) {
- warning("adding --partial, to override use --complete");
- $r_flags = [@_, "--partial"];
- }
- else {
- $r_flags = \@_;
- }
-
- for $line (@packages) {
- $localpath = $$line{"localpath"};
- $tag = $$line{"tag"};
- $remotepath = $$line{"remotepath"};
-
- if ($checked_out_tree) {
- $path = "$repo_base/$localpath";
- }
- else {
- $path = "$repo_base/$remotepath";
- }
-
- if ($tags{$tag} eq 1) {
- if (-d $localpath) {
- warning("$localpath already present; omitting");
- }
- else {
- darcs (@$r_flags, $path, $localpath);
- }
- }
- }
-}
-
-sub darcsupstreampull {
- my $localpath;
- my $upstream;
- my $line;
-
- for $line (@packages) {
- $localpath = $$line{"localpath"};
- $upstream = $$line{"upstream"};
-
- if ($upstream ne "-") {
- if (-d $localpath) {
- darcs ("pull", @_, "--repodir", $localpath, $upstream);
- }
- }
- }
-}
-
-sub main {
- if (! -d "compiler") {
- die "error: darcs-all must be run from the top level of the ghc tree."
- }
-
- $tags{"-"} = 1;
- $tags{"dph"} = 1;
- $tags{"nofib"} = 0;
- $tags{"testsuite"} = 0;
- $tags{"extra"} = 0;
-
- while ($#_ ne -1) {
- my $arg = shift;
- # We handle -q here as well as lower down as we need to skip over it
- # if it comes before the darcs command
- if ($arg eq "-q") {
- $verbose = 1;
- }
- elsif ($arg eq "-s") {
- $verbose = 0;
- }
- elsif ($arg eq "-r") {
- $defaultrepo = shift;
- }
- elsif ($arg eq "-i") {
- $ignore_failure = 1;
- }
- # --nofib tells get to also grab the nofib repo.
- # It has no effect on the other commands.
- elsif ($arg eq "--nofib") {
- $tags{"nofib"} = 1;
- }
- elsif ($arg eq "--no-nofib") {
- $tags{"nofib"} = 0;
- }
- # --testsuite tells get to also grab the testsuite repo.
- # It has no effect on the other commands.
- elsif ($arg eq "--testsuite") {
- $tags{"testsuite"} = 1;
- }
- elsif ($arg eq "--no-testsuite") {
- $tags{"testsuite"} = 0;
- }
- # --dph tells get to also grab the dph repo.
- # It has no effect on the other commands.
- elsif ($arg eq "--dph") {
- $tags{"dph"} = 1;
- }
- elsif ($arg eq "--no-dph") {
- $tags{"dph"} = 0;
- }
- # --extralibs tells get to also grab the extra repos.
- # It has no effect on the other commands.
- elsif ($arg eq "--extra") {
- $tags{"extra"} = 1;
- }
- elsif ($arg eq "--no-extra") {
- $tags{"extra"} = 0;
- }
- # Use --checked-out if the remote repos are a checked-out tree,
- # rather than the master trees.
- elsif ($arg eq "--checked-out") {
- $checked_out_flag = 1;
- }
- else {
- unshift @_, $arg;
- if (grep /^-q$/, @_) {
- $verbose = 1;
- }
- last;
- }
- }
-
- if ($#_ eq -1) {
- die "What do you want to do?";
- }
- my $command = $_[0];
- parsePackages;
- if ($command eq "get") {
- darcsget @_;
- }
- elsif ($command eq "upstreampull") {
- shift;
- darcsupstreampull @_;
- }
- else {
- if ($command =~ /^(?:w|wh|wha|what|whats|whatsn|whatsne|whatsnew)$/) {
- # Hack around whatsnew failing if there are no changes
- $ignore_failure = 1;
- }
- if ($command =~ /^(pul|pus|sen|put|opt)/) {
- $want_remote_repo = 1;
- }
- darcsall @_;
- }
-}
-
-END {
- my $ec = $?;
-
- message "== Checking for old bytestring repo";
- if (-d "libraries/bytestring/_darcs") {
- if ((system "darcs annotate --repodir libraries/bytestring --match 'hash 20080118173113-3fd76-d5b74c04372a297b585ebea4e16d524551ce5035' > /dev/null 2> /dev/null") == 0) {
- print <<EOF;
-============================
-ATTENTION!
-
-You have an old bytestring repository in your GHC tree!
-
-Please remove it (e.g. "rm -r libraries/bytestring"), and the new
-version of bytestring will be used from a tarball instead.
-============================
-EOF
- }
- }
-
- message "== Checking for bytestring tarball";
- if (-d "libraries/bytestring" && not -d "libraries/bytestring/_darcs") {
- print <<EOF;
-============================
-ATTENTION!
-
-You have an old bytestring in your GHC tree!
-
-Please remove it (e.g. "rm -r libraries/bytestring"), and then run
-"./darcs-all get" to get the darcs repository.
-============================
-EOF
- }
-
- message "== Checking for unpulled tarball patches";
- if ((system "darcs annotate --match 'hash 20090930200358-3fd76-cab3bf4a0a9e3902eb6dd41f71712ad3a6a9bcd1' > /dev/null 2> /dev/null") == 0) {
- print <<EOF;
-============================
-ATTENTION!
-
-You have the unpulled tarball patches in your GHC tree!
-
-Please remove them:
- darcs unpull -p "Use mingw tarballs to get mingw on Windows"
-and say yes to each patch.
-============================
-EOF
- }
-
- $? = $ec;
-}
-
-main(@ARGV);
-
--- /dev/null
+
+module BuildInfo where
+
+import Control.Monad.State
+
+type BIMonad = StateT BuildInfo Maybe
+
+data BuildInfo = BuildInfo {
+ biThingVersionMap :: ThingVersionMap,
+ biThingHashMap :: ThingHashMap,
+ biMaybeWays :: Maybe Ways
+ }
+ deriving Show
+
+type ThingMap = [(String, String)]
+-- Mapping from thing (e.g. "Cabal") to version (e.g. "1.10.0.0")
+type ThingVersionMap = ThingMap
+-- Mapping from thing (e.g. "Cabal") to ABI hash
+-- (e.g. "e1f7c380581d61d42b0360d440cc35ed")
+type ThingHashMap = ThingMap
+-- The list of ways in the order the build system uses them, e.g.
+-- ["v", "p", "dyn"] => we have ".depend-v-p-dyn.haskell" files
+type Ways = [String]
+
+emptyBuildInfo :: Maybe Ways -> BuildInfo
+emptyBuildInfo mWays = BuildInfo {
+ biThingVersionMap = [],
+ biThingHashMap = [],
+ biMaybeWays = mWays
+ }
+
+addThingMap :: ThingMap -> String -> String -> Maybe ThingMap
+addThingMap mapping thing str
+ = case lookup thing mapping of
+ Just str' ->
+ if str == str'
+ then Just mapping
+ else Nothing
+ Nothing ->
+ Just ((thing, str) : mapping)
+
+getMaybeWays :: BIMonad (Maybe Ways)
+getMaybeWays = do st <- get
+ return $ biMaybeWays st
+
+haveThingVersion :: String -> String -> BIMonad ()
+haveThingVersion thing thingVersion
+ = do st <- get
+ case addThingMap (biThingVersionMap st) thing thingVersion of
+ Nothing -> fail "Inconsistent version"
+ Just tvm -> put $ st { biThingVersionMap = tvm }
+
+haveThingHash :: String -> String -> BIMonad ()
+haveThingHash thing thingHash
+ = do st <- get
+ case addThingMap (biThingHashMap st) thing thingHash of
+ Nothing -> fail "Inconsistent hash"
+ Just thm -> put $ st { biThingHashMap = thm }
+
--- /dev/null
+
+module Change where
+
+data FileChange = First Change
+ | Second Change
+ | Change Change
+
+data Change = DuplicateFile FilePath
+ | ExtraFile FilePath
+ | ExtraWay String
+ | ExtraThing String
+ | ThingVersionChanged String String String
+ | PermissionsChanged FilePath FilePath String String
+ | FileSizeChanged FilePath FilePath Integer Integer
+
+isSizeChange :: FileChange -> Bool
+isSizeChange (Change (FileSizeChanged {})) = True
+isSizeChange _ = False
+
+pprFileChange :: FileChange -> String
+pprFileChange (First p) = "First " ++ pprChange p
+pprFileChange (Second p) = "Second " ++ pprChange p
+pprFileChange (Change p) = "Change " ++ pprChange p
+
+pprChange :: Change -> String
+pprChange (DuplicateFile fp) = "Duplicate file: " ++ show fp
+pprChange (ExtraFile fp) = "Extra file: " ++ show fp
+pprChange (ExtraWay w) = "Extra way: " ++ show w
+pprChange (ExtraThing t) = "Extra thing: " ++ show t
+pprChange (ThingVersionChanged t v1 v2)
+ = "Version changed for " ++ show t ++ ":\n"
+ ++ " " ++ v1 ++ " -> " ++ v2
+pprChange (PermissionsChanged fp1 fp2 p1 p2)
+ = "Permissions changed:\n"
+ ++ " " ++ show fp1
+ ++ " " ++ show fp2
+ ++ " " ++ p1 ++ " -> " ++ p2
+pprChange (FileSizeChanged fp1 fp2 s1 s2)
+ = "Size changed:\n"
+ ++ " " ++ show fp1 ++ "\n"
+ ++ " " ++ show fp2 ++ "\n"
+ ++ " " ++ show s1 ++ " -> " ++ show s2
+
--- /dev/null
+
+module FilenameDescr where
+
+import Data.Either
+import Data.List
+
+import BuildInfo
+import Utils
+import Tar
+
+-- We can't just compare plain filenames, because versions numbers of GHC
+-- and the libraries will vary. So we use FilenameDescr instead, which
+-- abstracts out the version numbers.
+type FilenameDescr = [FilenameDescrBit]
+data FilenameDescrBit = VersionOf String
+ | HashOf String
+ | FP String
+ | Ways
+ deriving (Show, Eq, Ord)
+
+normalise :: FilenameDescr -> FilenameDescr
+normalise [] = []
+normalise [x] = [x]
+normalise (FP x1 : FP x2 : xs) = normalise (FP (x1 ++ x2) : xs)
+normalise (x : xs) = x : normalise xs
+
+-- Sanity check that the FilenameDescr matches the filename in the tar line
+checkContent :: BuildInfo -> (FilenameDescr, TarLine) -> Errors
+checkContent buildInfo (fd, tl)
+ = let fn = tlFileName tl
+ in case flattenFilenameDescr buildInfo fd of
+ Right fn' ->
+ if fn' == fn
+ then []
+ else ["checkContent: Can't happen: filename mismatch: " ++ show fn]
+ Left errs ->
+ errs
+
+flattenFilenameDescr :: BuildInfo -> FilenameDescr
+ -> Either Errors FilePath
+flattenFilenameDescr buildInfo fd = case partitionEithers (map f fd) of
+ ([], strs) -> Right (concat strs)
+ (errs, _) -> Left (concat errs)
+ where f (FP fp) = Right fp
+ f (VersionOf thing)
+ = case lookup thing (biThingVersionMap buildInfo) of
+ Just v -> Right v
+ Nothing -> Left ["Can't happen: thing has no version in mapping"]
+ f (HashOf thing)
+ = case lookup thing (biThingHashMap buildInfo) of
+ Just v -> Right v
+ Nothing -> Left ["Can't happen: thing has no hash in mapping"]
+ f Ways = case biMaybeWays buildInfo of
+ Just ways -> Right $ intercalate "-" ways
+ Nothing -> Left ["Can't happen: No ways, but Ways is used"]
+
--- /dev/null
+
+GHC = ghc
+
+compare: *.hs
+ "$(GHC)" -O --make -Wall -Werror $@
+
+.PHONY: clean
+clean:
+ rm -f *.o
+ rm -f *.hi
+ rm -f compare compare.exe
+
--- /dev/null
+
+module Tar where
+
+import Data.Either
+import Data.List
+import System.Exit
+import System.Process
+
+import Utils
+
+readTarLines :: FilePath -> IO [TarLine]
+readTarLines fp
+ = do (ec, out, err) <- readProcessWithExitCode "tar" ["-jtvf", fp] ""
+ case (ec, err) of
+ (ExitSuccess, []) ->
+ case parseTarLines fp out of
+ Left errs -> die errs
+ Right tls -> return tls
+ _ ->
+ die ["Failed running tar -jtvf " ++ show fp,
+ "Exit code: " ++ show ec,
+ "Stderr: " ++ show err]
+
+parseTarLines :: FilePath -> String -> Either Errors [TarLine]
+parseTarLines fp xs
+ = case partitionEithers (zipWith (parseTarLine fp) [1..] (lines xs)) of
+ ([], tls) -> Right tls
+ (errss, _) -> Left (intercalate [""] errss)
+
+data TarLine = TarLine {
+ tlPermissions :: String,
+ tlUser :: String,
+ tlGroup :: String,
+ tlSize :: Integer,
+ tlDateTime :: String,
+ tlFileName :: FilePath
+ }
+
+parseTarLine :: FilePath -> Int -> String -> Either Errors TarLine
+parseTarLine fp line str
+ = case re "^([^ ]+) ([^ ]+)/([^ ]+) +([0-9]+) ([^ ]+ [^ ]+) ([^ ]+)$"
+ str of
+ Just [perms, user, grp, sizeStr, dateTime, filename] ->
+ case maybeRead sizeStr of
+ Just size ->
+ Right $ TarLine {
+ tlPermissions = perms,
+ tlUser = user,
+ tlGroup = grp,
+ tlSize = size,
+ tlDateTime = dateTime,
+ tlFileName = filename
+ }
+ _ -> error "Can't happen: Can't parse size"
+ _ ->
+ Left ["In " ++ show fp ++ ", at line " ++ show line,
+ "Tar line doesn't parse: " ++ show str]
+
--- /dev/null
+
+module Utils where
+
+import Data.Function
+import Data.List
+import System.Exit
+import System.IO
+import Text.Regex.Posix
+
+die :: Errors -> IO a
+die errs = do mapM_ (hPutStrLn stderr) errs
+ exitFailure
+
+dieOnErrors :: Either Errors a -> IO a
+dieOnErrors (Left errs) = die errs
+dieOnErrors (Right x) = return x
+
+type Errors = [String]
+
+maybeRead :: Read a => String -> Maybe a
+maybeRead str = case reads str of
+ [(x, "")] -> Just x
+ _ -> Nothing
+
+re :: String -> String -> Maybe [String]
+re r str = case matchM r' str :: Maybe (String, String, String, [String]) of
+ Just (_, _, _, ms) -> Just ms
+ Nothing -> Nothing
+ where r' = makeRegex r :: Regex
+
+unSepList :: Eq a => a -> [a] -> [[a]]
+unSepList x xs = case break (x ==) xs of
+ (this, _ : xs') ->
+ this : unSepList x xs'
+ (this, []) ->
+ [this]
+
+sortByFst :: Ord a => [(a, b)] -> [(a, b)]
+sortByFst = sortBy (compare `on` fst)
+
--- /dev/null
+{-# LANGUAGE PatternGuards #-}
+
+module Main (main) where
+
+import Control.Monad.State
+import Data.List
+import System.Environment
+
+import BuildInfo
+import FilenameDescr
+import Change
+import Utils
+import Tar
+
+-- TODO:
+-- * Check installed trees too
+-- * Check hashbangs
+
+-- Only size changes > sizeAbs are considered an issue
+sizeAbs :: Integer
+sizeAbs = 1000
+
+-- Only a size change of sizePercentage% or more is considered an issue
+sizePercentage :: Integer
+sizePercentage = 150
+
+main :: IO ()
+main = do args <- getArgs
+ case args of
+ [bd1, bd2] -> doit False bd1 bd2
+ ["--ignore-size-changes", bd1, bd2] -> doit True bd1 bd2
+ _ -> die ["Bad args. Need 2 bindists."]
+
+doit :: Bool -> FilePath -> FilePath -> IO ()
+doit ignoreSizeChanges bd1 bd2
+ = do tls1 <- readTarLines bd1
+ tls2 <- readTarLines bd2
+ let mWays1 = findWays tls1
+ mWays2 = findWays tls2
+ wayDifferences <- case (mWays1, mWays2) of
+ (Nothing, Nothing) ->
+ return []
+ (Just ways1, Just ways2) ->
+ return $ diffWays ways1 ways2
+ _ ->
+ die ["One input has ways, but the other doesn't"]
+ (content1, tvm1) <- dieOnErrors $ mkContents mWays1 tls1
+ (content2, tvm2) <- dieOnErrors $ mkContents mWays2 tls2
+ let sortedContent1 = sortByFst content1
+ sortedContent2 = sortByFst content2
+ (nubProbs1, nubbedContent1) = nubContents sortedContent1
+ (nubProbs2, nubbedContent2) = nubContents sortedContent2
+ differences = compareContent mWays1 nubbedContent1
+ mWays2 nubbedContent2
+ allProbs = map First nubProbs1 ++ map Second nubProbs2
+ ++ diffThingVersionMap tvm1 tvm2
+ ++ wayDifferences
+ ++ differences
+ wantedProbs = if ignoreSizeChanges
+ then filter (not . isSizeChange) allProbs
+ else allProbs
+ mapM_ (putStrLn . pprFileChange) wantedProbs
+
+-- *nix bindists have ways.
+-- Windows "bindists", install trees, and testsuites don't.
+findWays :: [TarLine] -> Maybe Ways
+findWays tls = msum $ map f tls
+ where f tl = case re regex (tlFileName tl) of
+ Just [dashedWays] -> Just (unSepList '-' dashedWays)
+ _ -> Nothing
+ regex = "/libraries/base/dist-install/build/\\.depend-(.*)\\.haskell"
+
+diffWays :: Ways -> Ways -> [FileChange]
+diffWays ws1 ws2 = f (sort ws1) (sort ws2)
+ where f [] [] = []
+ f xs [] = map (First . ExtraWay) xs
+ f [] ys = map (Second . ExtraWay) ys
+ f xs@(x : xs') ys@(y : ys')
+ = case x `compare` y of
+ LT -> First (ExtraWay x) : f xs' ys
+ GT -> Second (ExtraWay y) : f xs ys'
+ EQ -> f xs' ys'
+
+diffThingVersionMap :: ThingVersionMap -> ThingVersionMap -> [FileChange]
+diffThingVersionMap tvm1 tvm2 = f (sortByFst tvm1) (sortByFst tvm2)
+ where f [] [] = []
+ f xs [] = map (First . ExtraThing . fst) xs
+ f [] ys = map (Second . ExtraThing . fst) ys
+ f xs@((xt, xv) : xs') ys@((yt, yv) : ys')
+ = case xt `compare` yt of
+ LT -> First (ExtraThing xt) : f xs' ys
+ GT -> Second (ExtraThing yt) : f xs ys'
+ EQ -> let this = if xv == yv
+ then []
+ else [Change (ThingVersionChanged xt xv yv)]
+ in this ++ f xs' ys'
+
+mkContents :: Maybe Ways -> [TarLine]
+ -> Either Errors ([(FilenameDescr, TarLine)], ThingVersionMap)
+mkContents mWays tls
+ = case runStateT (mapM f tls) (emptyBuildInfo mWays) of
+ Nothing -> Left ["Can't happen: mkContents: Nothing"]
+ Just (xs, finalBuildInfo) ->
+ case concat $ map (checkContent finalBuildInfo) xs of
+ [] -> Right (xs, biThingVersionMap finalBuildInfo)
+ errs -> Left errs
+ where f tl = do fnd <- mkFilePathDescr (tlFileName tl)
+ return (fnd, tl)
+
+nubContents :: [(FilenameDescr, TarLine)]
+ -> ([Change], [(FilenameDescr, TarLine)])
+nubContents [] = ([], [])
+nubContents [x] = ([], [x])
+nubContents (x1@(fd1, tl1) : xs@((fd2, _) : _))
+ | fd1 == fd2 = (DuplicateFile (tlFileName tl1) : ps, xs')
+ | otherwise = (ps, x1 : xs')
+ where (ps, xs') = nubContents xs
+
+mkFilePathDescr :: FilePath -> BIMonad FilenameDescr
+mkFilePathDescr fp
+ | Just [ghcVersion, _, middle, filename]
+ <- re ("^ghc-" ++ versionRE ++ "(/.*)?/([^/]*)$") fp
+ = do haveThingVersion "ghc" ghcVersion
+ middle' <- mkMiddleDescr middle
+ filename' <- mkFileNameDescr filename
+ let fd = FP "ghc-" : VersionOf "ghc" : middle' ++ FP "/" : filename'
+ return $ normalise fd
+ | otherwise = return [FP fp]
+
+mkMiddleDescr :: FilePath -> BIMonad FilenameDescr
+mkMiddleDescr middle
+ -- haddock docs in a Windows installed tree
+ | Just [thing, thingVersion, _, src]
+ <- re ("^/doc/html/libraries/([^/]*)-" ++ versionRE ++ "(/src)?$")
+ middle
+ = do haveThingVersion thing thingVersion
+ return [FP "/doc/html/libraries/",
+ FP thing, FP "-", VersionOf thing, FP src]
+ `mplus` unchanged
+ -- libraries in a Windows installed tree
+ | Just [thing, thingVersion, _, rest]
+ <- re ("^/lib/([^/]*)-" ++ versionRE ++ "(/.*)?$")
+ middle
+ = do haveThingVersion thing thingVersion
+ return [FP "/lib/", FP thing, FP "-", VersionOf thing, FP rest]
+ `mplus` unchanged
+ -- Windows in-tree gcc
+ | Just [prefix, _, _, gccVersion, _, rest]
+ <- re ("^(/mingw/(lib(exec)?/gcc/mingw32/|share/gcc-))" ++ versionRE ++ "(/.*)?$")
+ middle
+ = do haveThingVersion "gcc" gccVersion
+ return [FP prefix, VersionOf "gcc", FP rest]
+ `mplus` unchanged
+ | otherwise = unchanged
+ where unchanged = return [FP middle]
+
+mkFileNameDescr :: FilePath -> BIMonad FilenameDescr
+mkFileNameDescr filename
+ | Just [prog, ghcVersion, _, exe]
+ <- re ("^(ghc|ghci|ghcii|haddock)-" ++ versionRE ++ "(\\.exe|\\.sh|)$")
+ filename
+ = do haveThingVersion "ghc" ghcVersion
+ return [FP prog, FP "-", VersionOf "ghc", FP exe]
+ `mplus` unchanged
+ | Just [thing, thingVersion, _, ghcVersion, _, soDll]
+ <- re ("^libHS(.*)-" ++ versionRE ++ "-ghc" ++ versionRE ++ "\\.(so|dll|dylib)$")
+ filename
+ = do haveThingVersion "ghc" ghcVersion
+ haveThingVersion thing thingVersion
+ return [FP "libHS", FP thing, FP "-", VersionOf thing,
+ FP "-ghc", VersionOf "ghc", FP ".", FP soDll]
+ `mplus` unchanged
+ | Just [way, thingVersion, _, soDll]
+ <- re ("^libHSrts(_.*)?-ghc" ++ versionRE ++ "\\.(so|dll|dylib)$")
+ filename
+ = do haveThingVersion "ghc" thingVersion
+ return [FP "libHSrts", FP way, FP "-ghc", VersionOf "ghc",
+ FP ".", FP soDll]
+ `mplus` unchanged
+ | Just [thingVersion, _, soDll]
+ <- re ("^libHSffi-ghc" ++ versionRE ++ "\\.(so|dll|dylib)$")
+ filename
+ = do haveThingVersion "ghc" thingVersion
+ return [FP "libHSffi-ghc", VersionOf "ghc", FP ".", FP soDll]
+ `mplus` unchanged
+ | Just [thing, thingVersion, _, way]
+ <- re ("^libHS(.*)-" ++ versionRE ++ "(_.*)?\\.a$")
+ filename
+ = do haveThingVersion thing thingVersion
+ return [FP "libHS", FP thing, FP "-", VersionOf thing,
+ FP way, FP ".a"]
+ `mplus` unchanged
+ | Just [thing, thingVersion, _]
+ <- re ("^HS(.*)-" ++ versionRE ++ "\\.o$")
+ filename
+ = do haveThingVersion thing thingVersion
+ return [FP "HS", FP thing, FP "-", VersionOf thing, FP ".o"]
+ `mplus` unchanged
+ | Just [thing, thingVersion, _, thingHash]
+ <- re ("^(.*)-" ++ versionRE ++ "-([0-9a-f]{32})\\.conf$")
+ filename
+ = do haveThingVersion thing thingVersion
+ haveThingHash thing thingHash
+ return [FP thing, FP "-", VersionOf thing, FP "-", HashOf thing,
+ FP ".conf"]
+ `mplus` unchanged
+ | Just [thingVersion, _]
+ <- re ("^mingw32-gcc-" ++ versionRE ++ "\\.exe$")
+ filename
+ = do haveThingVersion "gcc" thingVersion
+ return [FP "mingw32-gcc-", VersionOf "gcc", FP ".exe"]
+ `mplus` unchanged
+ | Just [dashedWays, depType]
+ <- re "^\\.depend-(.*)\\.(haskell|c_asm)"
+ filename
+ = do mWays <- getMaybeWays
+ if Just (unSepList '-' dashedWays) == mWays
+ then return [FP ".depend-", Ways, FP ".", FP depType]
+ else unchanged
+ | otherwise = unchanged
+ where unchanged = return [FP filename]
+
+compareContent :: Maybe Ways -> [(FilenameDescr, TarLine)]
+ -> Maybe Ways -> [(FilenameDescr, TarLine)]
+ -> [FileChange]
+compareContent mWays1 xs1all mWays2 xs2all
+ = f xs1all xs2all
+ where f [] [] = []
+ f xs [] = concatMap (mkExtraFile mWays1 mWays2 First . tlFileName . snd) xs
+ f [] ys = concatMap (mkExtraFile mWays2 mWays1 Second . tlFileName . snd) ys
+ f xs1@((fd1, tl1) : xs1') xs2@((fd2, tl2) : xs2')
+ = case fd1 `compare` fd2 of
+ EQ -> map Change (compareTarLine tl1 tl2)
+ ++ f xs1' xs2'
+ LT -> mkExtraFile mWays1 mWays2 First (tlFileName tl1)
+ ++ f xs1' xs2
+ GT -> mkExtraFile mWays2 mWays1 Second (tlFileName tl2)
+ ++ f xs1 xs2'
+ mkExtraFile mWaysMe mWaysThem mkFileChange filename
+ = case (findFileWay filename, mWaysMe, mWaysThem) of
+ (Just way, Just waysMe, Just waysThem)
+ | (way `elem` waysMe) && not (way `elem` waysThem) -> []
+ _ -> [mkFileChange (ExtraFile filename)]
+
+findFileWay :: FilePath -> Maybe String
+findFileWay fp
+ | Just [way] <- re "\\.([a-z_]+)_hi$" fp
+ = Just way
+ | Just [_, _, way] <- re ("libHS.*-" ++ versionRE ++ "_([a-z_]+).a$") fp
+ = Just way
+ | otherwise = Nothing
+
+compareTarLine :: TarLine -> TarLine -> [Change]
+compareTarLine tl1 tl2
+ = [ PermissionsChanged fn1 fn2 perms1 perms2 | perms1 /= perms2 ]
+ ++ [ FileSizeChanged fn1 fn2 size1 size2 | sizeChanged ]
+ where fn1 = tlFileName tl1
+ fn2 = tlFileName tl2
+ perms1 = tlPermissions tl1
+ perms2 = tlPermissions tl2
+ size1 = tlSize tl1
+ size2 = tlSize tl2
+ sizeChanged = abs (size1 - size2) > sizeAbs
+ && (((100 * size1) `div` size2) > sizePercentage ||
+ ((100 * size2) `div` size1) > sizePercentage)
+
+versionRE :: String
+versionRE = "([0-9]+(\\.[0-9]+)*)"
+
--- /dev/null
+#!/bin/sh
+
+set -e
+
+die () {
+ echo "$1" >&2
+ exit 1
+}
+
+[ "$#" -eq 2 ] || die "Bad args. Usage: $0 <Linux bindist> <Windows bindist>"
+
+LINUX_BINDIST=`realpath "$1"`
+WINDOWS_BINDIST=`realpath "$2"`
+
+mkdir docs
+cd docs
+tar -jxf "$LINUX_BINDIST"
+mv ghc* linux
+tar -jxf "$WINDOWS_BINDIST"
+mv ghc* windows
+cd linux
+./configure --prefix=`pwd`/inst
+make install
+cd inst/share/doc/ghc/html/libraries
+mv ../../../../../../../windows/doc/html/libraries/Win32-* .
+sh gen_contents_index
+cd ..
+for i in Cabal haddock libraries users_guide
+do
+ tar -jcf ../../../../../../$i.html.tar.bz2 $i
+done
+cd ..
+mv *.pdf *.ps ../../../../..
+
<varlistentry>
<term>
+ <option>-ddump-core-stats</option>
+ <indexterm><primary><option>-ddump-core-stats</option></primary></indexterm>
+ </term>
+ <listitem>
+ <para>Print a one-line summary of the size of the Core program
+ at the end of the optimisation pipeline.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
<option>-dfaststring-stats</option>
<indexterm><primary><option>-dfaststring-stats</option></primary></indexterm>
</term>
#include "foo_stub.h"
#endif
-#ifdef __GLASGOW_HASKELL__
-extern void __stginit_Foo ( void );
-#endif
-
int main(int argc, char *argv[])
{
int i;
hs_init(&argc, &argv);
-#ifdef __GLASGOW_HASKELL__
- hs_add_root(__stginit_Foo);
-#endif
for (i = 0; i < 5; i++) {
printf("%d\n", foo(2500));
(i.e. those arguments between
<literal>+RTS...-RTS</literal>).</para>
- <para>Next, we call
- <function>hs_add_root</function><indexterm><primary><function>hs_add_root</function></primary>
- </indexterm>, a GHC-specific interface which is required to
- initialise the Haskell modules in the program. The argument
- to <function>hs_add_root</function> should be the name of the
- initialization function for the "root" module in your program
- - in other words, the module which directly or indirectly
- imports all the other Haskell modules in the program. In a
- standalone Haskell program the root module is normally
- <literal>Main</literal>, but when you are using Haskell code
- from a library it may not be. If your program has multiple
- root modules, then you can call
- <function>hs_add_root</function> multiple times, one for each
- root. The name of the initialization function for module
- <replaceable>M</replaceable> is
- <literal>__stginit_<replaceable>M</replaceable></literal>, and
- it may be declared as an external function symbol as in the
- code above. Note that the symbol name should be transformed
- according to the Z-encoding:</para>
-
<informaltable>
<tgroup cols="2" align="left" colsep="1" rowsep="1">
<thead>
// Initialize Haskell runtime
hs_init(&argc, &argv);
- // Tell Haskell about all root modules
- hs_add_root(__stginit_Foo);
-
// do any other initialization here and
// return false if there was a problem
return HS_BOOL_TRUE;
</programlisting>
<para>The initialisation routine, <literal>mylib_init</literal>, calls
- <literal>hs_init()</literal> and <literal>hs_add_root()</literal> as
+ <literal>hs_init()</literal> as
normal to initialise the Haskell runtime, and the corresponding
deinitialisation function <literal>mylib_end()</literal> calls
<literal>hs_exit()</literal> to shut down the runtime.</para>
invoke <literal>foreign export</literal>ed functions from
multiple OS threads concurrently. The runtime system must
be initialised as usual by
- calling <literal>hs_init()</literal>
- and <literal>hs_add_root</literal>, and these calls must
+ calling <literal>hs_init()</literal>, and this call must
complete before invoking any <literal>foreign
export</literal>ed functions.</para>
</sect3>
<entry>-</entry>
</row>
<row>
- <entry><option>-keep-raw-s-file</option> or
- <option>-keep-raw-s-files</option></entry>
- <entry>retain intermediate <literal>.raw_s</literal> files</entry>
- <entry>dynamic</entry>
- <entry>-</entry>
- </row>
- <row>
<entry><option>-keep-tmp-files</option></entry>
<entry>retain all intermediate temporary files</entry>
<entry>dynamic</entry>
<row>
<entry><option>-package-name</option> <replaceable>P</replaceable></entry>
<entry>Compile to be part of package <replaceable>P</replaceable></entry>
- <entry>dynamic</entry>
+ <entry>static</entry>
<entry>-</entry>
</row>
<row>
</row>
<row>
+ <entry><option>-fno-opt-coercion</option></entry>
+ <entry>Turn off the coercion optimiser</entry>
+ <entry>static</entry>
+ <entry>-</entry>
+ </row>
+
+ <row>
<entry><option>-feager-blackholing</option></entry>
<entry>Turn on <link linkend="parallel-compile-options">eager blackholing</link></entry>
<entry>dynamic</entry>
<entry><option>-fasm</option></entry>
<entry>Use the native code generator</entry>
<entry>dynamic</entry>
- <entry>-fvia-C</entry>
- </row>
- <row>
- <entry><option>-fvia-C</option></entry>
- <entry>Compile via C</entry>
- <entry>dynamic</entry>
- <entry>-fasm</entry>
+ <entry>-fllvm</entry>
</row>
<row>
<entry><option>-fllvm</option></entry>
</row>
</row>
<row>
- <entry><option>-pgmm</option> <replaceable>cmd</replaceable></entry>
- <entry>Use <replaceable>cmd</replaceable> as the mangler</entry>
- <entry>dynamic</entry>
- <entry>-</entry>
- </row>
- <row>
<entry><option>-pgms</option> <replaceable>cmd</replaceable></entry>
<entry>Use <replaceable>cmd</replaceable> as the splitter</entry>
<entry>dynamic</entry>
<entry>-</entry>
</row>
<row>
+ <entry><option>-ddump-core-stats</option></entry>
+ <entry>Print a one-line summary of the size of the Core program
+ at the end of the optimisation pipeline </entry>
+ <entry>dynamic</entry>
+ <entry>-</entry>
+ </row>
+ <row>
<entry><option>-ddump-cpranal</option></entry>
<entry>Dump output from CPR analysis</entry>
<entry>dynamic</entry>
<entry>-</entry>
</row>
<row>
- <entry><option>-fno-asm-mangling</option></entry>
- <entry>Turn off assembly mangling (use <option>-unreg</option> instead)</entry>
- <entry>dynamic</entry>
- <entry>-</entry>
- </row>
- <row>
<entry><option>-fno-ghci-sandbox</option></entry>
<entry>Turn off the GHCi sandbox. Means computations are run in teh main thread, rather than a forked thread.</entry>
<entry>dynamic</entry>
<sect2 id="impredicative-polymorphism">
<title>Impredicative polymorphism
</title>
-<para><emphasis>NOTE: the impredicative-polymorphism feature is deprecated in GHC 6.12, and
-will be removed or replaced in GHC 6.14.</emphasis></para>
-
<para>GHC supports <emphasis>impredicative polymorphism</emphasis>,
enabled with <option>-XImpredicativeTypes</option>.
This means
g (x:xs) = xs ++ [ x :: a ]
</programlisting>
This program will be rejected, because "<literal>a</literal>" does not scope
-over the definition of "<literal>f</literal>", so "<literal>x::a</literal>"
+over the definition of "<literal>g</literal>", so "<literal>x::a</literal>"
means "<literal>x::forall a. a</literal>" by Haskell's usual implicit
quantification rules.
</para></listitem>
<programlisting>
f = runST ( (op >>= \(x :: STRef s Int) -> g x) :: forall s. ST s Bool )
</programlisting>
-Here, the type signature <literal>forall a. ST s Bool</literal> brings the
+Here, the type signature <literal>forall s. ST s Bool</literal> brings the
type variable <literal>s</literal> into scope, in the annotated expression
<literal>(op >>= \(x :: STRef s Int) -> g x)</literal>.
</para>
<programlisting>
/usr/bin/ld: Undefined symbols:
_ZCMain_main_closure
-___stginit_ZCMain
</programlisting>
</para>
<varlistentry>
<term>
- <option>-pgmm</option> <replaceable>cmd</replaceable>
- <indexterm><primary><option>-pgmm</option></primary></indexterm>
- </term>
- <listitem>
- <para>Use <replaceable>cmd</replaceable> as the
- mangler.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
<option>-pgms</option> <replaceable>cmd</replaceable>
<indexterm><primary><option>-pgms</option></primary></indexterm>
</term>
</term>
<listitem>
<para>Use GHC's native code generator rather than
- compiling via C. This will compile faster (up to twice as
- fast), but may produce code that is slightly slower than
- compiling via C. <option>-fasm</option> is the default.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <option>-fvia-C</option>
- <indexterm><primary><option>-fvia-C</option></primary></indexterm>
- </term>
- <listitem>
- <para>Compile via C instead of using the native code
- generator. This is the default on architectures for which GHC
- doesn't have a native code generator.</para>
+ compiling via LLVM.
+ <option>-fasm</option> is the default.</para>
</listitem>
</varlistentry>
<listitem>
<para>Compile via LLVM instead of using the native code
generator. This will generally take slightly longer than the
- native code generator to compile but quicker than compiling
- via C. Produced code is generally the same speed or faster
+ native code generator to compile.
+ Produced code is generally the same speed or faster
than the other two code generators. Compiling via LLVM
requires LLVM version 2.7 or later to be on the path.</para>
</listitem>
<?xml version="1.0" encoding="iso-8859-1"?>
-<section id="runtime-control">
+<sect1 id="runtime-control">
<title>Running a compiled program</title>
<indexterm><primary>runtime control of Haskell programs</primary></indexterm>
options themselves.
</para>
- <section id="setting-rts-options">
+ <sect2 id="setting-rts-options">
<title>Setting RTS options</title>
<indexterm><primary>RTS options, setting</primary></indexterm>
</itemizedlist>
</para>
- <section id="rts-opts-cmdline">
+ <sect3 id="rts-opts-cmdline">
<title>Setting RTS options on the command line</title>
<para>
<literal>+RTS -M128m -RTS</literal>
to the command line.
</para>
- </section>
+ </sect3>
- <section id="rts-opts-compile-time">
+ <sect3 id="rts-opts-compile-time">
<title>Setting RTS options at compile time</title>
<para>
set <literal>-H128m -K64m</literal>, link
with <literal>-with-rtsopts="-H128m -K64m"</literal>.
</para>
- </section>
+ </sect3>
- <section id="rts-options-environment">
+ <sect3 id="rts-options-environment">
<title>Setting RTS options with the <envar>GHCRTS</envar>
environment variable</title>
a crawl until the OS decides to kill the process (and you
hope it kills the right one).
</para>
- </section>
+ </sect3>
- <section id="rts-hooks">
+ <sect3 id="rts-hooks">
<title>“Hooks” to change RTS behaviour</title>
<indexterm><primary>hooks</primary><secondary>RTS</secondary></indexterm>
versions in the file
<filename>ghc/compiler/parser/hschooks.c</filename> in a GHC
source tree.</para>
- </section>
+ </sect3>
- </section>
+ </sect2>
- <section id="rts-options-misc">
+ <sect2 id="rts-options-misc">
<title>Miscellaneous RTS options</title>
<variablelist>
</listitem>
</varlistentry>
</variablelist>
- </section>
+ </sect2>
- <section id="rts-options-gc">
+ <sect2 id="rts-options-gc">
<title>RTS options to control the garbage collector</title>
<indexterm><primary>garbage collector</primary><secondary>options</secondary></indexterm>
</varlistentry>
</variablelist>
- </section>
+ </sect2>
- <section>
+ <sect2>
<title>RTS options for concurrency and parallelism</title>
<para>The RTS options related to concurrency are described in
<xref linkend="using-concurrent" />, and those for parallelism in
<xref linkend="parallel-options"/>.</para>
- </section>
+ </sect2>
- <section id="rts-profiling">
+ <sect2 id="rts-profiling">
<title>RTS options for profiling</title>
<para>Most profiling runtime options are only available when you
</listitem>
</varlistentry>
</variablelist>
- </section>
+ </sect2>
- <section id="rts-eventlog">
+ <sect2 id="rts-eventlog">
<title>Tracing</title>
<indexterm><primary>tracing</primary></indexterm>
the binary eventlog file by using the <option>-l</option>
option.
</para>
- </section>
+ </sect2>
- <section id="rts-options-debugging">
+ <sect2 id="rts-options-debugging">
<title>RTS options for hackers, debuggers, and over-interested
souls</title>
</varlistentry>
</variablelist>
- </section>
+ </sect2>
- <section>
+ <sect2>
<title>Getting information about the RTS</title>
<indexterm><primary>RTS</primary></indexterm>
</variablelist>
- </section>
-</section>
+ </sect2>
+</sect1>
<!-- Emacs stuff:
;;; Local Variables: ***
<para>Keep intermediate <literal>.hc</literal> files when
doing <literal>.hs</literal>-to-<literal>.o</literal>
compilations via C (NOTE: <literal>.hc</literal> files
- aren't generated when using the native code generator, you
- may need to use <option>-fvia-C</option> to force them
- to be produced).</para>
+ are only generated by unregisterised compilers).</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
- <option>-keep-raw-s-file</option>,
- <option>-keep-raw-s-files</option>
- <indexterm><primary><option>-keep-raw-s-file</option></primary></indexterm>
- <indexterm><primary><option>-keep-raw-s-files</option></primary></indexterm>
- </term>
- <listitem>
- <para>Keep intermediate <literal>.raw-s</literal> files.
- These are the direct output from the C compiler, before
- GHC does “assembly mangling” to produce the
- <literal>.s</literal> file. Again, these are not produced
- when using the native code generator.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
<option>-keep-tmp-files</option>
<indexterm><primary><option>-keep-tmp-files</option></primary></indexterm>
<indexterm><primary>temporary files</primary><secondary>keeping</secondary></indexterm>
</varlistentry>
<varlistentry>
- <term>Compile via C and crank up GCC:</term>
- <listitem>
- <para>The native code-generator is designed to be quick, not
- mind-bogglingly clever. Better to let GCC have a go, as it
- tries much harder on register allocation, etc.</para>
-
- <para>So, when we want very fast code, we use: <option>-O
- -fvia-C</option>.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term>Overloaded functions are not your friend:</term>
<listitem>
<para>Haskell's overloading (using type classes) is elegant,
</varlistentry>
<varlistentry>
+ <term>
+ <option>-fwarn-missing-import-lists</option>:
+        <indexterm><primary><option>-fwarn-missing-import-lists</option></primary></indexterm>
+ <indexterm><primary>missing import lists, warning</primary></indexterm>
+ <indexterm><primary>import lists, missing</primary></indexterm>
+ </term>
+ <listitem>
+
+ <para>This flag warns if you use an unqualified
+ <literal>import</literal> declaration
+ that does not explicitly list the entities brought into scope. For
+ example
+ </para>
+<programlisting>
+module M where
+ import X( f )
+ import Y
+ import qualified Z
+ p x = f x x
+</programlisting>
+ <para>
+ The <option>-fwarn-missing-import-lists</option> flag will warn about the import
+ of <literal>Y</literal> but not <literal>X</literal>.
+ If module <literal>Y</literal> is later changed to export (say) <literal>f</literal>,
+ then the reference to <literal>f</literal> in <literal>M</literal> will become
+ ambiguous. No warning is produced for the import of <literal>Z</literal>
+ because extending <literal>Z</literal>'s exports would be unlikely to produce
+ ambiguity in <literal>M</literal>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>-fwarn-missing-methods</option>:</term>
<listitem>
<indexterm><primary><option>-fwarn-missing-methods</option></primary></indexterm>
<para>We don't use a <option>-O*</option> flag for day-to-day
work. We use <option>-O</option> to get respectable speed;
e.g., when we want to measure something. When we want to go for
- broke, we tend to use <option>-O2 -fvia-C</option> (and we go for
+ broke, we tend to use <option>-O2</option> (and we go for
lots of coffee breaks).</para>
<para>The easiest way to see what <option>-O</option> (etc.)
</listitem>
</varlistentry>
- <varlistentry>
- <term><option>-monly-[32]-regs</option>:</term>
- <listitem>
- <para>(x86 only)<indexterm><primary>-monly-N-regs
- option (iX86 only)</primary></indexterm> GHC tries to
- “steal” four registers from GCC, for performance
- reasons; it almost always works. However, when GCC is
- compiling some modules with four stolen registers, it will
- crash, probably saying:
-
-<screen>
-Foo.hc:533: fixed or forbidden register was spilled.
-This may be due to a compiler bug or to impossible asm
-statements or clauses.
-</screen>
-
- Just give some registers back with
- <option>-monly-N-regs</option>. Try `3' first, then `2'.
- If `2' doesn't work, please report the bug to us.</para>
- </listitem>
- </varlistentry>
</variablelist>
</sect1>
</varlistentry>
<varlistentry>
+ <term><literal>-k</literal> or
+ <literal>––keep-files</literal></term>
+ <listitem>
+ <para>Proceed as normal, but do not delete any intermediate files.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>-x</literal> or
+ <literal>––cross-compile</literal></term>
+ <listitem>
+ <para>Activate cross-compilation mode (see <xref linkend="hsc2hs_cross"/>).</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><literal>––cross-safe</literal></term>
+ <listitem>
+ <para>Restrict the .hsc directives to those supported by the
+ <literal>--cross-compile</literal> mode (see <xref linkend="hsc2hs_cross"/>).
+ This should be useful if your <literal>.hsc</literal> files
+ must be safely cross-compiled and you wish to keep
+ non-cross-compilable constructs from creeping into them.</para>
+ </listitem>
+ </varlistentry>
+
+
+ <varlistentry>
<term><literal>-?</literal> or <literal>––help</literal></term>
<listitem>
<para>Display a summary of the available flags and exit successfully.</para>
</sect2>
+ <sect2 id="hsc2hs_cross">
+ <title>Cross-compilation</title>
+
+ <para><command>hsc2hs</command> normally operates by creating, compiling,
+ and running a C program. That approach doesn't work when cross-compiling --
+ in this case, the C compiler generates code for the target machine,
+ not the host machine. For this situation, there's
+ a special mode <command>hsc2hs --cross-compile</command> which can generate
+ the .hs by extracting information from compilations only -- specifically,
+ whether or not compilation fails.
+ </para>
+
+ <para>Only a subset of <literal>.hsc</literal> syntax is supported by
+ <literal>--cross-compile</literal>. The following are unsupported:
+ <itemizedlist>
+      <listitem><para><literal>#{const_str}</literal></para></listitem>
+      <listitem><para><literal>#{let}</literal></para></listitem>
+      <listitem><para><literal>#{def}</literal></para></listitem>
+      <listitem><para>Custom constructs</para></listitem>
+ </itemizedlist>
+ </para>
+ </sect2>
+
</sect1>
</chapter>
// StartEnd.c
#include <Rts.h>
-extern void __stginit_Adder(void);
-
void HsStart()
{
int argc = 1;
// Initialize Haskell runtime
char** args = argv;
hs_init(&argc, &args);
-
- // Tell Haskell about all root modules
- hs_add_root(__stginit_Adder);
}
void HsEnd()
+++ /dev/null
-# -----------------------------------------------------------------------------
-#
-# (c) 2009 The University of Glasgow
-#
-# This file is part of the GHC build system.
-#
-# To understand how the build system works and how to modify it, see
-# http://hackage.haskell.org/trac/ghc/wiki/Building/Architecture
-# http://hackage.haskell.org/trac/ghc/wiki/Building/Modifying
-#
-# -----------------------------------------------------------------------------
-
-dir = driver/mangler
-TOP = ../..
-include $(TOP)/mk/sub-makefile.mk
+++ /dev/null
-%************************************************************************
-%* *
-\section[Driver-asm-fiddling]{Fiddling with assembler files}
-%* *
-%************************************************************************
-
-Tasks:
-\begin{itemize}
-\item
-Utterly stomp out C functions' prologues and epilogues; i.e., the
-stuff to do with the C stack.
-\item
-Any other required tidying up.
-\end{itemize}
-
-General note [chak]: Many regexps are very fragile because they rely on white
-space being in the right place. This caused trouble with gcc 2.95 (at least
-on Linux), where the use of white space in .s files generated by gcc suddenly
-changed. To guarantee compatibility across different versions of gcc, make
-sure (at least on i386-.*-linux) that regexps tolerate varying amounts of white
-space between an assembler statement and its arguments as well as after a the
-comma separating multiple arguments.
-
-\emph{For the time being, I have corrected the regexps for i386-.*-linux. I
-didn't touch all the regexps for other i386 platforms, as I don't have
-a box to test these changes.}
-
-HPPA specific notes:
-\begin{itemize}
-\item
-The HP linker is very picky about symbols being in the appropriate
-space (code vs. data). When we mangle the threaded code to put the
-info tables just prior to the code, they wind up in code space
-rather than data space. This means that references to *_info from
-un-mangled parts of the RTS (e.g. unthreaded GC code) get
-unresolved symbols. Solution: mini-mangler for .c files on HP. I
-think this should really be triggered in the driver by a new -rts
-option, so that user code doesn't get mangled inappropriately.
-\item
-With reversed tables, jumps are to the _info label rather than to
-the _entry label. The _info label is just an address in code
-space, rather than an entry point with the descriptive blob we
-talked about yesterday. As a result, you can't use the call-style
-JMP_ macro. However, some JMP_ macros take _info labels as targets
-and some take code entry points within the RTS. The latter won't
-work with the goto-style JMP_ macro. Sigh. Solution: Use the goto
-style JMP_ macro, and mangle some more assembly, changing all
-"RP'literal" and "LP'literal" references to "R'literal" and
-"L'literal," so that you get the real address of the code, rather
-than the descriptive blob. Also change all ".word P%literal"
-entries in info tables and vector tables to just ".word literal,"
-for the same reason. Advantage: No more ridiculous call sequences.
-\end{itemize}
-
-%************************************************************************
-%* *
-\subsection{Top-level code}
-%* *
-%************************************************************************
-
-\begin{code}
-$TargetPlatform = $TARGETPLATFORM;
-
-($Pgm = $0) =~ s|.*/||m;
-$ifile = $ARGV[0];
-$ofile = $ARGV[1];
-
-if ( $TargetPlatform =~ /^i386-/m ) {
- if ($ARGV[2] eq '') {
- $StolenX86Regs = 4;
- } else {
- $StolenX86Regs = $ARGV[2];
- }
-}
-
-&mangle_asm($ifile,$ofile);
-
-exit(0);
-\end{code}
-
-%************************************************************************
-%* *
-\subsection{Constants for various architectures}
-%* *
-%************************************************************************
-
-\begin{code}
-sub init_TARGET_STUFF {
-
- #--------------------------------------------------------#
- if ( $TargetPlatform =~ /^alpha-.*-.*/m ) {
-
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = ''; # _ if symbols have an underscore on the front
- $T_PRE_APP = 'DONT THINK THIS APPLIES'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^\$L?C(\d+):$'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^(\s*(\$.*\.\.ng:|\.align\s+\d+|\.(globl|ent)\s+\S+|\#.*|\.(file|loc)\s+\S+\s+\S+|\.text|\.r?data)\n)';
- $T_COPY_DIRVS = '^\s*(\$.*\.\.ng:|\#|\.(file|globl|ent|loc))';
-
- $T_DOT_WORD = '\.(long|quad|byte|word)';
- $T_DOT_GLOBAL = '^\t\.globl';
- $T_HDR_literal = "\.rdata\n\t\.align 3\n";
- $T_HDR_misc = "\.text\n\t\.align 3\n";
- $T_HDR_data = "\.data\n\t\.align 3\n";
- $T_HDR_rodata = "\.rdata\n\t\.align 3\n";
- $T_HDR_closure = "\.data\n\t\.align 3\n";
- $T_HDR_info = "\.text\n\t\.align 3\n";
- $T_HDR_entry = "\.text\n\t\.align 3\n";
- $T_HDR_vector = "\.text\n\t\.align 3\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^hppa/m ) {
-
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = ''; # _ if symbols have an underscore on the front
- $T_PRE_APP = 'DONT THINK THIS APPLIES'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^L\$C(\d+)$'; # regexp for what such a lbl looks like
- $T_POST_LBL = '';
-
- $T_MOVE_DIRVS = '^((\s+\.(IMPORT|EXPORT|PARAM).*|\s+\.align\s+\d+|\s+\.(SPACE|SUBSPA)\s+\S+|\s*)\n)';
- $T_COPY_DIRVS = '^\s+\.(IMPORT|EXPORT)';
-
- $T_DOT_WORD = '\.(blockz|word|half|byte)';
- $T_DOT_GLOBAL = '^\s+\.EXPORT';
- $T_HDR_literal = "\t.SPACE \$TEXT\$\n\t.SUBSPA \$LIT\$\n";
- $T_HDR_misc = "\t.SPACE \$TEXT\$\n\t.SUBSPA \$CODE\$\n\t\.align 4\n";
- $T_HDR_data = "\t.SPACE \$PRIVATE\$\n\t.SUBSPA \$DATA\$\n\t\.align 4\n";
- $T_HDR_rodata = "\t.SPACE \$PRIVATE\$\n\t.SUBSPA \$DATA\$\n\t\.align 4\n";
- $T_HDR_closure = "\t.SPACE \$PRIVATE\$\n\t.SUBSPA \$DATA\$\n\t\.align 4\n";
- $T_HDR_info = "\t.SPACE \$TEXT\$\n\t.SUBSPA \$CODE\$\n\t\.align 4\n";
- $T_HDR_entry = "\t.SPACE \$TEXT\$\n\t.SUBSPA \$CODE\$\n\t\.align 4\n";
- $T_HDR_vector = "\t.SPACE \$TEXT\$\n\t.SUBSPA \$CODE\$\n\t\.align 4\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^i386-.*-(linuxaout|freebsd2|nextstep3|cygwin32|mingw32)$/m ) {
- # NeXT added but not tested. CaS
-
- $T_STABBY = 1; # 1 iff .stab things (usually if a.out format)
- $T_US = '_'; # _ if symbols have an underscore on the front
- $T_PRE_APP = '^#'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^LC(\d+):$';
- $T_POST_LBL = ':';
- $T_X86_PRE_LLBL_PAT = 'L';
- $T_X86_PRE_LLBL = 'L';
- $T_X86_BADJMP = '^\tjmp [^L\*]';
-
- $T_MOVE_DIRVS = '^(\s*(\.(p2)?align\s.*|\.globl\s+\S+|\.text|\.data|\.stab[^n].*|\.type\s+.*|\.size\s+.*|\.lcomm.*)\n)';
- $T_COPY_DIRVS = '\.(globl|stab|lcomm)';
- $T_DOT_WORD = '\.(long|word|value|byte|space)';
- $T_DOT_GLOBAL = '\.globl';
- $T_HDR_literal = "\.text\n\t\.align 4\n";
- $T_HDR_misc = "\.text\n\t\.align 4,0x90\n";
- $T_HDR_data = "\.data\n\t\.align 4\n";
- $T_HDR_rodata = "\.text\n\t\.align 4\n";
- $T_HDR_closure = "\.data\n\t\.align 4\n";
- $T_HDR_info = "\.text\n\t\.align 4\n"; # NB: requires padding
- $T_HDR_entry = "\.text\n"; # no .align so we're right next to _info (arguably wrong...?)
- $T_HDR_vector = "\.text\n\t\.align 4\n"; # NB: requires padding
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^i386-.*-(solaris2|linux|gnu|freebsd|dragonfly|netbsd|openbsd|kfreebsdgnu)$/m ) {
-
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = ''; # _ if symbols have an underscore on the front
- $T_PRE_APP = # regexp that says what comes before APP/NO_APP
- ($TargetPlatform =~ /-(linux|gnu|freebsd|dragonfly|netbsd|openbsd)$/m) ? '#' : '/' ;
- $T_CONST_LBL = '^\.LC(\d+):$'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
- $T_X86_PRE_LLBL_PAT = '\.L';
- $T_X86_PRE_LLBL = '.L';
- $T_X86_BADJMP = '^\tjmp\s+[^\.\*]';
-
- $T_MOVE_DIRVS = '^(\s*(\.(p2)?align\s.*|\.globl\s+\S+|\.text|\.data|\.section\s+.*|\.type\s+.*|\.size\s+\S+\s*,\s*\d+|\.ident.*|\.local.*)\n)';
- if ( $TargetPlatform =~ /solaris2/m ) {
- # newer Solaris linkers are picky about .size information, so
- # omit it (see #1421)
- $T_COPY_DIRVS = '^\s*\.(globl|local)';
- } else {
- $T_COPY_DIRVS = '^\s*\.(globl|type|size|local)';
- }
-
- $T_DOT_WORD = '\.(long|value|word|byte|zero)';
- $T_DOT_GLOBAL = '\.globl';
- $T_HDR_literal = "\.section\t\.rodata\n"; # or just use .text??? (WDP 95/11)
- $T_HDR_misc = "\.text\n\t\.align 4\n";
- $T_HDR_data = "\.data\n\t\.align 4\n";
- $T_HDR_rodata = "\.section\t\.rodata\n\t\.align 4\n";
- $T_HDR_closure = "\.data\n\t\.align 4\n";
- $T_HDR_info = "\.text\n\t\.align 4\n";
- $T_HDR_entry = "\.text\n"; # no .align so we're right next to _info (arguably wrong...?)
- $T_HDR_vector = "\.text\n\t\.align 4\n"; # NB: requires padding
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^ia64-.*-linux$/m ) {
-
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = ''; # _ if symbols have an underscore on the front
- $T_PRE_APP = '#';
- $T_CONST_LBL = '^\.LC(\d+):$'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^(\s*\.(global|proc|pred\.safe_across_calls|text|data|section|subsection|align|size|type|ident)\s+.*\n)';
- $T_COPY_DIRVS = '\.(global|proc)';
-
- $T_DOT_WORD = '\.(long|value|byte|zero)';
- $T_DOT_GLOBAL = '\.global';
- $T_HDR_literal = "\.section\t\.rodata\n";
- $T_HDR_misc = "\.text\n\t\.align 16\n"; # May contain code; align like 'entry'
- $T_HDR_data = "\.data\n\t\.align 8\n";
- $T_HDR_rodata = "\.section\t\.rodata\n\t\.align 8\n";
- $T_HDR_closure = "\.data\n\t\.align 8\n";
- $T_HDR_info = "\.text\n\t\.align 8\n";
- $T_HDR_entry = "\.text\n\t\.align 16\n";
- $T_HDR_vector = "\.text\n\t\.align 8\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^x86_64-.*-(linux|openbsd|freebsd|dragonfly|netbsd|kfreebsdgnu)$/m ) {
-
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = ''; # _ if symbols have an underscore on the front
- $T_PRE_APP = '#';
- $T_CONST_LBL = '^\.LC(\d+):$'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^(\s*\.(globl|text|data|section|align|size|type|ident|local)([ \t].*)?\n)';
- $T_COPY_DIRVS = '\.(globl|type|size|local)';
-
- $T_DOT_WORD = '\.(quad|long|value|byte|zero)';
- $T_DOT_GLOBAL = '\.global';
-
- $T_HDR_literal16 = "\.section\t\.rodata.cst16\n\t.align 16\n";
- $T_HDR_literal = "\.section\t\.rodata\n";
-
- $T_HDR_misc = "\.text\n\t\.align 8\n";
- $T_HDR_data = "\.data\n\t\.align 8\n";
- $T_HDR_rodata = "\.section\t\.rodata\n\t\.align 8\n";
-
- # the assembler on x86_64/Linux refuses to generate code for
- # .quad x - y
- # where x is in the text section and y in the rodata section.
- # It works if y is in the text section, though. This is probably
- # going to cause difficulties for PIC, I imagine.
- #
- # See Note [x86-64-relative] in includes/InfoTables.h
- $T_HDR_relrodata= "\.text\n\t\.align 8\n";
-
- $T_HDR_closure = "\.data\n\t\.align 8\n";
- $T_HDR_info = "\.text\n\t\.align 8\n";
- $T_HDR_entry = "\.text\n\t\.align 8\n";
- $T_HDR_vector = "\.text\n\t\.align 8\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^m68k-.*-sunos4/m ) {
-
- $T_STABBY = 1; # 1 iff .stab things (usually if a.out format)
- $T_US = '_'; # _ if symbols have an underscore on the front
- $T_PRE_APP = '^# MAY NOT APPLY'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^LC(\d+):$';
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^(\s*(\.align\s+\d+|\.proc\s+\d+|\.const|\.cstring|\.globl\s+\S+|\.text|\.data|\.even|\.stab[^n].*)\n)';
- $T_COPY_DIRVS = '\.(globl|proc|stab)';
-
- $T_DOT_WORD = '\.long';
- $T_DOT_GLOBAL = '\.globl';
- $T_HDR_literal = "\.text\n\t\.even\n";
- $T_HDR_misc = "\.text\n\t\.even\n";
- $T_HDR_data = "\.data\n\t\.even\n";
- $T_HDR_rodata = "\.text\n\t\.even\n";
- $T_HDR_closure = "\.data\n\t\.even\n";
- $T_HDR_info = "\.text\n\t\.even\n";
- $T_HDR_entry = "\.text\n\t\.even\n";
- $T_HDR_vector = "\.text\n\t\.even\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^mips-.*/m ) {
-
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = ''; # _ if symbols have an underscore on the front
- $T_PRE_APP = '^\s*#'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^\$LC(\d+):$'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^(\s*(\.align\s+\d+|\.(globl|ent)\s+\S+|\.text|\.r?data)\n)';
- $T_COPY_DIRVS = '\.(globl|ent)';
-
- $T_DOT_WORD = '\.word';
- $T_DOT_GLOBAL = '^\t\.globl';
- $T_HDR_literal = "\t\.rdata\n\t\.align 2\n";
- $T_HDR_misc = "\t\.text\n\t\.align 2\n";
- $T_HDR_data = "\t\.data\n\t\.align 2\n";
- $T_HDR_rodata = "\t\.rdata\n\t\.align 2\n";
- $T_HDR_closure = "\t\.data\n\t\.align 2\n";
- $T_HDR_info = "\t\.text\n\t\.align 2\n";
- $T_HDR_entry = "\t\.text\n\t\.align 2\n";
- $T_HDR_vector = "\t\.text\n\t\.align 2\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^powerpc-apple-darwin.*/m ) {
- # Apple PowerPC Darwin/MacOS X.
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = '_'; # _ if symbols have an underscore on the front
- $T_PRE_APP = 'DOESNT APPLY'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^\LC\d+:'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^(\s*(\.(p2)?align\s.*|\.text|\.data|\.const_data|\.cstring|\.non_lazy_symbol_pointer|\.const|\.static_const|\.literal4|\.literal8|\.static_data|\.globl \S+|\.section .*|\.lcomm.*)\n)';
- $T_COPY_DIRVS = '\.(globl|lcomm)';
-
- $T_DOT_WORD = '\.(long|short|byte|fill|space)';
- $T_DOT_GLOBAL = '\.globl';
- $T_HDR_toc = "\.toc\n";
- $T_HDR_literal = "\t\.const\n\t\.align 2\n";
- $T_HDR_misc = "\t\.text\n\t\.align 2\n";
- $T_HDR_data = "\t\.data\n\t\.align 2\n";
- $T_HDR_rodata = "\t\.const\n\t\.align 2\n";
- $T_HDR_relrodata= "\t\.const_data\n\t\.align 2\n";
- $T_HDR_closure = "\t\.data\n\t\.align 2\n";
- $T_HDR_info = "\t\.text\n\t\.align 2\n";
- $T_HDR_entry = "\t\.text\n\t\.align 2\n";
- $T_HDR_vector = "\t\.text\n\t\.align 2\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^i386-apple-darwin.*/m ) {
- # Apple i386 Darwin/MacOS X.
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = '_'; # _ if symbols have an underscore on the front
- $T_PRE_APP = 'DOESNT APPLY'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^\LC\d+:'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
- $T_X86_PRE_LLBL_PAT = 'L';
- $T_X86_PRE_LLBL = 'L';
- $T_X86_BADJMP = '^\tjmp [^L\*]';
-
- $T_MOVE_DIRVS = '^(\s*(\.(p2)?align\s.*|\.text|\.data|\.const_data|\.cstring|\.non_lazy_symbol_pointer|\.const|\.static_const|\.literal4|\.literal8|\.static_data|\.globl \S+|\.section .*|\.lcomm.*)\n)';
- $T_COPY_DIRVS = '\.(globl|lcomm)';
-
- $T_DOT_WORD = '\.(long|short|byte|fill|space)';
- $T_DOT_GLOBAL = '\.globl';
- $T_HDR_toc = "\.toc\n";
- $T_HDR_literal16= "\t\.literal8\n\t\.align 4\n";
- $T_HDR_literal = "\t\.const\n\t\.align 4\n";
- $T_HDR_misc = "\t\.text\n\t\.align 2\n";
- $T_HDR_data = "\t\.data\n\t\.align 2\n";
- $T_HDR_rodata = "\t\.const\n\t\.align 2\n";
- $T_HDR_relrodata= "\t\.const_data\n\t\.align 2\n";
- $T_HDR_closure = "\t\.data\n\t\.align 2\n";
- $T_HDR_info = "\t\.text\n\t\.align 2\n";
- $T_HDR_entry = "\t\.text\n\t\.align 2\n";
- $T_HDR_vector = "\t\.text\n\t\.align 2\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^x86_64-apple-darwin.*/m ) {
- # Apple amd64 Darwin/MacOS X.
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = '_'; # _ if symbols have an underscore on the front
- $T_PRE_APP = 'DOESNT APPLY'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^\LC\d+:'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^(\s*(\.align \d+|\.text|\.data|\.const_data|\.cstring|\.non_lazy_symbol_pointer|\.const|\.static_const|\.literal4|\.literal8|\.static_data|\.globl \S+|\.section .*|\.lcomm.*)\n)';
- $T_COPY_DIRVS = '\.(globl|lcomm)';
-
- $T_DOT_WORD = '\.(quad|long|short|byte|fill|space)';
- $T_DOT_GLOBAL = '\.globl';
- $T_HDR_toc = "\.toc\n";
- $T_HDR_literal16= "\t\.literal8\n\t\.align 4\n";
- $T_HDR_literal = "\t\.const\n\t\.align 4\n";
- $T_HDR_misc = "\t\.text\n\t\.align 2\n";
- $T_HDR_data = "\t\.data\n\t\.align 2\n";
- $T_HDR_rodata = "\t\.const\n\t\.align 2\n";
- $T_HDR_relrodata= "\t\.const_data\n\t\.align 2\n";
- $T_HDR_closure = "\t\.data\n\t\.align 2\n";
- $T_HDR_info = "\t\.text\n\t\.align 2\n";
- $T_HDR_entry = "\t\.text\n\t\.align 2\n";
- $T_HDR_vector = "\t\.text\n\t\.align 2\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^powerpc-.*-linux/m ) {
- # PowerPC Linux
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = ''; # _ if symbols have an underscore on the front
- $T_PRE_APP = '^#'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^\.LC\d+:'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^(\s*(\.(p2)?align\s+\d+(,\s*0x90)?|\.globl\s+\S+|\.text|\.data|\.section\s+.*|\.type\s+.*|\.size\s+\S+\s*,\s*\d+|\.ident.*|\.local.*)\n)';
- $T_COPY_DIRVS = '^\s*\.(globl|type|size|local)';
-
- $T_DOT_WORD = '\.(long|short|byte|fill|space)';
- $T_DOT_GLOBAL = '\.globl';
- $T_HDR_toc = "\.toc\n";
- $T_HDR_literal = "\t\.section\t.rodata\n\t\.align 2\n";
- $T_HDR_misc = "\t\.text\n\t\.align 2\n";
- $T_HDR_data = "\t\.data\n\t\.align 2\n";
- $T_HDR_rodata = "\t\.section\t.rodata\n\t\.align 2\n";
- $T_HDR_closure = "\t\.data\n\t\.align 2\n";
- $T_HDR_info = "\t\.text\n\t\.align 2\n";
- $T_HDR_entry = "\t\.text\n\t\.align 2\n";
- $T_HDR_vector = "\t\.text\n\t\.align 2\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^powerpc64-.*-linux/m ) {
- # PowerPC 64 Linux
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = '\.'; # _ if symbols have an underscore on the front
- $T_PRE_APP = '^#'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^\.LC\d+:'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^(\s*(\.(p2)?align\s+\d+(,\s*0x90)?|\.globl\s+\S+|\.text|\.data|\.section\s+.*|\.type\s+.*|\.size\s+\S+\s*,\s*\d+|\.ident.*|\.local.*)\n)';
- $T_COPY_DIRVS = '^\s*\.(globl|type|size|local)';
-
- $T_DOT_WORD = '\.(long|short|byte|fill|space)';
- $T_DOT_GLOBAL = '\.globl';
- $T_HDR_toc = "\.toc\n";
- $T_HDR_literal = "\t\.section\t\".toc\",\"aw\"\n";
- $T_HDR_misc = "\t\.text\n\t\.align 2\n";
- $T_HDR_data = "\t\.data\n\t\.align 2\n";
- $T_HDR_rodata = "\t\.section\t.rodata\n\t\.align 2\n";
- $T_HDR_closure = "\t\.data\n\t\.align 2\n";
- $T_HDR_info = "\t\.text\n\t\.align 2\n";
- $T_HDR_entry = "\t\.text\n\t\.align 2\n";
- $T_HDR_vector = "\t\.text\n\t\.align 2\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^sparc-.*-(solaris2|openbsd)/m ) {
-
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = ''; # _ if symbols have an underscore on the front
- $T_PRE_APP = 'DOES NOT SEEM TO APPLY'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^\.LLC(\d+):$'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^((\s+\.align\s+\d+|\s+\.proc\s+\d+|\s+\.global\s+\S+|\s+\.local\s+\S+|\.text|\.data|\.stab.*|\s*\.section.*|\s+\.type.*|\s+\.size.*)\n)';
- $T_COPY_DIRVS = '\.(global|local|proc|stab)';
-
- $T_DOT_WORD = '\.(long|word|byte|half|skip|uahalf|uaword)';
- $T_DOT_GLOBAL = '^\t\.global';
- $T_HDR_literal = "\.text\n\t\.align 8\n";
- $T_HDR_misc = "\.text\n\t\.align 4\n";
- $T_HDR_data = "\.data\n\t\.align 8\n";
- $T_HDR_rodata = "\.text\n\t\.align 4\n";
- $T_HDR_closure = "\.data\n\t\.align 4\n";
- $T_HDR_info = "\.text\n\t\.align 4\n";
- $T_HDR_entry = "\.text\n\t\.align 4\n";
- $T_HDR_vector = "\.text\n\t\.align 4\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^sparc-.*-sunos4/m ) {
-
- $T_STABBY = 1; # 1 iff .stab things (usually if a.out format)
- $T_US = '_'; # _ if symbols have an underscore on the front
- $T_PRE_APP = '^# DOES NOT SEEM TO APPLY'; # regexp that says what comes before APP/NO_APP
- $T_CONST_LBL = '^LC(\d+):$';
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^((\s+\.align\s+\d+|\s+\.proc\s+\d+|\s+\.global\s+\S+|\.text|\.data|\.stab.*)\n)';
- $T_COPY_DIRVS = '\.(global|proc|stab)';
-
- $T_DOT_WORD = '\.word';
- $T_DOT_GLOBAL = '^\t\.global';
- $T_HDR_literal = "\.text\n\t\.align 8\n";
- $T_HDR_misc = "\.text\n\t\.align 4\n";
- $T_HDR_data = "\.data\n\t\.align 8\n";
- $T_HDR_rodata = "\.text\n\t\.align 4\n";
- $T_HDR_closure = "\.data\n\t\.align 4\n";
- $T_HDR_info = "\.text\n\t\.align 4\n";
- $T_HDR_entry = "\.text\n\t\.align 4\n";
- $T_HDR_vector = "\.text\n\t\.align 4\n";
-
- #--------------------------------------------------------#
- } elsif ( $TargetPlatform =~ /^sparc-.*-linux/m ) {
- $T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = ''; # _ if symbols have an underscore on the front
- $T_PRE_APP = '#'; # regexp that says what comes before APP/NO_APP
- # Probably doesn't apply anyway
- $T_CONST_LBL = '^\.LLC(\d+):$'; # regexp for what such a lbl looks like
- $T_POST_LBL = ':';
-
- $T_MOVE_DIRVS = '^((\s+\.align\s+\d+|\s+\.proc\s+\d+|\s+\.global\s+\S+|\s+\.local\s+\S+|\.text|\.data|\.seg|\.stab.*|\s+?\.section.*|\s+\.type.*|\s+\.size.*)\n)';
- $T_COPY_DIRVS = '\.(global|local|globl|proc|stab)';
-
- $T_DOT_WORD = '\.(long|word|nword|xword|byte|half|short|skip|uahalf|uaword)';
- $T_DOT_GLOBAL = '^\t\.global';
- $T_HDR_literal = "\.text\n\t\.align 8\n";
- $T_HDR_misc = "\.text\n\t\.align 4\n";
- $T_HDR_data = "\.data\n\t\.align 8\n";
- $T_HDR_rodata = "\.text\n\t\.align 4\n";
- $T_HDR_closure = "\.data\n\t\.align 4\n";
- $T_HDR_info = "\.text\n\t\.align 4\n";
- $T_HDR_entry = "\.text\n\t\.align 4\n";
- $T_HDR_vector = "\.text\n\t\.align 4\n";
-
- #--------------------------------------------------------#
- } else {
- print STDERR "$Pgm: don't know how to mangle assembly language for: $TargetPlatform\n";
- exit 1;
- }
-
- if($T_HDR_relrodata eq "") {
- # default values:
- # relrodata defaults to rodata.
- $T_HDR_relrodata = $T_HDR_rodata;
- }
-
-if ( 0 ) {
-print STDERR "T_STABBY: $T_STABBY\n";
-print STDERR "T_US: $T_US\n";
-print STDERR "T_PRE_APP: $T_PRE_APP\n";
-print STDERR "T_CONST_LBL: $T_CONST_LBL\n";
-print STDERR "T_POST_LBL: $T_POST_LBL\n";
-if ( $TargetPlatform =~ /^i386-/m ) {
- print STDERR "T_X86_PRE_LLBL_PAT: $T_X86_PRE_LLBL_PAT\n";
- print STDERR "T_X86_PRE_LLBL: $T_X86_PRE_LLBL\n";
- print STDERR "T_X86_BADJMP: $T_X86_BADJMP\n";
-}
-print STDERR "T_MOVE_DIRVS: $T_MOVE_DIRVS\n";
-print STDERR "T_COPY_DIRVS: $T_COPY_DIRVS\n";
-print STDERR "T_DOT_WORD: $T_DOT_WORD\n";
-print STDERR "T_HDR_literal: $T_HDR_literal\n";
-print STDERR "T_HDR_misc: $T_HDR_misc\n";
-print STDERR "T_HDR_data: $T_HDR_data\n";
-print STDERR "T_HDR_rodata: $T_HDR_rodata\n";
-print STDERR "T_HDR_closure: $T_HDR_closure\n";
-print STDERR "T_HDR_info: $T_HDR_info\n";
-print STDERR "T_HDR_entry: $T_HDR_entry\n";
-print STDERR "T_HDR_vector: $T_HDR_vector\n";
-}
-
-}
-\end{code}
-
-%************************************************************************
-%* *
-\subsection{Mangle away}
-%* *
-%************************************************************************
-
-\begin{code}
-sub mangle_asm {
- local($in_asmf, $out_asmf) = @_;
- local($i, $c);
-
- # ia64-specific information for code chunks
- my $ia64_locnum;
- my $ia64_outnum;
-
- &init_TARGET_STUFF();
- &init_FUNNY_THINGS();
-
- open(INASM, "< $in_asmf")
- || &tidy_up_and_die(1,"$Pgm: failed to open `$in_asmf' (to read)\n");
- open(OUTASM,"> $out_asmf")
- || &tidy_up_and_die(1,"$Pgm: failed to open `$out_asmf' (to write)\n");
-
- # read whole file, divide into "chunks":
- # record some info about what we've found...
-
- @chk = (); # contents of the chunk
- $numchks = 0; # number of them
- @chkcat = (); # what category of thing in each chunk
- @chksymb = (); # what symbol(base) is defined in this chunk
- %entrychk = (); # ditto, its entry code
- %closurechk = (); # ditto, the (static) closure
- %srtchk = (); # ditto, its SRT (for top-level things)
- %infochk = (); # given a symbol base, say what chunk its info tbl is in
- %vectorchk = (); # ditto, return vector table
- $EXTERN_DECLS = ''; # .globl <foo> .text (MIPS only)
-
- $i = 0; $chkcat[0] = 'misc'; $chk[0] = '';
-
- while (<INASM>) {
- tr/\r//d if $TargetPlatform =~ /-mingw32$/m; # In case Perl doesn't convert line endings
- next if $T_STABBY && /^\.stab.*${T_US}__stg_split_marker/om;
- next if $T_STABBY && /^\.stab.*ghc.*c_ID/m;
- next if /^\t\.def.*endef$/m;
- next if /${T_PRE_APP}(NO_)?APP/om;
- next if /^;/m && $TargetPlatform =~ /^hppa/m;
-
- next if /(^$|^\t\.file\t|^ # )/m && $TargetPlatform =~ /(^mips-|^ia64-|-mingw32$)/m;
-
- if ( $TargetPlatform =~ /^mips-/m
- && /^\t\.(globl\S+\.text|comm\t)/m ) {
- $EXTERN_DECLS .= $_ unless /(__DISCARD__|\b(PK_|ASSIGN_)(FLT|DBL)\b)/m;
- # Treat .comm variables as data. These show up in two (known) places:
- #
- # - the module_registered variable used in the __stginit fragment.
- # even though these are declared static and initialised, gcc 3.3
- # likes to make them .comm, presumably to save space in the
- # object file.
- #
- # - global variables used to pass arguments from C to STG in
- # a foreign export. (is this still true? --SDM)
- #
- } elsif ( /^\t\.comm.*$/m ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'data';
- $chksymb[$i] = '';
-
- # Labels ending "_str": these are literal strings.
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_str${T_POST_LBL}$/m ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'relrodata';
- $chksymb[$i] = '';
- } elsif ( $TargetPlatform =~ /-darwin/m
- && (/^\s*\.subsections_via_symbols/m
- ||/^\s*\.no_dead_strip.*/m)) {
- # Don't allow Apple's linker to do any dead-stripping of symbols
- # in this file, because it will mess up info-tables in mangled
- # code.
- # The .no_dead_strip directives are actually put there by
- # the gcc3 "used" attribute on entry points.
-
- } elsif ( $TargetPlatform =~ /^.*-apple-darwin.*/m && (
- /^\s*\.picsymbol_stub/m
- || /^\s*\.section __TEXT,__picsymbol_stub\d,.*/m
- || /^\s*\.section __TEXT,__picsymbolstub\d,.*/m
- || /^\s*\.symbol_stub/m
- || /^\s*\.section __TEXT,__symbol_stub\d,.*/m
- || /^\s*\.section __TEXT,__symbolstub\d,.*/m
- || /^\s*\.lazy_symbol_pointer/m
- || /^\s*\.non_lazy_symbol_pointer/m
- || /^\s*\.section __IMPORT.*/m))
- {
- $chk[++$i] = $_;
- $chkcat[$i] = 'dyld';
- $chksymb[$i] = '';
- $dyld_section = $_;
-
- } elsif ( $TargetPlatform =~ /^.*-apple-darwin.*/m && $chkcat[$i] eq 'dyld' && /^\s*\.data/m)
- { # non_lazy_symbol_ptrs that point to local symbols
- $chk[++$i] = $_;
- $chkcat[$i] = 'dyld';
- $chksymb[$i] = '';
- $dyld_section = $_;
- } elsif ( $TargetPlatform =~ /^.*-apple-darwin.*/m && $chkcat[$i] eq 'dyld' && /^\s*\.align/m)
- { # non_lazy_symbol_ptrs that point to local symbols
- $dyld_section .= $_;
- } elsif ( $TargetPlatform =~ /^.*-apple-darwin.*/m && $chkcat[$i] eq 'dyld' && /^L_.*:$/m)
- { # non_lazy_symbol_ptrs that point to local symbols
- $chk[++$i] = $dyld_section . $_;
- $chkcat[$i] = 'dyld';
- $chksymb[$i] = '';
-
- } elsif ( /^\s+/m ) { # most common case first -- a simple line!
- # duplicated from the bottom
-
- $chk[$i] .= $_;
-
- } elsif ( /\.\.ng:$/m && $TargetPlatform =~ /^alpha-/m ) {
- # Alphas: Local labels not to be confused with new chunks
- $chk[$i] .= $_;
- # NB: all the rest start with a non-space
-
- } elsif ( $TargetPlatform =~ /^mips-/m
- && /^\d+:/m ) { # a funny-looking very-local label
- $chk[$i] .= $_;
-
- } elsif ( /$T_CONST_LBL/om ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'literal';
- $chksymb[$i] = $1;
-
- } elsif ( /^${T_US}__stg_split_marker(\d*)${T_POST_LBL}$/om ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'splitmarker';
- $chksymb[$i] = $1;
-
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_info${T_POST_LBL}$/om ) {
- $symb = $1;
- $chk[++$i] = $_;
- $chkcat[$i] = 'infotbl';
- $chksymb[$i] = $symb;
-
- die "Info table already? $symb; $i\n" if defined($infochk{$symb});
-
- $infochk{$symb} = $i;
-
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_(entry|ret)${T_POST_LBL}$/om ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'entry';
- $chksymb[$i] = $1;
-
- $entrychk{$1} = $i;
-
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_closure${T_POST_LBL}$/om ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'closure';
- $chksymb[$i] = $1;
-
- $closurechk{$1} = $i;
-
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_srt${T_POST_LBL}$/om ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'srt';
- $chksymb[$i] = $1;
-
- $srtchk{$1} = $i;
-
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_ct${T_POST_LBL}$/om ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'data';
- $chksymb[$i] = '';
-
- } elsif ( /^${T_US}(stg_ap_stack_entries|stg_stack_save_entries|stg_arg_bitmaps)${T_POST_LBL}$/om ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'data';
- $chksymb[$i] = '';
-
- } elsif ( /^(${T_US}__gnu_compiled_c|gcc2_compiled\.)${T_POST_LBL}/om ) {
- ; # toss it
-
- } elsif ( /^${T_US}[A-Za-z0-9_]+\.\d+${T_POST_LBL}$/om
- || /^${T_US}.*_CAT${T_POST_LBL}$/om # PROF: _entryname_CAT
- || /^${T_US}.*_done${T_POST_LBL}$/om # PROF: _module_done
- || /^${T_US}_module_registered${T_POST_LBL}$/om # PROF: _module_registered
- ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'data';
- $chksymb[$i] = '';
-
- } elsif ( /^([A-Za-z0-9_]+)\s+\.comm/m && $TargetPlatform =~ /^hppa/m ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'bss';
- $chksymb[$i] = '';
-
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_cc(s)?${T_POST_LBL}$/om ) {
- # all CC_ symbols go in the data section...
- $chk[++$i] = $_;
- $chkcat[$i] = 'data';
- $chksymb[$i] = '';
-
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_hpc${T_POST_LBL}$/om ) {
- # hpc shares tick boxes across modules
- $chk[++$i] = $_;
- $chkcat[$i] = 'data';
- $chksymb[$i] = '';
-
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_(alt|dflt)${T_POST_LBL}$/om ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'misc';
- $chksymb[$i] = '';
- } elsif ( /^${T_US}([A-Za-z0-9_]+)_vtbl${T_POST_LBL}$/om ) {
- $chk[++$i] = $_;
- $chkcat[$i] = 'vector';
- $chksymb[$i] = $1;
-
- $vectorchk{$1} = $i;
-
- } elsif ( $TargetPlatform =~ /^i386-.*-solaris2/m
- && /^[A-Za-z0-9][A-Za-z0-9_]*:/m ) {
- # Some Solaris system headers contain function definitions (as
- # opposed to mere prototypes), which end up in the .hc file when
- # a Haskell module foreign imports the corresponding system
- # functions (most notably stat()). We put them into the text
- # segment. Note that this currently does not extend to function
- # names starting with an underscore.
- # - chak 7/2001
- $chk[++$i] = $_;
- $chkcat[$i] = 'misc';
- $chksymb[$i] = $1;
-
- } elsif ( $TargetPlatform =~ /^i386-apple-darwin/m && /^(___i686\.get_pc_thunk\.[abcd]x):/om) {
- # To handle PIC on Darwin/x86, we need to appropriately pass through
- # the get_pc_thunk functions. The need to be put into a special section
- # marked as coalesced (otherwise the .weak_definition doesn't work
- # on Darwin).
- $chk[++$i] = $_;
- $chkcat[$i] = 'get_pc_thunk';
- $chksymb[$i] = $1;
-
- } elsif ( /^${T_US}[A-Za-z0-9_]/om
- && ( $TargetPlatform !~ /^hppa/m # need to avoid local labels in this case
- || ! /^L\$\d+$/m )
- && ( $TargetPlatform !~ /^powerpc64/m # we need to avoid local labels in this case
- || ! /^\.L\d+:$/m ) ) {
- local($thing);
- chop($thing = $_);
- $thing =~ s/:$//m;
- $chk[++$i] = $_;
- $chksymb[$i] = '';
- if (
- /^${T_US}stg_.*${T_POST_LBL}$/om # RTS internals
- || /^${T_US}__stg_.*${T_POST_LBL}$/om # more RTS internals
- || /^${T_US}__fexp_.*${T_POST_LBL}$/om # foreign export
- || /^${T_US}.*_slow${T_POST_LBL}$/om # slow entry
- || /^${T_US}__stginit.*${T_POST_LBL}$/om # __stginit<module>
- || /^${T_US}.*_btm${T_POST_LBL}$/om # large bitmaps
- || /^${T_US}.*_fast${T_POST_LBL}$/om # primops
- || /^_uname:/om # x86/Solaris2
- )
- {
- $chkcat[$i] = 'misc';
- } elsif (
- /^${T_US}.*_srtd${T_POST_LBL}$/om # large bitmaps
- || /^${T_US}.*_closure_tbl${T_POST_LBL}$/om # closure tables
- )
- {
- $chkcat[$i] = 'relrodata';
- } else
- {
- print STDERR "Warning: retaining unknown function \`$thing' in output from C compiler\n";
- $chkcat[$i] = 'unknown';
- }
-
- } elsif ( $TargetPlatform =~ /^powerpc-.*-linux/m && /^\.LCTOC1 = /om ) {
- # PowerPC Linux's large-model PIC (-fPIC) generates a gobal offset
- # table "by hand". Be sure to copy it over.
- # Note that this label and all entries in the table should actually
- # go into the .got2 section, but it isn't easy to distinguish them
- # from other constant literals (.LC\d+), so we just put everything
- # in .rodata.
- $chk[++$i] = $_;
- $chkcat[$i] = 'literal';
- $chksymb[$i] = 'LCTOC1';
- } else { # simple line (duplicated at the top)
-
- $chk[$i] .= $_;
- }
- }
- $numchks = $#chk + 1;
- $chk[$numchks] = ''; # We might push .note.GNU-stack into this
- $chkcat[$numchks] = 'verbatim'; # If we do, write it straight back out
-
- # open CHUNKS, ">/tmp/chunks1" or die "Cannot open /tmp/chunks1: $!\n";
- # for (my $i = 0; $i < @chk; ++$i) { print CHUNKS "======= $i =======\n", $chk[$i] }
- # close CHUNKS;
-
- # the division into chunks is imperfect;
- # we throw some things over the fence into the next
- # chunk.
- #
- # also, there are things we would like to know
- # about the whole module before we start spitting
- # output.
-
- local($FIRST_MANGLABLE) = ($TargetPlatform =~ /^(alpha-|hppa|mips-)/m) ? 1 : 0;
- local($FIRST_TOSSABLE ) = ($TargetPlatform =~ /^(hppa|mips-)/m) ? 1 : 0;
-
-# print STDERR "first chunk to mangle: $FIRST_MANGLABLE\n";
-
- # Alphas: NB: we start meddling at chunk 1, not chunk 0
- # The first ".rdata" is quite magical; as of GCC 2.7.x, it
- # spits a ".quad 0" in after the very first ".rdata"; we
- # detect this special case (tossing the ".quad 0")!
- local($magic_rdata_seen) = 0;
-
- # HPPAs, MIPSen: also start medding at chunk 1
-
- for ($i = $FIRST_TOSSABLE; $i < $numchks; $i++) {
- $c = $chk[$i]; # convenience copy
-
-# print STDERR "\nCHK $i (BEFORE) (",$chkcat[$i],"):\n", $c;
-
- # toss all prologue stuff; HPPA is pretty weird
- # (see elsewhere)
- $c = &hppa_mash_prologue($c) if $TargetPlatform =~ /^hppa-/m;
-
- undef $ia64_locnum;
- undef $ia64_outnum;
-
- # be slightly paranoid to make sure there's
- # nothing surprising in there
- if ( $c =~ /--- BEGIN ---/m ) {
- if (($p, $r) = split(/--- BEGIN ---/m, $c)) {
-
- # remove junk whitespace around the split point
- $p =~ s/\t+$//m;
- $r =~ s/^\s*\n//m;
-
- if ($TargetPlatform =~ /^i386-/m) {
- if ($p =~ /^\tsubl\s+\$(\d+),\s*\%esp\n/m) {
- if ($1 >= 8192) {
- die "Error: reserved stack space exceeded!\n Possible workarounds: compile with -fasm, or try another version of gcc.\n"
- }
- }
-
- # gcc 3.4.3 puts this kind of stuff in the prologue, eg.
- # when compiling PrimOps.cmm with -optc-O2:
- # xorl %ecx, %ecx
- # xorl %edx, %edx
- # movl %ecx, 16(%esp)
- # movl %edx, 20(%esp)
- # but then the code of the function doesn't assume
- # anything about the contnets of these stack locations.
- # I think it's to do with the use of inline functions for
- # PK_Word64() and friends, where gcc is initialising the
- # contents of the struct to zero, and failing to optimise
- # away the initialisation. Let's live dangerously and
- # discard these initalisations.
-
- $p =~ s/^\tpushl\s+\%e(di|si|bx)\n//gm;
- $p =~ s/^\txorl\s+\%e(ax|cx|dx),\s*\%e(ax|cx|dx)\n//gm;
- $p =~ s/^\tmovl\s+\%e(ax|cx|dx|si|di),\s*\d*\(\%esp\)\n//gm;
- $p =~ s/^\tmovl\s+\$\d+,\s*\d*\(\%esp\)\n//gm;
- $p =~ s/^\tsubl\s+\$\d+,\s*\%esp\n//m;
- $p =~ s/^\tmovl\s+\$\d+,\s*\%eax\n\tcall\s+__alloca\n//m if ($TargetPlatform =~ /^.*-(cygwin32|mingw32)/m);
-
- if ($TargetPlatform =~ /^i386-apple-darwin/m) {
- $pcrel_label = $p;
- $pcrel_label =~ s/(.|\n)*^(\"?L\d+\$pb\"?):\n(.|\n)*/$2/m or $pcrel_label = "";
- $pcrel_reg = $p;
- $pcrel_reg =~ s/(.|\n)*.*___i686\.get_pc_thunk\.([abcd]x)\n(.|\n)*/$2/m or $pcrel_reg = "";
- $p =~ s/^\s+call\s+___i686\.get_pc_thunk\..x//m;
- $p =~ s/^\"?L\d+\$pb\"?:\n//m;
-
- if ($pcrel_reg eq "bx") {
- # Bad gcc. Goes and uses %ebx, our BaseReg, for PIC. Bad gcc.
- die "Darwin/x86: -fPIC -via-C doesn't work yet, use -fasm. Aborting."
- }
- }
-
- } elsif ($TargetPlatform =~ /^x86_64-/m) {
- $p =~ s/^\tpushq\s+\%r(bx|bp|12|13|14)\n//gm;
- $p =~ s/^\tmovq\s+\%r(bx|bp|12|13|14),\s*\d*\(\%rsp\)\n//gm;
- $p =~ s/^\tsubq\s+\$\d+,\s*\%rsp\n//m;
-
- } elsif ($TargetPlatform =~ /^ia64-/m) {
- $p =~ s/^\t\.prologue .*\n//m;
-
- # Record the number of local and out registers for register relocation later
- $p =~ s/^\t\.save ar\.pfs, r\d+\n\talloc r\d+ = ar\.pfs, 0, (\d+), (\d+), 0\n//m;
- $ia64_locnum = $1;
- $ia64_outnum = $2;
-
- $p =~ s/^\t\.fframe \d+\n\tadds r12 = -\d+, r12\n//m;
- $p =~ s/^\t\.save rp, r\d+\n\tmov r\d+ = b0\n//m;
-
- # Ignore save/restore of these registers; they're taken
- # care of in StgRun()
- $p =~ s/^\t\.save ar\.lc, r\d+\n//m;
- $p =~ s/^\t\.save pr, r\d+\n//m;
- $p =~ s/^\tmov r\d+ = ar\.lc\n//m;
- $p =~ s/^\tmov r\d+ = pr\n//m;
-
- # Remove .proc and .body directives
- $p =~ s/^\t\.proc [a-zA-Z0-9_.]+#\n//m;
- $p =~ s/^\t\.body\n//m;
-
- # If there's a label, move it to the body
- if ($p =~ /^[a-zA-Z0-9.]+:\n/m) {
- $p = $` . $';
- $r = $& . $r;
- }
-
- # Remove floating-point spill instructions.
- # Only fp registers 2-5 and 16-23 are saved by the runtime.
- if ($p =~ s/^\tstf\.spill \[r1[4-9]\] = f([2-5]|1[6-9]|2[0-3])(, [0-9]+)?\n//gm) {
- # Being paranoid, only try to remove these if we saw a
- # spill operation.
- $p =~ s/^\tmov r1[4-9] = r12\n//m;
- $p =~ s/^\tadds r1[4-9] = -[0-9]+, r12\n//gm;
- $p =~ s/^\t\.save\.f 0x[0-9a-fA-F]\n//gm;
- $p =~ s/^\t\.save\.gf 0x0, 0x[0-9a-fA-F]+\n//gm;
- }
-
- $p =~ s/^\tnop(?:\.[mifb])?\s+\d+\n//gm; # remove nop instructions
- $p =~ s/^\t\.(mii|mmi|mfi)\n//gm; # bundling is no longer sensible
- $p =~ s/^\t;;\n//gm; # discard stops
- $p =~ s/^\t\/\/.*\n//gm; # gcc inserts timings in // comments
-
- # GCC 3.3 saves r1 in the prologue, move this to the body
- # (Does this register get restored anywhere?)
- if ($p =~ /^\tmov r\d+ = r1\n/m) {
- $p = $` . $';
- $r = $& . $r;
- }
- } elsif ($TargetPlatform =~ /^m68k-/m) {
- $p =~ s/^\tlink a6,#-?\d.*\n//m;
- $p =~ s/^\tpea a6@\n\tmovel sp,a6\n//m;
- # The above showed up in the asm code,
- # so I added it here.
- # I hope it's correct.
- # CaS
- $p =~ s/^\tmovel d2,sp\@-\n//m;
- $p =~ s/^\tmovel d5,sp\@-\n//m; # SMmark.* only?
- $p =~ s/^\tmoveml \#0x[0-9a-f]+,sp\@-\n//m; # SMmark.* only?
- } elsif ($TargetPlatform =~ /^mips-/m) {
- # the .frame/.mask/.fmask that we use is the same
- # as that produced by GCC for miniInterpret; this
- # gives GDB some chance of figuring out what happened
- $FRAME = "\t.frame\t\$sp,2168,\$31\n\t.mask\t0x90000000,-4\n\t.fmask\t0x00000000,0\n";
- $p =~ s/^\t\.(frame).*\n/__FRAME__/gm;
- $p =~ s/^\t\.(mask|fmask).*\n//gm;
- $p =~ s/^\t\.cprestore.*\n/\t\.cprestore 416\n/m; # 16 + 100 4-byte args
- $p =~ s/^\tsubu\t\$sp,\$sp,\d+\n//m;
- $p =~ s/^\tsw\t\$31,\d+\(\$sp\)\n//m;
- $p =~ s/^\tsw\t\$fp,\d+\(\$sp\)\n//m;
- $p =~ s/^\tsw\t\$28,\d+\(\$sp\)\n//m;
- $p =~ s/__FRAME__/$FRAME/m;
- } elsif ($TargetPlatform =~ /^powerpc-apple-darwin.*/m) {
- $pcrel_label = $p;
- $pcrel_label =~ s/(.|\n)*^(\"?L\d+\$pb\"?):\n(.|\n)*/$2/m or $pcrel_label = "";
-
- $p =~ s/^\tmflr r0\n//m;
- $p =~ s/^\tbl saveFP # f\d+\n//m;
- $p =~ s/^\tbl saveFP ; save f\d+-f\d+\n//m;
- $p =~ s/^\"?L\d+\$pb\"?:\n//m;
- $p =~ s/^\tstmw r\d+,-\d+\(r1\)\n//m;
- $p =~ s/^\tstfd f\d+,-\d+\(r1\)\n//gm;
- $p =~ s/^\tstw r0,\d+\(r1\)\n//gm;
- $p =~ s/^\tstwu r1,-\d+\(r1\)\n//m;
- $p =~ s/^\tstw r\d+,-\d+\(r1\)\n//gm;
- $p =~ s/^\tbcl 20,31,\"?L\d+\$pb\"?\n//m;
- $p =~ s/^\"?L\d+\$pb\"?:\n//m;
- $p =~ s/^\tmflr r31\n//m;
-
- # This is bad: GCC 3 seems to zero-fill some local variables in the prologue
- # under some circumstances, only when generating position dependent code.
- # I have no idea why, and I don't think it is necessary, so let's toss it.
- $p =~ s/^\tli r\d+,0\n//gm;
- $p =~ s/^\tstw r\d+,\d+\(r1\)\n//gm;
- } elsif ($TargetPlatform =~ /^powerpc-.*-linux/m) {
- $p =~ s/^\tmflr 0\n//m;
- $p =~ s/^\tstmw \d+,\d+\(1\)\n//m;
- $p =~ s/^\tstfd \d+,\d+\(1\)\n//gm;
- $p =~ s/^\tstw r0,8\(1\)\n//m;
- $p =~ s/^\tstwu 1,-\d+\(1\)\n//m;
- $p =~ s/^\tstw \d+,\d+\(1\)\n//gm;
-
- # GCC's "large-model" PIC (-fPIC)
- $pcrel_label = $p;
- $pcrel_label =~ s/(.|\n)*^.LCF(\d+):\n(.|\n)*/$2/m or $pcrel_label = "";
-
- $p =~ s/^\tbcl 20,31,.LCF\d+\n//m;
- $p =~ s/^.LCF\d+:\n//m;
- $p =~ s/^\tmflr 30\n//m;
- $p =~ s/^\tlwz 0,\.LCL\d+-\.LCF\d+\(30\)\n//m;
- $p =~ s/^\tadd 30,0,30\n//m;
-
- # This is bad: GCC 3 seems to zero-fill some local variables in the prologue
- # under some circumstances, only when generating position dependent code.
- # I have no idea why, and I don't think it is necessary, so let's toss it.
- $p =~ s/^\tli \d+,0\n//gm;
- $p =~ s/^\tstw \d+,\d+\(1\)\n//gm;
- } elsif ($TargetPlatform =~ /^powerpc64-.*-linux/m) {
- $p =~ s/^\tmr 31,1\n//m;
- $p =~ s/^\tmflr 0\n//m;
- $p =~ s/^\tstmw \d+,\d+\(1\)\n//m;
- $p =~ s/^\tstfd \d+,-?\d+\(1\)\n//gm;
- $p =~ s/^\tstd r0,8\(1\)\n//m;
- $p =~ s/^\tstdu 1,-\d+\(1\)\n//m;
- $p =~ s/^\tstd \d+,-?\d+\(1\)\n//gm;
-
- # This is bad: GCC 3 seems to zero-fill some local variables in the prologue
- # under some circumstances, only when generating position dependent code.
- # I have no idea why, and I don't think it is necessary, so let's toss it.
- $p =~ s/^\tli \d+,0\n//gm;
- $p =~ s/^\tstd \d+,\d+\(1\)\n//gm;
- } else {
- print STDERR "$Pgm: unknown prologue mangling? $TargetPlatform\n";
- }
-
- # HWL HACK: dont die, just print a warning
- #print stderr "HWL: this should die! Prologue junk?: $p\n" if $p =~ /^\t[^\.]/;
- die "Prologue junk?: $p\n" if $p =~ /^\s+[^\s\.]/m;
-
- # For PIC, we want to keep part of the prologue
- if ($TargetPlatform =~ /^powerpc-apple-darwin.*/m && $pcrel_label ne "") {
- # Darwin: load the current instruction pointer into register r31
- $p .= "bcl 20,31,$pcrel_label\n";
- $p .= "$pcrel_label:\n";
- $p .= "\tmflr r31\n";
- } elsif ($TargetPlatform =~ /^powerpc-.*-linux/m && $pcrel_label ne "") {
- # Linux: load the GOT pointer into register 30
- $p .= "\tbcl 20,31,.LCF$pcrel_label\n";
- $p .= ".LCF$pcrel_label:\n";
- $p .= "\tmflr 30\n";
- $p .= "\tlwz 0,.LCL$pcrel_label-.LCF$pcrel_label(30)\n";
- $p .= "\tadd 30,0,30\n";
- } elsif ($TargetPlatform =~ /^i386-apple-darwin.*/m && $pcrel_label ne "") {
- $p .= "\tcall ___i686.get_pc_thunk.$pcrel_reg\n";
- $p .= "$pcrel_label:\n";
- }
-
- # glue together what's left
- $c = $p . $r;
- }
- }
-
- if ( $TargetPlatform =~ /^mips-/m ) {
- # MIPS: first, this basic sequence may occur "--- END ---" or not
- $c =~ s/^\tlw\t\$31,\d+\(\$sp\)\n\taddu\t\$sp,\$sp,\d+\n\tj\t\$31\n\t\.end/\t\.end/m;
- }
-
- # toss all epilogue stuff; again, paranoidly
- if ( $c =~ /--- END ---/m ) {
- # Gcc may decide to replicate the function epilogue. We want
- # to process all epilogues, so we split the function and then
- # loop here.
- @fragments = split(/--- END ---/m, $c);
- $r = shift(@fragments);
-
- # Rebuild `c'; processed fragments will be appended to `c'
- $c = $r;
-
- foreach $e (@fragments) {
- # etail holds code that is after the epilogue in the assembly-code
- # layout and should not be filtered as part of the epilogue.
- $etail = "";
- if ($TargetPlatform =~ /^i386-/m) {
- $e =~ s/^\tret\n//m;
- $e =~ s/^\tpopl\s+\%edi\n//m;
- $e =~ s/^\tpopl\s+\%esi\n//m;
- $e =~ s/^\tpopl\s+\%edx\n//m;
- $e =~ s/^\tpopl\s+\%ecx\n//m;
- $e =~ s/^\taddl\s+\$\d+,\s*\%esp\n//m;
- $e =~ s/^\tsubl\s+\$-\d+,\s*\%esp\n//m;
- } elsif ($TargetPlatform =~ /^ia64-/m) {
- # The epilogue is first split into:
- # $e, the epilogue code (up to the return instruction)
- # $etail, non-epilogue code (after the return instruction)
- # The return instruction is stripped in the process.
- if (!(($e, $etail) = split(/^\tbr\.ret\.sptk\.many b0\n/m, $e))) {
- die "Epilogue doesn't seem to have one return instruction: $e\n";
- }
- # Remove 'endp' directive from the tail
- $etail =~ s/^\t\.endp [a-zA-Z0-9_.]+#\n//m;
-
- # If a return value is saved here, discard it
- $e =~ s/^\tmov r8 = r14\n//m;
-
- # Remove floating-point fill instructions.
- # Only fp registers 2-5 and 16-23 are saved by the runtime.
- if ($e =~ s/^\tldf\.fill f([2-5]|1[6-9]|2[0-3]) = \[r1[4-9]\](, [0-9]+)?\n//gm) {
- # Being paranoid, only try to remove this if we saw a fill
- # operation.
- $e =~ s/^\tadds r1[4-9] = [0-9]+, r12//gm;
- }
-
- $e =~ s/^\tnop(?:\.[mifb])?\s+\d+\n//gm; # remove nop instructions
- $e =~ s/^\tmov ar\.pfs = r\d+\n//m;
- $e =~ s/^\tmov ar\.lc = r\d+\n//m;
- $e =~ s/^\tmov pr = r\d+, -1\n//m;
- $e =~ s/^\tmov b0 = r\d+\n//m;
- $e =~ s/^\t\.restore sp\n\tadds r12 = \d+, r12\n//m;
- #$e =~ s/^\tbr\.ret\.sptk\.many b0\n//; # already removed
- $e =~ s/^\t\.(mii|mmi|mfi|mib)\n//gm; # bundling is no longer sensible
- $e =~ s/^\t;;\n//gm; # discard stops - stop at end of body is sufficient
- $e =~ s/^\t\/\/.*\n//gm; # gcc inserts timings in // comments
- } elsif ($TargetPlatform =~ /^m68k-/m) {
- $e =~ s/^\tunlk a6\n//m;
- $e =~ s/^\trts\n//m;
- } elsif ($TargetPlatform =~ /^mips-/m) {
- $e =~ s/^\tlw\t\$31,\d+\(\$sp\)\n//m;
- $e =~ s/^\tlw\t\$fp,\d+\(\$sp\)\n//m;
- $e =~ s/^\taddu\t\$sp,\$sp,\d+\n//m;
- $e =~ s/^\tj\t\$31\n//m;
- } elsif ($TargetPlatform =~ /^powerpc-apple-darwin.*/m) {
- $e =~ s/^\taddi r1,r1,\d+\n//m;
- $e =~ s/^\tlwz r\d+,\d+\(r1\)\n//m;
- $e =~ s/^\tlmw r\d+,-\d+\(r1\)\n//m;
- $e =~ s/^\tmtlr r0\n//m;
- $e =~ s/^\tblr\n//m;
- $e =~ s/^\tb restFP ;.*\n//m;
- } elsif ($TargetPlatform =~ /^powerpc64-.*-linux/m) {
- $e =~ s/^\tmr 3,0\n//m;
- $e =~ s/^\taddi 1,1,\d+\n//m;
- $e =~ s/^\tld 0,16\(1\)\n//m;
- $e =~ s/^\tmtlr 0\n//m;
-
- # callee-save registers
- $e =~ s/^\tld \d+,-?\d+\(1\)\n//gm;
- $e =~ s/^\tlfd \d+,-?\d+\(1\)\n//gm;
-
- # get rid of the debug junk along with the blr
- $e =~ s/^\tblr\n\t.long .*\n\t.byte .*\n//m;
-
- # incase we missed it with the last one get the blr alone
- $e =~ s/^\tblr\n//m;
- } else {
- print STDERR "$Pgm: unknown epilogue mangling? $TargetPlatform\n";
- }
-
- print STDERR "WARNING: Epilogue junk?: $e\n" if $e =~ /^\t\s*[^\.\s\n]/m;
-
- # glue together what's left
- $c .= $e . $etail;
- }
- $c =~ s/\n\t\n/\n/m; # junk blank line
- }
- else {
- if ($TargetPlatform =~ /^ia64-/m) {
- # On IA64, remove an .endp directive even if no epilogue was found.
- # Code optimizations may have removed the "--- END ---" token.
- $c =~ s/^\t\.endp [a-zA-Z0-9_.]+#\n//m;
- }
- }
-
- # On SPARCs, we don't do --- BEGIN/END ---, we just
- # toss the register-windowing save/restore/ret* instructions
- # directly unless they've been generated by function definitions in header
- # files on Solaris:
- if ( $TargetPlatform =~ /^sparc-/m ) {
- if ( ! ( $TargetPlatform =~ /solaris2$/m && $chkcat[$i] eq 'unknown' )) {
- $c =~ s/^\t(save.*|restore.*|ret|retl)\n//gm;
- }
- # throw away PROLOGUE comments
- $c =~ s/^\t!#PROLOGUE# 0\n\t!#PROLOGUE# 1\n//m;
- }
-
- # On Alphas, the prologue mangling is done a little later (below)
-
- # toss all calls to __DISCARD__
- $c =~ s/^\t(call|jbsr|jal)\s+${T_US}__DISCARD__\n//gom;
- $c =~ s/^\tjsr\s+\$26\s*,\s*${T_US}__DISCARD__\n//gom if $TargetPlatform =~ /^alpha-/m;
- $c =~ s/^\tbl\s+L___DISCARD__\$stub\n//gom if $TargetPlatform =~ /^powerpc-apple-darwin.*/m;
- $c =~ s/^\tbl\s+__DISCARD__(\@plt)?\n//gom if $TargetPlatform =~ /^powerpc-.*-linux/m;
- $c =~ s/^\tbl\s+\.__DISCARD__\n\s+nop\n//gom if $TargetPlatform =~ /^powerpc64-.*-linux/m;
- $c =~ s/^\tcall\s+L___DISCARD__\$stub\n//gom if $TargetPlatform =~ /i386-apple-darwin.*/m;
-
- # IA64: fix register allocation; mangle tailcalls into jumps
- if ($TargetPlatform =~ /^ia64-/m) {
- ia64_rename_registers($ia64_locnum, $ia64_outnum) if (defined($ia64_locnum));
- ia64_mangle_tailcalls();
- }
-
- # MIPS: that may leave some gratuitous asm macros around
- # (no harm done; but we get rid of them to be tidier)
- $c =~ s/^\t\.set\tnoreorder\n\t\.set\tnomacro\n\taddu\t(\S+)\n\t\.set\tmacro\n\t\.set\treorder\n/\taddu\t$1\n/m
- if $TargetPlatform =~ /^mips-/m;
-
- # toss stack adjustment after DoSparks
- $c =~ s/^(\tjbsr _DoSparks\n)\taddqw #8,sp/$1/gm
- if $TargetPlatform =~ /^m68k-/m; # this looks old...
-
- if ( $TargetPlatform =~ /^alpha-/m &&
- ! $magic_rdata_seen &&
- $c =~ /^\s*\.rdata\n\t\.quad 0\n\t\.align \d\n/m ) {
- $c =~ s/^\s*\.rdata\n\t\.quad 0\n\t\.align (\d)\n/\.rdata\n\t\.align $1\n/m;
- $magic_rdata_seen = 1;
- }
-
- # pick some end-things and move them to the next chunk
-
- # pin a funny end-thing on (for easier matching):
- $c .= 'FUNNY#END#THING';
-
- while ( $c =~ /${T_MOVE_DIRVS}FUNNY#END#THING/om ) {
-
- $to_move = $1;
-
- # on x86 we try not to copy any directives into a literal
- # chunk, rather we keep looking for the next real chunk. This
- # is because we get things like
- #
- # .globl blah_closure
- # .LC32
- # .string "..."
- # blah_closure:
- # ...
- #
- if ( $TargetPlatform =~ /^(i386|sparc|powerpc)/m && $to_move =~ /${T_COPY_DIRVS}/m ) {
- $j = $i + 1;
- while ( $j < $numchks && $chk[$j] =~ /$T_CONST_LBL/m) {
- $j++;
- }
- if ( $j < $numchks ) {
- $chk[$j] = $to_move . $chk[$j];
- }
- }
-
- elsif ( ( $i < ($numchks - 1)
- && ( $to_move =~ /${T_COPY_DIRVS}/m
- || ( $TargetPlatform =~ /^hppa/m
- && $to_move =~ /align/m
- && $chkcat[$i+1] eq 'literal')
- )
- )
- || ($to_move =~ /^[ \t]*\.section[ \t]+\.note\.GNU-stack,/m)
- ) {
- $chk[$i + 1] = $to_move . $chk[$i + 1];
- # otherwise they're tossed
- }
-
- $c =~ s/${T_MOVE_DIRVS}FUNNY#END#THING/FUNNY#END#THING/om;
- }
-
- if ( $TargetPlatform =~ /^alpha-/m && $c =~ /^\t\.ent\s+(\S+)/m ) {
- $ent = $1;
- # toss all prologue stuff, except for loading gp, and the ..ng address
- unless ($c =~ /\.ent.*\n\$.*\.\.ng:/m) {
- if (($p, $r) = split(/^\t\.prologue/m, $c)) {
- # use vars '$junk'; # Unused?
- if (($keep, $junk) = split(/\.\.ng:/m, $p)) {
- $keep =~ s/^\t\.frame.*\n/\t.frame \$30,0,\$26,0\n/m;
- $keep =~ s/^\t\.(mask|fmask).*\n//gm;
- $c = $keep . "..ng:\n";
- } else {
- print STDERR "malformed code block ($ent)?\n"
- }
- }
- $c .= "\t.prologue" . $r;
- }
- }
-
- $c =~ s/FUNNY#END#THING//m;
-
-# print STDERR "\nCHK $i (AFTER) (",$chkcat[$i],"):\n", $c;
-
- $chk[$i] = $c; # update w/ convenience copy
- }
-
- # open CHUNKS, ">/tmp/chunks2" or die "Cannot open /tmp/chunks2: $!\n";
- # for (my $i = 0; $i < @chk; ++$i) { print CHUNKS "======= $i =======\n", $chk[$i] }
- # close CHUNKS;
-
- if ( $TargetPlatform =~ /^alpha-/m ) {
- # print out the header stuff first
- $chk[0] =~ s/^(\t\.file.*)"(ghc\d+\.c)"/$1"$ifile_root.hc"/m;
- print OUTASM $chk[0];
-
- } elsif ( $TargetPlatform =~ /^hppa/m ) {
- print OUTASM $chk[0];
-
- } elsif ( $TargetPlatform =~ /^mips-/m ) {
- $chk[0] = "\t\.file\t1 \"$ifile_root.hc\"\n" . $chk[0];
-
- # get rid of horrible "<dollar>Revision: .*$" strings
- local(@lines0) = split(/\n/m, $chk[0]);
- local($z) = 0;
- while ( $z <= $#lines0 ) {
- if ( $lines0[$z] =~ /^\t\.byte\t0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f$/m ) {
- undef($lines0[$z]);
- $z++;
- while ( $z <= $#lines0 ) {
- undef($lines0[$z]);
- last if $lines0[$z] =~ /[,\t]0x0$/m;
- $z++;
- }
- }
- $z++;
- }
- $chk[0] = join("\n", @lines0);
- $chk[0] =~ s/\n\n+/\n/m;
- print OUTASM $chk[0];
- }
-
- # print out all the literal strings next
- for ($i = 0; $i < $numchks; $i++) {
- if ( $chkcat[$i] eq 'literal' ) {
-
- # HACK: try to detect 16-byte constants and align them
- # on a 16-byte boundary. x86_64 sometimes needs 128-bit
- # aligned constants, and so does Darwin/x86.
- if ( $TargetPlatform =~ /^x86_64/m
- || $TargetPlatform =~ /^i386-apple-darwin/m ) {
- $z = $chk[$i];
- if ($z =~ /(\.long.*\n.*\.long.*\n.*\.long.*\n.*\.long|\.quad.*\n.*\.quad)/m) {
- print OUTASM $T_HDR_literal16;
- } else {
- print OUTASM $T_HDR_literal;
- }
- } else {
- print OUTASM $T_HDR_literal;
- }
-
- print OUTASM $chk[$i];
- print OUTASM "; end literal\n" if $TargetPlatform =~ /^hppa/m; # for the splitter
-
- $chkcat[$i] = 'DONE ALREADY';
- }
- }
-
- # on the HPPA, print out all the bss next
- if ( $TargetPlatform =~ /^hppa/m ) {
- for ($i = 1; $i < $numchks; $i++) {
- if ( $chkcat[$i] eq 'bss' ) {
- print OUTASM "\t.SPACE \$PRIVATE\$\n\t.SUBSPA \$BSS\$\n\t.align 4\n";
- print OUTASM $chk[$i];
-
- $chkcat[$i] = 'DONE ALREADY';
- }
- }
- }
-
- # $numchks + 1 as we have the extra one for .note.GNU-stack
- for ($i = $FIRST_MANGLABLE; $i < $numchks + 1; $i++) {
-# print STDERR "$i: cat $chkcat[$i], symb $chksymb[$i]\n";
-
- next if $chkcat[$i] eq 'DONE ALREADY';
-
- if ( $chkcat[$i] eq 'misc' || $chkcat[$i] eq 'unknown' ) {
- if ($chk[$i] ne '') {
- print OUTASM $T_HDR_misc;
- &print_doctored($chk[$i], 0);
- }
-
- } elsif ( $chkcat[$i] eq 'verbatim' ) {
- print OUTASM $chk[$i];
-
- } elsif ( $chkcat[$i] eq 'toss' ) {
- print STDERR "*** NB: TOSSING code for $chksymb[$i] !!! ***\n";
-
- } elsif ( $chkcat[$i] eq 'data' ) {
- if ($chk[$i] ne '') {
- print OUTASM $T_HDR_data;
- print OUTASM $chk[$i];
- }
-
- } elsif ( $chkcat[$i] eq 'splitmarker' ) {
- # we can just re-constitute this one...
- # NB: we emit _three_ underscores no matter what,
- # so ghc-split doesn't have to care.
- print OUTASM "___stg_split_marker",$chksymb[$i],"${T_POST_LBL}\n";
-
- } elsif ( $chkcat[$i] eq 'closure'
- || $chkcat[$i] eq 'srt'
- || $chkcat[$i] eq 'infotbl'
- || $chkcat[$i] eq 'entry') { # do them in that order
- $symb = $chksymb[$i];
-
- # CLOSURE
- if ( defined($closurechk{$symb}) ) {
- print OUTASM $T_HDR_closure;
- print OUTASM $chk[$closurechk{$symb}];
- $chkcat[$closurechk{$symb}] = 'DONE ALREADY';
- }
-
- # SRT
- if ( defined($srtchk{$symb}) ) {
- print OUTASM $T_HDR_relrodata;
- print OUTASM $chk[$srtchk{$symb}];
- $chkcat[$srtchk{$symb}] = 'DONE ALREADY';
- }
-
- # INFO TABLE
- if ( defined($infochk{$symb}) ) {
-
- print OUTASM $T_HDR_info;
- print OUTASM &rev_tbl($symb, $chk[$infochk{$symb}], 1);
-
- # entry code will be put here!
-
- $chkcat[$infochk{$symb}] = 'DONE ALREADY';
- }
-
- # ENTRY POINT
- if ( defined($entrychk{$symb}) ) {
-
- $c = $chk[$entrychk{$symb}];
-
- # If this is an entry point with an info table,
- # eliminate the entry symbol and all directives involving it.
- if (defined($infochk{$symb}) && $TargetPlatform !~ /^ia64-/m
- && $TABLES_NEXT_TO_CODE eq "YES") {
- @o = ();
- foreach $l (split(/\n/m,$c)) {
- next if $l =~ /^.*$symb_(entry|ret)${T_POST_LBL}/m;
-
- # If we have .type/.size direrctives involving foo_entry,
- # then make them refer to foo_info instead. The information
- # in these directives is used by the cachegrind annotator,
- # so it is worthwhile keeping.
- if ($l =~ /^\s*\.(type|size).*$symb_(entry|ret)/m) {
- $l =~ s/$symb(_entry|_ret)/${symb}_info/gm;
- push(@o,$l);
- next;
- }
- next if $l =~ /^\s*\..*$symb.*\n?/m;
- push(@o,$l);
- }
- $c = join("\n",@o) . "\n";
- }
-
- print OUTASM $T_HDR_entry;
-
- &print_doctored($c, 1); # NB: the 1!!!
-
- $chkcat[$entrychk{$symb}] = 'DONE ALREADY';
- }
-
- } elsif ( $chkcat[$i] eq 'vector' ) {
- $symb = $chksymb[$i];
-
- # VECTOR TABLE
- if ( defined($vectorchk{$symb}) ) {
- print OUTASM $T_HDR_vector;
- print OUTASM &rev_tbl($symb, $chk[$vectorchk{$symb}], 0);
-
- # direct return code will be put here!
- $chkcat[$vectorchk{$symb}] = 'DONE ALREADY';
-
- } elsif ( $TargetPlatform =~ /^alpha-/m ) {
- # Alphas: the commented nop is for the splitter, to ensure
- # that no module ends with a label as the very last
- # thing. (The linker will adjust the label to point
- # to the first code word of the next module linked in,
- # even if alignment constraints cause the label to move!)
-
- print OUTASM "\t# nop\n";
- }
-
- } elsif ( $chkcat[$i] eq 'rodata' ) {
- print OUTASM $T_HDR_rodata;
- print OUTASM $chk[$i];
- $chkcat[$i] = 'DONE ALREADY';
- } elsif ( $chkcat[$i] eq 'relrodata' ) {
- print OUTASM $T_HDR_relrodata;
- print OUTASM $chk[$i];
- $chkcat[$i] = 'DONE ALREADY';
- } elsif ( $chkcat[$i] eq 'toc' ) {
- # silly optimisation to print tocs, since they come in groups...
- print OUTASM $T_HDR_toc;
- local($j) = $i;
- while ($chkcat[$j] eq 'toc')
- { if ( $chk[$j] !~ /\.tc UpdatePAP\[TC\]/m # not needed: always turned into a jump.
- )
- {
- print OUTASM $chk[$j];
- }
- $chkcat[$j] = 'DONE ALREADY';
- $j++;
- }
-
- } elsif ( $TargetPlatform =~ /^.*-apple-darwin.*/m && $chkcat[$i] eq 'dyld' ) {
- # apple-darwin: dynamic linker stubs
- if($chk[$i] !~ /\.indirect_symbol ___DISCARD__/m)
- { # print them out unchanged, but remove the stubs for __DISCARD__
- print OUTASM $chk[$i];
- }
- } elsif ( $TargetPlatform =~ /^i386-apple-darwin.*/m && $chkcat[$i] eq 'get_pc_thunk' ) {
- # i386-apple-darwin: __i686.get_pc_thunk.[abcd]x
- print OUTASM ".section __TEXT,__textcoal_nt,coalesced,no_toc\n";
- print OUTASM $chk[$i];
- } else {
- &tidy_up_and_die(1,"$Pgm: unknown chkcat (ghc-asm: $TargetPlatform)\n$chkcat[$i]\n$chk[$i]\n");
- }
- }
-
- print OUTASM $EXTERN_DECLS if $TargetPlatform =~ /^mips-/m;
-
- # finished
- close(OUTASM) || &tidy_up_and_die(1,"Failed writing to $out_asmf\n");
- close(INASM) || &tidy_up_and_die(1,"Failed reading from $in_asmf\n");
-}
-\end{code}
-
-On IA64, tail calls are converted to branches at this point. The mangler
-searches for function calls immediately followed by a '--- TAILCALL ---'
-token. Since the compiler can put various combinations of labels, bundling
-directives, nop instructions, stops, and a move of the return value
-between the branch and the tail call, proper matching of the tail call
-gets a little hairy. This subroutine does the mangling.
-
-Here is an example of a tail call before mangling:
-
-\begin{verbatim}
- br.call.sptk.many b0 = b6
-.L211
- ;;
- .mmi
- mov r1 = r32
- ;;
- nop.m 0
- nop.i 0
- ;;
- --- TAILCALL --
- ;;
-.L123
-\end{verbatim}
-
-\begin{code}
-sub ia64_mangle_tailcalls {
- # Function input and output are in $c
-
- # Construct the tailcall-mangling expression the first time this function
- # is called.
- if (!defined($IA64_MATCH_TAILCALL)) {
- # One-line pattern matching constructs. None of these
- # should bind references; all parenthesized terms
- # should be (?:) terms.
- my $stop = q/(?:\t;;\n)/;
- my $bundle = q/(?:\t\.(?:mii|mib|mmi|mmb|mfi|mfb|mbb|bbb)\n)/;
- my $nop = q/(?:\tnop(?:\.[mifb])?\s+\d+\n)/;
- my $movgp = q/(?:\tmov r1 = r\d+\n)/;
- my $postbr = q/(?:\tbr \.L\d+\n)/;
-
- my $noeffect = "(?:$stop$bundle?|$nop)*";
- my $postbundle = "(?:$bundle?$nop?$nop?$postbr)?";
-
- # Important parts of the pattern match. The branch target
- # and subsequent jump label are bound to $1 and $2
- # respectively. Sometimes there is no label.
- my $callbr = q/^\tbr\.call\.sptk\.many b0 = (.*)\n/;
- my $label = q/(?:^\.L([0-9]*):\n)/;
- my $tailcall = q/\t--- TAILCALL ---\n/;
-
- $IA64_MATCH_TAILCALL =
- $callbr . $label . '?' . $noeffect . $movgp . '?' . $noeffect .
- $tailcall . $stop . '?' . '(?:' . $postbundle . ')?';
- }
-
- # Find and mangle tailcalls
- while ($c =~ s/$IA64_MATCH_TAILCALL/\tbr\.few $1\n/om) {
- # Eek, the gcc optimiser is getting smarter... if we see a jump to the
- # --- TAILCALL --- marker then we reapply the substitution at the source sites
- $c =~ s/^\tbr \.L$2\n/\t--- TAILCALL ---\n/gm if ($2);
- }
-
- # Verify that all instances of TAILCALL were processed
- if ($c =~ /^\t--- TAILCALL ---\n/m) {
- die "Unmangled TAILCALL tokens remain after mangling"
- }
-}
-\end{code}
-
-The number of registers allocated on the IA64 register stack is set
-upon entry to the runtime with an `alloc' instruction at the entry
-point of \verb+StgRun()+. Gcc uses its own `alloc' to allocate
-however many registers it likes in each function. When we discard
-gcc's alloc, we have to reconcile its register assignment with what
-the STG uses.
-
-There are three stack areas: fixed registers, input/local registers,
-and output registers. We move the output registers to the output
-register space and leave the other registers where they are.
-
-\begin{code}
-sub ia64_rename_registers() {
- # The text to be mangled is in $c
- # Find number of registers in each stack area
- my ($loc, $out) = @_;
- my $cout;
- my $first_out_reg;
- my $regnum;
- my $fragment;
-
- # These are the register numbers used in the STG runtime
- my $STG_FIRST_OUT_REG = 32 + 34;
- my $STG_LAST_OUT_REG = $STG_FIRST_OUT_REG + 7;
-
- $first_out_reg = 32 + $loc;
-
- if ($first_out_reg > $STG_FIRST_OUT_REG) {
- die "Too many local registers allocated by gcc";
- }
-
- # Split the string into fragments containing one register name each.
- # Rename the register in each fragment and concatenate.
- $cout = "";
- foreach $fragment (split(/(?=r\d+[^a-zA-Z0-9_.])/sm, $c)) {
- if ($fragment =~ /^r(\d+)((?:[^a-zA-Z0-9_.].*)?)$/sm) {
- $regnum = $1;
-
- if ($regnum < $first_out_reg) {
- # This is a local or fixed register
-
- # Local registers 32 and 33 (r64 and r65) are
- # used to hold saved state; they shouldn't be touched
- if ($regnum == 64 || $regnum == 65) {
- die "Reserved register $regnum is in use";
- }
- }
- else {
- # This is an output register
- $regnum = $regnum - $first_out_reg + $STG_FIRST_OUT_REG;
- if ($regnum > $STG_LAST_OUT_REG) {
- die "Register number ($regnum) is out of expected range";
- }
- }
-
- # Update this fragment
- $fragment = "r" . $regnum . $2;
- }
- $cout .= $fragment;
- }
-
- $c = $cout;
-}
-
-\end{code}
-
-\begin{code}
-sub hppa_mash_prologue { # OK, epilogue, too
- local($_) = @_;
-
- # toss all prologue stuff
- s/^\s+\.ENTRY[^\0]*--- BEGIN ---/\t.ENTRY/m;
-
- # Lie about our .CALLINFO
- s/^\s+\.CALLINFO.*$/\t.CALLINFO NO_CALLS,NO_UNWIND/m;
-
- # Get rid of P'
-
- s/LP'/L'/gm;
- s/RP'/R'/gm;
-
- # toss all epilogue stuff
- s/^\s+--- END ---[^\0]*\.EXIT/\t.EXIT/m;
-
- # Sorry; we moved the _info stuff to the code segment.
- s/_info,DATA/_info,CODE/gm;
-
- return($_);
-}
-\end{code}
-
-\begin{code}
-sub print_doctored {
- local($_, $need_fallthru_patch) = @_;
-
- if ( $TargetPlatform =~ /^x86_64-/m ) {
- # Catch things like
- #
- # movq -4(%ebp), %rax
- # jmp *%rax
- #
- # and optimise:
- #
- s/^\tmovq\s+(-?\d*\(\%r(bx|bp|13)\)),\s*(\%r(ax|cx|dx|10|11))\n\tjmp\s+\*\3/\tjmp\t\*$1/gm;
- s/^\tmovl\s+\$${T_US}(.*),\s*(\%e(ax|cx|si|di))\n\tjmp\s+\*\%r\3/\tjmp\t$T_US$1/gm;
- }
-
- if ( $TargetPlatform !~ /^i386-/m
- || ! /^\t[a-z]/m # no instructions in here, apparently
- || /^${T_US}__stginit_[A-Za-z0-9_]+${T_POST_LBL}/m) {
- print OUTASM $_;
- return;
- }
-
- # OK, must do some x86 **HACKING**
-
- local($entry_patch) = '';
- local($exit_patch) = '';
-
- # gotta watch out for weird instructions that
- # invisibly smash various regs:
- # rep* %ecx used for counting
- # scas* %edi used for destination index
- # cmps* %e[sd]i used for indices
- # loop* %ecx used for counting
- #
- # SIGH.
-
- # We cater for:
- # * use of STG reg [ nn(%ebx) ] where no machine reg avail
- #
- # * GCC used an "STG reg" for its own purposes
- #
- # * some secret uses of machine reg, requiring STG reg
- # to be saved/restored
-
- # The most dangerous "GCC uses" of an "STG reg" are when
- # the reg holds the target of a jmp -- it's tricky to
- # insert the patch-up code before we get to the target!
- # So here we change the jmps:
-
- # --------------------------------------------------------
- # it can happen that we have jumps of the form...
- # jmp *<something involving %esp>
- # or
- # jmp <something involving another naughty register...>
- #
- # a reasonably-common case is:
- #
- # movl $_blah,<bad-reg>
- # jmp *<bad-reg>
- #
- s/^\tmovl\s+\$${T_US}(.*),\s*(\%e[acd]x)\n\tjmp\s+\*\2/\tjmp $T_US$1/gm;
-
- # Catch things like
- #
- # movl -4(%ebx), %eax
- # jmp *%eax
- #
- # and optimise:
- #
- s/^\tmovl\s+(-?\d*\(\%e(bx|si)\)),\s*(\%e[acd]x)\n\tjmp\s+\*\3/\tjmp\t\*$1/gm;
-
- if ($StolenX86Regs <= 2 ) { # YURGH! spurious uses of esi?
- s/^\tmovl\s+(.*),\s*\%esi\n\tjmp\s+\*%esi\n/\tmovl $1,\%eax\n\tjmp \*\%eax\n/gm;
- s/^\tjmp\s+\*(.*\(.*\%esi.*\))\n/\tmovl $1,\%eax\n\tjmp \*\%eax\n/gm;
- s/^\tjmp\s+\*\%esi\n/\tmovl \%esi,\%eax\n\tjmp \*\%eax\n/gm;
- die "$Pgm: (mangler) still have jump involving \%esi!\n$_"
- if /(jmp|call)\s+.*\%esi/m;
- }
- if ($StolenX86Regs <= 3 ) { # spurious uses of edi?
- s/^\tmovl\s+(.*),\s*\%edi\n\tjmp\s+\*%edi\n/\tmovl $1,\%eax\n\tjmp \*\%eax\n/gm;
- s/^\tjmp\s+\*(.*\(.*\%edi.*\))\n/\tmovl $1,\%eax\n\tjmp \*\%eax\n/gm;
- s/^\tjmp\s+\*\%edi\n/\tmovl \%edi,\%eax\n\tjmp \*\%eax\n/gm;
- die "$Pgm: (mangler) still have jump involving \%edi!\n$_"
- if /(jmp|call)\s+.*\%edi/m;
- }
-
- # OK, now we can decide what our patch-up code is going to
- # be:
-
- # Offsets into register table - you'd better update these magic
- # numbers should you change its contents!
- # local($OFFSET_R1)=0; No offset for R1 in new RTS.
- local($OFFSET_Hp)=88;
-
- # Note funky ".=" stuff; we're *adding* to these _patch guys
- if ( $StolenX86Regs <= 2
- && ( /[^0-9]\(\%ebx\)/m || /\%esi/m || /^\tcmps/m ) ) { # R1 (esi)
- $entry_patch .= "\tmovl \%esi,(\%ebx)\n";
- $exit_patch .= "\tmovl (\%ebx),\%esi\n";
-
- # nothing for call_{entry,exit} because %esi is callee-save
- }
- if ( $StolenX86Regs <= 3
- && ( /${OFFSET_Hp}\(\%ebx\)/m || /\%edi/m || /^\t(scas|cmps)/m ) ) { # Hp (edi)
- $entry_patch .= "\tmovl \%edi,${OFFSET_Hp}(\%ebx)\n";
- $exit_patch .= "\tmovl ${OFFSET_Hp}(\%ebx),\%edi\n";
-
- # nothing for call_{entry,exit} because %edi is callee-save
- }
-
- # --------------------------------------------------------
- # next, here we go with non-%esp patching!
- #
- s/^(\t[a-z])/$entry_patch$1/m; # before first instruction
-
-# Before calling GC we must set up the exit condition before the call
-# and entry condition when we come back
-
- # fix _all_ non-local jumps:
-
- if ( $TargetPlatform =~ /^.*-apple-darwin.*/m ) {
- # On Darwin, we've got local-looking jumps that are
- # actually global (i.e. jumps to Lfoo$stub or via
- # Lfoo$non_lazy_ptr), so we fix those first.
- # In fact, we just fix everything that contains a dollar
- # because false positives don't hurt here.
-
- s/^(\tjmp\s+\*?L.*\$.*\n)/$exit_patch$1/gm;
- }
-
- s/^\tjmp\s+\*${T_X86_PRE_LLBL_PAT}/\tJMP___SL/gom;
- s/^\tjmp\s+${T_X86_PRE_LLBL_PAT}/\tJMP___L/gom;
-
- s/^(\tjmp\s+.*\n)/$exit_patch$1/gm; # here's the fix...
-
- s/^\tJMP___SL/\tjmp \*${T_X86_PRE_LLBL}/gom;
- s/^\tJMP___L/\tjmp ${T_X86_PRE_LLBL}/gom;
-
- if ($StolenX86Regs == 2 ) {
- die "ARGH! Jump uses \%esi or \%edi with -monly-2-regs:\n$_"
- if /^\t(jmp|call)\s+.*\%e(si|di)/m;
- } elsif ($StolenX86Regs == 3 ) {
- die "ARGH! Jump uses \%edi with -monly-3-regs:\n$_"
- if /^\t(jmp|call)\s+.*\%edi/m;
- }
-
- # --------------------------------------------------------
- # that's it -- print it
- #
- #die "Funny jumps?\n$_" if /${T_X86_BADJMP}/o; # paranoia
-
- print OUTASM $_;
-
- if ( $need_fallthru_patch ) { # exit patch for end of slow entry code
- print OUTASM $exit_patch;
- # ToDo: make it not print if there is a "jmp" at the end
- }
-}
-\end{code}
-
-\begin{code}
-sub init_FUNNY_THINGS {
- # use vars '%KNOWN_FUNNY_THING'; # Unused?
- %KNOWN_FUNNY_THING = (
- # example
- # "${T_US}stg_.*{T_POST_LBL}", 1,
- );
-}
-\end{code}
-
-The following table reversal is used for both info tables and return
-vectors. In both cases, we remove the first entry from the table,
-reverse the table, put the label at the end, and paste some code
-(that which is normally referred to by the first entry in the table)
-right after the table itself. (The code pasting is done elsewhere.)
-
-\begin{code}
-sub rev_tbl {
- # use vars '$discard1'; # Unused?
- local($symb, $tbl, $discard1) = @_;
-
- return ($tbl) if ($TargetPlatform =~ /^ia64-/m
- || $TABLES_NEXT_TO_CODE eq "NO");
-
- local($before) = '';
- local($label) = '';
- local(@imports) = (); # hppa only
- local(@words) = ();
- local($after) = '';
- local(@lines) = split(/\n/m, $tbl);
- local($i, $j);
-
- # Deal with the header...
- for ($i = 0; $i <= $#lines && $lines[$i] !~ /^\t?${T_DOT_WORD}\s+/om; $i++) {
- $label .= $lines[$i] . "\n",
- next if $lines[$i] =~ /^[A-Za-z0-9_]+_info${T_POST_LBL}$/om
- || $lines[$i] =~ /${T_DOT_GLOBAL}/om
- || $lines[$i] =~ /^${T_US}\S+_vtbl${T_POST_LBL}$/om;
-
- $before .= $lines[$i] . "\n"; # otherwise...
- }
-
- $infoname = $label;
- $infoname =~ s/(.|\n)*^([A-Za-z0-9_]+_info)${T_POST_LBL}$(.|\n)*/$2/m;
-
- # Grab the table data...
- if ( $TargetPlatform !~ /^hppa/m ) {
- for ( ; $i <= $#lines && $lines[$i] =~ /^\t?${T_DOT_WORD}\s+/om; $i++) {
- $line = $lines[$i];
- # Convert addresses of SRTs, slow entrypoints and large bitmaps
- # to offsets (relative to the info label),
- # in order to support position independent code.
- $line =~ s/$infoname/0/m
- || $line =~ s/([A-Za-z0-9_]+_srtd)$/$1 - $infoname/m
- || $line =~ s/([A-Za-z0-9_]+_srt(\+\d+)?)$/$1 - $infoname/m
- || $line =~ s/([A-Za-z0-9_]+_str)$/$1 - $infoname/m
- || $line =~ s/([A-Za-z0-9_]+_slow)$/$1 - $infoname/m
- || $line =~ s/([A-Za-z0-9_]+_btm)$/$1 - $infoname/m
- || $line =~ s/([A-Za-z0-9_]+_alt)$/$1 - $infoname/m
- || $line =~ s/([A-Za-z0-9_]+_dflt)$/$1 - $infoname/m
- || $line =~ s/([A-Za-z0-9_]+_ret)$/$1 - $infoname/m;
- push(@words, $line);
- }
- } else { # hppa weirdness
- for ( ; $i <= $#lines && $lines[$i] =~ /^\s+(${T_DOT_WORD}|\.IMPORT)/m; $i++) {
- # FIXME: the RTS now expects offsets instead of addresses
- # for all labels in info tables.
- if ($lines[$i] =~ /^\s+\.IMPORT/m) {
- push(@imports, $lines[$i]);
- } else {
- # We don't use HP's ``function pointers''
- # We just use labels in code space, like normal people
- $lines[$i] =~ s/P%//m;
- push(@words, $lines[$i]);
- }
- }
- }
-
- # Now throw away any initial zero word from the table. This is a hack
- # that lets us reduce the size of info tables when the SRT field is not
- # needed: see comments StgFunInfoTable in InfoTables.h.
- #
- # The .zero business is for Linux/ELF.
- # The .skip business is for Sparc/Solaris/ELF.
- # The .blockz business is for HPPA.
-# if ($discard1) {
-# if ($words[0] =~ /^\t?(${T_DOT_WORD}\s+0|\.zero\s+4|\.skip\s+4|\.blockz\s+4)/) {
-# shift(@words);
-# }
-# }
-
- for (; $i <= $#lines; $i++) {
- $after .= $lines[$i] . "\n";
- }
-
- # Alphas: If we have anonymous text (not part of a procedure), the
- # linker may complain about missing exception information. Bleh.
- # To suppress this, we place a .ent/.end pair around the code.
- # At the same time, we have to be careful and not enclose any leading
- # .file/.loc directives.
- if ( $TargetPlatform =~ /^alpha-/m && $label =~ /^([A-Za-z0-9_]+):$/m) {
- local ($ident) = $1;
- $before =~ s/^((\s*\.(file|loc)\s+[^\n]*\n)*)/$1\t.ent $ident\n/m;
- $after .= "\t.end $ident\n";
- }
-
- # Alphas: The heroic Simon Marlow found a bug in the Digital UNIX
- # assembler (!) wherein .quad constants inside .text sections are
- # first narrowed to 32 bits then sign-extended back to 64 bits.
- # This obviously screws up our 64-bit bitmaps, so we work around
- # the bug by replacing .quad with .align 3 + .long + .long [ccshan]
- if ( $TargetPlatform =~ /^alpha-/m ) {
- foreach (@words) {
- if (/^\s*\.quad\s+([-+0-9].*\S)\s*$/m && length $1 >= 10) {
- local ($number) = $1;
- if ($number =~ /^([-+])?(0x?)?([0-9]+)$/m) {
- local ($sign, $base, $digits) = ($1, $2, $3);
- $base = (10, 8, 16)[length $base];
- local ($hi, $lo) = (0, 0);
- foreach $i (split(//, $digits)) {
- $j = $lo * $base + $i;
- $lo = $j % 4294967296;
- $hi = $hi * $base + ($j - $lo) / 4294967296;
- }
- ($hi, $lo) = (4294967295 - $hi, 4294967296 - $lo)
- if $sign eq "-";
- $_ = "\t.align 3\n\t.long $lo\n\t.long $hi\n";
- # printf STDERR "TURNING %s into 0x %08x %08x\n", $number, $hi, $lo;
- } else {
- print STDERR "Cannot handle \".quad $number\" in info table\n";
- exit 1;
- }
- }
- }
- }
-
- if ( $TargetPlatform =~ /x86_64-apple-darwin/m ) {
- # Tack a label to the front of the info table, too.
- # For now, this just serves to work around a crash in Apple's new
- # 64-bit linker (it seems to assume that there is no data before the
- # first label in a section).
-
- # The plan for the future is to do this on all Darwin platforms, and
- # to add a reference to this label after the entry code, just as the
- # NCG does, so we can enable dead-code-stripping in the linker without
- # losing our info tables. (Hence the name _dsp, for dead-strip preventer)
-
- $before .= "\n${infoname}_dsp:\n";
- }
-
- $tbl = $before
- . (($TargetPlatform !~ /^hppa/m) ? '' : join("\n", @imports) . "\n")
- . join("\n", @words) . "\n"
- . $label . $after;
-
-# print STDERR "before=$before\n";
-# print STDERR "label=$label\n";
-# print STDERR "words=",(reverse @words),"\n";
-# print STDERR "after=$after\n";
-
- $tbl;
-}
-\end{code}
-
-The HP is a major nuisance. The threaded code mangler moved info
-tables from data space to code space, but unthreaded code in the RTS
-still has references to info tables in data space. Since the HP
-linker is very precise about where symbols live, we need to patch the
-references in the unthreaded RTS as well.
-
-\begin{code}
-sub mini_mangle_asm_hppa {
- local($in_asmf, $out_asmf) = @_;
-
- open(INASM, "< $in_asmf")
- || &tidy_up_and_die(1,"$Pgm: failed to open `$in_asmf' (to read)\n");
- open(OUTASM,"> $out_asmf")
- || &tidy_up_and_die(1,"$Pgm: failed to open `$out_asmf' (to write)\n");
-
- while (<INASM>) {
- s/_info,DATA/_info,CODE/m; # Move _info references to code space
- s/P%_PR/_PR/m;
- print OUTASM;
- }
-
- # finished:
- close(OUTASM) || &tidy_up_and_die(1,"Failed writing to $out_asmf\n");
- close(INASM) || &tidy_up_and_die(1,"Failed reading from $in_asmf\n");
-}
-
-\end{code}
-
-\begin{code}
-sub tidy_up_and_die {
- local($return_val, $msg) = @_;
- print STDERR $msg;
- exit (($return_val == 0) ? 0 : 1);
-}
-\end{code}
+++ /dev/null
-# -----------------------------------------------------------------------------
-#
-# (c) 2009 The University of Glasgow
-#
-# This file is part of the GHC build system.
-#
-# To understand how the build system works and how to modify it, see
-# http://hackage.haskell.org/trac/ghc/wiki/Building/Architecture
-# http://hackage.haskell.org/trac/ghc/wiki/Building/Modifying
-#
-# -----------------------------------------------------------------------------
-
-driver/mangler_PERL_SRC = ghc-asm.lprl
-driver/mangler_dist_PROG = $(GHC_MANGLER_PGM)
-driver/mangler_dist_TOPDIR = YES
-driver/mangler_dist_INSTALL_IN = $(DESTDIR)$(topdir)
-
-$(eval $(call build-perl,driver/mangler,dist))
-
# -----------------------------------------------------------------------------
# Building dependencies
+include rules/dependencies.mk
include rules/build-dependencies.mk
include rules/include-dependencies.mk
ifneq "$(GhcUnregisterised)" "YES"
BUILD_DIRS += \
- $(GHC_MANGLER_DIR) \
$(GHC_SPLIT_DIR)
endif
$(includes_H_PLATFORM) \
$(includes_H_FILES) \
includes/ghcconfig.h \
- includes/rts/Config.h \
$(INSTALL_HEADERS) \
$(INSTALL_LIBEXECS) \
$(INSTALL_LIBEXEC_SCRIPTS) \
macros <- readIORef macros_ref
let{ (str, cmds) = case str' of
':' : rest -> (rest, builtin_commands)
- _ -> (str', macros ++ builtin_commands) }
+ _ -> (str', builtin_commands ++ macros) }
-- look for exact match first, then the first prefix match
+ -- We consider builtin commands first: since new macros are appended
+ -- on the *end* of the macros list, this is consistent with the view
+ -- that things defined earlier should take precedence. See also #3858
return $ case [ c | c <- cmds, str == cmdName c ] of
c:_ -> Just c
[] -> case [ c | c@(s,_,_) <- cmds, str `isPrefixOf` s ] of
haskellish (f,Nothing) =
looksLikeModuleName f || isHaskellSrcFilename f || '.' `notElem` f
haskellish (_,Just phase) =
- phase `notElem` [As, Cc, CmmCpp, Cmm, StopLn]
+ phase `notElem` [As, Cc, Cobjc, CmmCpp, Cmm, StopLn]
hsc_env <- GHC.getSession
# The GHC programs need to depend on all the helper programs they might call
ifeq "$(GhcUnregisterised)" "NO"
-$(GHC_STAGE1) : $(MANGLER) $(SPLIT)
-$(GHC_STAGE2) : $(MANGLER) $(SPLIT)
-$(GHC_STAGE3) : $(MANGLER) $(SPLIT)
+$(GHC_STAGE1) : $(SPLIT)
+$(GHC_STAGE2) : $(SPLIT)
+$(GHC_STAGE3) : $(SPLIT)
endif
$(GHC_STAGE1) : $(INPLACE_LIB)/extra-gcc-opts
RtsFlags.GcFlags.heapSizeSuggestion = 6*1024*1024 / BLOCK_SIZE;
RtsFlags.GcFlags.maxStkSize = 512*1024*1024 / sizeof(W_);
RtsFlags.GcFlags.giveStats = COLLECT_GC_STATS;
- RtsFlags.GcFlags.statsFile = stderr;
// See #3408: the default idle GC time of 0.3s is too short on
// Windows where we receive console events once per second or so.
+++ /dev/null
-#ifndef MAKING_GHC_BUILD_SYSTEM_DEPENDENCIES
-#warning RtsFlags.h is DEPRECATED; please just #include "Rts.h"
-#endif
-
-#include "Rts.h"
#ifndef RTSOPTS_H
#define RTSOPTS_H
-typedef enum {rtsOptsNone, rtsOptsSafeOnly, rtsOptsAll} rtsOptsEnabledEnum;
+typedef enum {
+ RtsOptsNone, // +RTS causes an error
+ RtsOptsSafeOnly, // safe RTS options allowed; others cause an error
+ RtsOptsAll // all RTS options allowed
+ } RtsOptsEnabledEnum;
-extern const rtsOptsEnabledEnum rtsOptsEnabled;
+extern const RtsOptsEnabledEnum rtsOptsEnabled;
#endif /* RTSOPTS_H */
includes_H_SUBDIRS += stg
includes_H_FILES := $(wildcard $(patsubst %,includes/%/*.h,$(includes_H_SUBDIRS)))
+# This isn't necessary, but it makes the paths look a little prettier
+includes_H_FILES := $(subst /./,/,$(includes_H_FILES))
#
# Options
extern RTS_FLAGS RtsFlags;
#endif
-/* Routines that operate-on/to-do-with RTS flags: */
-
-void initRtsFlagsDefaults(void);
-void setupRtsFlags(int *argc, char *argv[], int *rts_argc, char *rts_argv[]);
-void setProgName(char *argv[]);
-
-
/*
* The printf formats are here, so we are less likely to make
* overly-long filenames (with disastrous results). No more than 128
typedef struct _HpcModuleInfo {
char *modName; // name of module
StgWord32 tickCount; // number of ticks
- StgWord32 tickOffset; // offset into a single large .tix Array
- StgWord32 hashNo; // Hash number for this module's mix info
+ StgWord32 hashNo; // Hash number for this module's mix info
StgWord64 *tixArr; // tix Array; local for this module
+ rtsBool from_file; // data was read from the .tix file
struct _HpcModuleInfo *next;
} HpcModuleInfo;
-int hs_hpc_module (char *modName,
- StgWord32 modCount,
- StgWord32 modHashNo,
- StgWord64 *tixArr);
+void hs_hpc_module (char *modName,
+ StgWord32 modCount,
+ StgWord32 modHashNo,
+ StgWord64 *tixArr);
HpcModuleInfo * hs_hpc_rootModule (void);
#if defined(THREADED_RTS) /* to the end */
-# if defined(HAVE_PTHREAD_H) && !defined(WANT_NATIVE_WIN32_THREADS)
+#if defined(HAVE_PTHREAD_H) && !defined(mingw32_HOST_OS)
#if CMINUSMINUS
RTS_FUN_DECL(stg_newMutVarzh);
RTS_FUN_DECL(stg_atomicModifyMutVarzh);
+RTS_FUN_DECL(stg_casMutVarzh);
RTS_FUN_DECL(stg_isEmptyMVarzh);
RTS_FUN_DECL(stg_newMVarzh);
return old;
}
-STATIC_INLINE StgWord
+EXTERN_INLINE StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
+EXTERN_INLINE StgWord
cas(StgVolatilePtr p, StgWord o, StgWord n)
{
StgWord result;
-Subproject commit ec77c2ce0ef81e7bfee1839ddae6326f69a896ec
+Subproject commit f643d954e30d5ac635d3c0ff41ad40401fbd5e92
put (stability ipi)
put (homepage ipi)
put (pkgUrl ipi)
+ put (synopsis ipi)
put (description ipi)
put (category ipi)
put (exposed ipi)
stability <- get
homepage <- get
pkgUrl <- get
+ synopsis <- get
description <- get
category <- get
exposed <- get
case $* in
--inplace)
HADDOCK=../inplace/bin/haddock
- for LIB in `grep '^libraries/[^ ]\+ \+- \+[^ ]\+ \+[^ ]\+ \+[^ ]\+' ../packages | sed -e 's#libraries/##' -e 's/ .*//'`
+ for LIB in `grep '^libraries/[^ ]* *- ' ../packages | sed -e 's#libraries/##' -e 's/ .*//'`
do
HADDOCK_FILE="$LIB/dist-install/doc/html/$LIB/$LIB.haddock"
if [ -f "$HADDOCK_FILE" ]
GhcStage2HcOpts=-O2
GhcStage3HcOpts=-O2
+# These options modify whether or not a built compiler for a bootstrap
+# stage defaults to using the new code generation path. The new
+# code generation path is a bit slower, so for development just
+# GhcStage2DefaultNewCodegen=YES, but it's also a good idea to try
+# building all libraries and the stage2 compiler with the
+# new code generator, which involves GhcStage1DefaultNewCodegen=YES.
+GhcStage1DefaultNewCodegen=NO
+GhcStage2DefaultNewCodegen=NO
+GhcStage3DefaultNewCodegen=NO
+
GhcDebugged=NO
GhcDynamic=NO
GhcProfiled=NO
# Do we support shared libs?
-PlatformSupportsSharedLibs = $(if $(filter $(TARGETPLATFORM),\
- i386-unknown-linux x86_64-unknown-linux \
+SharedLibsPlatformList = i386-unknown-linux x86_64-unknown-linux \
i386-unknown-freebsd x86_64-unknown-freebsd \
i386-unknown-openbsd x86_64-unknown-openbsd \
i386-unknown-mingw32 \
- i386-unknown-solaris2 \
- i386-apple-darwin powerpc-apple-darwin),YES,NO)
+ i386-apple-darwin powerpc-apple-darwin
+
+ifeq ($(SOLARIS_BROKEN_SHLD), NO)
+SharedLibsPlatformList := $(SharedLibsPlatformList) i386-unknown-solaris2
+endif
+
+PlatformSupportsSharedLibs = $(if $(filter $(TARGETPLATFORM),\
+ $(SharedLibsPlatformList)),YES,NO)
# Build a compiler that will build *unregisterised* libraries and
# binaries by default. Unregisterised code is supposed to compile and
GHC_GHCTAGS_PGM = ghctags$(exeext)
GHC_HSC2HS_PGM = hsc2hs$(exeext)
GHC_TOUCHY_PGM = touchy$(exeext)
-GHC_MANGLER_PGM = ghc-asm
GHC_SPLIT_PGM = ghc-split
GHC_SYSMAN_PGM = SysMan
GHC_GENPRIMOP_PGM = genprimopcode$(exeext)
endif
HP2PS = $(GHC_HP2PS_DIR)/$(GHC_HP2PS_PGM)
-MANGLER = $(INPLACE_LIB)/$(GHC_MANGLER_PGM)
SPLIT = $(INPLACE_LIB)/$(GHC_SPLIT_PGM)
SYSMAN = $(GHC_SYSMAN_DIR)/$(GHC_SYSMAN_PGM)
LTX = $(GHC_LTX_DIR)/$(GHC_LTX_PGM)
CONF_CC_OPTS += -G0
endif
+# The .hsc files aren't currently safe for cross-compilation on Windows:
+# libraries\haskeline\.\System\Console\Haskeline\Backend\Win32.hsc:160
+# directive "let" is not safe for cross-compilation
+ifneq "$(Windows)" "YES"
+SRC_HSC2HS_OPTS += --cross-safe
+endif
SRC_HSC2HS_OPTS += $(addprefix --cflag=,$(filter-out -O,$(SRC_CC_OPTS) $(CONF_CC_OPTS_STAGE0)))
SRC_HSC2HS_OPTS += $(foreach d,$(GMP_INCLUDE_DIRS),-I$(d))
# overflowing command-line length limits.
LdIsGNULd = @LdIsGNULd@
+# Set to YES if ld has the --build-id flag. Sometimes we need to
+# disable it with --build-id=none.
+LdHasBuildId = @LdHasBuildId@
+
# On MSYS, building with SplitObjs=YES fails with
# ar: Bad file number
# see #3201. We need to specify a smaller max command-line size
# (This will make a Cygwin build run slowly
# because of all those shell invocations.)
ifeq "$(OSTYPE)" "cygwin"
-MK_INSTALL_DEST = "$(shell cygpath $1)"
+MK_INSTALL_DEST = $$(cygpath $1)
else
MK_INSTALL_DEST = $1
endif
# This distinguishes "msys" and "cygwin", which are not
# not distinguished by HOST_OS_CPP
OSTYPE=@OSTYPE@
+
+# In case of Solaris OS, does it provide broken shared libs
+# linker or not?
+SOLARIS_BROKEN_SHLD=@SOLARIS_BROKEN_SHLD@
GHC_GENPRIMOP_DIR = $(GHC_UTILS_DIR)/genprimopcode
GHC_GENAPPLY_DIR = $(GHC_UTILS_DIR)/genapply
GHC_CABAL_DIR = $(GHC_UTILS_DIR)/ghc-cabal
-GHC_MANGLER_DIR = $(GHC_DRIVER_DIR)/mangler
GHC_SPLIT_DIR = $(GHC_DRIVER_DIR)/split
GHC_SYSMAN_DIR = $(GHC_RTS_DIR)/parallel
# "-" if there is no upstream.
#
# Lines that start with a '#' are comments.
-. - ghc git -
-ghc-tarballs - ghc-tarballs darcs -
-utils/hsc2hs - hsc2hs darcs -
+. - ghc.git git -
+ghc-tarballs - ghc-tarballs.git git -
+utils/hsc2hs - hsc2hs.git git -
# haddock does have an upstream:
# http://code.haskell.org/haddock/
# but it stays buildable with the last stable release rather than tracking HEAD,
# and is resynced with the GHC HEAD branch by David Waern when appropriate
-utils/haddock - haddock2 darcs -
-libraries/array - packages/array darcs -
-libraries/base - packages/base darcs -
-libraries/binary - packages/binary darcs http://code.haskell.org/binary/
-libraries/bytestring - packages/bytestring darcs http://darcs.haskell.org/bytestring/
-libraries/Cabal - packages/Cabal darcs http://darcs.haskell.org/cabal/
-libraries/containers - packages/containers darcs -
-libraries/directory - packages/directory darcs -
-libraries/extensible-exceptions - packages/extensible-exceptions darcs -
-libraries/filepath - packages/filepath darcs -
-libraries/ghc-prim - packages/ghc-prim darcs -
-libraries/haskeline - packages/haskeline darcs http://code.haskell.org/haskeline/
-libraries/haskell98 - packages/haskell98 darcs -
-libraries/haskell2010 - packages/haskell2010 darcs -
-libraries/hoopl - packages/hoopl darcs -
-libraries/hpc - packages/hpc darcs -
-libraries/integer-gmp - packages/integer-gmp darcs -
-libraries/integer-simple - packages/integer-simple darcs -
-libraries/mtl - packages/mtl darcs -
-libraries/old-locale - packages/old-locale darcs -
-libraries/old-time - packages/old-time darcs -
-libraries/pretty - packages/pretty darcs -
-libraries/process - packages/process darcs -
-libraries/random - packages/random darcs -
-libraries/template-haskell - packages/template-haskell darcs -
-libraries/terminfo - packages/terminfo darcs http://code.haskell.org/terminfo/
-libraries/unix - packages/unix darcs -
-libraries/utf8-string - packages/utf8-string darcs http://code.haskell.org/utf8-string/
-libraries/Win32 - packages/Win32 darcs -
-libraries/xhtml - packages/xhtml darcs -
-testsuite testsuite testsuite darcs -
-nofib nofib nofib darcs -
-libraries/deepseq extra packages/deepseq darcs -
-libraries/parallel extra packages/parallel darcs -
-libraries/stm extra packages/stm darcs -
-libraries/primitive dph packages/primitive darcs http://code.haskell.org/primitive
-libraries/vector dph packages/vector darcs http://code.haskell.org/vector
-libraries/dph dph packages/dph darcs -
+utils/haddock - haddock2.git git -
+libraries/array - packages/array.git git -
+libraries/base - packages/base.git git -
+libraries/binary - packages/binary.git git http://code.haskell.org/binary/
+libraries/bytestring - packages/bytestring.git git http://darcs.haskell.org/bytestring/
+libraries/Cabal - packages/Cabal.git git http://darcs.haskell.org/cabal/
+libraries/containers - packages/containers.git git -
+libraries/directory - packages/directory.git git -
+libraries/extensible-exceptions - packages/extensible-exceptions.git git -
+libraries/filepath - packages/filepath.git git -
+libraries/ghc-prim - packages/ghc-prim.git git -
+libraries/haskeline - packages/haskeline.git git http://code.haskell.org/haskeline/
+libraries/haskell98 - packages/haskell98.git git -
+libraries/haskell2010 - packages/haskell2010.git git -
+libraries/hoopl - packages/hoopl.git git -
+libraries/hpc - packages/hpc.git git -
+libraries/integer-gmp - packages/integer-gmp.git git -
+libraries/integer-simple - packages/integer-simple.git git -
+libraries/mtl - packages/mtl.git git -
+libraries/old-locale - packages/old-locale.git git -
+libraries/old-time - packages/old-time.git git -
+libraries/pretty - packages/pretty.git git -
+libraries/process - packages/process.git git -
+libraries/random - packages/random.git git -
+libraries/template-haskell - packages/template-haskell.git git -
+libraries/terminfo - packages/terminfo.git git http://code.haskell.org/terminfo/
+libraries/unix - packages/unix.git git -
+libraries/utf8-string - packages/utf8-string.git git http://code.haskell.org/utf8-string/
+libraries/Win32 - packages/Win32.git git -
+libraries/xhtml - packages/xhtml.git git -
+testsuite testsuite testsuite.git git -
+nofib nofib nofib.git git -
+libraries/deepseq extra packages/deepseq.git git -
+libraries/parallel extra packages/parallel.git git -
+libraries/stm extra packages/stm.git git -
+libraries/primitive dph packages/primitive.git git http://code.haskell.org/primitive
+libraries/vector dph packages/vector.git git http://code.haskell.org/vector
+libraries/dph dph packages/dph.git git -
+++ /dev/null
-# Despite the name "package", this file contains the master list of
-# the *repositories* that make up GHC. It is parsed by boot and darcs-all.
-#
-# Some of this information is duplicated elsewhere in the build system:
-# See Trac #3896
-# In particular when adding libraries to this file, you also need to add
-# the library to the SUBDIRS variable in libraries/Makefile so that they
-# actually get built
-#
-# The repos are of several kinds:
-# - The main GHC source repo
-# - Each boot package lives in a repo
-# - DPH is a repo that contains several packages
-# - Haddock and hsc2hs are applications, built on top of GHC,
-# and in turn needed to bootstrap GHC
-# - ghc-tarballs is need to build GHC
-# - nofib and testsuite are optional helpers
-#
-# The format of the lines in this file is:
-# localpath tag remotepath VCS upstream
-# where
-# * 'localpath' is where to put the repository in a checked out tree.
-# * 'remotepath' is where the repository is in the central repository.
-# * 'VCS' is what version control system the repo uses.
-#
-# * The 'tag' determines when "darcs-all get" will get the
-# repo. If the tag is "-" then it will always get it, but if there
-# is a tag then a corresponding flag must be given to darcs-all, e.g.
-# if you want to get the packages with an "extralibs" or "testsuite"
-# tag then you need to use "darcs-all --extra --testsuite get".
-# Support for new tags must be manually added to the darcs-all script.
-#
-# 'tag' is also used to determine which packages the build system
-# deems to have the EXTRA_PACKAGE property: tags 'dph' and 'extra'
-# both give this property
-#
-# * 'upstream' is the URL of the upstream repo, where there is one, or
-# "-" if there is no upstream.
-#
-# Lines that start with a '#' are comments.
-. - ghc.git git -
-ghc-tarballs - ghc-tarballs darcs -
-utils/hsc2hs - hsc2hs darcs -
-# haddock does have an upstream:
-# http://code.haskell.org/haddock/
-# but it stays buildable with the last stable release rather than tracking HEAD,
-# and is resynced with the GHC HEAD branch by David Waern when appropriate
-utils/haddock - haddock2 darcs -
-libraries/array - packages/array darcs -
-libraries/base - packages/base darcs -
-libraries/binary - packages/binary darcs http://code.haskell.org/binary/
-libraries/bytestring - packages/bytestring darcs http://darcs.haskell.org/bytestring/
-libraries/Cabal - packages/Cabal darcs http://darcs.haskell.org/cabal/
-libraries/containers - packages/containers darcs -
-libraries/directory - packages/directory darcs -
-libraries/extensible-exceptions - packages/extensible-exceptions darcs -
-libraries/filepath - packages/filepath darcs -
-libraries/ghc-prim - packages/ghc-prim darcs -
-libraries/haskeline - packages/haskeline darcs http://code.haskell.org/haskeline/
-libraries/haskell98 - packages/haskell98 darcs -
-libraries/haskell2010 - packages/haskell2010 darcs -
-libraries/hoopl - packages/hoopl darcs -
-libraries/hpc - packages/hpc darcs -
-libraries/integer-gmp - packages/integer-gmp darcs -
-libraries/integer-simple - packages/integer-simple darcs -
-libraries/mtl - packages/mtl darcs -
-libraries/old-locale - packages/old-locale darcs -
-libraries/old-time - packages/old-time darcs -
-libraries/pretty - packages/pretty darcs -
-libraries/process - packages/process darcs -
-libraries/random - packages/random darcs -
-libraries/template-haskell - packages/template-haskell darcs -
-libraries/terminfo - packages/terminfo darcs http://code.haskell.org/terminfo/
-libraries/unix - packages/unix darcs -
-libraries/utf8-string - packages/utf8-string darcs http://code.haskell.org/utf8-string/
-libraries/Win32 - packages/Win32 darcs -
-libraries/xhtml - packages/xhtml darcs -
-testsuite testsuite testsuite darcs -
-nofib nofib nofib darcs -
-libraries/deepseq extra packages/deepseq darcs -
-libraries/parallel extra packages/parallel darcs -
-libraries/stm extra packages/stm darcs -
-libraries/primitive dph packages/primitive darcs http://code.haskell.org/primitive
-libraries/vector dph packages/vector darcs http://code.haskell.org/vector
-libraries/dph dph packages/dph darcs -
------------------------------------------------------------------------ */
void
-markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
- rtsBool no_mark_sparks USED_IF_THREADS)
+markCapability (evac_fn evac, void *user, Capability *cap,
+ rtsBool no_mark_sparks USED_IF_THREADS)
{
- nat i;
- Capability *cap;
InCall *incall;
// Each GC thread is responsible for following roots from the
// or fewer Capabilities as GC threads, but just in case there
// are more, we mark every Capability whose number is the GC
// thread's index plus a multiple of the number of GC threads.
- for (i = i0; i < n_capabilities; i += delta) {
- cap = &capabilities[i];
- evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
- evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
+ evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
+ evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
- evac(user, (StgClosure **)(void *)&cap->inbox);
+ evac(user, (StgClosure **)(void *)&cap->inbox);
#endif
- for (incall = cap->suspended_ccalls; incall != NULL;
- incall=incall->next) {
- evac(user, (StgClosure **)(void *)&incall->suspended_tso);
- }
+ for (incall = cap->suspended_ccalls; incall != NULL;
+ incall=incall->next) {
+ evac(user, (StgClosure **)(void *)&incall->suspended_tso);
+ }
#if defined(THREADED_RTS)
- if (!no_mark_sparks) {
- traverseSparkQueue (evac, user, cap);
- }
-#endif
+ if (!no_mark_sparks) {
+ traverseSparkQueue (evac, user, cap);
}
+#endif
-#if !defined(THREADED_RTS)
- evac(user, (StgClosure **)(void *)&blocked_queue_hd);
- evac(user, (StgClosure **)(void *)&blocked_queue_tl);
- evac(user, (StgClosure **)(void *)&sleeping_queue);
-#endif
+ // Free STM structures for this Capability
+ stmPreGCHook(cap);
}
void
markCapabilities (evac_fn evac, void *user)
{
- markSomeCapabilities(evac, user, 0, 1, rtsFalse);
+ nat n;
+ for (n = 0; n < n_capabilities; n++) {
+ markCapability(evac, user, &capabilities[n], rtsFalse);
+ }
}
-
-/* -----------------------------------------------------------------------------
- Messages
- -------------------------------------------------------------------------- */
-
void freeCapabilities (void);
// For the GC:
-void markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
- rtsBool no_mark_sparks);
+void markCapability (evac_fn evac, void *user, Capability *cap,
+ rtsBool no_mark_sparks USED_IF_THREADS);
+
void markCapabilities (evac_fn evac, void *user);
+
void traverseSparkQueues (evac_fn evac, void *user);
/* -----------------------------------------------------------------------------
/* Linked list of (key, data) pairs for separate chaining */
-struct hashlist {
+typedef struct hashlist {
StgWord key;
void *data;
struct hashlist *next; /* Next cell in bucket chain (same hash value) */
-};
+} HashList;
-typedef struct hashlist HashList;
+typedef struct chunklist {
+ HashList *chunk;
+ struct chunklist *next;
+} HashListChunk;
struct hashtable {
int split; /* Next bucket to split when expanding */
int kcount; /* Number of keys */
int bcount; /* Number of buckets */
HashList **dir[HDIRSIZE]; /* Directory of segments */
- HashFunction *hash; /* hash function */
+ HashList *freeList; /* free list of HashLists */
+ HashListChunk *chunks;
+ HashFunction *hash; /* hash function */
CompareFunction *compare; /* key comparison function */
};
* no effort to actually return the space to the malloc arena.
* -------------------------------------------------------------------------- */
-static HashList *freeList = NULL;
-
-static struct chunkList {
- void *chunk;
- struct chunkList *next;
-} *chunks;
-
static HashList *
-allocHashList(void)
+allocHashList (HashTable *table)
{
HashList *hl, *p;
- struct chunkList *cl;
+ HashListChunk *cl;
- if ((hl = freeList) != NULL) {
- freeList = hl->next;
+ if ((hl = table->freeList) != NULL) {
+ table->freeList = hl->next;
} else {
hl = stgMallocBytes(HCHUNK * sizeof(HashList), "allocHashList");
cl = stgMallocBytes(sizeof (*cl), "allocHashList: chunkList");
- cl->chunk = hl;
- cl->next = chunks;
- chunks = cl;
+ cl->chunk = hl;
+ cl->next = table->chunks;
+ table->chunks = cl;
- freeList = hl + 1;
- for (p = freeList; p < hl + HCHUNK - 1; p++)
+ table->freeList = hl + 1;
+ for (p = table->freeList; p < hl + HCHUNK - 1; p++)
p->next = p + 1;
p->next = NULL;
}
}
static void
-freeHashList(HashList *hl)
+freeHashList (HashTable *table, HashList *hl)
{
- hl->next = freeList;
- freeList = hl;
+ hl->next = table->freeList;
+ table->freeList = hl;
}
void
segment = bucket / HSEGSIZE;
index = bucket % HSEGSIZE;
- hl = allocHashList();
+ hl = allocHashList(table);
hl->key = key;
hl->data = data;
table->dir[segment][index] = hl->next;
else
prev->next = hl->next;
- freeHashList(hl);
+ freeHashList(table,hl);
table->kcount--;
return hl->data;
}
long index;
HashList *hl;
HashList *next;
+ HashListChunk *cl, *cl_next;
/* The last bucket with something in it is table->max + table->split - 1 */
segment = (table->max + table->split - 1) / HSEGSIZE;
next = hl->next;
if (freeDataFun != NULL)
(*freeDataFun)(hl->data);
- freeHashList(hl);
- }
+ }
index--;
}
stgFree(table->dir[segment]);
segment--;
index = HSEGSIZE - 1;
}
+ for (cl = table->chunks; cl != NULL; cl = cl_next) {
+ cl_next = cl->next;
+ stgFree(cl->chunk);
+ stgFree(cl);
+ }
stgFree(table);
}
table->mask2 = 2 * HSEGSIZE - 1;
table->kcount = 0;
table->bcount = HSEGSIZE;
+ table->freeList = NULL;
+ table->chunks = NULL;
table->hash = hash;
table->compare = compare;
void
exitHashTable(void)
{
- struct chunkList *cl;
-
- while ((cl = chunks) != NULL) {
- chunks = cl->next;
- stgFree(cl->chunk);
- stgFree(cl);
- }
+ /* nothing to do */
}
#include "Rts.h"
#include "Trace.h"
+#include "Hash.h"
+#include "RtsUtils.h"
#include <stdio.h>
#include <ctype.h>
static FILE *tixFile; // file being read/written
static int tix_ch; // current char
+static HashTable * moduleHash = NULL; // module name -> HpcModuleInfo
+
HpcModuleInfo *modules = 0;
-HpcModuleInfo *nextModule = 0;
-int totalTixes = 0; // total number of tix boxes.
-static char *tixFilename;
+static char *tixFilename = NULL;
static void GNU_ATTRIBUTE(__noreturn__)
failure(char *msg) {
}
static char *expectString(void) {
- char tmp[256], *res;
+ char tmp[256], *res; // XXX
int tmp_ix = 0;
expect('"');
while (tix_ch != '"') {
}
tmp[tmp_ix++] = 0;
expect('"');
- res = malloc(tmp_ix);
+ res = stgMallocBytes(tmp_ix,"Hpc.expectString");
strcpy(res,tmp);
return res;
}
static void
readTix(void) {
unsigned int i;
- HpcModuleInfo *tmpModule;
+ HpcModuleInfo *tmpModule, *lookup;
- totalTixes = 0;
-
ws();
expect('T');
expect('i');
ws();
while(tix_ch != ']') {
- tmpModule = (HpcModuleInfo *)calloc(1,sizeof(HpcModuleInfo));
+ tmpModule = (HpcModuleInfo *)stgMallocBytes(sizeof(HpcModuleInfo),
+ "Hpc.readTix");
+ tmpModule->from_file = rtsTrue;
expect('T');
expect('i');
expect('x');
ws();
tmpModule -> tickCount = (int)expectWord64();
tmpModule -> tixArr = (StgWord64 *)calloc(tmpModule->tickCount,sizeof(StgWord64));
- tmpModule -> tickOffset = totalTixes;
- totalTixes += tmpModule -> tickCount;
ws();
expect('[');
ws();
expect(']');
ws();
- if (!modules) {
- modules = tmpModule;
+        lookup = lookupHashTable(moduleHash, (StgWord)tmpModule->modName);
+        /* Test the hash-table result: tmpModule was just returned by
+         * stgMallocBytes and can never be NULL here.  Testing tmpModule
+         * sent every new module into the "existing entry" branch below,
+         * which dereferences a NULL lookup. */
+        if (lookup == NULL) {
+            debugTrace(DEBUG_hpc,"readTix: new HpcModuleInfo for %s",
+                       tmpModule->modName);
+            insertHashTable(moduleHash, (StgWord)tmpModule->modName, tmpModule);
} else {
- nextModule->next=tmpModule;
+ ASSERT(lookup->tixArr != 0);
+ ASSERT(!strcmp(tmpModule->modName, lookup->modName));
+ debugTrace(DEBUG_hpc,"readTix: existing HpcModuleInfo for %s",
+ tmpModule->modName);
+        if (tmpModule->hashNo != lookup->hashNo) {
+            fprintf(stderr,"in module '%s'\n",tmpModule->modName);
+            /* failure() is declared GNU_ATTRIBUTE(__noreturn__), so emit
+             * the "remove the stale .tix file" hint *before* calling it;
+             * with the old ordering the hint (and the stg_exit call) were
+             * unreachable. */
+            if (tixFilename != NULL) {
+                fprintf(stderr,"(perhaps remove %s ?)\n",tixFilename);
+            }
+            failure("module mismatch with .tix/.mix file hash number");
+        }
+ for (i=0; i < tmpModule->tickCount; i++) {
+ lookup->tixArr[i] = tmpModule->tixArr[i];
+ }
+ stgFree(tmpModule->tixArr);
+ stgFree(tmpModule->modName);
+ stgFree(tmpModule);
}
- nextModule=tmpModule;
-
+
if (tix_ch == ',') {
expect(',');
ws();
fclose(tixFile);
}
-static void hpc_init(void) {
+void
+startupHpc(void)
+{
char *hpc_tixdir;
char *hpc_tixfile;
+
+ if (moduleHash == NULL) {
+ // no modules were registered with hs_hpc_module, so don't bother
+ // creating the .tix file.
+ return;
+ }
+
if (hpc_inited != 0) {
return;
}
hpc_tixdir = getenv("HPCTIXDIR");
hpc_tixfile = getenv("HPCTIXFILE");
+ debugTrace(DEBUG_hpc,"startupHpc");
+
/* XXX Check results of mallocs/strdups, and check we are requesting
enough bytes */
if (hpc_tixfile != NULL) {
#endif
/* Then, try open the file
*/
- tixFilename = (char *) malloc(strlen(hpc_tixdir) + strlen(prog_name) + 12);
+ tixFilename = (char *) stgMallocBytes(strlen(hpc_tixdir) +
+ strlen(prog_name) + 12,
+ "Hpc.startupHpc");
sprintf(tixFilename,"%s/%s-%d.tix",hpc_tixdir,prog_name,(int)hpc_pid);
} else {
- tixFilename = (char *) malloc(strlen(prog_name) + 6);
+ tixFilename = (char *) stgMallocBytes(strlen(prog_name) + 6,
+ "Hpc.startupHpc");
sprintf(tixFilename, "%s.tix", prog_name);
}
}
}
-/* Called on a per-module basis, at startup time, declaring where the tix boxes are stored in memory.
- * This memory can be uninitized, because we will initialize it with either the contents
- * of the tix file, or all zeros.
+/*
+ * Called on a per-module basis, by a constructor function compiled
+ * with each module (see Coverage.hpcInitCode), declaring where the
+ * tix boxes are stored in memory.  This memory can be uninitialized,
+ * because we will initialize it with either the contents of the tix
+ * file, or all zeros.
+ *
+ * Note that we might call this before reading the .tix file, or after
+ * in the case where we loaded some Haskell code from a .so with
+ * dlopen(). So we must handle the case where we already have an
+ * HpcModuleInfo for the module which was read from the .tix file.
*/
-int
+void
hs_hpc_module(char *modName,
StgWord32 modCount,
StgWord32 modHashNo,
- StgWord64 *tixArr) {
- HpcModuleInfo *tmpModule, *lastModule;
- unsigned int i;
- int offset = 0;
-
- debugTrace(DEBUG_hpc,"hs_hpc_module(%s,%d)",modName,(nat)modCount);
+ StgWord64 *tixArr)
+{
+ HpcModuleInfo *tmpModule;
+ nat i;
- hpc_init();
+ if (moduleHash == NULL) {
+ moduleHash = allocStrHashTable();
+ }
- tmpModule = modules;
- lastModule = 0;
-
- for(;tmpModule != 0;tmpModule = tmpModule->next) {
- if (!strcmp(tmpModule->modName,modName)) {
+ tmpModule = lookupHashTable(moduleHash, (StgWord)modName);
+ if (tmpModule == NULL)
+ {
+ // Did not find entry so add one on.
+ tmpModule = (HpcModuleInfo *)stgMallocBytes(sizeof(HpcModuleInfo),
+ "Hpc.hs_hpc_module");
+ tmpModule->modName = modName;
+ tmpModule->tickCount = modCount;
+ tmpModule->hashNo = modHashNo;
+
+ tmpModule->tixArr = tixArr;
+ for(i=0;i < modCount;i++) {
+ tixArr[i] = 0;
+ }
+ tmpModule->next = modules;
+ tmpModule->from_file = rtsFalse;
+ modules = tmpModule;
+ insertHashTable(moduleHash, (StgWord)modName, tmpModule);
+ }
+ else
+ {
if (tmpModule->tickCount != modCount) {
- failure("inconsistent number of tick boxes");
+ failure("inconsistent number of tick boxes");
}
- assert(tmpModule->tixArr != 0);
+ ASSERT(tmpModule->tixArr != 0);
if (tmpModule->hashNo != modHashNo) {
- fprintf(stderr,"in module '%s'\n",tmpModule->modName);
- failure("module mismatch with .tix/.mix file hash number");
- fprintf(stderr,"(perhaps remove %s ?)\n",tixFilename);
- stg_exit(1);
-
+          fprintf(stderr,"in module '%s'\n",tmpModule->modName);
+          /* failure() is __noreturn__: print the "remove the .tix file"
+           * hint first, otherwise it can never be reached. */
+          if (tixFilename != NULL) {
+              fprintf(stderr,"(perhaps remove %s ?)\n",tixFilename);
+          }
+          failure("module mismatch with .tix/.mix file hash number");
}
+ // The existing tixArr was made up when we read the .tix file,
+ // whereas this is the real tixArr, so copy the data from the
+ // .tix into the real tixArr.
for(i=0;i < modCount;i++) {
- tixArr[i] = tmpModule->tixArr[i];
+ tixArr[i] = tmpModule->tixArr[i];
}
- tmpModule->tixArr = tixArr;
- return tmpModule->tickOffset;
- }
- lastModule = tmpModule;
- }
- // Did not find entry so add one on.
- tmpModule = (HpcModuleInfo *)calloc(1,sizeof(HpcModuleInfo));
- tmpModule->modName = modName;
- tmpModule->tickCount = modCount;
- tmpModule->hashNo = modHashNo;
- if (lastModule) {
- tmpModule->tickOffset = lastModule->tickOffset + lastModule->tickCount;
- } else {
- tmpModule->tickOffset = 0;
- }
- tmpModule->tixArr = tixArr;
- for(i=0;i < modCount;i++) {
- tixArr[i] = 0;
- }
- tmpModule->next = 0;
-
- if (!modules) {
- modules = tmpModule;
- } else {
- lastModule->next=tmpModule;
- }
-
- debugTrace(DEBUG_hpc,"end: hs_hpc_module");
-
- return offset;
-}
-
-/* This is called after all the modules have registered their local tixboxes,
- * and does a sanity check: are we good to go?
- */
-
-void
-startupHpc(void) {
- debugTrace(DEBUG_hpc,"startupHpc");
-
- if (hpc_inited == 0) {
- return;
+ if (tmpModule->from_file) {
+ stgFree(tmpModule->modName);
+ stgFree(tmpModule->tixArr);
+ }
+ tmpModule->from_file = rtsFalse;
}
}
-
static void
writeTix(FILE *f) {
HpcModuleInfo *tmpModule;
tmpModule->modName,
(nat)tmpModule->hashNo,
(nat)tmpModule->tickCount);
- debugTrace(DEBUG_hpc,"%s: %u (offset=%u) (hash=%u)\n",
+ debugTrace(DEBUG_hpc,"%s: %u (hash=%u)\n",
tmpModule->modName,
(nat)tmpModule->tickCount,
- (nat)tmpModule->hashNo,
- (nat)tmpModule->tickOffset);
+ (nat)tmpModule->hashNo);
inner_comma = 0;
for(i = 0;i < tmpModule->tickCount;i++) {
fclose(f);
}
-/* Called at the end of execution, to write out the Hpc *.tix file
+static void
+freeHpcModuleInfo (HpcModuleInfo *mod)
+{
+ if (mod->from_file) {
+ stgFree(mod->modName);
+ stgFree(mod->tixArr);
+ }
+ stgFree(mod);
+}
+
+/* Called at the end of execution, to write out the Hpc *.tix file
* for this exection. Safe to call, even if coverage is not used.
*/
void
FILE *f = fopen(tixFilename,"w");
writeTix(f);
}
+
+ freeHashTable(moduleHash, (void (*)(void *))freeHpcModuleInfo);
+ moduleHash = NULL;
+
+ stgFree(tixFilename);
+ tixFilename = NULL;
}
//////////////////////////////////////////////////////////////////////////////
#include <sys/wait.h>
#endif
-#if defined(linux_HOST_OS ) || defined(freebsd_HOST_OS) || \
- defined(dragonfly_HOST_OS) || defined(netbsd_HOST_OS ) || \
- defined(openbsd_HOST_OS ) || \
- ( defined(darwin_HOST_OS ) && !defined(powerpc_HOST_ARCH) ) || \
- defined(kfreebsdgnu_HOST_OS)
-/* Don't use mmap on powerpc-apple-darwin as mmap doesn't support
+#if !defined(powerpc_HOST_ARCH) && \
+ ( defined(linux_HOST_OS ) || defined(freebsd_HOST_OS) || \
+ defined(dragonfly_HOST_OS) || defined(netbsd_HOST_OS ) || \
+ defined(openbsd_HOST_OS ) || defined(darwin_HOST_OS ) || \
+ defined(kfreebsdgnu_HOST_OS) )
+/* Don't use mmap on powerpc_HOST_ARCH as mmap doesn't support
* reallocating but we need to allocate jump islands just after each
* object images. Otherwise relative branches to jump islands can fail
* due to 24-bits displacement overflow.
#elif defined(darwin_HOST_OS)
# define OBJFORMAT_MACHO
# include <regex.h>
+# include <mach/machine.h>
+# include <mach-o/fat.h>
# include <mach-o/loader.h>
# include <mach-o/nlist.h>
# include <mach-o/reloc.h>
SymI_HasProto(signal_handlers) \
SymI_HasProto(stg_sig_install) \
SymI_HasProto(rtsTimerSignal) \
+ SymI_HasProto(atexit) \
SymI_NeedsProto(nocldstop)
#endif
SymI_HasProto(stg_newTVarzh) \
SymI_HasProto(stg_noDuplicatezh) \
SymI_HasProto(stg_atomicModifyMutVarzh) \
+ SymI_HasProto(stg_casMutVarzh) \
SymI_HasProto(stg_newPinnedByteArrayzh) \
SymI_HasProto(stg_newAlignedPinnedByteArrayzh) \
SymI_HasProto(newSpark) \
int pagesize, size;
static nat fixed = 0;
+ IF_DEBUG(linker, debugBelch("mmapForLinker: start\n"));
pagesize = getpagesize();
size = ROUND_UP(bytes, pagesize);
}
#endif
+ IF_DEBUG(linker, debugBelch("mmapForLinker: \tprotection %#0x\n", PROT_EXEC | PROT_READ | PROT_WRITE));
+ IF_DEBUG(linker, debugBelch("mmapForLinker: \tflags %#0x\n", MAP_PRIVATE | TRY_MAP_32BIT | fixed | flags));
result = mmap(map_addr, size, PROT_EXEC|PROT_READ|PROT_WRITE,
MAP_PRIVATE|TRY_MAP_32BIT|fixed|flags, fd, 0);
}
#endif
+ IF_DEBUG(linker, debugBelch("mmapForLinker: mapped %lu bytes starting at %p\n", (lnat)size, result));
+ IF_DEBUG(linker, debugBelch("mmapForLinker: done\n"));
return result;
}
#endif // USE_MMAP
) {
ObjectCode* oc;
+ IF_DEBUG(linker, debugBelch("mkOc: start\n"));
oc = stgMallocBytes(sizeof(ObjectCode), "loadArchive(oc)");
# if defined(OBJFORMAT_ELF)
oc->next = objects;
objects = oc;
+ IF_DEBUG(linker, debugBelch("mkOc: done\n"));
return oc;
}
char *fileName;
size_t fileNameSize;
int isObject, isGnuIndex;
- char tmp[12];
+ char tmp[20];
char *gnuFileIndex;
int gnuFileIndexSize;
-#if !defined(USE_MMAP) && defined(darwin_HOST_OS)
+#if defined(darwin_HOST_OS)
+ int i;
+ uint32_t nfat_arch, nfat_offset, cputype, cpusubtype;
+#if defined(i386_HOST_ARCH)
+ const uint32_t mycputype = CPU_TYPE_X86;
+ const uint32_t mycpusubtype = CPU_SUBTYPE_X86_ALL;
+#elif defined(x86_64_HOST_ARCH)
+ const uint32_t mycputype = CPU_TYPE_X86_64;
+ const uint32_t mycpusubtype = CPU_SUBTYPE_X86_64_ALL;
+#elif defined(powerpc_HOST_ARCH)
+ const uint32_t mycputype = CPU_TYPE_POWERPC;
+ const uint32_t mycpusubtype = CPU_SUBTYPE_POWERPC_ALL;
+#elif defined(powerpc64_HOST_ARCH)
+ const uint32_t mycputype = CPU_TYPE_POWERPC64;
+ const uint32_t mycpusubtype = CPU_SUBTYPE_POWERPC_ALL;
+#else
+#error Unknown Darwin architecture
+#endif
+#if !defined(USE_MMAP)
int misalignment;
#endif
+#endif
+ IF_DEBUG(linker, debugBelch("loadArchive: start\n"));
IF_DEBUG(linker, debugBelch("loadArchive: Loading archive `%s'\n", path));
gnuFileIndex = NULL;
if (!f)
barf("loadObj: can't read `%s'", path);
+ /* Check if this is an archive by looking for the magic "!<arch>\n"
+ * string. Usually, if this fails, we barf and quit. On Darwin however,
+ * we may have a fat archive, which contains archives for more than
+ * one architecture. Fat archives start with the magic number 0xcafebabe,
+ * always stored big endian. If we find a fat_header, we scan through
+ * the fat_arch structs, searching through for one for our host
+ * architecture. If a matching struct is found, we read the offset
+ * of our archive data (nfat_offset) and seek forward nfat_offset bytes
+ * from the start of the file.
+ *
+ * A subtlety is that all of the members of the fat_header and fat_arch
+ * structs are stored big endian, so we need to call byte order
+ * conversion functions.
+ *
+ * If we find the appropriate architecture in a fat archive, we gobble
+ * its magic "!<arch>\n" string and continue processing just as if
+ * we had a single architecture archive.
+ */
+
n = fread ( tmp, 1, 8, f );
- if (strncmp(tmp, "!<arch>\n", 8) != 0)
+ if (n != 8)
+ barf("loadArchive: Failed reading header from `%s'", path);
+ if (strncmp(tmp, "!<arch>\n", 8) != 0) {
+
+#if defined(darwin_HOST_OS)
+ /* Not a standard archive, look for a fat archive magic number: */
+ if (ntohl(*(uint32_t *)tmp) == FAT_MAGIC) {
+ nfat_arch = ntohl(*(uint32_t *)(tmp + 4));
+ IF_DEBUG(linker, debugBelch("loadArchive: found a fat archive containing %d architectures\n", nfat_arch));
+ nfat_offset = 0;
+
+ for (i = 0; i < (int)nfat_arch; i++) {
+ /* search for the right arch */
+                n = fread( tmp, 1, 20, f );
+                /* A fat_arch entry is 20 bytes (five big-endian uint32s:
+                 * cputype, cpusubtype, offset, size, align), so require
+                 * all 20 bytes; checking for 8 here let short reads of
+                 * 9-19 bytes through with a partially-filled tmp. */
+                if (n != 20)
+                    barf("loadArchive: Failed reading arch from `%s'", path);
+ cputype = ntohl(*(uint32_t *)tmp);
+ cpusubtype = ntohl(*(uint32_t *)(tmp + 4));
+
+ if (cputype == mycputype && cpusubtype == mycpusubtype) {
+ IF_DEBUG(linker, debugBelch("loadArchive: found my archive in a fat archive\n"));
+ nfat_offset = ntohl(*(uint32_t *)(tmp + 8));
+ break;
+ }
+ }
+
+ if (nfat_offset == 0) {
+ barf ("loadArchive: searched %d architectures, but no host arch found", (int)nfat_arch);
+ }
+ else {
+ n = fseek( f, nfat_offset, SEEK_SET );
+ if (n != 0)
+ barf("loadArchive: Failed to seek to arch in `%s'", path);
+ n = fread ( tmp, 1, 8, f );
+ if (n != 8)
+ barf("loadArchive: Failed reading header from `%s'", path);
+ if (strncmp(tmp, "!<arch>\n", 8) != 0) {
+ barf("loadArchive: couldn't find archive in `%s' at offset %d", path, nfat_offset);
+ }
+ }
+ }
+ else {
+ barf("loadArchive: Neither an archive, nor a fat archive: `%s'", path);
+ }
+
+#else
barf("loadArchive: Not an archive: `%s'", path);
+#endif
+ }
+
+ IF_DEBUG(linker, debugBelch("loadArchive: loading archive contents\n"));
while(1) {
n = fread ( fileName, 1, 16, f );
if (n != 16) {
if (feof(f)) {
+ IF_DEBUG(linker, debugBelch("loadArchive: EOF while reading from '%s'\n", path));
break;
}
else {
barf("loadArchive: Failed reading file name from `%s'", path);
}
}
+
+#if defined(darwin_HOST_OS)
+ if (strncmp(fileName, "!<arch>\n", 8) == 0) {
+ IF_DEBUG(linker, debugBelch("loadArchive: found the start of another archive, breaking\n"));
+ break;
+ }
+#endif
+
n = fread ( tmp, 1, 12, f );
if (n != 12)
barf("loadArchive: Failed reading mod time from `%s'", path);
for (n = 0; isdigit(tmp[n]); n++);
tmp[n] = '\0';
memberSize = atoi(tmp);
+
+ IF_DEBUG(linker, debugBelch("loadArchive: size of this archive member is %d\n", memberSize));
n = fread ( tmp, 1, 2, f );
+ if (n != 2)
+ barf("loadArchive: Failed reading magic from `%s'", path);
if (strncmp(tmp, "\x60\x0A", 2) != 0)
barf("loadArchive: Failed reading magic from `%s' at %ld. Got %c%c",
path, ftell(f), tmp[0], tmp[1]);
path);
}
fileName[thisFileNameSize] = 0;
+
+ /* On OS X at least, thisFileNameSize is the size of the
+ fileName field, not the length of the fileName
+ itself. */
+ thisFileNameSize = strlen(fileName);
}
else {
barf("loadArchive: BSD-variant filename size not found while reading filename from `%s'", path);
&& fileName[thisFileNameSize - 2] == '.'
&& fileName[thisFileNameSize - 1] == 'o';
+ IF_DEBUG(linker, debugBelch("loadArchive: \tthisFileNameSize = %d\n", (int)thisFileNameSize));
+ IF_DEBUG(linker, debugBelch("loadArchive: \tisObject = %d\n", isObject));
+
if (isObject) {
char *archiveMemberName;
gnuFileIndexSize = memberSize;
}
else {
+ IF_DEBUG(linker, debugBelch("loadArchive: '%s' does not appear to be an object file\n", fileName));
n = fseek(f, memberSize, SEEK_CUR);
if (n != 0)
barf("loadArchive: error whilst seeking by %d in `%s'",
memberSize, path);
}
+
/* .ar files are 2-byte aligned */
if (memberSize % 2) {
+ IF_DEBUG(linker, debugBelch("loadArchive: trying to read one pad byte\n"));
n = fread ( tmp, 1, 1, f );
if (n != 1) {
if (feof(f)) {
+ IF_DEBUG(linker, debugBelch("loadArchive: found EOF while reading one pad byte\n"));
break;
}
else {
barf("loadArchive: Failed reading padding from `%s'", path);
}
}
+ IF_DEBUG(linker, debugBelch("loadArchive: successfully read one pad byte\n"));
}
+ IF_DEBUG(linker, debugBelch("loadArchive: reached end of archive loading while loop\n"));
}
fclose(f);
#endif
}
+ IF_DEBUG(linker, debugBelch("loadArchive: done\n"));
return 1;
}
loadOc( ObjectCode* oc ) {
int r;
- IF_DEBUG(linker, debugBelch("loadOc\n"));
+ IF_DEBUG(linker, debugBelch("loadOc: start\n"));
# if defined(OBJFORMAT_MACHO) && (defined(powerpc_HOST_ARCH) || defined(x86_64_HOST_ARCH))
r = ocAllocateSymbolExtras_MachO ( oc );
if (!r) {
- IF_DEBUG(linker, debugBelch("ocAllocateSymbolExtras_MachO failed\n"));
+ IF_DEBUG(linker, debugBelch("loadOc: ocAllocateSymbolExtras_MachO failed\n"));
return r;
}
# elif defined(OBJFORMAT_ELF) && (defined(powerpc_HOST_ARCH) || defined(x86_64_HOST_ARCH))
r = ocAllocateSymbolExtras_ELF ( oc );
if (!r) {
- IF_DEBUG(linker, debugBelch("ocAllocateSymbolExtras_ELF failed\n"));
+ IF_DEBUG(linker, debugBelch("loadOc: ocAllocateSymbolExtras_ELF failed\n"));
return r;
}
#endif
barf("loadObj: no verify method");
# endif
if (!r) {
- IF_DEBUG(linker, debugBelch("ocVerifyImage_* failed\n"));
+ IF_DEBUG(linker, debugBelch("loadOc: ocVerifyImage_* failed\n"));
return r;
}
barf("loadObj: no getNames method");
# endif
if (!r) {
- IF_DEBUG(linker, debugBelch("ocGetNames_* failed\n"));
+ IF_DEBUG(linker, debugBelch("loadOc: ocGetNames_* failed\n"));
return r;
}
/* loaded, but not resolved yet */
oc->status = OBJECT_LOADED;
- IF_DEBUG(linker, debugBelch("loadObj done.\n"));
+ IF_DEBUG(linker, debugBelch("loadOc: done.\n"));
return 1;
}
* which may be prodded during relocation, and abort if we try and write
* outside any of these.
*/
-static void addProddableBlock ( ObjectCode* oc, void* start, int size )
+static void
+addProddableBlock ( ObjectCode* oc, void* start, int size )
{
ProddableBlock* pb
= stgMallocBytes(sizeof(ProddableBlock), "addProddableBlock");
- IF_DEBUG(linker, debugBelch("addProddableBlock %p %p %d\n", oc, start, size));
+
+ IF_DEBUG(linker, debugBelch("addProddableBlock: %p %p %d\n", oc, start, size));
ASSERT(size > 0);
pb->start = start;
pb->size = size;
oc->proddables = pb;
}
-static void checkProddableBlock ( ObjectCode* oc, void* addr )
+static void
+checkProddableBlock (ObjectCode *oc, void *addr )
{
ProddableBlock* pb;
+
for (pb = oc->proddables; pb != NULL; pb = pb->next) {
char* s = (char*)(pb->start);
char* e = s + pb->size - 1;
/* -----------------------------------------------------------------------------
* Section management.
*/
-static void addSection ( ObjectCode* oc, SectionKind kind,
+static void
+addSection ( ObjectCode* oc, SectionKind kind,
void* start, void* end )
{
Section* s = stgMallocBytes(sizeof(Section), "addSection");
s->kind = kind;
s->next = oc->sections;
oc->sections = s;
- /*
- debugBelch("addSection: %p-%p (size %d), kind %d\n",
- start, ((char*)end)-1, end - start + 1, kind );
- */
+
+ IF_DEBUG(linker, debugBelch("addSection: %p-%p (size %ld), kind %d\n",
+ start, ((char*)end)-1, (long)end - (long)start + 1, kind ));
}
Because the PPC has split data/instruction caches, we have to
do that whenever we modify code at runtime.
*/
-static void ocFlushInstructionCacheFrom(void* begin, size_t length)
+
+static void
+ocFlushInstructionCacheFrom(void* begin, size_t length)
{
size_t n = (length + 3) / 4;
unsigned long* p = begin;
"isync"
);
}
-static void ocFlushInstructionCache( ObjectCode *oc )
+
+static void
+ocFlushInstructionCache( ObjectCode *oc )
{
/* The main object code */
- ocFlushInstructionCacheFrom(oc->image + oc->misalignment, oc->fileSize);
+ ocFlushInstructionCacheFrom(oc->image
+#ifdef darwin_HOST_OS
+ + oc->misalignment
+#endif
+ , oc->fileSize);
/* Jump Islands */
ocFlushInstructionCacheFrom(oc->symbol_extras, sizeof(SymbolExtra) * oc->n_symbol_extras);
}
-#endif
+#endif /* powerpc_HOST_ARCH */
+
/* --------------------------------------------------------------------------
* PEi386 specifics (Win32 targets)
#endif
#ifdef powerpc_HOST_ARCH
-static int ocAllocateSymbolExtras_MachO(ObjectCode* oc)
+static int
+ocAllocateSymbolExtras_MachO(ObjectCode* oc)
{
struct mach_header *header = (struct mach_header *) oc->image;
struct load_command *lc = (struct load_command *) (header + 1);
unsigned i;
- for( i = 0; i < header->ncmds; i++ )
- {
- if( lc->cmd == LC_SYMTAB )
- {
+ IF_DEBUG(linker, debugBelch("ocAllocateSymbolExtras_MachO: start\n"));
+
+ for (i = 0; i < header->ncmds; i++) {
+ if (lc->cmd == LC_SYMTAB) {
+
// Find out the first and last undefined external
// symbol, so we don't have to allocate too many
- // jump islands.
+ // jump islands/GOT entries.
+
struct symtab_command *symLC = (struct symtab_command *) lc;
unsigned min = symLC->nsyms, max = 0;
struct nlist *nlist =
symLC ? (struct nlist*) ((char*) oc->image + symLC->symoff)
: NULL;
- for(i=0;i<symLC->nsyms;i++)
- {
- if(nlist[i].n_type & N_STAB)
+
+ for (i = 0; i < symLC->nsyms; i++) {
+
+ if (nlist[i].n_type & N_STAB) {
;
- else if(nlist[i].n_type & N_EXT)
- {
+ } else if (nlist[i].n_type & N_EXT) {
+
if((nlist[i].n_type & N_TYPE) == N_UNDF
- && (nlist[i].n_value == 0))
- {
- if(i < min)
+ && (nlist[i].n_value == 0)) {
+
+ if (i < min) {
min = i;
- if(i > max)
+ }
+
+ if (i > max) {
max = i;
}
}
}
- if(max >= min)
+ }
+
+ if (max >= min) {
return ocAllocateSymbolExtras(oc, max - min + 1, min);
+ }
break;
}
lc = (struct load_command *) ( ((char *)lc) + lc->cmdsize );
}
+
return ocAllocateSymbolExtras(oc,0,0);
}
+
#endif
#ifdef x86_64_HOST_ARCH
-static int ocAllocateSymbolExtras_MachO(ObjectCode* oc)
+static int
+ocAllocateSymbolExtras_MachO(ObjectCode* oc)
{
struct mach_header *header = (struct mach_header *) oc->image;
struct load_command *lc = (struct load_command *) (header + 1);
unsigned i;
- for( i = 0; i < header->ncmds; i++ )
- {
- if( lc->cmd == LC_SYMTAB )
- {
+ IF_DEBUG(linker, debugBelch("ocAllocateSymbolExtras_MachO: start\n"));
+
+ for (i = 0; i < header->ncmds; i++) {
+ if (lc->cmd == LC_SYMTAB) {
+
// Just allocate one entry for every symbol
struct symtab_command *symLC = (struct symtab_command *) lc;
+ IF_DEBUG(linker, debugBelch("ocAllocateSymbolExtras_MachO: allocate %d symbols\n", symLC->nsyms));
+ IF_DEBUG(linker, debugBelch("ocAllocateSymbolExtras_MachO: done\n"));
return ocAllocateSymbolExtras(oc, symLC->nsyms, 0);
}
lc = (struct load_command *) ( ((char *)lc) + lc->cmdsize );
}
+
+ IF_DEBUG(linker, debugBelch("ocAllocateSymbolExtras_MachO: allocated no symbols\n"));
+ IF_DEBUG(linker, debugBelch("ocAllocateSymbolExtras_MachO: done\n"));
return ocAllocateSymbolExtras(oc,0,0);
}
#endif
-static int ocVerifyImage_MachO(ObjectCode* oc)
+static int
+ocVerifyImage_MachO(ObjectCode * oc)
{
char *image = (char*) oc->image;
struct mach_header *header = (struct mach_header*) image;
+ IF_DEBUG(linker, debugBelch("ocVerifyImage_MachO: start\n"));
+
#if x86_64_HOST_ARCH || powerpc64_HOST_ARCH
if(header->magic != MH_MAGIC_64) {
errorBelch("%s: Bad magic. Expected: %08x, got: %08x.\n",
return 0;
}
#endif
+
// FIXME: do some more verifying here
+ IF_DEBUG(linker, debugBelch("ocVerifyImage_MachO: done\n"));
return 1;
}
-static int resolveImports(
+static int
+resolveImports(
ObjectCode* oc,
char *image,
struct symtab_command *symLC,
#if i386_HOST_ARCH
int isJumpTable = 0;
- if(!strcmp(sect->sectname,"__jump_table"))
- {
+
+ if (strcmp(sect->sectname,"__jump_table") == 0) {
isJumpTable = 1;
itemSize = 5;
ASSERT(sect->reserved2 == itemSize);
}
+
#endif
for(i=0; i*itemSize < sect->size;i++)
void *addr = NULL;
IF_DEBUG(linker, debugBelch("resolveImports: resolving %s\n", nm));
+
if ((symbol->n_type & N_TYPE) == N_UNDF
&& (symbol->n_type & N_EXT) && (symbol->n_value != 0)) {
addr = (void*) (symbol->n_value);
ASSERT(addr);
#if i386_HOST_ARCH
- if(isJumpTable)
- {
+ if (isJumpTable) {
checkProddableBlock(oc,image + sect->offset + i*itemSize);
- *(image + sect->offset + i*itemSize) = 0xe9; // jmp
+
+ *(image + sect->offset + i * itemSize) = 0xe9; // jmp opcode
*(unsigned*)(image + sect->offset + i*itemSize + 1)
= (char*)addr - (image + sect->offset + i*itemSize + 5);
}
// and use #ifdefs for the other types.
// Step 1: Figure out what the relocated value should be
- if(scat->r_type == GENERIC_RELOC_VANILLA)
- {
- word = *wordPtr + (unsigned long) relocateAddress(
- oc,
+ if (scat->r_type == GENERIC_RELOC_VANILLA) {
+ word = *wordPtr
+ + (unsigned long) relocateAddress(oc,
nSections,
sections,
scat->r_value)
struct scattered_relocation_info *pair =
(struct scattered_relocation_info*) &relocs[i+1];
- if(!pair->r_scattered || pair->r_type != GENERIC_RELOC_PAIR)
+ if (!pair->r_scattered || pair->r_type != GENERIC_RELOC_PAIR) {
barf("Invalid Mach-O file: "
"RELOC_*_SECTDIFF not followed by RELOC_PAIR");
+ }
word = (unsigned long)
(relocateAddress(oc, nSections, sections, scat->r_value)
|| scat->r_type == PPC_RELOC_LO14)
{ // these are generated by label+offset things
struct relocation_info *pair = &relocs[i+1];
- if((pair->r_address & R_SCATTERED) || pair->r_type != PPC_RELOC_PAIR)
+
+ if ((pair->r_address & R_SCATTERED) || pair->r_type != PPC_RELOC_PAIR) {
barf("Invalid Mach-O file: "
"PPC_RELOC_* not followed by PPC_RELOC_PAIR");
+ }
if(scat->r_type == PPC_RELOC_LO16)
{
i++;
}
#endif
- else
- {
+ else {
barf ("Don't know how to handle this Mach-O "
"scattered relocation entry: "
"object file %s; entry type %ld; "
*wordPtr = word;
}
#ifdef powerpc_HOST_ARCH
- else if(scat->r_type == PPC_RELOC_LO16_SECTDIFF || scat->r_type == PPC_RELOC_LO16)
+ else if (scat->r_type == PPC_RELOC_LO16_SECTDIFF
+ || scat->r_type == PPC_RELOC_LO16)
{
((unsigned short*) wordPtr)[1] = word & 0xFFFF;
}
- else if(scat->r_type == PPC_RELOC_HI16_SECTDIFF || scat->r_type == PPC_RELOC_HI16)
+ else if (scat->r_type == PPC_RELOC_HI16_SECTDIFF
+ || scat->r_type == PPC_RELOC_HI16)
{
((unsigned short*) wordPtr)[1] = (word >> 16) & 0xFFFF;
}
- else if(scat->r_type == PPC_RELOC_HA16_SECTDIFF || scat->r_type == PPC_RELOC_HA16)
+ else if (scat->r_type == PPC_RELOC_HA16_SECTDIFF
+ || scat->r_type == PPC_RELOC_HA16)
{
((unsigned short*) wordPtr)[1] = ((word >> 16) & 0xFFFF)
+ ((word & (1<<15)) ? 1 : 0);
else /* !(relocs[i].r_address & R_SCATTERED) */
{
struct relocation_info *reloc = &relocs[i];
- if(reloc->r_pcrel && !reloc->r_extern)
+ if (reloc->r_pcrel && !reloc->r_extern) {
+ IF_DEBUG(linker, debugBelch("relocateSection: pc relative but not external, skipping\n"));
continue;
+ }
- if(reloc->r_length == 2)
- {
+ if (reloc->r_length == 2) {
unsigned long word = 0;
#ifdef powerpc_HOST_ARCH
unsigned long jumpIsland = 0;
unsigned long* wordPtr = (unsigned long*) (image + sect->offset + reloc->r_address);
checkProddableBlock(oc,wordPtr);
- if(reloc->r_type == GENERIC_RELOC_VANILLA)
- {
+ if (reloc->r_type == GENERIC_RELOC_VANILLA) {
word = *wordPtr;
}
#ifdef powerpc_HOST_ARCH
- else if(reloc->r_type == PPC_RELOC_LO16)
- {
+ else if (reloc->r_type == PPC_RELOC_LO16) {
word = ((unsigned short*) wordPtr)[1];
word |= ((unsigned long) relocs[i+1].r_address & 0xFFFF) << 16;
}
- else if(reloc->r_type == PPC_RELOC_HI16)
- {
+ else if (reloc->r_type == PPC_RELOC_HI16) {
word = ((unsigned short*) wordPtr)[1] << 16;
word |= ((unsigned long) relocs[i+1].r_address & 0xFFFF);
}
- else if(reloc->r_type == PPC_RELOC_HA16)
- {
+ else if (reloc->r_type == PPC_RELOC_HA16) {
word = ((unsigned short*) wordPtr)[1] << 16;
word += ((short)relocs[i+1].r_address & (short)0xFFFF);
}
- else if(reloc->r_type == PPC_RELOC_BR24)
- {
+ else if (reloc->r_type == PPC_RELOC_BR24) {
word = *wordPtr;
word = (word & 0x03FFFFFC) | ((word & 0x02000000) ? 0xFC000000 : 0);
}
#endif
- else
- {
+ else {
barf("Can't handle this Mach-O relocation entry "
"(not scattered): "
"object file %s; entry type %ld; address %#lx\n",
return 0;
}
- if(!reloc->r_extern)
- {
- long delta =
- sections[reloc->r_symbolnum-1].offset
+ if (!reloc->r_extern) {
+ long delta = sections[reloc->r_symbolnum-1].offset
- sections[reloc->r_symbolnum-1].addr
+ ((long) image);
word += delta;
}
- else
- {
+ else {
struct nlist *symbol = &nlist[reloc->r_symbolnum];
char *nm = image + symLC->stroff + symbol->n_un.n_strx;
void *symbolAddress = lookupSymbol(nm);
- if(!symbolAddress)
- {
+
+ if (!symbolAddress) {
errorBelch("\nunknown symbol `%s'", nm);
return 0;
}
- if(reloc->r_pcrel)
- {
+ if (reloc->r_pcrel) {
#ifdef powerpc_HOST_ARCH
// In the .o file, this should be a relative jump to NULL
// and we'll change it to a relative jump to the symbol
reloc->r_symbolnum,
(unsigned long) symbolAddress)
-> jumpIsland;
- if(jumpIsland != 0)
- {
+ if (jumpIsland != 0) {
offsetToJumpIsland = word + jumpIsland
- (((long)image) + sect->offset - sect->addr);
}
word += (unsigned long) symbolAddress
- (((long)image) + sect->offset - sect->addr);
}
- else
- {
+ else {
word += (unsigned long) symbolAddress;
}
}
- if(reloc->r_type == GENERIC_RELOC_VANILLA)
- {
+ if (reloc->r_type == GENERIC_RELOC_VANILLA) {
*wordPtr = word;
continue;
}
else if(reloc->r_type == PPC_RELOC_LO16)
{
((unsigned short*) wordPtr)[1] = word & 0xFFFF;
- i++; continue;
+ i++;
+ continue;
}
else if(reloc->r_type == PPC_RELOC_HI16)
{
((unsigned short*) wordPtr)[1] = (word >> 16) & 0xFFFF;
- i++; continue;
+ i++;
+ continue;
}
else if(reloc->r_type == PPC_RELOC_HA16)
{
((unsigned short*) wordPtr)[1] = ((word >> 16) & 0xFFFF)
+ ((word & (1<<15)) ? 1 : 0);
- i++; continue;
+ i++;
+ continue;
}
else if(reloc->r_type == PPC_RELOC_BR24)
{
- if((word & 0x03) != 0)
+ if ((word & 0x03) != 0) {
barf("%s: unconditional relative branch with a displacement "
"which isn't a multiple of 4 bytes: %#lx",
OC_INFORMATIVE_FILENAME(oc),
word);
+ }
if((word & 0xFE000000) != 0xFE000000 &&
- (word & 0xFE000000) != 0x00000000)
- {
+ (word & 0xFE000000) != 0x00000000) {
// The branch offset is too large.
// Therefore, we try to use a jump island.
- if(jumpIsland == 0)
- {
+ if (jumpIsland == 0) {
barf("%s: unconditional relative branch out of range: "
"no jump island available: %#lx",
OC_INFORMATIVE_FILENAME(oc),
}
word = offsetToJumpIsland;
+
if((word & 0xFE000000) != 0xFE000000 &&
- (word & 0xFE000000) != 0x00000000)
+ (word & 0xFE000000) != 0x00000000) {
barf("%s: unconditional relative branch out of range: "
"jump island out of range: %#lx",
OC_INFORMATIVE_FILENAME(oc),
word);
}
+ }
*wordPtr = (*wordPtr & 0xFC000003) | (word & 0x03FFFFFC);
continue;
}
}
#endif
}
+
IF_DEBUG(linker, debugBelch("relocateSection: done\n"));
return 1;
}
-static int ocGetNames_MachO(ObjectCode* oc)
+static int
+ocGetNames_MachO(ObjectCode* oc)
{
char *image = (char*) oc->image;
struct mach_header *header = (struct mach_header*) image;
for(i=0;i<header->ncmds;i++)
{
- if(lc->cmd == LC_SEGMENT || lc->cmd == LC_SEGMENT_64)
+ if (lc->cmd == LC_SEGMENT || lc->cmd == LC_SEGMENT_64) {
segLC = (struct segment_command*) lc;
- else if(lc->cmd == LC_SYMTAB)
+ }
+ else if (lc->cmd == LC_SYMTAB) {
symLC = (struct symtab_command*) lc;
+ }
+
lc = (struct load_command *) ( ((char*)lc) + lc->cmdsize );
}
nlist = symLC ? (struct nlist*) (image + symLC->symoff)
: NULL;
- if(!segLC)
+ if (!segLC) {
barf("ocGetNames_MachO: no segment load command");
+ }
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: will load %d sections\n", segLC->nsects));
for(i=0;i<segLC->nsects;i++)
{
- IF_DEBUG(linker, debugBelch("ocGetNames_MachO: segment %d\n", i));
- if (sections[i].size == 0)
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: section %d\n", i));
+
+ if (sections[i].size == 0) {
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: found a zero length section, skipping\n"));
continue;
+ }
if((sections[i].flags & SECTION_TYPE) == S_ZEROFILL)
{
sections[i].offset = zeroFillArea - image;
}
- if(!strcmp(sections[i].sectname,"__text"))
+ if (!strcmp(sections[i].sectname,"__text")) {
+
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: adding __text section\n"));
addSection(oc, SECTIONKIND_CODE_OR_RODATA,
(void*) (image + sections[i].offset),
(void*) (image + sections[i].offset + sections[i].size));
- else if(!strcmp(sections[i].sectname,"__const"))
+ }
+ else if (!strcmp(sections[i].sectname,"__const")) {
+
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: adding __const section\n"));
addSection(oc, SECTIONKIND_RWDATA,
(void*) (image + sections[i].offset),
(void*) (image + sections[i].offset + sections[i].size));
- else if(!strcmp(sections[i].sectname,"__data"))
+ }
+ else if (!strcmp(sections[i].sectname,"__data")) {
+
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: adding __data section\n"));
addSection(oc, SECTIONKIND_RWDATA,
(void*) (image + sections[i].offset),
(void*) (image + sections[i].offset + sections[i].size));
+ }
else if(!strcmp(sections[i].sectname,"__bss")
- || !strcmp(sections[i].sectname,"__common"))
+ || !strcmp(sections[i].sectname,"__common")) {
+
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: adding __bss section\n"));
addSection(oc, SECTIONKIND_RWDATA,
(void*) (image + sections[i].offset),
(void*) (image + sections[i].offset + sections[i].size));
-
- addProddableBlock(oc, (void*) (image + sections[i].offset),
+ }
+ addProddableBlock(oc,
+ (void *) (image + sections[i].offset),
sections[i].size);
}
// count external symbols defined here
oc->n_symbols = 0;
- if(symLC)
- {
- for(i=0;i<symLC->nsyms;i++)
- {
- if(nlist[i].n_type & N_STAB)
+ if (symLC) {
+ for (i = 0; i < symLC->nsyms; i++) {
+ if (nlist[i].n_type & N_STAB) {
;
+ }
else if(nlist[i].n_type & N_EXT)
{
if((nlist[i].n_type & N_TYPE) == N_UNDF
oc->symbols[curSymbol++] = nm;
}
}
+ else
+ {
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: \t...not external, skipping\n"));
+ }
+ }
+ else
+ {
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: \t...not defined in this section, skipping\n"));
}
}
}
commonStorage = stgCallocBytes(1,commonSize,"ocGetNames_MachO(common symbols)");
commonCounter = (unsigned long)commonStorage;
- if(symLC)
- {
- for(i=0;i<symLC->nsyms;i++)
- {
+
+ if (symLC) {
+ for (i = 0; i < symLC->nsyms; i++) {
if((nlist[i].n_type & N_TYPE) == N_UNDF
- && (nlist[i].n_type & N_EXT) && (nlist[i].n_value != 0))
- {
+ && (nlist[i].n_type & N_EXT)
+ && (nlist[i].n_value != 0)) {
+
char *nm = image + symLC->stroff + nlist[i].n_un.n_strx;
unsigned long sz = nlist[i].n_value;
}
}
}
+
+ IF_DEBUG(linker, debugBelch("ocGetNames_MachO: done\n"));
return 1;
}
-static int ocResolve_MachO(ObjectCode* oc)
+static int
+ocResolve_MachO(ObjectCode* oc)
{
char *image = (char*) oc->image;
struct mach_header *header = (struct mach_header*) image;
IF_DEBUG(linker, debugBelch("ocResolve_MachO: start\n"));
for (i = 0; i < header->ncmds; i++)
{
- if(lc->cmd == LC_SEGMENT || lc->cmd == LC_SEGMENT_64)
+ if (lc->cmd == LC_SEGMENT || lc->cmd == LC_SEGMENT_64) {
segLC = (struct segment_command*) lc;
- else if(lc->cmd == LC_SYMTAB)
+ IF_DEBUG(linker, debugBelch("ocResolve_MachO: found a 32 or 64 bit segment load command\n"));
+ }
+ else if (lc->cmd == LC_SYMTAB) {
symLC = (struct symtab_command*) lc;
- else if(lc->cmd == LC_DYSYMTAB)
+ IF_DEBUG(linker, debugBelch("ocResolve_MachO: found a symbol table load command\n"));
+ }
+ else if (lc->cmd == LC_DYSYMTAB) {
dsymLC = (struct dysymtab_command*) lc;
+ IF_DEBUG(linker, debugBelch("ocResolve_MachO: found a dynamic symbol table load command\n"));
+ }
+
lc = (struct load_command *) ( ((char*)lc) + lc->cmdsize );
}
extern void* symbolsWithoutUnderscore[];
-static void machoInitSymbolsWithoutUnderscore()
+static void
+machoInitSymbolsWithoutUnderscore(void)
{
void **p = symbolsWithoutUnderscore;
__asm__ volatile(".globl _symbolsWithoutUnderscore\n.data\n_symbolsWithoutUnderscore:");
* Figure out by how much to shift the entire Mach-O file in memory
* when loading so that its single segment ends up 16-byte-aligned
*/
-static int machoGetMisalignment( FILE * f )
+static int
+machoGetMisalignment( FILE * f )
{
struct mach_header header;
int misalignment;
#include "Rts.h"
#include "RtsMain.h"
-/* The symbol for the Haskell Main module's init function. It is safe to refer
- * to it here because this Main.o object file will only be linked in if we are
- * linking a Haskell program that uses a Haskell Main.main function.
- */
-extern void __stginit_ZCMain(void);
-
/* Similarly, we can refer to the ZCMain_main_closure here */
extern StgClosure ZCMain_main_closure;
int main(int argc, char *argv[])
{
- return hs_main(argc, argv, &__stginit_ZCMain, &ZCMain_main_closure);
+ return hs_main(argc, argv, &ZCMain_main_closure);
}
RET_P(mv);
}
+stg_casMutVarzh
+    /* MutVar# s a -> a -> a -> State# s -> (# State#, Int#, a #)
+     *
+     * Atomically compare-and-swap the contents of a MutVar#.  Returns
+     * (1, current) if the CAS failed, (0, old) if it succeeded.
+     */
+{
+    W_ mv, old, new, h;
+
+    mv = R1;
+    old = R2;
+    new = R3;
+
+    (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var,
+                          old, new) [];
+    if (h != old) {
+        /* CAS failed: report failure together with the value we saw. */
+        RET_NP(1,h);
+    } else {
+        /* CAS succeeded: the MutVar now points at 'new'.  Apply the same
+         * write barrier as stg_writeMutVarzh — if the MutVar lives in an
+         * old generation it must be marked dirty so the GC knows it may
+         * reference a younger object; without this a major GC can miss
+         * 'new' entirely. */
+        if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
+            foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
+        }
+        RET_NP(0,h);
+    }
+}
+
+
stg_atomicModifyMutVarzh
{
W_ mv, f, z, x, y, r, h;
{
}
-void freeProfiling1 (void)
+void freeProfiling (void)
{
}
* closure_cats
*/
-unsigned int CC_ID;
-unsigned int CCS_ID;
-unsigned int HP_ID;
+unsigned int CC_ID = 1;
+unsigned int CCS_ID = 1;
+unsigned int HP_ID = 1;
/* figures for the profiling report.
*/
/* Linked lists to keep track of cc's and ccs's that haven't
* been declared in the log file yet
*/
-CostCentre *CC_LIST;
-CostCentreStack *CCS_LIST;
+CostCentre *CC_LIST = NULL;
+CostCentreStack *CCS_LIST = NULL;
/*
* Built-in cost centres and cost-centre stacks:
/* for the benefit of allocate()... */
CCCS = CCS_SYSTEM;
-
- /* Initialize counters for IDs */
- CC_ID = 1;
- CCS_ID = 1;
- HP_ID = 1;
-
- /* Initialize Declaration lists to NULL */
- CC_LIST = NULL;
- CCS_LIST = NULL;
-
- /* Register all the cost centres / stacks in the program
- * CC_MAIN gets link = 0, all others have non-zero link.
- */
- REGISTER_CC(CC_MAIN);
- REGISTER_CC(CC_SYSTEM);
- REGISTER_CC(CC_GC);
- REGISTER_CC(CC_OVERHEAD);
- REGISTER_CC(CC_SUBSUMED);
- REGISTER_CC(CC_DONT_CARE);
- REGISTER_CCS(CCS_MAIN);
- REGISTER_CCS(CCS_SYSTEM);
- REGISTER_CCS(CCS_GC);
- REGISTER_CCS(CCS_OVERHEAD);
- REGISTER_CCS(CCS_SUBSUMED);
- REGISTER_CCS(CCS_DONT_CARE);
-
- CCCS = CCS_OVERHEAD;
-
- /* cost centres are registered by the per-module
- * initialisation code now...
- */
}
void
-freeProfiling1 (void)
+freeProfiling (void)
{
arenaFree(prof_arena);
}
* information into it. */
initProfilingLogFile();
+ /* Register all the cost centres / stacks in the program
+ * CC_MAIN gets link = 0, all others have non-zero link.
+ */
+ REGISTER_CC(CC_MAIN);
+ REGISTER_CC(CC_SYSTEM);
+ REGISTER_CC(CC_GC);
+ REGISTER_CC(CC_OVERHEAD);
+ REGISTER_CC(CC_SUBSUMED);
+ REGISTER_CC(CC_DONT_CARE);
+
+ REGISTER_CCS(CCS_SYSTEM);
+ REGISTER_CCS(CCS_GC);
+ REGISTER_CCS(CCS_OVERHEAD);
+ REGISTER_CCS(CCS_SUBSUMED);
+ REGISTER_CCS(CCS_DONT_CARE);
+ REGISTER_CCS(CCS_MAIN);
+
/* find all the "special" cost centre stacks, and make them children
* of CCS_MAIN.
*/
- ASSERT(CCS_MAIN->prevStack == 0);
+ ASSERT(CCS_LIST == CCS_MAIN);
+ CCS_LIST = CCS_LIST->prevStack;
+ CCS_MAIN->prevStack = NULL;
CCS_MAIN->root = CC_MAIN;
ccsSetSelected(CCS_MAIN);
DecCCS(CCS_MAIN);
- for (ccs = CCS_LIST; ccs != CCS_MAIN; ) {
+ for (ccs = CCS_LIST; ccs != NULL; ) {
next = ccs->prevStack;
- ccs->prevStack = 0;
+ ccs->prevStack = NULL;
ActualPush_(CCS_MAIN,ccs->cc,ccs);
ccs->root = ccs->cc;
ccs = next;
#include "BeginPrivate.h"
void initProfiling1 (void);
-void freeProfiling1 (void);
void initProfiling2 (void);
void endProfiling (void);
+void freeProfiling (void);
extern FILE *prof_file;
extern FILE *hp_file;
#include "RtsOpts.h"
#include "RtsUtils.h"
#include "Profiling.h"
+#include "RtsFlags.h"
#ifdef HAVE_CTYPE_H
#include <ctype.h>
Static function decls
-------------------------------------------------------------------------- */
-static int /* return NULL on error */
-open_stats_file (
- I_ arg,
- int argc, char *argv[],
- int rts_argc, char *rts_argv[],
- const char *FILENAME_FMT,
- FILE **file_ret);
+static void procRtsOpts (int rts_argc0, RtsOptsEnabledEnum enabled);
+
+static void normaliseRtsOpts (void);
+
+static void initStatsFile (FILE *f);
+
+static int openStatsFile (char *filename, const char *FILENAME_FMT,
+ FILE **file_ret);
+
+static StgWord64 decodeSize (const char *flag, nat offset,
+ StgWord64 min, StgWord64 max);
+
+static void bad_option (const char *s);
-static StgWord64 decodeSize(const char *flag, nat offset, StgWord64 min, StgWord64 max);
-static void bad_option(const char *s);
#ifdef TRACING
static void read_trace_flags(char *arg);
#endif
+static void errorUsage (void) GNU_ATTRIBUTE(__noreturn__);
+
/* -----------------------------------------------------------------------------
* Command-line option parsing routines.
* ---------------------------------------------------------------------------*/
" -Da DEBUG: apply",
" -Dl DEBUG: linker",
" -Dm DEBUG: stm",
-" -Dz DEBUG: stack squezing",
+" -Dz DEBUG: stack squeezing",
" -Dc DEBUG: program coverage",
" -Dr DEBUG: sparks",
"",
return(strcmp(a, b) == 0);
}
-static void
-splitRtsFlags(char *s, int *rts_argc, char *rts_argv[])
+static void splitRtsFlags(char *s)
{
char *c1, *c2;
if (c1 == c2) { break; }
- if (*rts_argc < MAX_RTS_ARGS-1) {
+ if (rts_argc < MAX_RTS_ARGS-1) {
s = stgMallocBytes(c2-c1+1, "RtsFlags.c:splitRtsFlags()");
strncpy(s, c1, c2-c1);
s[c2-c1] = '\0';
- rts_argv[(*rts_argc)++] = s;
+ rts_argv[rts_argc++] = s;
} else {
barf("too many RTS arguments (max %d)", MAX_RTS_ARGS-1);
}
} while (*c1 != '\0');
}
-void
-setupRtsFlags(int *argc, char *argv[], int *rts_argc, char *rts_argv[])
+/* -----------------------------------------------------------------------------
+ Parse the command line arguments, collecting options for the RTS.
+
+ On return:
+ - argv[] is *modified*, any RTS options have been stripped out
+ - *argc contains the new count of arguments in argv[]
+
+ - rts_argv[] (global) contains the collected RTS args
+ - rts_argc (global) contains the count of args in rts_argv
+
+ - prog_argv[] (global) contains the non-RTS args (== argv)
+ - prog_argc (global) contains the count of args in prog_argv
+
+ - prog_name (global) contains the basename of argv[0]
+
+ -------------------------------------------------------------------------- */
+
+void setupRtsFlags (int *argc, char *argv[])
{
- rtsBool error = rtsFalse;
- I_ mode;
- I_ arg, total_arg;
+ nat mode;
+ nat total_arg;
+ nat arg, rts_argc0;
setProgName (argv);
total_arg = *argc;
arg = 1;
*argc = 1;
- *rts_argc = 0;
+ rts_argc = 0;
+
+ rts_argc0 = rts_argc;
// process arguments from the ghc_rts_opts global variable first.
// (arguments from the GHCRTS environment variable and the command
// line override these).
{
if (ghc_rts_opts != NULL) {
- splitRtsFlags(ghc_rts_opts, rts_argc, rts_argv);
- }
+ splitRtsFlags(ghc_rts_opts);
+ // opts from ghc_rts_opts are always enabled:
+ procRtsOpts(rts_argc0, RtsOptsAll);
+ rts_argc0 = rts_argc;
+ }
}
// process arguments from the GHCRTS environment variable next
char *ghc_rts = getenv("GHCRTS");
if (ghc_rts != NULL) {
- if (rtsOptsEnabled != rtsOptsNone) {
- splitRtsFlags(ghc_rts, rts_argc, rts_argv);
- }
- else {
+ if (rtsOptsEnabled == RtsOptsNone) {
errorBelch("Warning: Ignoring GHCRTS variable as RTS options are disabled.\n Link with -rtsopts to enable them.");
// We don't actually exit, just warn
+ } else {
+ splitRtsFlags(ghc_rts);
+ procRtsOpts(rts_argc0, rtsOptsEnabled);
+ rts_argc0 = rts_argc;
}
- }
+ }
}
// Split arguments (argv) into PGM (argv) and RTS (rts_argv) parts
break;
}
else if (strequal("+RTS", argv[arg])) {
- if (rtsOptsEnabled != rtsOptsNone) {
- mode = RTS;
- }
- else {
- errorBelch("RTS options are disabled. Link with -rtsopts to enable them.");
- stg_exit(EXIT_FAILURE);
- }
- }
+ mode = RTS;
+ }
else if (strequal("-RTS", argv[arg])) {
mode = PGM;
}
- else if (mode == RTS && *rts_argc < MAX_RTS_ARGS-1) {
- rts_argv[(*rts_argc)++] = argv[arg];
+ else if (mode == RTS && rts_argc < MAX_RTS_ARGS-1) {
+ rts_argv[rts_argc++] = argv[arg];
}
else if (mode == PGM) {
argv[(*argc)++] = argv[arg];
argv[(*argc)++] = argv[arg];
}
argv[*argc] = (char *) 0;
- rts_argv[*rts_argc] = (char *) 0;
+ rts_argv[rts_argc] = (char *) 0;
+
+ procRtsOpts(rts_argc0, rtsOptsEnabled);
+
+ normaliseRtsOpts();
+
+ setProgArgv(*argc, argv);
+
+ if (RtsFlags.GcFlags.statsFile != NULL) {
+ initStatsFile (RtsFlags.GcFlags.statsFile);
+ }
+ if (RtsFlags.TickyFlags.tickyFile != NULL) {
+ initStatsFile (RtsFlags.GcFlags.statsFile);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * procRtsOpts: Process rts_argv between rts_argc0 and rts_argc.
+ * -------------------------------------------------------------------------- */
+
+static void procRtsOpts (int rts_argc0, RtsOptsEnabledEnum enabled)
+{
+ rtsBool error = rtsFalse;
+ int arg;
// Process RTS (rts_argv) part: mainly to determine statsfile
- for (arg = 0; arg < *rts_argc; arg++) {
- if (rts_argv[arg][0] != '-') {
+ for (arg = rts_argc0; arg < rts_argc; arg++) {
+ if (rts_argv[arg][0] != '-') {
fflush(stdout);
errorBelch("unexpected RTS argument: %s", rts_argv[arg]);
error = rtsTrue;
} else {
+ if (enabled == RtsOptsNone) {
+ errorBelch("RTS options are disabled. Link with -rtsopts to enable them.");
+ stg_exit(EXIT_FAILURE);
+ }
+
switch(rts_argv[arg][1]) {
case '-':
if (strequal("info", &rts_argv[arg][2])) {
break;
}
- if (rtsOptsEnabled != rtsOptsAll)
- {
+ if (enabled == RtsOptsSafeOnly) {
errorBelch("Most RTS options are disabled. Link with -rtsopts to enable them.");
stg_exit(EXIT_FAILURE);
}
stats:
{
int r;
- r = open_stats_file(arg, *argc, argv,
- *rts_argc, rts_argv, NULL,
- &RtsFlags.GcFlags.statsFile);
+ r = openStatsFile(rts_argv[arg]+2, NULL,
+ &RtsFlags.GcFlags.statsFile);
if (r == -1) { error = rtsTrue; }
}
break;
{
int r;
- r = open_stats_file(arg, *argc, argv,
- *rts_argc, rts_argv, TICKY_FILENAME_FMT,
- &RtsFlags.TickyFlags.tickyFile);
+ r = openStatsFile(rts_argv[arg]+2,
+ TICKY_FILENAME_FMT,
+ &RtsFlags.TickyFlags.tickyFile);
if (r == -1) { error = rtsTrue; }
}
) break;
}
}
+ if (error) errorUsage();
+}
+
+/* -----------------------------------------------------------------------------
+ * normaliseRtsOpts: Set some derived values, and make sure things are
+ * within sensible ranges.
+ * -------------------------------------------------------------------------- */
+
+static void normaliseRtsOpts (void)
+{
if (RtsFlags.MiscFlags.tickInterval < 0) {
RtsFlags.MiscFlags.tickInterval = 50;
}
if (RtsFlags.GcFlags.stkChunkBufferSize >
RtsFlags.GcFlags.stkChunkSize / 2) {
errorBelch("stack chunk buffer size (-kb) must be less than 50%% of the stack chunk size (-kc)");
- error = rtsTrue;
+ errorUsage();
}
+}
- if (error) {
- const char **p;
+static void errorUsage (void)
+{
+ const char **p;
- fflush(stdout);
- for (p = usage_text; *p; p++)
- errorBelch("%s", *p);
- stg_exit(EXIT_FAILURE);
- }
+ fflush(stdout);
+ for (p = usage_text; *p; p++)
+ errorBelch("%s", *p);
+ stg_exit(EXIT_FAILURE);
}
-
static void
stats_fprintf(FILE *f, char *s, ...)
{
va_end(ap);
}
-static int /* return -1 on error */
-open_stats_file (
- I_ arg,
- int argc, char *argv[],
- int rts_argc, char *rts_argv[],
- const char *FILENAME_FMT,
- FILE **file_ret)
+/* -----------------------------------------------------------------------------
+ * openStatsFile: open a file in which to put some runtime stats
+ * -------------------------------------------------------------------------- */
+
+static int // return -1 on error
+openStatsFile (char *filename, // filename, or NULL
+ const char *filename_fmt, // if filename == NULL, use
+ // this fmt with sprintf to
+ // generate the filename. %s
+ // expands to the program name.
+ FILE **file_ret) // return the FILE*
{
FILE *f = NULL;
- if (strequal(rts_argv[arg]+2, "stderr")
- || (FILENAME_FMT == NULL && rts_argv[arg][2] == '\0')) {
+ if (strequal(filename, "stderr")
+ || (filename_fmt == NULL && *filename == '\0')) {
f = NULL; /* NULL means use debugBelch */
} else {
- if (rts_argv[arg][2] != '\0') { /* stats file specified */
- f = fopen(rts_argv[arg]+2,"w");
+ if (*filename != '\0') { /* stats file specified */
+ f = fopen(filename,"w");
} else {
char stats_filename[STATS_FILENAME_MAXLEN]; /* default <program>.<ext> */
- sprintf(stats_filename, FILENAME_FMT, argv[0]);
+ sprintf(stats_filename, filename_fmt, prog_name);
f = fopen(stats_filename,"w");
}
if (f == NULL) {
- errorBelch("Can't open stats file %s\n", rts_argv[arg]+2);
+ errorBelch("Can't open stats file %s\n", filename);
return -1;
}
}
*file_ret = f;
- {
- /* Write argv and rtsv into start of stats file */
- int count;
- for(count = 0; count < argc; count++) {
- stats_fprintf(f, "%s ", argv[count]);
- }
- stats_fprintf(f, "+RTS ");
- for(count = 0; count < rts_argc; count++)
- stats_fprintf(f, "%s ", rts_argv[count]);
- stats_fprintf(f, "\n");
- }
return 0;
}
+/* -----------------------------------------------------------------------------
+ * initStatsFile: write a line to the file containing the program name
+ * and the arguments it was invoked with.
+-------------------------------------------------------------------------- */
+static void initStatsFile (FILE *f)
+{
+ /* Write prog_argv and rts_argv into start of stats file */
+ int count;
+ for (count = 0; count < prog_argc; count++) {
+ stats_fprintf(f, "%s ", prog_argv[count]);
+ }
+ stats_fprintf(f, "+RTS ");
+ for (count = 0; count < rts_argc; count++)
+ stats_fprintf(f, "%s ", rts_argv[count]);
+ stats_fprintf(f, "\n");
+}
+
+/* -----------------------------------------------------------------------------
+ * decodeSize: parse a string containing a size, like 300K or 1.2M
+-------------------------------------------------------------------------- */
static StgWord64
decodeSize(const char *flag, nat offset, StgWord64 min, StgWord64 max)
void
setProgArgv(int argc, char *argv[])
{
- /* Usually this is done by startupHaskell, so we don't need to call this.
- However, sometimes Hugs wants to change the arguments which Haskell
- getArgs >>= ... will be fed. So you can do that by calling here
- _after_ calling startupHaskell.
- */
- prog_argc = argc;
- prog_argv = argv;
- setProgName(prog_argv);
+ prog_argc = argc;
+ prog_argv = argv;
+ setProgName(prog_argv);
}
/* These functions record and recall the full arguments, including the
--- /dev/null
+/* -----------------------------------------------------------------------------
+ *
+ * (c) The AQUA Project, Glasgow University, 1994-1997
+ * (c) The GHC Team, 1998-2006
+ *
+ * Functions for parsing the argument list.
+ *
+ * ---------------------------------------------------------------------------*/
+
+#ifndef RTSFLAGS_H
+#define RTSFLAGS_H
+
+#include "BeginPrivate.h"
+
+/* Routines that operate-on/to-do-with RTS flags: */
+
+void initRtsFlagsDefaults (void);
+void setupRtsFlags (int *argc, char *argv[]);
+void setProgName (char *argv[]);
+
+#include "EndPrivate.h"
+
+#endif /* RTSFLAGS_H */
# include <windows.h>
#endif
-extern void __stginit_ZCMain(void);
-
/* Annoying global vars for passing parameters to real_main() below
* This is to get around problem with Windows SEH, see hs_main(). */
static int progargc;
static char **progargv;
-static void (*progmain_init)(void); /* This will be __stginit_ZCMain */
static StgClosure *progmain_closure; /* This will be ZCMain_main_closure */
/* Hack: we assume that we're building a batch-mode system unless
SchedulerStatus status;
/* all GranSim/GUM init is done in startupHaskell; sets IAmMainThread! */
- startupHaskell(progargc,progargv,progmain_init);
+ startupHaskell(progargc,progargv,NULL);
/* kick off the computation by creating the main thread with a pointer
to mainIO_closure representing the computation of the overall program;
* This gets called from a tiny main function which gets linked into each
* compiled Haskell program that uses a Haskell main function.
*
- * We expect the caller to pass __stginit_ZCMain for main_init and
- * ZCMain_main_closure for main_closure. The reason we cannot refer to
- * these symbols directly is because we're inside the rts and we do not know
- * for sure that we'll be using a Haskell main function.
+ * We expect the caller to pass ZCMain_main_closure for
+ * main_closure. The reason we cannot refer to this symbol directly
+ * is because we're inside the rts and we do not know for sure that
+ * we'll be using a Haskell main function.
*/
-int hs_main(int argc, char *argv[], void (*main_init)(void), StgClosure *main_closure)
+int hs_main(int argc, char *argv[], StgClosure *main_closure)
{
/* We do this dance with argc and argv as otherwise the SEH exception
stuff (the BEGIN/END CATCH below) on Windows gets confused */
progargc = argc;
progargv = argv;
- progmain_init = main_init;
progmain_closure = main_closure;
#if defined(mingw32_HOST_OS)
* The entry point for Haskell programs that use a Haskell main function
* -------------------------------------------------------------------------- */
-int hs_main(int argc, char *argv[], void (*main_init)(void), StgClosure *main_closure);
+int hs_main(int argc, char *argv[], StgClosure *main_closure);
#endif /* RTSMAIN_H */
#include "HsFFI.h"
#include "sm/Storage.h"
+#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Prelude.h"
#include "Schedule.h" /* initScheduler */
/* Parse the flags, separating the RTS flags from the programs args */
if (argc != NULL && argv != NULL) {
setFullProgArgv(*argc,*argv);
- setupRtsFlags(argc, *argv, &rts_argc, rts_argv);
- setProgArgv(*argc,*argv);
+ setupRtsFlags(argc, *argv);
}
/* Initialise the stats department, phase 1 */
x86_init_fpu();
#endif
+ startupHpc();
+
+ // This must be done after module initialisation.
+ // ToDo: make this work in the presence of multiple hs_add_root()s.
+ initProfiling2();
+
+ // ditto.
+#if defined(THREADED_RTS)
+ ioManagerStart();
+#endif
+
/* Record initialization times */
stat_endInit();
}
// Compatibility interface
void
-startupHaskell(int argc, char *argv[], void (*init_root)(void))
+startupHaskell(int argc, char *argv[], void (*init_root)(void) STG_UNUSED)
{
hs_init(&argc, &argv);
- if(init_root)
- hs_add_root(init_root);
}
/* -----------------------------------------------------------------------------
- Per-module initialisation
-
- This process traverses all the compiled modules in the program
- starting with "Main", and performing per-module initialisation for
- each one.
-
- So far, two things happen at initialisation time:
-
- - we register stable names for each foreign-exported function
- in that module. This prevents foreign-exported entities, and
- things they depend on, from being garbage collected.
-
- - we supply a unique integer to each statically declared cost
- centre and cost centre stack in the program.
-
- The code generator inserts a small function "__stginit_<module>" in each
- module and calls the registration functions in each of the modules it
- imports.
-
- The init* functions are compiled in the same way as STG code,
- i.e. without normal C call/return conventions. Hence we must use
- StgRun to call this stuff.
+ hs_add_root: backwards compatibility. (see #3252)
-------------------------------------------------------------------------- */
-/* The init functions use an explicit stack...
- */
-#define INIT_STACK_BLOCKS 4
-static StgFunPtr *init_stack = NULL;
-
void
-hs_add_root(void (*init_root)(void))
+hs_add_root(void (*init_root)(void) STG_UNUSED)
{
- bdescr *bd;
- nat init_sp;
- Capability *cap;
-
- cap = rts_lock();
-
- if (hs_init_count <= 0) {
- barf("hs_add_root() must be called after hs_init()");
- }
-
- /* The initialisation stack grows downward, with sp pointing
- to the last occupied word */
- init_sp = INIT_STACK_BLOCKS*BLOCK_SIZE_W;
- bd = allocGroup_lock(INIT_STACK_BLOCKS);
- init_stack = (StgFunPtr *)bd->start;
- init_stack[--init_sp] = (StgFunPtr)stg_init_finish;
- if (init_root != NULL) {
- init_stack[--init_sp] = (StgFunPtr)init_root;
- }
-
- cap->r.rSp = (P_)(init_stack + init_sp);
- StgRun((StgFunPtr)stg_init, &cap->r);
-
- freeGroup_lock(bd);
-
- startupHpc();
-
- // This must be done after module initialisation.
- // ToDo: make this work in the presence of multiple hs_add_root()s.
- initProfiling2();
-
- rts_unlock(cap);
-
- // ditto.
-#if defined(THREADED_RTS)
- ioManagerStart();
-#endif
+ /* nothing */
}
/* ----------------------------------------------------------------------------
OnExitHook();
+ // sanity check
+#if defined(DEBUG)
+ checkFPUStack();
+#endif
+
// Free the full argv storage
freeFullProgArgv();
#endif
endProfiling();
- freeProfiling1();
+ freeProfiling();
#ifdef PROFILING
// Originally, this was in report_ccs_profiling(). Now, retainer
return 0;
#endif
}
+
+// Used for detecting a non-empty FPU stack on x86 (see #4914)
+void checkFPUStack(void)
+{
+#ifdef x86_HOST_ARCH
+ static unsigned char buf[108];
+ asm("FSAVE %0":"=m" (buf));
+
+ if(buf[8]!=255 || buf[9]!=255) {
+ errorBelch("NONEMPTY FPU Stack, TAG = %x %x\n",buf[8],buf[9]);
+ abort();
+ }
+#endif
+}
+
/* Alternate to raise(3) for threaded rts, for OpenBSD */
int genericRaise(int sig);
+void checkFPUStack(void);
+
#include "EndPrivate.h"
#endif /* RTSUTILS_H */
/************************************************************************/
-void stmPreGCHook() {
- nat i;
-
+void stmPreGCHook (Capability *cap) {
lock_stm(NO_TREC);
TRACE("stmPreGCHook");
- for (i = 0; i < n_capabilities; i ++) {
- Capability *cap = &capabilities[i];
- cap -> free_tvar_watch_queues = END_STM_WATCH_QUEUE;
- cap -> free_trec_chunks = END_STM_CHUNK_LIST;
- cap -> free_trec_headers = NO_TREC;
- }
+ cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
+ cap->free_trec_chunks = END_STM_CHUNK_LIST;
+ cap->free_trec_headers = NO_TREC;
unlock_stm(NO_TREC);
}
--------------
*/
-void stmPreGCHook(void);
+void stmPreGCHook(Capability *cap);
/*----------------------------------------------------------------------
void
scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
{
-#if defined(THREADED_RTS)
tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
// move this thread from now on.
+#if defined(THREADED_RTS)
cpu %= RtsFlags.ParFlags.nNodes;
if (cpu == cap->no) {
appendToRunQueue(cap,tso);
#endif
}
+void markScheduler (evac_fn evac USED_IF_NOT_THREADS,
+ void *user USED_IF_NOT_THREADS)
+{
+#if !defined(THREADED_RTS)
+ evac(user, (StgClosure **)(void *)&blocked_queue_hd);
+ evac(user, (StgClosure **)(void *)&blocked_queue_tl);
+ evac(user, (StgClosure **)(void *)&sleeping_queue);
+#endif
+}
+
/* -----------------------------------------------------------------------------
performGC
void initScheduler (void);
void exitScheduler (rtsBool wait_foreign);
void freeScheduler (void);
+void markScheduler (evac_fn evac, void *user);
// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);
pruned_sparks++; // discard spark
cap->sparks_fizzled++;
}
- } else if (HEAP_ALLOCED(spark) &&
- (Bdescr((P_)spark)->flags & BF_EVACUATED)) {
- if (closure_SHOULD_SPARK(spark)) {
- elements[botInd] = spark; // keep entry (new address)
- botInd++;
- n++;
+ } else if (HEAP_ALLOCED(spark)) {
+ if ((Bdescr((P_)spark)->flags & BF_EVACUATED)) {
+ if (closure_SHOULD_SPARK(spark)) {
+ elements[botInd] = spark; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_fizzled++;
+ }
} else {
pruned_sparks++; // discard spark
- cap->sparks_fizzled++;
+ cap->sparks_gcd++;
}
} else {
- pruned_sparks++; // discard spark
- cap->sparks_gcd++;
+ if (INFO_PTR_TO_STRUCT(info)->type == THUNK_STATIC) {
+ if (*THUNK_STATIC_LINK(spark) != NULL) {
+ elements[botInd] = spark; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_gcd++;
+ }
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_fizzled++;
+ }
}
}
#include "GetTime.h"
#include "sm/Storage.h"
#include "sm/GC.h" // gc_alloc_block_sync, whitehole_spin
+#include "sm/GCThread.h"
+#include "sm/BlockAlloc.h"
#if USE_PAPI
#include "Papi.h"
#define TICK_TO_DBL(t) ((double)(t) / TICKS_PER_SECOND)
-static Ticks ElapsedTimeStart = 0;
+static Ticks
+ start_init_cpu, start_init_elapsed,
+ end_init_cpu, end_init_elapsed,
+ start_exit_cpu, start_exit_elapsed,
+ end_exit_cpu, end_exit_elapsed;
-static Ticks InitUserTime = 0;
-static Ticks InitElapsedTime = 0;
-static Ticks InitElapsedStamp = 0;
+static Ticks GC_tot_cpu = 0;
-static Ticks MutUserTime = 0;
-static Ticks MutElapsedTime = 0;
-static Ticks MutElapsedStamp = 0;
-
-static Ticks ExitUserTime = 0;
-static Ticks ExitElapsedTime = 0;
-
-static StgWord64 GC_tot_alloc = 0;
-static StgWord64 GC_tot_copied = 0;
+static StgWord64 GC_tot_alloc = 0;
+static StgWord64 GC_tot_copied = 0;
static StgWord64 GC_par_max_copied = 0;
static StgWord64 GC_par_avg_copied = 0;
-static Ticks GC_start_time = 0, GC_tot_time = 0; /* User GC Time */
-static Ticks GCe_start_time = 0, GCe_tot_time = 0; /* Elapsed GC time */
-
#ifdef PROFILING
-static Ticks RP_start_time = 0, RP_tot_time = 0; /* retainer prof user time */
-static Ticks RPe_start_time = 0, RPe_tot_time = 0; /* retainer prof elap time */
+static Ticks RP_start_time = 0, RP_tot_time = 0; // retainer prof user time
+static Ticks RPe_start_time = 0, RPe_tot_time = 0; // retainer prof elap time
static Ticks HC_start_time, HC_tot_time = 0; // heap census prof user time
static Ticks HCe_start_time, HCe_tot_time = 0; // heap census prof elap time
#define PROF_VAL(x) 0
#endif
-static lnat MaxResidency = 0; // in words; for stats only
-static lnat AvgResidency = 0;
-static lnat ResidencySamples = 0; // for stats only
-static lnat MaxSlop = 0;
+static lnat max_residency = 0; // in words; for stats only
+static lnat avg_residency = 0;
+static lnat residency_samples = 0; // for stats only
+static lnat max_slop = 0;
-static lnat GC_start_faults = 0, GC_end_faults = 0;
+static lnat GC_end_faults = 0;
-static Ticks *GC_coll_times = NULL;
-static Ticks *GC_coll_etimes = NULL;
+static Ticks *GC_coll_cpu = NULL;
+static Ticks *GC_coll_elapsed = NULL;
+static Ticks *GC_coll_max_pause = NULL;
static void statsFlush( void );
static void statsClose( void );
-Ticks stat_getElapsedGCTime(void)
-{
- return GCe_tot_time;
-}
+/* -----------------------------------------------------------------------------
+ Current elapsed time
+ ------------------------------------------------------------------------- */
Ticks stat_getElapsedTime(void)
{
- return getProcessElapsedTime() - ElapsedTimeStart;
+ return getProcessElapsedTime() - start_init_elapsed;
}
-/* mut_user_time_during_GC() and mut_user_time()
- *
- * The former function can be used to get the current mutator time
- * *during* a GC, i.e. between stat_startGC and stat_endGC. This is
- * used in the heap profiler for accurately time stamping the heap
- * sample.
- *
- * ATTENTION: mut_user_time_during_GC() relies on GC_start_time being
- * defined in stat_startGC() - to minimise system calls,
- * GC_start_time is, however, only defined when really needed (check
- * stat_startGC() for details)
- */
-double
-mut_user_time_during_GC( void )
-{
- return TICK_TO_DBL(GC_start_time - GC_tot_time - PROF_VAL(RP_tot_time + HC_tot_time));
-}
+/* ---------------------------------------------------------------------------
+ Measure the current MUT time, for profiling
+ ------------------------------------------------------------------------ */
double
mut_user_time( void )
{
- Ticks user;
- user = getProcessCPUTime();
- return TICK_TO_DBL(user - GC_tot_time - PROF_VAL(RP_tot_time + HC_tot_time));
+ Ticks cpu;
+ cpu = getProcessCPUTime();
+ return TICK_TO_DBL(cpu - GC_tot_cpu - PROF_VAL(RP_tot_time + HC_tot_time));
}
#ifdef PROFILING
/*
- mut_user_time_during_RP() is similar to mut_user_time_during_GC();
- it returns the MUT time during retainer profiling.
+ mut_user_time_during_RP() returns the MUT time during retainer profiling.
The same is for mut_user_time_during_HC();
*/
double
mut_user_time_during_RP( void )
{
- return TICK_TO_DBL(RP_start_time - GC_tot_time - RP_tot_time - HC_tot_time);
+ return TICK_TO_DBL(RP_start_time - GC_tot_cpu - RP_tot_time - HC_tot_time);
}
double
mut_user_time_during_heap_census( void )
{
- return TICK_TO_DBL(HC_start_time - GC_tot_time - RP_tot_time - HC_tot_time);
+ return TICK_TO_DBL(HC_start_time - GC_tot_cpu - RP_tot_time - HC_tot_time);
}
#endif /* PROFILING */
-// initStats0() has no dependencies, it can be called right at the beginning
+/* ---------------------------------------------------------------------------
+ initStats0() has no dependencies, it can be called right at the beginning
+ ------------------------------------------------------------------------ */
+
void
initStats0(void)
{
- ElapsedTimeStart = 0;
-
- InitUserTime = 0;
- InitElapsedTime = 0;
- InitElapsedStamp = 0;
-
- MutUserTime = 0;
- MutElapsedTime = 0;
- MutElapsedStamp = 0;
+ start_init_cpu = 0;
+ start_init_elapsed = 0;
+ end_init_cpu = 0;
+ end_init_elapsed = 0;
- ExitUserTime = 0;
- ExitElapsedTime = 0;
+ start_exit_cpu = 0;
+ start_exit_elapsed = 0;
+ end_exit_cpu = 0;
+ end_exit_elapsed = 0;
GC_tot_alloc = 0;
GC_tot_copied = 0;
GC_par_max_copied = 0;
GC_par_avg_copied = 0;
- GC_start_time = 0;
- GC_tot_time = 0;
- GCe_start_time = 0;
- GCe_tot_time = 0;
+ GC_tot_cpu = 0;
#ifdef PROFILING
RP_start_time = 0;
HCe_tot_time = 0;
#endif
- MaxResidency = 0;
- AvgResidency = 0;
- ResidencySamples = 0;
- MaxSlop = 0;
+ max_residency = 0;
+ avg_residency = 0;
+ residency_samples = 0;
+ max_slop = 0;
- GC_start_faults = 0;
GC_end_faults = 0;
}
-// initStats1() can be called after setupRtsFlags()
+/* ---------------------------------------------------------------------------
+ initStats1() can be called after setupRtsFlags()
+ ------------------------------------------------------------------------ */
+
void
initStats1 (void)
{
statsPrintf(" Alloc Copied Live GC GC TOT TOT Page Flts\n");
statsPrintf(" bytes bytes bytes user elap user elap\n");
}
- GC_coll_times =
+ GC_coll_cpu =
+ (Ticks *)stgMallocBytes(
+ sizeof(Ticks)*RtsFlags.GcFlags.generations,
+ "initStats");
+ GC_coll_elapsed =
(Ticks *)stgMallocBytes(
sizeof(Ticks)*RtsFlags.GcFlags.generations,
"initStats");
- GC_coll_etimes =
+ GC_coll_max_pause =
(Ticks *)stgMallocBytes(
sizeof(Ticks)*RtsFlags.GcFlags.generations,
"initStats");
for (i = 0; i < RtsFlags.GcFlags.generations; i++) {
- GC_coll_times[i] = 0;
- GC_coll_etimes[i] = 0;
+ GC_coll_cpu[i] = 0;
+ GC_coll_elapsed[i] = 0;
+ GC_coll_max_pause[i] = 0;
}
}
void
stat_startInit(void)
{
- Ticks elapsed;
-
- elapsed = getProcessElapsedTime();
- ElapsedTimeStart = elapsed;
+ getProcessTimes(&start_init_cpu, &start_init_elapsed);
}
void
stat_endInit(void)
{
- Ticks user, elapsed;
-
- getProcessTimes(&user, &elapsed);
+ getProcessTimes(&end_init_cpu, &end_init_elapsed);
- InitUserTime = user;
- InitElapsedStamp = elapsed;
- if (ElapsedTimeStart > elapsed) {
- InitElapsedTime = 0;
- } else {
- InitElapsedTime = elapsed - ElapsedTimeStart;
- }
#if USE_PAPI
/* We start counting events for the mutator
* when garbage collection starts
void
stat_startExit(void)
{
- Ticks user, elapsed;
-
- getProcessTimes(&user, &elapsed);
-
- MutElapsedStamp = elapsed;
- MutElapsedTime = elapsed - GCe_tot_time -
- PROF_VAL(RPe_tot_time + HCe_tot_time) - InitElapsedStamp;
- if (MutElapsedTime < 0) { MutElapsedTime = 0; } /* sometimes -0.00 */
-
- MutUserTime = user - GC_tot_time -
- PROF_VAL(RP_tot_time + HC_tot_time) - InitUserTime;
- if (MutUserTime < 0) { MutUserTime = 0; }
+ getProcessTimes(&start_exit_cpu, &start_exit_elapsed);
#if USE_PAPI
/* We stop counting mutator events
/* This flag is needed, because GC is run once more after this function */
papi_is_reporting = 0;
-
#endif
}
void
stat_endExit(void)
{
- Ticks user, elapsed;
-
- getProcessTimes(&user, &elapsed);
-
- ExitUserTime = user - MutUserTime - GC_tot_time - PROF_VAL(RP_tot_time + HC_tot_time) - InitUserTime;
- ExitElapsedTime = elapsed - MutElapsedStamp;
- if (ExitUserTime < 0) {
- ExitUserTime = 0;
- }
- if (ExitElapsedTime < 0) {
- ExitElapsedTime = 0;
- }
+ getProcessTimes(&end_exit_cpu, &end_exit_elapsed);
}
/* -----------------------------------------------------------------------------
static nat rub_bell = 0;
-/* initialise global variables needed during GC
- *
- * * GC_start_time is read in mut_user_time_during_GC(), which in turn is
- * needed if either PROFILING or DEBUGing is enabled
- */
void
-stat_startGC(void)
+stat_startGC (gc_thread *gct)
{
nat bell = RtsFlags.GcFlags.ringBell;
}
}
- if (RtsFlags.GcFlags.giveStats != NO_GC_STATS
- || RtsFlags.ProfFlags.doHeapProfile)
- // heap profiling needs GC_tot_time
- {
- getProcessTimes(&GC_start_time, &GCe_start_time);
- if (RtsFlags.GcFlags.giveStats) {
- GC_start_faults = getPageFaults();
- }
- }
-
#if USE_PAPI
if(papi_is_reporting) {
/* Switch to counting GC events */
}
#endif
+ getProcessTimes(&gct->gc_start_cpu, &gct->gc_start_elapsed);
+ gct->gc_start_thread_cpu = getThreadCPUTime();
+
+ if (RtsFlags.GcFlags.giveStats != NO_GC_STATS)
+ {
+ gct->gc_start_faults = getPageFaults();
+ }
+}
+
+void
+stat_gcWorkerThreadStart (gc_thread *gct)
+{
+ if (RtsFlags.GcFlags.giveStats != NO_GC_STATS)
+ {
+ getProcessTimes(&gct->gc_start_cpu, &gct->gc_start_elapsed);
+ gct->gc_start_thread_cpu = getThreadCPUTime();
+ }
+}
+
+void
+stat_gcWorkerThreadDone (gc_thread *gct)
+{
+ Ticks thread_cpu, elapsed, gc_cpu, gc_elapsed;
+
+ if (RtsFlags.GcFlags.giveStats != NO_GC_STATS)
+ {
+ elapsed = getProcessElapsedTime();
+ thread_cpu = getThreadCPUTime();
+
+ gc_cpu = thread_cpu - gct->gc_start_thread_cpu;
+ gc_elapsed = elapsed - gct->gc_start_elapsed;
+
+ taskDoneGC(gct->cap->running_task, gc_cpu, gc_elapsed);
+ }
}
/* -----------------------------------------------------------------------------
-------------------------------------------------------------------------- */
void
-stat_endGC (lnat alloc, lnat live, lnat copied, lnat gen,
+stat_endGC (gc_thread *gct,
+ lnat alloc, lnat live, lnat copied, nat gen,
lnat max_copied, lnat avg_copied, lnat slop)
{
if (RtsFlags.GcFlags.giveStats != NO_GC_STATS ||
RtsFlags.ProfFlags.doHeapProfile)
// heap profiling needs GC_tot_time
{
- Ticks time, etime, gc_time, gc_etime;
-
- getProcessTimes(&time, &etime);
- gc_time = time - GC_start_time;
- gc_etime = etime - GCe_start_time;
+ Ticks cpu, elapsed, thread_gc_cpu, gc_cpu, gc_elapsed;
- if (RtsFlags.GcFlags.giveStats == VERBOSE_GC_STATS) {
+ getProcessTimes(&cpu, &elapsed);
+ gc_elapsed = elapsed - gct->gc_start_elapsed;
+
+ thread_gc_cpu = getThreadCPUTime() - gct->gc_start_thread_cpu;
+
+ gc_cpu = cpu - gct->gc_start_cpu;
+
+ taskDoneGC(gct->cap->running_task, thread_gc_cpu, gc_elapsed);
+
+ if (RtsFlags.GcFlags.giveStats == VERBOSE_GC_STATS) {
nat faults = getPageFaults();
statsPrintf("%9ld %9ld %9ld",
alloc*sizeof(W_), copied*sizeof(W_),
live*sizeof(W_));
- statsPrintf(" %5.2f %5.2f %7.2f %7.2f %4ld %4ld (Gen: %2ld)\n",
- TICK_TO_DBL(gc_time),
- TICK_TO_DBL(gc_etime),
- TICK_TO_DBL(time),
- TICK_TO_DBL(etime - ElapsedTimeStart),
- faults - GC_start_faults,
- GC_start_faults - GC_end_faults,
- gen);
-
- GC_end_faults = faults;
+ statsPrintf(" %5.2f %5.2f %7.2f %7.2f %4ld %4ld (Gen: %2d)\n",
+ TICK_TO_DBL(gc_cpu),
+ TICK_TO_DBL(gc_elapsed),
+ TICK_TO_DBL(cpu),
+ TICK_TO_DBL(elapsed - start_init_elapsed),
+ faults - gct->gc_start_faults,
+ gct->gc_start_faults - GC_end_faults,
+ gen);
+
+ GC_end_faults = faults;
statsFlush();
}
- GC_coll_times[gen] += gc_time;
- GC_coll_etimes[gen] += gc_etime;
+ GC_coll_cpu[gen] += gc_cpu;
+ GC_coll_elapsed[gen] += gc_elapsed;
+ if (GC_coll_max_pause[gen] < gc_elapsed) {
+ GC_coll_max_pause[gen] = gc_elapsed;
+ }
GC_tot_copied += (StgWord64) copied;
GC_tot_alloc += (StgWord64) alloc;
GC_par_max_copied += (StgWord64) max_copied;
GC_par_avg_copied += (StgWord64) avg_copied;
- GC_tot_time += gc_time;
- GCe_tot_time += gc_etime;
-
-#if defined(THREADED_RTS)
- {
- Task *task;
- if ((task = myTask()) != NULL) {
- task->gc_time += gc_time;
- task->gc_etime += gc_etime;
- }
- }
-#endif
+ GC_tot_cpu += gc_cpu;
if (gen == RtsFlags.GcFlags.generations-1) { /* major GC? */
- if (live > MaxResidency) {
- MaxResidency = live;
+ if (live > max_residency) {
+ max_residency = live;
}
- ResidencySamples++;
- AvgResidency += live;
+ residency_samples++;
+ avg_residency += live;
}
- if (slop > MaxSlop) MaxSlop = slop;
+ if (slop > max_slop) max_slop = slop;
}
if (rub_bell) {
statsPrintf(" (SLOW_CALLS_" #arity ") %% of (TOTAL_CALLS) : %.1f%%\n", \
SLOW_CALLS_##arity * 100.0/TOTAL_CALLS)
-extern lnat hw_alloc_blocks;
-
void
stat_exit(int alloc)
{
+ generation *gen;
+ Ticks gc_cpu = 0;
+ Ticks gc_elapsed = 0;
+ Ticks init_cpu = 0;
+ Ticks init_elapsed = 0;
+ Ticks mut_cpu = 0;
+ Ticks mut_elapsed = 0;
+ Ticks exit_cpu = 0;
+ Ticks exit_elapsed = 0;
+
if (RtsFlags.GcFlags.giveStats != NO_GC_STATS) {
char temp[BIG_STRING_LEN];
- Ticks time;
- Ticks etime;
- nat g, total_collections = 0;
+ Ticks tot_cpu;
+ Ticks tot_elapsed;
+ nat i, g, total_collections = 0;
- getProcessTimes( &time, &etime );
- etime -= ElapsedTimeStart;
+ getProcessTimes( &tot_cpu, &tot_elapsed );
+ tot_elapsed -= start_init_elapsed;
GC_tot_alloc += alloc;
for (g = 0; g < RtsFlags.GcFlags.generations; g++)
total_collections += generations[g].collections;
- /* avoid divide by zero if time is measured as 0.00 seconds -- SDM */
- if (time == 0.0) time = 1;
- if (etime == 0.0) etime = 1;
+ /* avoid divide by zero if tot_cpu is measured as 0.00 seconds -- SDM */
+ if (tot_cpu == 0.0) tot_cpu = 1;
+ if (tot_elapsed == 0.0) tot_elapsed = 1;
if (RtsFlags.GcFlags.giveStats >= VERBOSE_GC_STATS) {
statsPrintf("%9ld %9.9s %9.9s", (lnat)alloc*sizeof(W_), "", "");
statsPrintf(" %5.2f %5.2f\n\n", 0.0, 0.0);
}
+ for (i = 0; i < RtsFlags.GcFlags.generations; i++) {
+ gc_cpu += GC_coll_cpu[i];
+ gc_elapsed += GC_coll_elapsed[i];
+ }
+
if (RtsFlags.GcFlags.giveStats >= SUMMARY_GC_STATS) {
showStgWord64(GC_tot_alloc*sizeof(W_),
temp, rtsTrue/*commas*/);
temp, rtsTrue/*commas*/);
statsPrintf("%16s bytes copied during GC\n", temp);
- if ( ResidencySamples > 0 ) {
- showStgWord64(MaxResidency*sizeof(W_),
+ if ( residency_samples > 0 ) {
+ showStgWord64(max_residency*sizeof(W_),
temp, rtsTrue/*commas*/);
statsPrintf("%16s bytes maximum residency (%ld sample(s))\n",
- temp, ResidencySamples);
+ temp, residency_samples);
}
- showStgWord64(MaxSlop*sizeof(W_), temp, rtsTrue/*commas*/);
+ showStgWord64(max_slop*sizeof(W_), temp, rtsTrue/*commas*/);
statsPrintf("%16s bytes maximum slop\n", temp);
statsPrintf("%16ld MB total memory in use (%ld MB lost due to fragmentation)\n\n",
(peak_mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W - hw_alloc_blocks * BLOCK_SIZE_W) / (1024 * 1024 / sizeof(W_)));
/* Print garbage collections in each gen */
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- statsPrintf(" Generation %d: %5d collections, %5d parallel, %5.2fs, %5.2fs elapsed\n",
- g, generations[g].collections,
- generations[g].par_collections,
- TICK_TO_DBL(GC_coll_times[g]),
- TICK_TO_DBL(GC_coll_etimes[g]));
- }
+ statsPrintf(" Tot time (elapsed) Avg pause Max pause\n");
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ gen = &generations[g];
+ statsPrintf(" Gen %2d %5d colls, %5d par %5.2fs %5.2fs %3.4fs %3.4fs\n",
+ gen->no,
+ gen->collections,
+ gen->par_collections,
+ TICK_TO_DBL(GC_coll_cpu[g]),
+ TICK_TO_DBL(GC_coll_elapsed[g]),
+ gen->collections == 0 ? 0 : TICK_TO_DBL(GC_coll_elapsed[g] / gen->collections),
+ TICK_TO_DBL(GC_coll_max_pause[g]));
+ }
#if defined(THREADED_RTS)
if (RtsFlags.ParFlags.parGcEnabled) {
);
}
#endif
-
- statsPrintf("\n");
+ statsPrintf("\n");
#if defined(THREADED_RTS)
{
}
#endif
- statsPrintf(" INIT time %6.2fs (%6.2fs elapsed)\n",
- TICK_TO_DBL(InitUserTime), TICK_TO_DBL(InitElapsedTime));
- statsPrintf(" MUT time %6.2fs (%6.2fs elapsed)\n",
- TICK_TO_DBL(MutUserTime), TICK_TO_DBL(MutElapsedTime));
- statsPrintf(" GC time %6.2fs (%6.2fs elapsed)\n",
- TICK_TO_DBL(GC_tot_time), TICK_TO_DBL(GCe_tot_time));
+ init_cpu = end_init_cpu - start_init_cpu;
+ init_elapsed = end_init_elapsed - start_init_elapsed;
+
+ exit_cpu = end_exit_cpu - start_exit_cpu;
+ exit_elapsed = end_exit_elapsed - start_exit_elapsed;
+
+ statsPrintf(" INIT time %6.2fs (%6.2fs elapsed)\n",
+ TICK_TO_DBL(init_cpu), TICK_TO_DBL(init_elapsed));
+
+ mut_elapsed = start_exit_elapsed - end_init_elapsed - gc_elapsed;
+
+ mut_cpu = start_exit_cpu - end_init_cpu - gc_cpu
+ - PROF_VAL(RP_tot_time + HC_tot_time);
+ if (mut_cpu < 0) { mut_cpu = 0; }
+
+ statsPrintf(" MUT time %6.2fs (%6.2fs elapsed)\n",
+ TICK_TO_DBL(mut_cpu), TICK_TO_DBL(mut_elapsed));
+ statsPrintf(" GC time %6.2fs (%6.2fs elapsed)\n",
+ TICK_TO_DBL(gc_cpu), TICK_TO_DBL(gc_elapsed));
+
#ifdef PROFILING
- statsPrintf(" RP time %6.2fs (%6.2fs elapsed)\n",
+ statsPrintf(" RP time %6.2fs (%6.2fs elapsed)\n",
TICK_TO_DBL(RP_tot_time), TICK_TO_DBL(RPe_tot_time));
- statsPrintf(" PROF time %6.2fs (%6.2fs elapsed)\n",
+ statsPrintf(" PROF time %6.2fs (%6.2fs elapsed)\n",
TICK_TO_DBL(HC_tot_time), TICK_TO_DBL(HCe_tot_time));
#endif
- statsPrintf(" EXIT time %6.2fs (%6.2fs elapsed)\n",
- TICK_TO_DBL(ExitUserTime), TICK_TO_DBL(ExitElapsedTime));
- statsPrintf(" Total time %6.2fs (%6.2fs elapsed)\n\n",
- TICK_TO_DBL(time), TICK_TO_DBL(etime));
- statsPrintf(" %%GC time %5.1f%% (%.1f%% elapsed)\n\n",
- TICK_TO_DBL(GC_tot_time)*100/TICK_TO_DBL(time),
- TICK_TO_DBL(GCe_tot_time)*100/TICK_TO_DBL(etime));
-
- if (time - GC_tot_time - PROF_VAL(RP_tot_time + HC_tot_time) == 0)
+ statsPrintf(" EXIT time %6.2fs (%6.2fs elapsed)\n",
+ TICK_TO_DBL(exit_cpu), TICK_TO_DBL(exit_elapsed));
+ statsPrintf(" Total time %6.2fs (%6.2fs elapsed)\n\n",
+ TICK_TO_DBL(tot_cpu), TICK_TO_DBL(tot_elapsed));
+#ifndef THREADED_RTS
+ statsPrintf(" %%GC time %5.1f%% (%.1f%% elapsed)\n\n",
+ TICK_TO_DBL(gc_cpu)*100/TICK_TO_DBL(tot_cpu),
+ TICK_TO_DBL(gc_elapsed)*100/TICK_TO_DBL(tot_elapsed));
+#endif
+
+ if (tot_cpu - GC_tot_cpu - PROF_VAL(RP_tot_time + HC_tot_time) == 0)
showStgWord64(0, temp, rtsTrue/*commas*/);
else
showStgWord64(
(StgWord64)((GC_tot_alloc*sizeof(W_))/
- TICK_TO_DBL(time - GC_tot_time -
+ TICK_TO_DBL(tot_cpu - GC_tot_cpu -
PROF_VAL(RP_tot_time + HC_tot_time))),
temp, rtsTrue/*commas*/);
statsPrintf(" Alloc rate %s bytes per MUT second\n\n", temp);
statsPrintf(" Productivity %5.1f%% of total user, %.1f%% of total elapsed\n\n",
- TICK_TO_DBL(time - GC_tot_time -
- PROF_VAL(RP_tot_time + HC_tot_time) - InitUserTime) * 100
- / TICK_TO_DBL(time),
- TICK_TO_DBL(time - GC_tot_time -
- PROF_VAL(RP_tot_time + HC_tot_time) - InitUserTime) * 100
- / TICK_TO_DBL(etime));
+ TICK_TO_DBL(tot_cpu - GC_tot_cpu -
+ PROF_VAL(RP_tot_time + HC_tot_time) - init_cpu) * 100
+ / TICK_TO_DBL(tot_cpu),
+ TICK_TO_DBL(tot_cpu - GC_tot_cpu -
+ PROF_VAL(RP_tot_time + HC_tot_time) - init_cpu) * 100
+ / TICK_TO_DBL(tot_elapsed));
/*
TICK_PRINT(1);
statsPrintf(fmt1, GC_tot_alloc*(StgWord64)sizeof(W_));
statsPrintf(fmt2,
total_collections,
- ResidencySamples == 0 ? 0 :
- AvgResidency*sizeof(W_)/ResidencySamples,
- MaxResidency*sizeof(W_),
- ResidencySamples,
+ residency_samples == 0 ? 0 :
+ avg_residency*sizeof(W_)/residency_samples,
+ max_residency*sizeof(W_),
+ residency_samples,
(unsigned long)(peak_mblocks_allocated * MBLOCK_SIZE / (1024L * 1024L)),
- TICK_TO_DBL(InitUserTime), TICK_TO_DBL(InitElapsedTime),
- TICK_TO_DBL(MutUserTime), TICK_TO_DBL(MutElapsedTime),
- TICK_TO_DBL(GC_tot_time), TICK_TO_DBL(GCe_tot_time));
+ TICK_TO_DBL(init_cpu), TICK_TO_DBL(init_elapsed),
+ TICK_TO_DBL(mut_cpu), TICK_TO_DBL(mut_elapsed),
+ TICK_TO_DBL(gc_cpu), TICK_TO_DBL(gc_elapsed));
}
statsFlush();
statsClose();
}
- if (GC_coll_times)
- stgFree(GC_coll_times);
- GC_coll_times = NULL;
- if (GC_coll_etimes)
- stgFree(GC_coll_etimes);
- GC_coll_etimes = NULL;
+ if (GC_coll_cpu) {
+ stgFree(GC_coll_cpu);
+ GC_coll_cpu = NULL;
+ }
+ if (GC_coll_elapsed) {
+ stgFree(GC_coll_elapsed);
+ GC_coll_elapsed = NULL;
+ }
+ if (GC_coll_max_pause) {
+ stgFree(GC_coll_max_pause);
+ GC_coll_max_pause = NULL;
+ }
}
/* -----------------------------------------------------------------------------
mut = 0;
for (i = 0; i < n_capabilities; i++) {
mut += countOccupied(capabilities[i].mut_lists[g]);
+
+ // Add the pinned object block.
+ bd = capabilities[i].pinned_object_block;
+ if (bd != NULL) {
+ gen_live += bd->free - bd->start;
+ gen_blocks += bd->blocks;
+ }
+
+ gen_live += gcThreadLiveWords(i,g);
gen_live += gcThreadLiveWords(i,g);
gen_blocks += gcThreadLiveBlocks(i,g);
}
#include "BeginPrivate.h"
+struct gc_thread_;
+
void stat_startInit(void);
void stat_endInit(void);
-void stat_startGC(void);
-void stat_endGC (lnat alloc, lnat live,
- lnat copied, lnat gen,
- lnat max_copied, lnat avg_copied, lnat slop);
+void stat_startGC(struct gc_thread_ *gct);
+void stat_endGC (struct gc_thread_ *gct, lnat alloc, lnat live,
+ lnat copied, nat gen,
+ lnat max_copied, lnat avg_copied, lnat slop);
+
+void stat_gcWorkerThreadStart (struct gc_thread_ *gct);
+void stat_gcWorkerThreadDone (struct gc_thread_ *gct);
#ifdef PROFILING
void stat_startRP(void);
taskTimeStamp (Task *task USED_IF_THREADS)
{
#if defined(THREADED_RTS)
- Ticks currentElapsedTime, currentUserTime, elapsedGCTime;
+ Ticks currentElapsedTime, currentUserTime;
currentUserTime = getThreadCPUTime();
currentElapsedTime = getProcessElapsedTime();
- // XXX this is wrong; we want elapsed GC time since the
- // Task started.
- elapsedGCTime = stat_getElapsedGCTime();
-
- task->mut_time =
+ task->mut_time =
currentUserTime - task->muttimestart - task->gc_time;
task->mut_etime =
- currentElapsedTime - task->elapsedtimestart - elapsedGCTime;
+ currentElapsedTime - task->elapsedtimestart - task->gc_etime;
+ if (task->gc_time < 0) { task->gc_time = 0; }
+ if (task->gc_etime < 0) { task->gc_etime = 0; }
if (task->mut_time < 0) { task->mut_time = 0; }
if (task->mut_etime < 0) { task->mut_etime = 0; }
#endif
}
+void
+taskDoneGC (Task *task, Ticks cpu_time, Ticks elapsed_time)
+{
+ task->gc_time += cpu_time;
+ task->gc_etime += elapsed_time;
+}
+
#if defined(THREADED_RTS)
void
//
void taskTimeStamp (Task *task);
+// The current Task has finished a GC, record the amount of time spent.
+void taskDoneGC (Task *task, Ticks cpu_time, Ticks elapsed_time);
+
// Put the task back on the free list, mark it stopped. Used by
// forkProcess().
//
endif
-$(eval $(call build-dependencies,rts,dist,1))
+$(eval $(call dependencies,rts,dist,1))
$(rts_dist_depfile_c_asm) : libffi/dist-install/build/ffi.h $(DTRACEPROBES_H)
$(eval $(call clean-target,rts,dist,rts/dist))
BINDIST_EXTRAS += rts/package.conf.in
-BINDIST_EXTRAS += $(ALL_RTS_LIBS)
#include "Rts.h"
#include "RtsOpts.h"
-const rtsOptsEnabledEnum rtsOptsEnabled = rtsOptsSafeOnly;
+const RtsOptsEnabledEnum rtsOptsEnabled = RtsOptsSafeOnly;
, "-Wl,-search_paths_first"
#endif
-#ifdef darwin_HOST_OS
+#if defined(darwin_HOST_OS) && !defined(x86_64_HOST_ARCH)
, "-read_only_relocs", "warning"
#endif
setIOManagerWakeupFd (int fd)
{
// only called when THREADED_RTS, but unconditionally
- // compiled here because System.Event.Control depends on it.
+ // compiled here because GHC.Event.Control depends on it.
io_manager_wakeup_fd = fd;
}
setIOManagerControlFd (int fd)
{
// only called when THREADED_RTS, but unconditionally
- // compiled here because System.Event.Control depends on it.
+ // compiled here because GHC.Event.Control depends on it.
io_manager_control_fd = fd;
}
// 1. thread the roots
markCapabilities((evac_fn)thread_root, NULL);
+ markScheduler((evac_fn)thread_root, NULL);
+
// the weak pointer lists...
if (weak_ptr_list != NULL) {
thread((void *)&weak_ptr_list);
#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
+#include "GCTDecl.h"
#include "GCUtils.h"
#include "Compact.h"
#include "MarkStack.h"
#include "GC.h"
#include "GCThread.h"
+#include "GCTDecl.h"
#include "Compact.h"
#include "Evac.h"
#include "Scav.h"
static void scavenge_until_all_done (void);
static StgWord inc_running (void);
static StgWord dec_running (void);
-static void wakeup_gc_threads (nat n_threads, nat me);
-static void shutdown_gc_threads (nat n_threads, nat me);
+static void wakeup_gc_threads (nat me);
+static void shutdown_gc_threads (nat me);
static void collect_gct_blocks (void);
#if 0 && defined(DEBUG)
generation *gen;
lnat live_blocks, live_words, allocated, max_copied, avg_copied;
gc_thread *saved_gct;
- nat g, t, n;
+ nat g, n;
// necessary if we stole a callee-saves register for gct:
saved_gct = gct;
ASSERT(sizeof(gen_workspace) == 16 * sizeof(StgWord));
// otherwise adjust the padding in gen_workspace.
- // tell the stats department that we've started a GC
- stat_startGC();
+ // this is the main thread
+ SET_GCT(gc_threads[cap->no]);
- // tell the STM to discard any cached closures it's hoping to re-use
- stmPreGCHook();
+ // tell the stats department that we've started a GC
+ stat_startGC(gct);
// lock the StablePtr table
stablePtrPreGC();
// check sanity *before* GC
IF_DEBUG(sanity, checkSanity(rtsFalse /* before GC */, major_gc));
- // Initialise all our gc_thread structures
- for (t = 0; t < n_gc_threads; t++) {
- init_gc_thread(gc_threads[t]);
- }
-
// Initialise all the generations/steps that we're collecting.
for (g = 0; g <= N; g++) {
prepare_collected_gen(&generations[g]);
prepare_uncollected_gen(&generations[g]);
}
+ // Prepare this gc_thread
+ init_gc_thread(gct);
+
/* Allocate a mark stack if we're doing a major collection.
*/
if (major_gc && oldest_gen->mark) {
mark_sp = NULL;
}
- // this is the main thread
-#ifdef THREADED_RTS
- if (n_gc_threads == 1) {
- SET_GCT(gc_threads[0]);
- } else {
- SET_GCT(gc_threads[cap->no]);
- }
-#else
-SET_GCT(gc_threads[0]);
-#endif
-
/* -----------------------------------------------------------------------
* follow all the roots that we know about:
*/
// NB. do this after the mutable lists have been saved above, otherwise
// the other GC threads will be writing into the old mutable lists.
inc_running();
- wakeup_gc_threads(n_gc_threads, gct->thread_index);
+ wakeup_gc_threads(gct->thread_index);
+
+ traceEventGcWork(gct->cap);
// scavenge the capability-private mutable lists. This isn't part
// of markSomeCapabilities() because markSomeCapabilities() can only
#endif
}
} else {
- scavenge_capability_mut_lists(&capabilities[gct->thread_index]);
+ scavenge_capability_mut_lists(gct->cap);
}
// follow roots from the CAF list (used by GHCi)
// follow all the roots that the application knows about.
gct->evac_gen_no = 0;
- markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads,
- rtsTrue/*prune sparks*/);
+ if (n_gc_threads == 1) {
+ for (n = 0; n < n_capabilities; n++) {
+ markCapability(mark_root, gct, &capabilities[n],
+ rtsTrue/*don't mark sparks*/);
+ }
+ } else {
+ markCapability(mark_root, gct, cap, rtsTrue/*don't mark sparks*/);
+ }
+
+ markScheduler(mark_root, gct);
#if defined(RTS_USER_SIGNALS)
// mark the signal handlers (signals should be already blocked)
break;
}
- shutdown_gc_threads(n_gc_threads, gct->thread_index);
+ shutdown_gc_threads(gct->thread_index);
// Now see which stable names are still alive.
gcStablePtrTable();
pruneSparkQueue(&capabilities[n]);
}
} else {
- pruneSparkQueue(&capabilities[gct->thread_index]);
+ pruneSparkQueue(gct->cap);
}
#endif
// update the max size of older generations after a major GC
resize_generations();
- // Start a new pinned_object_block
- for (n = 0; n < n_capabilities; n++) {
- capabilities[n].pinned_object_block = NULL;
- }
-
// Free the mark stack.
if (mark_stack_top_bd != NULL) {
debugTrace(DEBUG_gc, "mark stack: %d blocks",
// zero the scavenged static object list
if (major_gc) {
nat i;
- for (i = 0; i < n_gc_threads; i++) {
- zero_static_object_list(gc_threads[i]->scavenged_static_objects);
+ if (n_gc_threads == 1) {
+ zero_static_object_list(gct->scavenged_static_objects);
+ } else {
+ for (i = 0; i < n_gc_threads; i++) {
+ zero_static_object_list(gc_threads[i]->scavenged_static_objects);
+ }
}
}
#endif
// ok, GC over: tell the stats department what happened.
- stat_endGC(allocated, live_words, copied, N, max_copied, avg_copied,
+ stat_endGC(gct, allocated, live_words,
+ copied, N, max_copied, avg_copied,
live_blocks * BLOCK_SIZE_W - live_words /* slop */);
// Guess which generation we'll collect *next* time
nat g;
gen_workspace *ws;
+ t->cap = &capabilities[n];
+
#ifdef THREADED_RTS
t->id = 0;
initSpinLock(&t->gc_spin);
loop:
- traceEventGcWork(&capabilities[gct->thread_index]);
-
#if defined(THREADED_RTS)
if (n_gc_threads > 1) {
scavenge_loop();
// scavenge_loop() only exits when there's no work to do
r = dec_running();
- traceEventGcIdle(&capabilities[gct->thread_index]);
+ traceEventGcIdle(gct->cap);
debugTrace(DEBUG_gc, "%d GC threads still running", r);
// usleep(1);
if (any_work()) {
inc_running();
+ traceEventGcWork(gct->cap);
goto loop;
}
// any_work() does not remove the work from the queue, it
// scavenge_loop() to perform any pending work.
}
- traceEventGcDone(&capabilities[gct->thread_index]);
+ traceEventGcDone(gct->cap);
}
#if defined(THREADED_RTS)
gct = gc_threads[cap->no];
gct->id = osThreadId();
+ stat_gcWorkerThreadStart(gct);
+
// Wait until we're told to wake up
RELEASE_SPIN_LOCK(&gct->mut_spin);
gct->wakeup = GC_THREAD_STANDING_BY;
}
papi_thread_start_gc1_count(gct->papi_events);
#endif
-
+
+ init_gc_thread(gct);
+
+ traceEventGcWork(gct->cap);
+
// Every thread evacuates some roots.
gct->evac_gen_no = 0;
- markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads,
- rtsTrue/*prune sparks*/);
- scavenge_capability_mut_lists(&capabilities[gct->thread_index]);
+ markCapability(mark_root, gct, cap, rtsTrue/*prune sparks*/);
+ scavenge_capability_mut_lists(cap);
scavenge_until_all_done();
ACQUIRE_SPIN_LOCK(&gct->mut_spin);
debugTrace(DEBUG_gc, "GC thread %d on my way...", gct->thread_index);
+ // record the time spent doing GC in the Task structure
+ stat_gcWorkerThreadDone(gct);
+
SET_GCT(saved_gct);
}
}
static void
-wakeup_gc_threads (nat n_threads USED_IF_THREADS, nat me USED_IF_THREADS)
+wakeup_gc_threads (nat me USED_IF_THREADS)
{
#if defined(THREADED_RTS)
nat i;
- for (i=0; i < n_threads; i++) {
+
+ if (n_gc_threads == 1) return;
+
+ for (i=0; i < n_gc_threads; i++) {
if (i == me) continue;
inc_running();
debugTrace(DEBUG_gc, "waking up gc thread %d", i);
// standby state, otherwise they may still be executing inside
// any_work(), and may even remain awake until the next GC starts.
static void
-shutdown_gc_threads (nat n_threads USED_IF_THREADS, nat me USED_IF_THREADS)
+shutdown_gc_threads (nat me USED_IF_THREADS)
{
#if defined(THREADED_RTS)
nat i;
- for (i=0; i < n_threads; i++) {
+
+ if (n_gc_threads == 1) return;
+
+ for (i=0; i < n_gc_threads; i++) {
if (i == me) continue;
while (gc_threads[i]->wakeup != GC_THREAD_WAITING_TO_CONTINUE) { write_barrier(); }
}
t->static_objects = END_OF_STATIC_LIST;
t->scavenged_static_objects = END_OF_STATIC_LIST;
t->scan_bd = NULL;
- t->mut_lists = capabilities[t->thread_index].mut_lists;
+ t->mut_lists = t->cap->mut_lists;
t->evac_gen_no = 0;
t->failed_to_evac = rtsFalse;
t->eager_promotion = rtsTrue;
#include "Capability.h"
#include "Trace.h"
#include "Schedule.h"
-// DO NOT include "GCThread.h", we don't want the register variable
+// DO NOT include "GCTDecl.h", we don't want the register variable
/* -----------------------------------------------------------------------------
isAlive determines whether the given closure is still alive (after
if (IS_FORWARDING_PTR(info)) {
// alive!
- return (StgClosure*)UN_FORWARDING_PTR(info);
+ return TAG_CLOSURE(tag,(StgClosure*)UN_FORWARDING_PTR(info));
}
info = INFO_PTR_TO_STRUCT(info);
--- /dev/null
+/* -----------------------------------------------------------------------------
+ *
+ * (c) The GHC Team 1998-2009
+ *
+ * Documentation on the architecture of the Garbage Collector can be
+ * found in the online commentary:
+ *
+ * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
+ *
+ * ---------------------------------------------------------------------------*/
+
+#ifndef SM_GCTDECL_H
+#define SM_GCTDECL_H
+
+#include "BeginPrivate.h"
+
+/* -----------------------------------------------------------------------------
+ The gct variable is thread-local and points to the current thread's
+ gc_thread structure. It is heavily accessed, so we try to put gct
+ into a global register variable if possible; if we don't have a
+ register then use gcc's __thread extension to create a thread-local
+ variable.
+ -------------------------------------------------------------------------- */
+
+#if defined(THREADED_RTS)
+
+#define GLOBAL_REG_DECL(type,name,reg) register type name REG(reg);
+
+#define SET_GCT(to) gct = (to)
+
+
+
+#if (defined(i386_HOST_ARCH) && defined(linux_HOST_OS))
+// Using __thread is better than stealing a register on x86/Linux, because
+// we have too few registers available. In my tests it was worth
+// about 5% in GC performance, but of course that might change as gcc
+// improves. -- SDM 2009/04/03
+//
+// We ought to do the same on MacOS X, but __thread is not
+// supported there yet (gcc 4.0.1).
+
+extern __thread gc_thread* gct;
+#define DECLARE_GCT __thread gc_thread* gct;
+
+
+#elif defined(sparc_HOST_ARCH)
+// On SPARC we can't pin gct to a register. Names like %l1 are just offsets
+// into the register window, which change on each function call.
+//
+// There are eight global (non-window) registers, but they're used for other purposes.
+// %g0 -- always zero
+// %g1 -- volatile over function calls, used by the linker
+// %g2-%g3 -- used as scratch regs by the C compiler (caller saves)
+// %g4 -- volatile over function calls, used by the linker
+// %g5-%g7 -- reserved by the OS
+
+extern __thread gc_thread* gct;
+#define DECLARE_GCT __thread gc_thread* gct;
+
+
+#elif defined(REG_Base) && !defined(i386_HOST_ARCH)
+// on i386, REG_Base is %ebx which is also used for PIC, so we don't
+// want to steal it
+
+GLOBAL_REG_DECL(gc_thread*, gct, REG_Base)
+#define DECLARE_GCT /* nothing */
+
+
+#elif defined(REG_R1)
+
+GLOBAL_REG_DECL(gc_thread*, gct, REG_R1)
+#define DECLARE_GCT /* nothing */
+
+
+#elif defined(__GNUC__)
+
+extern __thread gc_thread* gct;
+#define DECLARE_GCT __thread gc_thread* gct;
+
+#else
+
+#error Cannot find a way to declare the thread-local gct
+
+#endif
+
+#else // not the threaded RTS
+
+extern StgWord8 the_gc_thread[];
+
+#define gct ((gc_thread*)&the_gc_thread)
+#define SET_GCT(to) /*nothing*/
+#define DECLARE_GCT /*nothing*/
+
+#endif // THREADED_RTS
+
+#include "EndPrivate.h"
+
+#endif // SM_GCTDECL_H
#define SM_GCTHREAD_H
#include "WSDeque.h"
+#include "GetTime.h" // for Ticks
#include "BeginPrivate.h"
------------------------------------------------------------------------- */
typedef struct gc_thread_ {
+ Capability *cap;
+
#ifdef THREADED_RTS
OSThreadId id; // The OS thread that this struct belongs to
SpinLock gc_spin;
// instead of the to-space
// corresponding to the object
- lnat thunk_selector_depth; // ummm.... not used as of now
+ lnat thunk_selector_depth; // used to avoid unbounded recursion in
+ // evacuate() for THUNK_SELECTOR
#ifdef USE_PAPI
int papi_events;
lnat no_work;
lnat scav_find_work;
+ Ticks gc_start_cpu; // process CPU time
+ Ticks gc_start_elapsed; // process elapsed time
+ Ticks gc_start_thread_cpu; // thread CPU time
+ lnat gc_start_faults;
+
// -------------------
// workspaces
- // array of workspaces, indexed by stp->abs_no. This is placed
+ // array of workspaces, indexed by gen->abs_no. This is placed
// directly at the end of the gc_thread structure so that we can get from
// the gc_thread pointer to a workspace using only pointer
// arithmetic, no memory access. This happens in the inner loop
extern nat n_gc_threads;
-/* -----------------------------------------------------------------------------
- The gct variable is thread-local and points to the current thread's
- gc_thread structure. It is heavily accessed, so we try to put gct
- into a global register variable if possible; if we don't have a
- register then use gcc's __thread extension to create a thread-local
- variable.
-
- Even on x86 where registers are scarce, it is worthwhile using a
- register variable here: I measured about a 2-5% slowdown with the
- __thread version.
- -------------------------------------------------------------------------- */
-
extern gc_thread **gc_threads;
-#if defined(THREADED_RTS)
-
-#define GLOBAL_REG_DECL(type,name,reg) register type name REG(reg);
-
-#define SET_GCT(to) gct = (to)
-
-
-
-#if (defined(i386_HOST_ARCH) && defined(linux_HOST_OS))
-// Using __thread is better than stealing a register on x86/Linux, because
-// we have too few registers available. In my tests it was worth
-// about 5% in GC performance, but of course that might change as gcc
-// improves. -- SDM 2009/04/03
-//
-// We ought to do the same on MacOS X, but __thread is not
-// supported there yet (gcc 4.0.1).
-
-extern __thread gc_thread* gct;
-#define DECLARE_GCT __thread gc_thread* gct;
-
-
-#elif defined(sparc_HOST_ARCH)
-// On SPARC we can't pin gct to a register. Names like %l1 are just offsets
-// into the register window, which change on each function call.
-//
-// There are eight global (non-window) registers, but they're used for other purposes.
-// %g0 -- always zero
-// %g1 -- volatile over function calls, used by the linker
-// %g2-%g3 -- used as scratch regs by the C compiler (caller saves)
-// %g4 -- volatile over function calls, used by the linker
-// %g5-%g7 -- reserved by the OS
-
-extern __thread gc_thread* gct;
-#define DECLARE_GCT __thread gc_thread* gct;
-
-
-#elif defined(REG_Base) && !defined(i386_HOST_ARCH)
-// on i386, REG_Base is %ebx which is also used for PIC, so we don't
-// want to steal it
-
-GLOBAL_REG_DECL(gc_thread*, gct, REG_Base)
-#define DECLARE_GCT /* nothing */
-
-
-#elif defined(REG_R1)
-
-GLOBAL_REG_DECL(gc_thread*, gct, REG_R1)
-#define DECLARE_GCT /* nothing */
-
-
-#elif defined(__GNUC__)
-
-extern __thread gc_thread* gct;
-#define DECLARE_GCT __thread gc_thread* gct;
-
-#else
-
-#error Cannot find a way to declare the thread-local gct
-
-#endif
-
-#else // not the threaded RTS
-
-extern StgWord8 the_gc_thread[];
-
-#define gct ((gc_thread*)&the_gc_thread)
-#define SET_GCT(to) /*nothing*/
-#define DECLARE_GCT /*nothing*/
-
-#endif
-
#include "EndPrivate.h"
#endif // SM_GCTHREAD_H
#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
+#include "GCTDecl.h"
#include "GCUtils.h"
#include "Printer.h"
#include "Trace.h"
#include "BeginPrivate.h"
+#include "GCTDecl.h"
+
bdescr *allocBlock_sync(void);
void freeChain_sync(bdescr *bd);
#include "MarkWeak.h"
#include "GC.h"
#include "GCThread.h"
+#include "GCTDecl.h"
#include "Evac.h"
#include "Trace.h"
#include "Schedule.h"
for (i = 0; i < n_capabilities; i++) {
markBlocks(nurseries[i].blocks);
+ markBlocks(capabilities[i].pinned_object_block);
}
#ifdef PROFILING
for (i = 0; i < n_capabilities; i++) {
ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
nursery_blocks += nurseries[i].n_blocks;
+ if (capabilities[i].pinned_object_block != NULL) {
+ nursery_blocks += capabilities[i].pinned_object_block->blocks;
+ }
}
retainer_blocks = 0;
// If we don't have a block of pinned objects yet, or the current
// one isn't large enough to hold the new object, allocate a new one.
if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
+ // The pinned_object_block remains attached to the capability
+ // until it is full, even if a GC occurs. We want this
+ // behaviour because otherwise the unallocated portion of the
+ // block would be forever slop, and under certain workloads
+ // (allocating a few ByteStrings per GC) we accumulate a lot
+ // of slop.
+ //
+ // So, the pinned_object_block is initially marked
+ // BF_EVACUATED so the GC won't touch it. When it is full,
+ // we place it on the large_objects list, and at the start of
+ // the next GC the BF_EVACUATED flag will be cleared, and the
+ // block will be promoted as usual (if anything in it is
+ // live).
ACQUIRE_SM_LOCK;
- cap->pinned_object_block = bd = allocBlock();
- dbl_link_onto(bd, &g0->large_objects);
- g0->n_large_blocks++;
+ if (bd != NULL) {
+ dbl_link_onto(bd, &g0->large_objects);
+ g0->n_large_blocks++;
+ g0->n_new_large_words += bd->free - bd->start;
+ }
+ cap->pinned_object_block = bd = allocBlock();
RELEASE_SM_LOCK;
initBdescr(bd, g0, g0);
- bd->flags = BF_PINNED | BF_LARGE;
+ bd->flags = BF_PINNED | BF_LARGE | BF_EVACUATED;
bd->free = bd->start;
}
- g0->n_new_large_words += n;
p = bd->free;
bd->free += n;
return p;
# All the .a/.so library file dependencies for this library
$1_$2_$3_DEPS_LIBS=$$(foreach dep,$$($1_$2_DEPS),$$($$(dep)_$2_$3_LIB))
-ifneq "$$(BootingFromHc)" "YES"
-$1_$2_$3_MKSTUBOBJS = $$(FIND) $1/$2/build -name "*_stub.$$($3_osuf)" -print
-# HACK ^^^ we tried to use $(wildcard), but apparently it fails due to
-# make using cached directory contents, or something.
-else
-$1_$2_$3_MKSTUBOBJS = true
+ifeq "$$(BootingFromHc)" "YES"
$1_$2_$3_C_OBJS += $$(shell $$(FIND) $1/$2/build -name "*_stub.c" -print | sed 's/c$$$$/o/')
endif
ifeq "$$(HOSTPLATFORM)" "i386-unknown-mingw32"
$$($1_$2_$3_LIB) : $$($1_$2_$3_ALL_OBJS) $$(ALL_RTS_LIBS) $$($1_$2_$3_DEPS_LIBS)
"$$($1_$2_HC)" $$($1_$2_$3_ALL_OBJS) \
- `$$($1_$2_$3_MKSTUBOBJS)` \
-shared -dynamic -dynload deploy \
$$(addprefix -l,$$($1_$2_EXTRA_LIBRARIES)) \
-no-auto-link-packages $$(addprefix -package ,$$($1_$2_DEPS)) \
else
$$($1_$2_$3_LIB) : $$($1_$2_$3_ALL_OBJS) $$(ALL_RTS_LIBS) $$($1_$2_$3_DEPS_LIBS)
"$$($1_$2_HC)" $$($1_$2_$3_ALL_OBJS) \
- `$$($1_$2_$3_MKSTUBOBJS)` \
-shared -dynamic -dynload deploy \
-dylib-install-name $(ghclibdir)/`basename "$$@" | sed 's/^libHS//;s/[-]ghc.*//'`/`basename "$$@"` \
-no-auto-link-packages $$(addprefix -package ,$$($1_$2_DEPS)) \
"$$(RM)" $$(RM_OPTS) $$@ $$@.contents
ifeq "$$($1_$2_SplitObjs)" "YES"
$$(FIND) $$(patsubst %.$$($3_osuf),%_$$($3_osuf)_split,$$($1_$2_$3_HS_OBJS)) -name '*.$$($3_osuf)' -print >> $$@.contents
- echo $$($1_$2_$3_NON_HS_OBJS) `$$($1_$2_$3_MKSTUBOBJS)` >> $$@.contents
+ echo $$($1_$2_$3_NON_HS_OBJS) >> $$@.contents
else
- echo $$($1_$2_$3_ALL_OBJS) `$$($1_$2_$3_MKSTUBOBJS)` >> $$@.contents
+ echo $$($1_$2_$3_ALL_OBJS) >> $$@.contents
endif
ifeq "$$(ArSupportsAtFile)" "YES"
"$$(AR)" $$(AR_OPTS) $$(EXTRA_AR_ARGS) $$@ @$$@.contents
endif
endif
$$($1_$2_GHCI_LIB) : $$($1_$2_$3_HS_OBJS) $$($1_$2_$3_CMM_OBJS) $$($1_$2_$3_C_OBJS) $$($1_$2_$3_S_OBJS) $$($1_$2_EXTRA_OBJS)
- "$$(LD)" -r -o $$@ $$(EXTRA_LD_OPTS) $$($1_$2_$3_HS_OBJS) $$($1_$2_$3_CMM_OBJS) $$($1_$2_$3_C_OBJS) $$($1_$2_$3_S_OBJS) `$$($1_$2_$3_MKSTUBOBJS)` $$($1_$2_EXTRA_OBJS)
+ "$$(LD)" -r -o $$@ $$(EXTRA_LD_OPTS) $$($1_$2_$3_HS_OBJS) $$($1_$2_$3_CMM_OBJS) $$($1_$2_$3_C_OBJS) $$($1_$2_$3_S_OBJS) $$($1_$2_EXTRA_OBJS)
ifeq "$$($1_$2_BUILD_GHCI_LIB)" "YES"
# Don't bother making ghci libs for bootstrapping packages
$(call c-sources,$1,$2)
$(call includes-sources,$1,$2)
-# --- DEPENDENCIES
-# We always have the dependency rules available, as we need to know
-# how to build hsc2hs's dependency file in phase 0
-$(call build-dependencies,$1,$2,$3)
-ifneq "$(phase)" "0"
-# From phase 1 we actually include the dependency files for the
-# bootstrapping stuff
-ifeq "$3" "0"
-$(call include-dependencies,$1,$2,$3)
-else ifeq "$(phase)" "final"
-# In the final phase, we also include the dependency files for
-# everything else
-$(call include-dependencies,$1,$2,$3)
-endif
-endif
+$(call dependencies,$1,$2,$3)
# Now generate all the build rules for each way in this directory:
$$(foreach way,$$($1_$2_WAYS),$$(eval \
endif
endif
-# --- DEPENDENCIES
-# We always have the dependency rules available, as we need to know
-# how to build hsc2hs's dependency file in phase 0
-$(call build-dependencies,$1,$2,$3)
-ifneq "$(phase)" "0"
-# From phase 1 we actually include the dependency files for the
-# bootstrapping stuff
-ifeq "$3" "0"
-$(call include-dependencies,$1,$2,$3)
-else ifeq "$(phase)" "final"
-# In the final phase, we also include the dependency files for
-# everything else
-$(call include-dependencies,$1,$2,$3)
-endif
-endif
+$(call dependencies,$1,$2,$3)
endef
--- /dev/null
+# -----------------------------------------------------------------------------
+#
+# (c) 2009 The University of Glasgow
+#
+# This file is part of the GHC build system.
+#
+# To understand how the build system works and how to modify it, see
+# http://hackage.haskell.org/trac/ghc/wiki/Building/Architecture
+# http://hackage.haskell.org/trac/ghc/wiki/Building/Modifying
+#
+# -----------------------------------------------------------------------------
+
+define dependencies
+$(call trace, dependencies($1,$2,$3))
+$(call profStart, dependencies($1,$2,$3))
+# $1 = dir
+# $2 = distdir
+# $3 = GHC stage to use (0 == bootstrapping compiler)
+
+# We always have the dependency rules available, as we need to know
+# how to build hsc2hs's dependency file in phase 0
+$(call build-dependencies,$1,$2,$3)
+
+ifneq "$(phase)" "0"
+# From phase 1 we actually include the dependency files for the
+# bootstrapping stuff
+ifeq "$3" "0"
+$(call include-dependencies,$1,$2,$3)
+else ifeq "$(phase)" "final"
+# In the final phase, we also include the dependency files for
+# everything else
+$(call include-dependencies,$1,$2,$3)
+endif
+endif
+
+$(call profEnd, dependencies($1,$2,$3))
+endef
+
$1_$2_DEP_INCLUDE_DIRS_FLAG = -I
endif
-# We have to do this mangling using the shell, because words may contain
-# spaces and GNU make doesn't have any quoting interpretation.
-ifneq ($$(strip $$($1_$2_DEP_INCLUDE_DIRS)),)
-$1_$2_CC_INC_FLAGS:=$$(shell for i in $$($1_$2_DEP_INCLUDE_DIRS); do echo $$($1_$2_DEP_INCLUDE_DIRS_FLAG)\"$$$$i\"; done)
+ifneq ($$(strip $$($1_$2_DEP_INCLUDE_DIRS_SINGLE_QUOTED)),)
+$1_$2_CC_INC_FLAGS := $$(subst $$(space)',$$(space)$$($1_$2_DEP_INCLUDE_DIRS_FLAG)',$$(space)$$($1_$2_DEP_INCLUDE_DIRS_SINGLE_QUOTED))
endif
# The CONF_CC_OPTS_STAGE$4 options are what we use to get gcc to
$$($1_$2_CC_INC_FLAGS) \
$$($1_$2_DEP_CC_OPTS)
-ifneq ($$(strip $$($1_$2_DEP_LIB_DIRS)),)
-$1_$2_DIST_LD_LIB_DIRS:=$$(shell for i in $$($1_$2_DEP_LIB_DIRS); do echo \"-L$$$$i\"; done)
+ifneq ($$(strip $$($1_$2_DEP_LIB_DIRS_SINGLE_QUOTED)),)
+$1_$2_DIST_LD_LIB_DIRS := $$(subst $$(space)',$$(space)-L',$$(space)$$($1_$2_DEP_LIB_DIRS_SINGLE_QUOTED))
endif
$1_$2_DIST_LD_OPTS = \
use strict;
use Cwd;
+# Usage:
+#
+# ./sync-all [-q] [-s] [--ignore-failure] [-r repo]
+# [--nofib] [--testsuite] [--checked-out] cmd [git flags]
+#
+# Applies the command "cmd" to each repository in the tree.
+# sync-all will try to do the right thing for both git and darcs repositories.
+#
+# e.g.
+# ./sync-all -r http://darcs.haskell.org/ghc get
+# To get any repos which do not exist in the local tree
+#
+# ./sync-all pull
+# To pull everything from the default repos
+#
+# -------------- Flags -------------------
+# -q says to be quiet, and -s to be silent.
+#
+# --ignore-failure says to ignore errors and move on to the next repository
+#
+# -r repo says to use repo as the location of package repositories
+#
+# --checked-out says that the remote repo is in checked-out layout, as
+# opposed to the layout used for the main repo. By default a repo on
+# the local filesystem is assumed to be checked-out, and repos accessed
+# via HTTP or SSH are assumed to be in the main repo layout; use
+# --checked-out to override the latter.
+#
+# --nofib, --testsuite also get the nofib and testsuite repos respectively
+#
+# ------------ Which repos to use -------------
+# sync-all uses the following algorithm to decide which remote repos to use
+#
+# It always computes the remote repos from a single base, $repo_base
+# How is $repo_base set?
+# If you say "-r repo", then that's $repo_base
+# otherwise $repo_base is set by asking git where the ghc repo came
+# from, and removing the last component (e.g. /ghc.git/ of /ghc/).
+#
+# Then sync-all iterates over the packages found in the file
+# ./packages; see that file for a description of the contents.
+#
+# If $repo_base looks like a local filesystem path, or if you give
+# the --checked-out flag, sync-all works on repos of form
+# $repo_base/<local-path>
+# otherwise sync-all works on repos of form
+# $repo_base/<remote-path>
+# This logic lets you say
+# both sync-all -r http://darcs.haskell.org/ghc-6.12 pull
+# and sync-all -r ../HEAD pull
+# The latter is called a "checked-out tree".
+
+# NB: sync-all *ignores* the defaultrepo of all repos other than the
+# root one. So the remote repos must be laid out in one of the two
+# formats given by <local-path> and <remote-path> in the file 'packages'.
+
+$| = 1; # autoflush stdout after each print, to avoid output after die
+
my $defaultrepo;
my @packages;
my $verbose = 2;
my $ignore_failure = 0;
-my $want_remote_repo = 0;
my $checked_out_flag = 0;
my $get_mode;
-# Flags specific to a particular command
-my $local_repo_unnecessary = 0;
-
my %tags;
# Figure out where to get the other repositories from.
my @repos;
my $lineNum;
- open IN, "< packages.git" or die "Can't open packages file";
+ open IN, "< packages" or die "Can't open packages file";
@repos = <IN>;
close IN;
}
sub scm {
+ my $dir = shift;
my $scm = shift;
-
- message "== running $scm @_";
+ my $pwd;
+
+ if ($dir eq '.') {
+ message "== running $scm @_";
+ } else {
+ message "== $dir: running $scm @_";
+ $pwd = getcwd();
+ chdir($dir);
+ }
+
system ($scm, @_) == 0
or $ignore_failure
or die "$scm failed: $?";
-}
-sub repoexists {
- my ($scm, $localpath) = @_;
-
- if ($scm eq "darcs") {
- -d "$localpath/_darcs";
- }
- else {
- -d "$localpath/.git";
+ if ($dir ne '.') {
+ chdir($pwd);
}
}
my $scm;
my $upstream;
my $line;
+ my $branch_name;
+ my $subcommand;
my $path;
my $wd_before = getcwd;
- my @scm_args;
+ my $pwd;
+ my @args;
my ($repo_base, $checked_out_tree) = getrepo();
+ my $is_github_repo = $repo_base =~ m/(git@|git:\/\/|https:\/\/)github.com/;
+
parsePackages;
+ @args = ();
+
+ if ($command =~ /^remote$/) {
+ while (@_ > 0 && $_[0] =~ /^-/) {
+ push(@args,shift);
+ }
+ if (@_ < 1) { help(); }
+ $subcommand = shift;
+ if ($subcommand ne 'add' && $subcommand ne 'rm' && $subcommand ne 'set-url') {
+ help();
+ }
+ while (@_ > 0 && $_[0] =~ /^-/) {
+ push(@args,shift);
+ }
+ if (($subcommand eq 'add' || $subcommand eq 'rm') && @_ < 1) {
+ help();
+ } elsif (@_ < 1) { # set-url
+ $branch_name = 'origin';
+ } else {
+ $branch_name = shift;
+ }
+ } elsif ($command eq 'new') {
+ if (@_ < 1) {
+ $branch_name = 'origin';
+ } else {
+ $branch_name = shift;
+ }
+ }
+
+ push(@args, @_);
+
for $line (@packages) {
- $localpath = $$line{"localpath"};
- $tag = $$line{"tag"};
- $remotepath = $$line{"remotepath"};
- $scm = $$line{"vcs"};
- $upstream = $$line{"upstream"};
+ $localpath = $$line{"localpath"};
+ $tag = $$line{"tag"};
+ $remotepath = $$line{"remotepath"};
+ $scm = $$line{"vcs"};
+ $upstream = $$line{"upstream"};
- # Check the SCM is OK as early as possible
- die "Unknown SCM: $scm" if (($scm ne "darcs") and ($scm ne "git"));
+ # Check the SCM is OK as early as possible
+ die "Unknown SCM: $scm" if (($scm ne "darcs") and ($scm ne "git"));
- # Work out the path for this package in the repo we pulled from
- if ($checked_out_tree) {
- $path = "$repo_base/$localpath";
- }
- else {
- $path = "$repo_base/$remotepath";
- }
+ # We can't create directories on GitHub, so we translate
+ # "package/foo" into "package-foo".
+ if ($is_github_repo) {
+ $remotepath =~ s/\//-/;
+ }
- # Work out the arguments we should give to the SCM
- if ($command =~ /^(?:w|wh|wha|what|whats|whatsn|whatsne|whatsnew)$/) {
- @scm_args = (($scm eq "darcs" and "whatsnew")
- or ($scm eq "git" and "status"));
-
- # Hack around 'darcs whatsnew' failing if there are no changes
- $ignore_failure = 1;
- }
- elsif ($command =~ /^(?:pus|push)$/) {
- @scm_args = "push";
- $want_remote_repo = 1;
+ # Work out the path for this package in the repo we pulled from
+ if ($checked_out_tree) {
+ $path = "$repo_base/$localpath";
+ }
+ else {
+ $path = "$repo_base/$remotepath";
+ }
+
+ if ($command =~ /^(?:g|ge|get)$/) {
+ # Skip any repositories we have not included the tag for
+ if (not defined($tags{$tag})) {
+ $tags{$tag} = 0;
}
- elsif ($command =~ /^(?:pul|pull)$/) {
- @scm_args = "pull";
- $want_remote_repo = 1;
- # Q: should we append the -a argument for darcs repos?
+ if ($tags{$tag} == 0) {
+ next;
}
- elsif ($command =~ /^(?:g|ge|get)$/) {
- # Skip any repositories we have not included the tag for
- if (not defined($tags{$tag})) {
- next;
- }
-
- if (-d $localpath) {
- warning("$localpath already present; omitting") if $localpath ne ".";
- next;
+
+ if (-d $localpath) {
+ warning("$localpath already present; omitting")
+ if $localpath ne ".";
+ if ($scm eq "git") {
+ scm ($localpath, $scm, "config", "core.ignorecase", "true");
}
-
+ next;
+ }
+
+ # Note that we use "." as the path, as $localpath
+ # doesn't exist yet.
+ if ($scm eq "darcs") {
# The first time round the loop, default the get-mode
if (not defined($get_mode)) {
warning("adding --partial, to override use --complete");
$get_mode = "--partial";
}
-
- # The only command that doesn't need a repo
- $local_repo_unnecessary = 1;
-
- if ($scm eq "darcs") {
- # Note: we can only use the get-mode with darcs for now
- @scm_args = ("get", $get_mode, $path, $localpath);
- }
- else {
- @scm_args = ("clone", $path, $localpath);
- }
- }
- elsif ($command =~ /^(?:s|se|sen|send)$/) {
- @scm_args = (($scm eq "darcs" and "send")
- or ($scm eq "git" and "send-email"));
- $want_remote_repo = 1;
+ scm (".", $scm, "get", $get_mode, $path, $localpath, @args);
}
else {
- die "Unknown command: $command";
+ scm (".", $scm, "clone", $path, $localpath, @args);
+ scm ($localpath, $scm, "config", "core.ignorecase", "true");
}
-
- # Actually execute the command
- if (repoexists ($scm, $localpath)) {
- if ($want_remote_repo) {
- if ($scm eq "darcs") {
- scm ($scm, @scm_args, @_, "--repodir=$localpath", $path);
- } else {
- # git pull doesn't like to be used with --work-dir
- scm ($scm, "--git-dir=$localpath/.git", @scm_args, @_, $path, "master");
- }
- } else {
- # git status *must* be used with --work-dir, if we don't chdir() to the dir
- scm ($scm, "--git-dir=$localpath/.git", "--work-tree=$localpath", @scm_args, @_);
- }
+ next;
+ }
+
+ if (-d "$localpath/_darcs") {
+ if (-d "$localpath/.git") {
+ die "Found both _darcs and .git in $localpath";
}
- elsif ($local_repo_unnecessary) {
- # Don't bother to change directory in this case
- scm ($scm, @scm_args, @_);
+ else {
+ $scm = "darcs";
+ }
+ }
+ else {
+ if (-d "$localpath/.git") {
+ $scm = "git";
}
elsif ($tag eq "") {
- message "== Required repo $localpath is missing! Skipping";
+ die "Required repo $localpath is missing";
}
else {
message "== $localpath repo not present; skipping";
}
- }
-}
-
-sub main {
- if (! -d ".git" || ! -d "compiler") {
- die "error: sync-all must be run from the top level of the ghc tree."
- }
+ }
- $tags{"-"} = 1;
- $tags{"dph"} = 1;
+ # Work out the arguments we should give to the SCM
+ if ($command =~ /^(?:w|wh|wha|what|whats|whatsn|whatsne|whatsnew|status)$/) {
+ if ($scm eq "darcs") {
+ $command = "whatsnew";
+ }
+ elsif ($scm eq "git") {
+ $command = "status";
+ }
+ else {
+ die "Unknown scm";
+ }
- while ($#_ ne -1) {
- my $arg = shift;
- # We handle -q here as well as lower down as we need to skip over it
- # if it comes before the source-control command
- if ($arg eq "-q") {
- $verbose = 1;
+ # Hack around 'darcs whatsnew' failing if there are no changes
+ $ignore_failure = 1;
+ scm ($localpath, $scm, $command, @args);
}
- elsif ($arg eq "-s") {
- $verbose = 0;
+ elsif ($command =~ /^commit$/) {
+ # git fails if there is nothing to commit, so ignore failures
+ $ignore_failure = 1;
+ scm ($localpath, $scm, "commit", @args);
}
- elsif ($arg eq "-r") {
- $defaultrepo = shift;
+ elsif ($command =~ /^(?:pus|push)$/) {
+ scm ($localpath, $scm, "push", @args);
}
- elsif ($arg eq "--ignore-failure") {
- $ignore_failure = 1;
+ elsif ($command =~ /^(?:pul|pull)$/) {
+ scm ($localpath, $scm, "pull", @args);
}
- elsif ($arg eq "--complete" || $arg eq "--partial") {
- $get_mode = $arg;
+ elsif ($command =~ /^(?:s|se|sen|send)$/) {
+ if ($scm eq "darcs") {
+ $command = "send";
+ }
+ elsif ($scm eq "git") {
+ $command = "send-email";
+ }
+ else {
+ die "Unknown scm";
+ }
+ scm ($localpath, $scm, $command, @args);
}
- # Use --checked-out if the remote repos are a checked-out tree,
- # rather than the master trees.
- elsif ($arg eq "--checked-out") {
- $checked_out_flag = 1;
+ elsif ($command =~ /^fetch$/) {
+ scm ($localpath, $scm, "fetch", @args);
}
- # --<tag> says we grab the libs tagged 'tag' with
- # 'get'. It has no effect on the other commands.
- elsif ($arg =~ m/^--/) {
- $arg =~ s/^--//;
- $tags{$arg} = 1;
+ elsif ($command =~ /^new$/) {
+ my @scm_args = ("log", "$branch_name..");
+ scm ($localpath, $scm, @scm_args, @args);
}
- else {
- unshift @_, $arg;
- if (grep /^-q$/, @_) {
- $verbose = 1;
+ elsif ($command =~ /^remote$/) {
+ my @scm_args;
+ if ($subcommand eq 'add') {
+ @scm_args = ("remote", "add", $branch_name, $path);
+ } elsif ($subcommand eq 'rm') {
+ @scm_args = ("remote", "rm", $branch_name);
+ } elsif ($subcommand eq 'set-url') {
+ @scm_args = ("remote", "set-url", $branch_name, $path);
}
- last;
+ scm ($localpath, $scm, @scm_args, @args);
+ }
+ elsif ($command =~ /^grep$/) {
+ # Hack around 'git grep' failing if there are no matches
+ $ignore_failure = 1;
+ scm ($localpath, $scm, "grep", @args)
+ unless $scm eq "darcs";
+ }
+ elsif ($command =~ /^reset$/) {
+ scm ($localpath, $scm, "reset", @args)
+ unless $scm eq "darcs";
+ }
+ elsif ($command =~ /^config$/) {
+ scm ($localpath, $scm, "config", @args)
+ unless $scm eq "darcs";
+ }
+ else {
+ die "Unknown command: $command";
}
}
+}
- if ($#_ eq -1) {
+
+sub help()
+{
# Get the built in help
my $help = <<END;
What do you want to do?
Supported commands:
* whatsnew
+ * commit
* push
* pull
* get, with options:
* --<package-tag>
* --complete
* --partial
+ * fetch
* send
+ * new
+ * remote add <branch-name>
+ * remote rm <branch-name>
+ * remote set-url [--push] <branch-name>
+ * grep
+ * reset
+ * config
Available package-tags are:
END
my @available_tags = keys %available_tags;
print "$help@available_tags\n";
exit 1;
+}
+
+sub main {
+ if (! -d ".git" || ! -d "compiler") {
+ die "error: sync-all must be run from the top level of the ghc tree."
+ }
+
+ $tags{"-"} = 1;
+ $tags{"dph"} = 1;
+
+ while ($#_ ne -1) {
+ my $arg = shift;
+ # We handle -q here as well as lower down as we need to skip over it
+ # if it comes before the source-control command
+ if ($arg eq "-q") {
+ $verbose = 1;
+ }
+ elsif ($arg eq "-s") {
+ $verbose = 0;
+ }
+ elsif ($arg eq "-r") {
+ $defaultrepo = shift;
+ }
+ elsif ($arg eq "--ignore-failure") {
+ $ignore_failure = 1;
+ }
+ elsif ($arg eq "--complete" || $arg eq "--partial") {
+ $get_mode = $arg;
+ }
+ # Use --checked-out if the remote repos are a checked-out tree,
+ # rather than the master trees.
+ elsif ($arg eq "--checked-out") {
+ $checked_out_flag = 1;
+ }
+ # --<tag> says we grab the libs tagged 'tag' with
+ # 'get'. It has no effect on the other commands.
+ elsif ($arg =~ m/^--no-(.*)$/) {
+ $tags{$1} = 0;
+ }
+ elsif ($arg =~ m/^--(.*)$/) {
+ $tags{$1} = 1;
+ }
+ else {
+ unshift @_, $arg;
+ if (grep /^-q$/, @_) {
+ $verbose = 1;
+ }
+ last;
+ }
+ }
+
+ if ($#_ eq -1) {
+ help();
}
else {
# Give the command and rest of the arguments to the main loop
--- /dev/null
+
+GHC = ghc
+
+PREL_NAMES = ../../compiler/prelude/PrelNames.lhs
+DS_META = ../../compiler/deSugar/DsMeta.hs
+
+.PHONY: check
+
+check: checkUniques
+ ./checkUniques mkPreludeClassUnique $(PREL_NAMES)
+ ./checkUniques mkPreludeTyConUnique $(PREL_NAMES) $(DS_META)
+ ./checkUniques mkPreludeDataConUnique $(PREL_NAMES)
+ ./checkUniques mkPreludeMiscIdUnique $(PREL_NAMES) $(DS_META)
+
+checkUniques: checkUniques.hs
+ $(GHC) --make $@
--- /dev/null
+{-# LANGUAGE PatternGuards #-}
+
+-- Some things could be improved, e.g.:
+-- * Check that each file given contains at least one instance of the
+-- function
+-- * Check that we are testing all functions
+-- * If a problem is found, give better location information, e.g.
+--   which file the problem is in
+
+module Main (main) where
+
+import Control.Concurrent
+import Control.Exception
+import Control.Monad
+import Control.Monad.State
+import Data.Char
+import Data.Set (Set)
+import qualified Data.Set as Set
+import System.Environment
+import System.Exit
+import System.IO
+import System.Process
+
+main :: IO ()
+main = do args <- getArgs
+ case args of
+ function : files ->
+ doit function files
+
+die :: String -> IO a
+die err = do hPutStrLn stderr err
+ exitFailure
+
+type M = StateT St IO
+
+data St = St {
+ stSeen :: Set Int,
+ stLast :: Maybe Int,
+ stHadAProblem :: Bool
+ }
+
+emptyState :: St
+emptyState = St {
+ stSeen = Set.empty,
+ stLast = Nothing,
+ stHadAProblem = False
+ }
+
+use :: Int -> M ()
+use n = do st <- get
+ let seen = stSeen st
+ put $ st { stSeen = Set.insert n seen, stLast = Just n }
+ if (n `Set.member` seen)
+ then problem ("Duplicate " ++ show n)
+ else case stLast st of
+ Just l
+ | (l > n) ->
+ problem ("Decreasing order for " ++ show l
+ ++ " -> " ++ show n)
+ _ ->
+ return ()
+
+problem :: String -> M ()
+problem str = do lift $ putStrLn str
+ st <- get
+ put $ st { stHadAProblem = True }
+
+doit :: String -> [FilePath] -> IO ()
+doit function files
+ = do (hIn, hOut, hErr, ph) <- runInteractiveProcess
+ "grep" ("-h" : function : files)
+ Nothing Nothing
+ hClose hIn
+ strOut <- hGetContents hOut
+ strErr <- hGetContents hErr
+ forkIO $ do evaluate (length strOut)
+ return ()
+ forkIO $ do evaluate (length strErr)
+ return ()
+ ec <- waitForProcess ph
+ case (ec, strErr) of
+ (ExitSuccess, "") ->
+ check function strOut
+ _ ->
+ error "grep failed"
+
+check :: String -> String -> IO ()
+check function str
+ = do let ls = lines str
+ -- filter out lines that start with whitespace. They're
+ -- from things like:
+ -- import M ( ...,
+ -- ..., <function>, ...
+ ls' = filter (not . all isSpace . take 1) ls
+ ns <- mapM (parseLine function) ls'
+ st <- execStateT (do mapM_ use ns
+ st <- get
+ when (Set.null (stSeen st)) $
+ problem "No values found")
+ emptyState
+ when (stHadAProblem st) exitFailure
+
+parseLine :: String -> String -> IO Int
+parseLine function str
+ = -- words isn't necessarily quite right, e.g. we could have
+ -- "var=" rather than "var =", but it works for the code
+ -- we have
+ case words str of
+ _var : "=" : fun : numStr : rest
+ | fun == function,
+ null rest || "--" == head rest,
+ [(num, "")] <- reads numStr
+ -> return num
+ _ -> error ("Bad line: " ++ show str)
+
dep_ids = map snd (externalPackageDeps lbi)
+ wrappedIncludeDirs <- wrap $ forDeps Installed.includeDirs
+ wrappedLibraryDirs <- wrap $ forDeps Installed.libraryDirs
+
let variablePrefix = directory ++ '_':distdir
let xs = [variablePrefix ++ "_VERSION = " ++ display (pkgVersion (package pd)),
variablePrefix ++ "_MODULES = " ++ unwords (map display modules),
++ languageToFlags (compiler lbi) (defaultLanguage bi)
++ extensionsToFlags (compiler lbi) (usedExtensions bi)
++ programOverrideArgs ghcProg)),
- variablePrefix ++ "_CC_OPTS = " ++ unwords (ccOptions bi),
- variablePrefix ++ "_CPP_OPTS = " ++ unwords (cppOptions bi),
- variablePrefix ++ "_LD_OPTS = " ++ unwords (ldOptions bi),
- variablePrefix ++ "_DEP_INCLUDE_DIRS = " ++ unwords (wrap $ forDeps Installed.includeDirs),
- variablePrefix ++ "_DEP_CC_OPTS = " ++ unwords (forDeps Installed.ccOptions),
- variablePrefix ++ "_DEP_LIB_DIRS = " ++ unwords (wrap $ forDeps Installed.libraryDirs),
- variablePrefix ++ "_DEP_EXTRA_LIBS = " ++ unwords (forDeps Installed.extraLibraries),
- variablePrefix ++ "_DEP_LD_OPTS = " ++ unwords (forDeps Installed.ldOptions),
- variablePrefix ++ "_BUILD_GHCI_LIB = " ++ boolToYesNo (withGHCiLib lbi),
+ variablePrefix ++ "_CC_OPTS = " ++ unwords (ccOptions bi),
+ variablePrefix ++ "_CPP_OPTS = " ++ unwords (cppOptions bi),
+ variablePrefix ++ "_LD_OPTS = " ++ unwords (ldOptions bi),
+ variablePrefix ++ "_DEP_INCLUDE_DIRS_SINGLE_QUOTED = " ++ unwords wrappedIncludeDirs,
+ variablePrefix ++ "_DEP_CC_OPTS = " ++ unwords (forDeps Installed.ccOptions),
+ variablePrefix ++ "_DEP_LIB_DIRS_SINGLE_QUOTED = " ++ unwords wrappedLibraryDirs,
+ variablePrefix ++ "_DEP_EXTRA_LIBS = " ++ unwords (forDeps Installed.extraLibraries),
+ variablePrefix ++ "_DEP_LD_OPTS = " ++ unwords (forDeps Installed.ldOptions),
+ variablePrefix ++ "_BUILD_GHCI_LIB = " ++ boolToYesNo (withGHCiLib lbi),
"",
-- Sometimes we need to modify the automatically-generated package-data.mk
-- bindings in a special way for the GHC build system, so allow that here:
else description pd
where
escape = foldr (\c xs -> if c == '#' then '\\':'#':xs else c:xs) []
- wrap = map (\s -> "\'" ++ s ++ "\'")
+ wrap = mapM wrap1
+ wrap1 s
+ | null s = die ["Wrapping empty value"]
+ | '\'' `elem` s = die ["Single quote in value to be wrapped:", s]
+ -- We want to be able to assume things like <space><quote> is the
+ -- start of a value, so check there are no spaces in confusing
+ -- positions
+ | head s == ' ' = die ["Leading space in value to be wrapped:", s]
+ | last s == ' ' = die ["Trailing space in value to be wrapped:", s]
+ | otherwise = return ("\'" ++ s ++ "\'")
boolToYesNo True = "YES"
boolToYesNo False = "NO"
+
endif
-# depend on ghc-cabal, otherwise we build Cabal twice when building in parallel
+# depend on ghc-cabal, otherwise we build Cabal twice when building in parallel.
+# (ghc-cabal is an order-only dependency, we don't need to rebuild ghc-pkg
+# if ghc-cabal is newer).
# The binary package is not warning-clean, so we need a few -fno-warns here.
-utils/ghc-pkg/dist/build/$(utils/ghc-pkg_dist_PROG)$(exeext): utils/ghc-pkg/Main.hs utils/ghc-pkg/Version.hs $(GHC_CABAL_INPLACE) | bootstrapping/. $$(dir $$@)/.
+utils/ghc-pkg/dist/build/$(utils/ghc-pkg_dist_PROG)$(exeext): utils/ghc-pkg/Main.hs utils/ghc-pkg/Version.hs | bootstrapping/. $$(dir $$@)/. $(GHC_CABAL_INPLACE)
"$(GHC)" $(SRC_HC_OPTS) --make utils/ghc-pkg/Main.hs -o $@ \
-no-user-package-conf \
-Wall -fno-warn-unused-imports \
INSTDIR=`cygpath -m "$INSTDIR"`
fi
- /usr/bin/perl -w boot --required-tag=dph
+ /usr/bin/perl -w boot --validate --required-tag=dph
./configure --prefix="$INSTDIR" $config_args
fi