A slew of SMP-related changes.
- New locking scheme for thunks: we now check whether the thunk
being entered is in our private allocation area, and if so
we don't lock it. Well, that's the upshot. In practice it's
a lot more fiddly than that.
- I/O blocking is handled a bit more sanely now (but still not
properly, methinks)
- deadlock detection is back
- remove old pre-SMP scheduler code
- revamp the timing code. We actually get reasonable-looking
timing info for SMP programs now.
- fix a bug in the garbage collector to do with IND_OLDGENs appearing
on the mutable list of the old generation.
- move BDescr() function from rts/BlockAlloc.h to includes/Block.h.
- move struct generation and struct step into includes/StgStorage.h (sigh)
- add UPD_IND_NOLOCK for updating with an indirection where locking
the black hole is not required.
/* -----------------------------------------------------------------------------
- * $Id: Block.h,v 1.5 1999/03/02 19:44:07 sof Exp $
+ * $Id: Block.h,v 1.6 1999/11/09 15:47:07 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
#define BDESCR_SHIFT 5
#endif
+/* Finding the block descriptor for a given block -------------------------- */
+
+static inline bdescr *Bdescr(StgPtr p)
+{
+ return (bdescr *)
+ ((((W_)p & MBLOCK_MASK & ~BLOCK_MASK) >> (BLOCK_SHIFT-BDESCR_SHIFT))
+ | ((W_)p & ~MBLOCK_MASK)
+ );
+}
+
/* Useful Macros ------------------------------------------------------------ */
/* Offset of first real data block in a megablock */
/* -----------------------------------------------------------------------------
- * $Id: Regs.h,v 1.5 1999/11/02 15:05:51 simonmar Exp $
+ * $Id: Regs.h,v 1.6 1999/11/09 15:47:08 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
StgPtr rHp;
StgPtr rHpLim;
StgTSO *rCurrentTSO;
- bdescr *rNursery;
- bdescr *rCurrentNursery;
+ struct _bdescr *rNursery;
+ struct _bdescr *rCurrentNursery;
#ifdef SMP
struct StgRegTable_ *link;
#endif
/* -----------------------------------------------------------------------------
- * $Id: Rts.h,v 1.8 1999/11/02 15:05:52 simonmar Exp $
+ * $Id: Rts.h,v 1.9 1999/11/09 15:47:08 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
#define RTS_H
#ifndef IN_STG_CODE
-#define NOT_IN_STG_CODE
+#define IN_STG_CODE 0
#endif
#include "Stg.h"
/* -----------------------------------------------------------------------------
- * $Id: Stg.h,v 1.19 1999/11/05 12:28:05 simonmar Exp $
+ * $Id: Stg.h,v 1.20 1999/11/09 15:47:08 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
* with that. If "Stg.h" is included via "Rts.h", we're assumed to
* be in vanilla C.
*/
-#ifdef NOT_IN_STG_CODE
+#if ! IN_STG_CODE
# ifndef NO_REGS
# define NO_REGS /* don't define fixed registers */
# endif
#else
-# define IN_STG_CODE
+# define IN_STG_CODE 1
#endif
/* Configuration */
#include "ClosureTypes.h"
#include "InfoTables.h"
#include "TSO.h"
-#include "Block.h"
/* STG/Optimised-C related stuff */
#include "SMP.h"
#include "MachRegs.h"
#include "Regs.h"
#include "TailCalls.h"
+#include "Block.h"
/* RTS public interface */
#include "RtsAPI.h"
/* -----------------------------------------------------------------------------
- * $Id: StgMacros.h,v 1.16 1999/11/05 12:28:05 simonmar Exp $
+ * $Id: StgMacros.h,v 1.17 1999/11/09 15:47:09 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
#ifdef EAGER_BLACKHOLING
# ifdef SMP
-# define UPD_BH_UPDATABLE(info) \
- TICK_UPD_BH_UPDATABLE(); \
- LOCK_THUNK(info); \
+# define UPD_BH_UPDATABLE(info) \
+ TICK_UPD_BH_UPDATABLE(); \
+ { \
+ bdescr *bd = Bdescr(R1.p); \
+ if (bd->back != (bdescr *)BaseReg) { \
+ if (bd->gen->no >= 1 || bd->step->no >= 1) { \
+ LOCK_THUNK(info); \
+ } else { \
+ EXTFUN_RTS(stg_gc_enter_1_hponly); \
+ JMP_(stg_gc_enter_1_hponly); \
+ } \
+ } \
+ } \
SET_INFO(R1.cl,&BLACKHOLE_info)
-# define UPD_BH_SINGLE_ENTRY(info) \
- TICK_UPD_BH_SINGLE_ENTRY(); \
- LOCK_THUNK(info); \
+# define UPD_BH_SINGLE_ENTRY(info) \
+ TICK_UPD_BH_SINGLE_ENTRY(); \
+ { \
+ bdescr *bd = Bdescr(R1.p); \
+ if (bd->back != (bdescr *)BaseReg) { \
+ if (bd->gen->no >= 1 || bd->step->no >= 1) { \
+ LOCK_THUNK(info); \
+ } else { \
+ EXTFUN_RTS(stg_gc_enter_1_hponly); \
+ JMP_(stg_gc_enter_1_hponly); \
+ } \
+ } \
+ } \
SET_INFO(R1.cl,&BLACKHOLE_info)
# else
# define UPD_BH_UPDATABLE(info) \
/* -----------------------------------------------------------------------------
- * $Id: StgStorage.h,v 1.5 1999/11/02 15:05:53 simonmar Exp $
+ * $Id: StgStorage.h,v 1.6 1999/11/09 15:47:09 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
#ifndef STGSTORAGE_H
#define STGSTORAGE_H
+/* GENERATION GC NOTES
+ *
+ * We support an arbitrary number of generations, with an arbitrary number
+ * of steps per generation. Notes (in no particular order):
+ *
+ * - all generations except the oldest should have two steps. This gives
+ * objects a decent chance to age before being promoted, and in
+ * particular will ensure that we don't end up with too many
+ * thunks being updated in older generations.
+ *
+ * - the oldest generation has one step. There's no point in aging
+ * objects in the oldest generation.
+ *
+ * - generation 0, step 0 (G0S0) is the allocation area. It is given
+ * a fixed set of blocks during initialisation, and these blocks
+ * are never freed.
+ *
+ * - during garbage collection, each step which is an evacuation
+ * destination (i.e. all steps except G0S0) is allocated a to-space.
+ * evacuated objects are allocated into the step's to-space until
+ * GC is finished, when the original step's contents may be freed
+ * and replaced by the to-space.
+ *
+ * - the mutable-list is per-generation (not per-step). G0 doesn't
+ * have one (since every garbage collection collects at least G0).
+ *
+ * - block descriptors contain pointers to both the step and the
+ * generation that the block belongs to, for convenience.
+ *
+ * - static objects are stored in per-generation lists. See GC.c for
+ * details of how we collect CAFs in the generational scheme.
+ *
+ * - large objects are per-step, and are promoted in the same way
+ * as small objects, except that we may allocate large objects into
+ * generation 1 initially.
+ */
+
+typedef struct _step {
+ unsigned int no; /* step number */
+ bdescr *blocks; /* blocks in this step */
+ unsigned int n_blocks; /* number of blocks */
+ struct _step *to; /* where collected objects from this step go */
+ struct _generation *gen; /* generation this step belongs to */
+ bdescr *large_objects; /* large objects (doubly linked) */
+
+ /* temporary use during GC: */
+ StgPtr hp; /* next free locn in to-space */
+ StgPtr hpLim; /* end of current to-space block */
+ bdescr *hp_bd; /* bdescr of current to-space block */
+ bdescr *to_space; /* bdescr of first to-space block */
+ unsigned int to_blocks; /* number of blocks in to-space */
+ bdescr *scan_bd; /* block currently being scanned */
+ StgPtr scan; /* scan pointer in current block */
+ bdescr *new_large_objects; /* large objects collected so far */
+ bdescr *scavenged_large_objects; /* live large objects after GC (dbl link) */
+} step;
+
+typedef struct _generation {
+ unsigned int no; /* generation number */
+ step *steps; /* steps */
+ unsigned int n_steps; /* number of steps */
+ unsigned int max_blocks; /* max blocks in step 0 */
+ StgMutClosure *mut_list; /* mutable objects in this generation (not G0)*/
+ StgMutClosure *mut_once_list; /* objects that point to younger generations */
+
+ /* temporary use during GC: */
+ StgMutClosure *saved_mut_list;
+
+ /* stats information */
+ unsigned int collections;
+ unsigned int failed_promotions;
+} generation;
+
/* -----------------------------------------------------------------------------
Allocation area for compiled code
/* -----------------------------------------------------------------------------
- * $Id: Updates.h,v 1.14 1999/11/02 15:05:53 simonmar Exp $
+ * $Id: Updates.h,v 1.15 1999/11/09 15:47:09 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
if you *really* need an IND use UPD_REAL_IND
*/
#ifdef SMP
+#define UPD_REAL_IND(updclosure, heapptr) \
+ { \
+ const StgInfoTable *info; \
+ if (Bdescr((P_)updclosure)->back != (bdescr *)BaseReg) { \
+ info = LOCK_CLOSURE(updclosure); \
+ } else { \
+ info = updclosure->header.info; \
+ } \
+ AWAKEN_BQ(info,updclosure); \
+ updateWithIndirection(info, \
+ (StgClosure *)updclosure, \
+ (StgClosure *)heapptr); \
+ }
+#else
#define UPD_REAL_IND(updclosure, heapptr) \
{ \
const StgInfoTable *info; \
- info = LOCK_CLOSURE(updclosure); \
- \
- if (info == &BLACKHOLE_BQ_info) { \
- STGCALL1(awakenBlockedQueue, \
- ((StgBlockingQueue *)updclosure)->blocking_queue); \
- } \
- updateWithIndirection((StgClosure *)updclosure, \
- (StgClosure *)heapptr); \
+ info = ((StgClosure *)updclosure)->header.info; \
+ AWAKEN_BQ(info,updclosure); \
+ updateWithIndirection(info, \
+ (StgClosure *)updclosure, \
+ (StgClosure *)heapptr); \
}
-#else
-#define UPD_REAL_IND(updclosure, heapptr) \
- AWAKEN_BQ(updclosure); \
- updateWithIndirection((StgClosure *)updclosure, \
- (StgClosure *)heapptr);
#endif
#if defined(PROFILING) || defined(TICKY_TICKY)
-#define UPD_PERM_IND(updclosure, heapptr) \
- AWAKEN_BQ(updclosure); \
- updateWithPermIndirection((StgClosure *)updclosure, \
- (StgClosure *)heapptr);
+#define UPD_PERM_IND(updclosure, heapptr) \
+ { \
+ const StgInfoTable *info; \
+ info = ((StgClosure *)updclosure)->header.info; \
+ AWAKEN_BQ(info,updclosure); \
+ updateWithPermIndirection(info, \
+ (StgClosure *)updclosure, \
+ (StgClosure *)heapptr); \
+ }
+#endif
+
+#ifdef SMP
+#define UPD_IND_NOLOCK(updclosure, heapptr) \
+ { \
+ const StgInfoTable *info; \
+ info = updclosure->header.info; \
+ AWAKEN_BQ(info,updclosure); \
+ updateWithIndirection(info, \
+ (StgClosure *)updclosure, \
+ (StgClosure *)heapptr); \
+ }
+#else
+#define UPD_IND_NOLOCK(updclosure,heapptr) UPD_IND(updclosure,heapptr)
#endif
/* -----------------------------------------------------------------------------
extern void awakenBlockedQueue(StgTSO *q);
-#define AWAKEN_BQ(closure) \
- if (closure->header.info == &BLACKHOLE_BQ_info) { \
- STGCALL1(awakenBlockedQueue, \
- ((StgBlockingQueue *)closure)->blocking_queue); \
+#define AWAKEN_BQ(info,closure) \
+ if (info == &BLACKHOLE_BQ_info) { \
+ STGCALL1(awakenBlockedQueue, \
+ ((StgBlockingQueue *)closure)->blocking_queue); \
}
/* -----------------------------------------------------------------------------
- * $Id: BlockAlloc.h,v 1.7 1999/11/02 17:08:28 simonmar Exp $
+ * $Id: BlockAlloc.h,v 1.8 1999/11/09 15:46:49 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
extern void freeGroup(bdescr *p);
extern void freeChain(bdescr *p);
-/* Finding the block descriptor for a given block -------------------------- */
-
-static inline bdescr *Bdescr(StgPtr p)
-{
- return (bdescr *)
- ((((W_)p & MBLOCK_MASK & ~BLOCK_MASK) >> (BLOCK_SHIFT-BDESCR_SHIFT))
- | ((W_)p & ~MBLOCK_MASK)
- );
-}
-
/* Round a value to megablocks --------------------------------------------- */
#define WORDS_PER_MBLOCK (BLOCKS_PER_MBLOCK * BLOCK_SIZE_W)
/* -----------------------------------------------------------------------------
- * $Id: ClosureFlags.c,v 1.3 1999/11/02 15:05:56 simonmar Exp $
+ * $Id: ClosureFlags.c,v 1.4 1999/11/09 15:46:49 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
/* IND_STATIC */ ( _STA ),
/* CAF_UNENTERED */ ( 0 ),
/* CAF_ENTERED */ ( 0 ),
-/* BLACKHOLE_BQ */ ( _BTM|_NS| _MUT|_UPT ),
+/* CAF_BLACKHOLE */ ( _BTM|_NS| _MUT|_UPT ),
/* RET_BCO */ ( _BTM ),
/* RET_SMALL */ ( _BTM| _SRT),
/* RET_VEC_SMALL */ ( _BTM| _SRT),
/* CATCH_FRAME */ ( _BTM ),
/* STOP_FRAME */ ( _BTM ),
/* SEQ_FRAME */ ( _BTM ),
-/* BLACKHOLE */ ( _NS| _UPT ),
+/* BLACKHOLE */ ( _NS| _MUT|_UPT ),
/* BLACKHOLE_BQ */ ( _NS| _MUT|_UPT ),
/* SE_BLACKHOLE */ ( _NS| _UPT ),
/* SE_CAF_BLACKHOLE */ ( _NS| _UPT ),
/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.65 1999/11/02 15:05:56 simonmar Exp $
+ * $Id: GC.c,v 1.66 1999/11/09 15:46:49 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
continue;
}
+ /* Happens if a BLACKHOLE_BQ in the old generation is updated:
+ */
+ case IND_OLDGEN:
+ case IND_OLDGEN_PERM:
+ /* Try to pull the indirectee into this generation, so we can
+ * remove the indirection from the mutable list.
+ */
+ evac_gen = gen->no;
+ ((StgIndOldGen *)p)->indirectee =
+ evacuate(((StgIndOldGen *)p)->indirectee);
+ evac_gen = 0;
+
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ p->mut_link = gen->mut_once_list;
+ gen->mut_once_list = p;
+ } else {
+ p->mut_link = NULL;
+ }
+ continue;
+
default:
/* shouldn't have anything else on the mutables list */
- barf("scavenge_mut_list: strange object? %d", (int)(info->type));
+ barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
}
}
}
* sorted out? oh yes: we aren't counting each enter properly
* in this case. See the log somewhere. KSW 1999-04-21
*/
- UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
+ UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */
sp = (P_)frame - 1; /* sp = stuff to slide */
displacement += sizeofW(StgUpdateFrame);
/* -----------------------------------------------------------------------------
- * $Id: HeapStackCheck.hc,v 1.9 1999/08/25 16:11:48 simonmar Exp $
+ * $Id: HeapStackCheck.hc,v 1.10 1999/11/09 15:46:51 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
FE_
}
+EXTFUN(stg_gc_enter_1_hponly)
+{
+ FB_
+ Sp -= 1;
+ Sp[0] = R1.w;
+ R1.i = HeapOverflow;
+ SaveThreadState();
+ CurrentTSO->whatNext = ThreadEnterGHC;
+ JMP_(StgReturn);
+ FE_
+}
+
/*- 2 Regs--------------------------------------------------------------------*/
EXTFUN(stg_gc_enter_2)
/* -----------------------------------------------------------------------------
- * $Id: PrimOps.hc,v 1.33 1999/11/02 15:05:58 simonmar Exp $
+ * $Id: PrimOps.hc,v 1.34 1999/11/09 15:46:53 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
ASSERT(CurrentTSO->why_blocked == NotBlocked);
CurrentTSO->why_blocked = BlockedOnDelay;
+ ACQUIRE_LOCK(&sched_mutex);
+
/* Add on ticks_since_select, since these will be subtracted at
* the next awaitEvent call.
*/
CurrentTSO->block_info.delay = R1.i + ticks_since_select;
- ACQUIRE_LOCK(&sched_mutex);
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
+
RELEASE_LOCK(&sched_mutex);
JMP_(stg_block_noregs);
FE_
/* -----------------------------------------------------------------------------
- * $Id: Schedule.c,v 1.30 1999/11/08 15:30:39 sewardj Exp $
+ * $Id: Schedule.c,v 1.31 1999/11/09 15:46:54 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
*/
static StgTSO *suspended_ccalling_threads;
-#ifndef SMP
-static rtsBool in_ccall_gc;
-#endif
-
static void GetRoots(void);
static StgTSO *threadStackOverflow(StgTSO *tso);
while (1) {
- /* Check whether any waiting threads need to be woken up.
- * If the run queue is empty, we can wait indefinitely for
- * something to happen.
+ /* Check whether any waiting threads need to be woken up. If the
+ * run queue is empty, and there are no other tasks running, we
+ * can wait indefinitely for something to happen.
+ * ToDo: what if another client comes along & requests another
+ * main thread?
*/
if (blocked_queue_hd != END_TSO_QUEUE) {
- awaitEvent(run_queue_hd == END_TSO_QUEUE);
+ awaitEvent(
+ (run_queue_hd == END_TSO_QUEUE)
+#ifdef SMP
+ && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
+#endif
+ );
}
/* check for signals each time around the scheduler */
}
#endif
+ /* Detect deadlock: when we have no threads to run, there are
+ * no threads waiting on I/O or sleeping, and all the other
+ * tasks are waiting for work, we must have a deadlock. Inform
+ * all the main threads.
+ */
+#ifdef SMP
+ if (blocked_queue_hd == END_TSO_QUEUE
+ && run_queue_hd == END_TSO_QUEUE
+ && (n_free_capabilities == RtsFlags.ConcFlags.nNodes)
+ ) {
+ StgMainThread *m;
+ for (m = main_threads; m != NULL; m = m->link) {
+ m->ret = NULL;
+ m->stat = Deadlock;
+ pthread_cond_broadcast(&m->wakeup);
+ }
+ main_threads = NULL;
+ }
+#else /* ! SMP */
+ if (blocked_queue_hd == END_TSO_QUEUE
+ && run_queue_hd == END_TSO_QUEUE) {
+ StgMainThread *m = main_threads;
+ m->ret = NULL;
+ m->stat = Deadlock;
+ main_threads = m->link;
+ return;
+ }
+#endif
+
#ifdef SMP
/* If there's a GC pending, don't do anything until it has
* completed.
/* set the context_switch flag
*/
- if (run_queue_hd == END_TSO_QUEUE)
+ if (run_queue_hd == END_TSO_QUEUE)
context_switch = 0;
else
context_switch = 1;
-
+
RELEASE_LOCK(&sched_mutex);
#ifdef SMP
static void
term_handler(int sig STG_UNUSED)
{
- nat i;
- pthread_t me = pthread_self();
-
- for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
- if (task_ids[i].id == me) {
- task_ids[i].mut_time = usertime() - task_ids[i].gc_time;
- if (task_ids[i].mut_time < 0.0) {
- task_ids[i].mut_time = 0.0;
- }
- }
- }
+ stat_workerStop();
ACQUIRE_LOCK(&term_mutex);
await_death--;
RELEASE_LOCK(&term_mutex);
barf("startTasks: Can't create new Posix thread");
}
task_ids[i].id = tid;
+ task_ids[i].mut_time = 0.0;
+ task_ids[i].mut_etime = 0.0;
+ task_ids[i].gc_time = 0.0;
+ task_ids[i].gc_etime = 0.0;
+ task_ids[i].elapsedtimestart = elapsedtime();
IF_DEBUG(scheduler,fprintf(stderr,"schedule: Started task: %ld\n",tid););
}
}
m->link = main_threads;
main_threads = m;
+ IF_DEBUG(scheduler, fprintf(stderr, "schedule: new main thread (%d)\n",
+ m->tso->id));
+
#ifdef SMP
- pthread_cond_wait(&m->wakeup, &sched_mutex);
+ do {
+ pthread_cond_wait(&m->wakeup, &sched_mutex);
+ } while (m->stat == NoStatus);
#else
schedule();
+ ASSERT(m->stat != NoStatus);
#endif
stat = m->stat;
- ASSERT(stat != NoStatus);
#ifdef SMP
pthread_cond_destroy(&m->wakeup);
return stat;
}
-
-#if 0
-SchedulerStatus schedule(StgTSO *main, StgClosure **ret_val)
-{
- StgTSO *t;
- StgThreadReturnCode ret;
- StgTSO **MainTSO;
- rtsBool in_ccall_gc;
-
- /* Return value is NULL by default, it is only filled in if the
- * main thread completes successfully.
- */
- if (ret_val) { *ret_val = NULL; }
-
- /* Save away a pointer to the main thread so that we can keep track
- * of it should a garbage collection happen. We keep a stack of
- * main threads in order to support scheduler re-entry. We can't
- * use the normal TSO linkage for this stack, because the main TSO
- * may need to be linked onto other queues.
- */
- main_threads[next_main_thread] = main;
- MainTSO = &main_threads[next_main_thread];
- next_main_thread++;
- IF_DEBUG(scheduler,
- fprintf(stderr, "Scheduler entered: nesting = %d\n",
- next_main_thread););
-
- /* Are we being re-entered?
- */
- if (CurrentTSO != NULL) {
- /* This happens when a _ccall_gc from Haskell ends up re-entering
- * the scheduler.
- *
- * Block the current thread (put it on the ccalling_queue) and
- * continue executing. The calling thread better have stashed
- * away its state properly and left its stack with a proper stack
- * frame on the top.
- */
- threadPaused(CurrentTSO);
- CurrentTSO->link = ccalling_threads;
- ccalling_threads = CurrentTSO;
- in_ccall_gc = rtsTrue;
- IF_DEBUG(scheduler,
- fprintf(stderr, "Re-entry, thread %d did a _ccall_gc\n",
- CurrentTSO->id););
- } else {
- in_ccall_gc = rtsFalse;
- }
-
- /* Take a thread from the run queue.
- */
- t = POP_RUN_QUEUE();
-
- while (t != END_TSO_QUEUE) {
- CurrentTSO = t;
-
- /* If we have more threads on the run queue, set up a context
- * switch at some point in the future.
- */
- if (run_queue_hd != END_TSO_QUEUE || blocked_queue_hd != END_TSO_QUEUE) {
- context_switch = 1;
- } else {
- context_switch = 0;
- }
- IF_DEBUG(scheduler, belch("Running thread %ld...\n", t->id));
-
- /* Be friendly to the storage manager: we're about to *run* this
- * thread, so we better make sure the TSO is mutable.
- */
- if (t->mut_link == NULL) {
- recordMutable((StgMutClosure *)t);
- }
-
- /* Run the current thread */
- switch (t->whatNext) {
- case ThreadKilled:
- case ThreadComplete:
- /* thread already killed. Drop it and carry on. */
- goto next_thread;
- case ThreadEnterGHC:
- ret = StgRun((StgFunPtr) stg_enterStackTop);
- break;
- case ThreadRunGHC:
- ret = StgRun((StgFunPtr) stg_returnToStackTop);
- break;
- case ThreadEnterHugs:
-#ifdef INTERPRETER
- {
- IF_DEBUG(scheduler,belch("entering Hugs"));
- LoadThreadState();
- /* CHECK_SENSIBLE_REGS(); */
- {
- StgClosure* c = (StgClosure *)Sp[0];
- Sp += 1;
- ret = enter(c);
- }
- SaveThreadState();
- break;
- }
-#else
- barf("Panic: entered a BCO but no bytecode interpreter in this build");
-#endif
- default:
- barf("schedule: invalid whatNext field");
- }
-
- /* We may have garbage collected while running the thread
- * (eg. something nefarious like _ccall_GC_ performGC), and hence
- * CurrentTSO may have moved. Update t to reflect this.
- */
- t = CurrentTSO;
- CurrentTSO = NULL;
-
- /* Costs for the scheduler are assigned to CCS_SYSTEM */
-#ifdef PROFILING
- CCCS = CCS_SYSTEM;
-#endif
-
- switch (ret) {
-
- case HeapOverflow:
- IF_DEBUG(scheduler,belch("Thread %ld stopped: HeapOverflow\n", t->id));
- threadPaused(t);
- PUSH_ON_RUN_QUEUE(t);
- GarbageCollect(GetRoots);
- break;
-
- case StackOverflow:
- IF_DEBUG(scheduler,belch("Thread %ld stopped, StackOverflow\n", t->id));
- {
- nat i;
- /* enlarge the stack */
- StgTSO *new_t = threadStackOverflow(t);
-
- /* This TSO has moved, so update any pointers to it from the
- * main thread stack. It better not be on any other queues...
- * (it shouldn't be)
- */
- for (i = 0; i < next_main_thread; i++) {
- if (main_threads[i] == t) {
- main_threads[i] = new_t;
- }
- }
- t = new_t;
- }
- PUSH_ON_RUN_QUEUE(t);
- break;
-
- case ThreadYielding:
- IF_DEBUG(scheduler,
- if (t->whatNext == ThreadEnterHugs) {
- /* ToDo: or maybe a timer expired when we were in Hugs?
- * or maybe someone hit ctrl-C
- */
- belch("Thread %ld stopped to switch to Hugs\n", t->id);
- } else {
- belch("Thread %ld stopped, timer expired\n", t->id);
- }
- );
- threadPaused(t);
- if (interrupted) {
- IF_DEBUG(scheduler,belch("Scheduler interrupted - returning"));
- deleteThread(t);
- while (run_queue_hd != END_TSO_QUEUE) {
- run_queue_hd = t->link;
- deleteThread(t);
- }
- run_queue_tl = END_TSO_QUEUE;
- /* ToDo: should I do the same with blocked queues? */
- return Interrupted;
- }
-
- /* Put the thread back on the run queue, at the end.
- * t->link is already set to END_TSO_QUEUE.
- */
- APPEND_TO_RUN_QUEUE(t);
- break;
-
- case ThreadBlocked:
- IF_DEBUG(scheduler,
- fprintf(stderr, "Thread %d stopped, ", t->id);
- printThreadBlockage(t);
- fprintf(stderr, "\n"));
- threadPaused(t);
- /* assume the thread has put itself on some blocked queue
- * somewhere.
- */
- break;
-
- case ThreadFinished:
- IF_DEBUG(scheduler,fprintf(stderr,"thread %ld finished\n", t->id));
- t->whatNext = ThreadComplete;
- break;
-
- default:
- barf("schedule: invalid thread return code");
- }
-
- /* check for signals each time around the scheduler */
-#ifndef __MINGW32__
- if (signals_pending()) {
- start_signal_handlers();
- }
-#endif
- /* If our main thread has finished or been killed, return.
- * If we were re-entered as a result of a _ccall_gc, then
- * pop the blocked thread off the ccalling_threads stack back
- * into CurrentTSO.
- */
- if ((*MainTSO)->whatNext == ThreadComplete
- || (*MainTSO)->whatNext == ThreadKilled) {
- next_main_thread--;
- if (in_ccall_gc) {
- CurrentTSO = ccalling_threads;
- ccalling_threads = ccalling_threads->link;
- /* remember to stub the link field of CurrentTSO */
- CurrentTSO->link = END_TSO_QUEUE;
- }
- if ((*MainTSO)->whatNext == ThreadComplete) {
- /* we finished successfully, fill in the return value */
- if (ret_val) { *ret_val = (StgClosure *)(*MainTSO)->sp[0]; };
- return Success;
- } else {
- return Killed;
- }
- }
-
- next_thread:
- /* Checked whether any waiting threads need to be woken up.
- * If the run queue is empty, we can wait indefinitely for
- * something to happen.
- */
- if (blocked_queue_hd != END_TSO_QUEUE) {
- awaitEvent(run_queue_hd == END_TSO_QUEUE);
- }
-
- t = POP_RUN_QUEUE();
- }
-
- /* If we got to here, then we ran out of threads to run, but the
- * main thread hasn't finished yet. It must be blocked on an MVar
- * or a black hole somewhere, so we return deadlock.
- */
- return Deadlock;
-}
-#endif
-
/* -----------------------------------------------------------------------------
Debugging: why is a thread blocked
-------------------------------------------------------------------------- */
* this will also wake up any threads currently
* waiting on the result.
*/
- UPD_IND(su->updatee,ap); /* revert the black hole */
+ UPD_IND_NOLOCK(su->updatee,ap); /* revert the black hole */
su = su->link;
sp += sizeofW(StgUpdateFrame) -1;
sp[0] = (W_)ap; /* push onto stack */
/* -----------------------------------------------------------------------------
- * $Id: Schedule.h,v 1.9 1999/11/02 15:06:02 simonmar Exp $
+ * $Id: Schedule.h,v 1.10 1999/11/09 15:46:55 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
#ifdef SMP
typedef struct {
pthread_t id;
+ double elapsedtimestart;
double mut_time;
+ double mut_etime;
double gc_time;
double gc_etime;
} task_info;
/* -----------------------------------------------------------------------------
- * $Id: Signals.c,v 1.9 1999/11/02 15:06:02 simonmar Exp $
+ * $Id: Signals.c,v 1.10 1999/11/09 15:46:57 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
#endif
static void
-shutdown_handler(int sig)
+shutdown_handler(int sig STG_UNUSED)
{
#ifdef SMP
/* if I'm a worker thread, send this signal to the guy who
/* -----------------------------------------------------------------------------
- * $Id: Stats.c,v 1.16 1999/11/02 17:19:16 simonmar Exp $
+ * $Id: Stats.c,v 1.17 1999/11/09 15:46:57 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
static double InitUserTime = 0.0;
static double InitElapsedTime = 0.0;
+static double InitElapsedStamp = 0.0;
static double MutUserTime = 0.0;
static double MutElapsedTime = 0.0;
+static double MutElapsedStamp = 0.0;
static double ExitUserTime = 0.0;
static double ExitElapsedTime = 0.0;
FT2longlong(kT,kernelTime);
FT2longlong(uT,userTime);
- return (((StgDouble)(uT + kT))/TicksPerSecond - ElapsedTimeStart);
+ return (((StgDouble)(uT + kT))/TicksPerSecond);
}
#else
double
elapsedtime(void)
{
+
# if ! (defined(HAVE_TIMES) || defined(HAVE_FTIME))
/* We will #ifdef around the fprintf for machines
we *know* are unsupported. (WDP 94/05)
struct tms t;
clock_t r = times(&t);
- return (((double)r)/TicksPerSecond - ElapsedTimeStart);
+ return (((double)r)/TicksPerSecond);
# else /* HAVE_FTIME */
struct timeb t;
ftime(&t);
- return (fabs(t.time + 1e-3*t.millitm - ElapsedTimeStart));
+ return (fabs(t.time + 1e-3*t.millitm));
# endif /* HAVE_FTIME */
# endif /* not stumped */
end_init(void)
{
InitUserTime = usertime();
- InitElapsedTime = elapsedtime();
+ InitElapsedStamp = elapsedtime();
+ InitElapsedTime = InitElapsedStamp - ElapsedTimeStart;
if (InitElapsedTime < 0.0) {
InitElapsedTime = 0.0;
}
}
+/* -----------------------------------------------------------------------------
+ stat_startExit and stat_endExit
+
+ These two measure the time taken in shutdownHaskell().
+ -------------------------------------------------------------------------- */
+
void
stat_startExit(void)
{
- MutElapsedTime = elapsedtime() - GCe_tot_time - InitElapsedTime;
+ MutElapsedStamp = elapsedtime();
+ MutElapsedTime = MutElapsedStamp - GCe_tot_time - InitElapsedStamp;
if (MutElapsedTime < 0) { MutElapsedTime = 0; } /* sometimes -0.00 */
/* for SMP, we don't know the mutator time yet, we have to inspect
#else
ExitUserTime = usertime() - MutUserTime - GC_tot_time - InitUserTime;
#endif
- ExitElapsedTime = elapsedtime() - MutElapsedTime;
+ ExitElapsedTime = elapsedtime() - MutElapsedStamp;
if (ExitUserTime < 0.0) {
ExitUserTime = 0.0;
}
GC_tot_copied += (ullong) copied;
GC_tot_alloc += (ullong) alloc;
- GC_tot_time += time-GC_start_time;
- GCe_tot_time += etime-GCe_start_time;
+ GC_tot_time += gc_time;
+ GCe_tot_time += gc_etime;
#ifdef SMP
{
}
/* -----------------------------------------------------------------------------
+ stat_workerStop
+
+ Called under SMP when a worker thread finishes. We drop the timing
+ stats for this thread into the task_ids struct for that thread.
+ -------------------------------------------------------------------------- */
+
+#ifdef SMP
+void
+stat_workerStop(void)
+{
+ nat i;
+ pthread_t me = pthread_self();
+
+ for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
+ if (task_ids[i].id == me) {
+ task_ids[i].mut_time = usertime() - task_ids[i].gc_time;
+ task_ids[i].mut_etime = elapsedtime()
+ - GCe_tot_time
+ - task_ids[i].elapsedtimestart;
+ if (task_ids[i].mut_time < 0.0) { task_ids[i].mut_time = 0.0; }
+ if (task_ids[i].mut_etime < 0.0) { task_ids[i].mut_etime = 0.0; }
+ }
+ }
+}
+#endif
+
+/* -----------------------------------------------------------------------------
Called at the end of execution
NOTE: number of allocations is not entirely accurate: it doesn't
if (sf != NULL){
char temp[BIG_STRING_LEN];
double time = usertime();
- double etime = elapsedtime();
+ double etime = elapsedtime() - ElapsedTimeStart;
/* avoid divide by zero if time is measured as 0.00 seconds -- SDM */
if (time == 0.0) time = 0.0001;
MutUserTime = 0.0;
for (i = 0; i < RtsFlags.ConcFlags.nNodes; i++) {
MutUserTime += task_ids[i].mut_time;
- fprintf(sf, " Task %2d: MUT time: %6.2fs, GC time: %6.2fs\n",
- i, task_ids[i].mut_time, task_ids[i].gc_time);
+ fprintf(sf, " Task %2d: MUT time: %6.2fs (%6.2fs elapsed)\n"
+ " GC time: %6.2fs (%6.2fs elapsed)\n\n",
+ i,
+ task_ids[i].mut_time, task_ids[i].mut_etime,
+ task_ids[i].gc_time, task_ids[i].gc_etime);
}
}
time = MutUserTime + GC_tot_time + InitUserTime + ExitUserTime;
if (MutUserTime < 0) { MutUserTime = 0; }
- fprintf(sf,"\n");
#endif
fprintf(sf, " INIT time %6.2fs (%6.2fs elapsed)\n",
/* -----------------------------------------------------------------------------
- * $Id: Stats.h,v 1.7 1999/11/02 17:19:17 simonmar Exp $
+ * $Id: Stats.h,v 1.8 1999/11/09 15:46:58 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
extern StgDouble usertime(void);
extern void end_init(void);
extern void stat_exit(int alloc);
+extern void stat_workerStop(void);
extern void stat_startGC(void);
extern void stat_endGC(lnat alloc, lnat collect, lnat live,
/* -----------------------------------------------------------------------------
- * $Id: StgStdThunks.hc,v 1.9 1999/11/02 15:06:04 simonmar Exp $
+ * $Id: StgStdThunks.hc,v 1.10 1999/11/09 15:46:58 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
*
* ---------------------------------------------------------------------------*/
-#include "Stg.h"
+#include "Rts.h"
+#include "StoragePriv.h"
+#include "HeapStackCheck.h"
/* -----------------------------------------------------------------------------
The code for a thunk that simply extracts a field from a
/* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.20 1999/11/02 15:06:04 simonmar Exp $
+ * $Id: Storage.c,v 1.21 1999/11/09 15:46:59 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
#ifdef SMP
{
Capability *cap;
-
+ bdescr *bd;
+
g0s0->blocks = NULL;
g0s0->n_blocks = 0;
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
cap->rCurrentNursery = cap->rNursery;
+ for (bd = cap->rNursery; bd != NULL; bd = bd->link) {
+ bd->back = (bdescr *)cap;
+ }
}
+ /* Set the back links to be equal to the Capability,
+ * so we can do slightly better informed locking.
+ */
}
#else /* SMP */
nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
#ifdef SMP
Capability *cap;
- /* All tasks must be stopped */
- ASSERT(n_free_capabilities == RtsFlags.ConcFlags.nNodes);
+ /* All tasks must be stopped. Can't assert that all the
+ capabilities are owned by the scheduler, though: one or more
+ tasks might have been stopped while they were running (non-main)
+ threads. */
+ /* ASSERT(n_free_capabilities == RtsFlags.ConcFlags.nNodes); */
allocated =
n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W
/* -----------------------------------------------------------------------------
- * $Id: Storage.h,v 1.10 1999/11/02 15:06:05 simonmar Exp $
+ * $Id: Storage.h,v 1.11 1999/11/09 15:46:59 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
}
}
-#define updateWithIndirection(p1, p2) \
- { \
- bdescr *bd; \
- \
- bd = Bdescr((P_)p1); \
- if (bd->gen->no == 0) { \
- ((StgInd *)p1)->indirectee = p2; \
- SET_INFO(p1,&IND_info); \
- TICK_UPD_NEW_IND(); \
- } else { \
- ((StgIndOldGen *)p1)->indirectee = p2; \
- ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; \
- bd->gen->mut_once_list = (StgMutClosure *)p1; \
- SET_INFO(p1,&IND_OLDGEN_info); \
- TICK_UPD_OLD_IND(); \
- } \
+/* updateWithIndirection(info, p1, p2)
+ *
+ * Overwrite closure p1 with an indirection to p2.
+ *
+ *   info - the info pointer p1 carried *before* the update; used only
+ *          to recognise BLACKHOLE_BQ closures (see below)
+ *   p1   - the closure being updated
+ *   p2   - the closure to indirect to
+ *
+ * Generation 0: a plain IND is enough (no old-to-new pointer to track).
+ * Older generations: use IND_OLDGEN and link p1 onto the generation's
+ * mut_once_list so the GC can find the old-to-new pointer -- EXCEPT
+ * when p1 was a BLACKHOLE_BQ.  NOTE(review): presumably a BLACKHOLE_BQ
+ * is already on a mutable list and re-adding it would duplicate the
+ * entry (cf. this change's log note about IND_OLDGENs appearing on the
+ * old generation's mutable list) -- confirm against the GC code.
+ */
+#define updateWithIndirection(info, p1, p2) \
+ { \
+ bdescr *bd; \
+ \
+ bd = Bdescr((P_)p1); \
+ if (bd->gen->no == 0) { \
+ ((StgInd *)p1)->indirectee = p2; \
+ SET_INFO(p1,&IND_info); \
+ TICK_UPD_NEW_IND(); \
+ } else { \
+ ((StgIndOldGen *)p1)->indirectee = p2; \
+ if (info != &BLACKHOLE_BQ_info) { \
+ ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; \
+ bd->gen->mut_once_list = (StgMutClosure *)p1; \
+ } \
+ SET_INFO(p1,&IND_OLDGEN_info); \
+ TICK_UPD_OLD_IND(); \
+ } \
+ }
#if defined(TICKY_TICKY) || defined(PROFILING)
static inline void
-updateWithPermIndirection(StgClosure *p1, StgClosure *p2)
+updateWithPermIndirection(info, StgClosure *p1, StgClosure *p2)
{
bdescr *bd;
TICK_UPD_NEW_PERM_IND(p1);
} else {
((StgIndOldGen *)p1)->indirectee = p2;
- ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list;
- bd->gen->mut_once_list = (StgMutClosure *)p1;
+ if (info != &BLACKHOLE_BQ_info) {
+ ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list;
+ bd->gen->mut_once_list = (StgMutClosure *)p1;
+ }
SET_INFO(p1,&IND_OLDGEN_PERM_info);
TICK_UPD_OLD_PERM_IND();
}
/* -----------------------------------------------------------------------------
- * $Id: StoragePriv.h,v 1.9 1999/11/02 15:06:05 simonmar Exp $
+ * $Id: StoragePriv.h,v 1.10 1999/11/09 15:47:00 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
#ifndef STORAGEPRIV_H
#define STORAGEPRIV_H
-/* GENERATION GC NOTES
- *
- * We support an arbitrary number of generations, with an arbitrary number
- * of steps per generation. Notes (in no particular order):
- *
- * - all generations except the oldest should have two steps. This gives
- * objects a decent chance to age before being promoted, and in
- * particular will ensure that we don't end up with too many
- * thunks being updated in older generations.
- *
- * - the oldest generation has one step. There's no point in aging
- * objects in the oldest generation.
- *
- * - generation 0, step 0 (G0S0) is the allocation area. It is given
- * a fixed set of blocks during initialisation, and these blocks
- * are never freed.
- *
- * - during garbage collection, each step which is an evacuation
- * destination (i.e. all steps except G0S0) is allocated a to-space.
- * evacuated objects are allocated into the step's to-space until
- * GC is finished, when the original step's contents may be freed
- * and replaced by the to-space.
- *
- * - the mutable-list is per-generation (not per-step). G0 doesn't
- * have one (since every garbage collection collects at least G0).
- *
- * - block descriptors contain pointers to both the step and the
- * generation that the block belongs to, for convenience.
- *
- * - static objects are stored in per-generation lists. See GC.c for
- * details of how we collect CAFs in the generational scheme.
- *
- * - large objects are per-step, and are promoted in the same way
- * as small objects, except that we may allocate large objects into
- * generation 1 initially.
- */
-
-typedef struct _step {
- nat no; /* step number */
- bdescr *blocks; /* blocks in this step */
- nat n_blocks; /* number of blocks */
- struct _step *to; /* where collected objects from this step go */
- struct _generation *gen; /* generation this step belongs to */
- bdescr *large_objects; /* large objects (doubly linked) */
-
- /* temporary use during GC: */
- StgPtr hp; /* next free locn in to-space */
- StgPtr hpLim; /* end of current to-space block */
- bdescr *hp_bd; /* bdescr of current to-space block */
- bdescr *to_space; /* bdescr of first to-space block */
- nat to_blocks; /* number of blocks in to-space */
- bdescr *scan_bd; /* block currently being scanned */
- StgPtr scan; /* scan pointer in current block */
- bdescr *new_large_objects; /* large objects collected so far */
- bdescr *scavenged_large_objects; /* live large objects after GC (dbl link) */
-} step;
-
-typedef struct _generation {
- nat no; /* generation number */
- step *steps; /* steps */
- nat n_steps; /* number of steps */
- nat max_blocks; /* max blocks in step 0 */
- StgMutClosure *mut_list; /* mutable objects in this generation (not G0)*/
- StgMutClosure *mut_once_list; /* objects that point to younger generations */
-
- /* temporary use during GC: */
- StgMutClosure *saved_mut_list;
-
- /* stats information */
- nat collections;
- nat failed_promotions;
-} generation;
-
#define END_OF_STATIC_LIST stgCast(StgClosure*,1)
extern generation *generations;
/* -----------------------------------------------------------------------------
- * $Id: Updates.hc,v 1.20 1999/11/02 15:06:05 simonmar Exp $
+ * $Id: Updates.hc,v 1.21 1999/11/09 15:47:00 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
/* Tick - it must be a con, all the paps are handled \
* in stg_upd_PAP and PAP_entry below \
*/ \
- TICK_UPD_CON_IN_NEW(sizeW_fromITBL(get_itbl(Su))); \
+ TICK_UPD_CON_IN_NEW(sizeW_fromITBL(get_itbl(Su))); \
\
- /* update the updatee with an indirection to the return value */\
- UPD_IND(Su,R1.p); \
+ if (Bdescr(updatee)->back != BaseReg) { \
+ LOCK_CLOSURE(Su); \
+ } \
+ \
+ UPD_IND_NOLOCK(Su,R1.p); \
+ \
+ /* (the UPD_IND_NOLOCK above has just updated the updatee \
+ * with an indirection to the return value) \
+ */ \
\
/* reset Su to the next update frame */ \
Su = ((StgUpdateFrame *)Sp)->link; \
*/ \
TICK_UPD_CON_IN_NEW(sizeW_fromITBL(get_itbl(updatee))); \
\
- /* update the updatee with an indirection to the return value */\
- UPD_IND(updatee,R1.p); \
+ UPD_IND(updatee, R1.cl); \
\
/* reset Su to the next update frame */ \
Su = ((StgUpdateFrame *)Sp)->link; \