X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FSchedule.c;h=b0271153a63fd53b328f8c66ea23e94412966761;hb=dbef766ce79e37a74468a07a93b15ba1f06fe8f8;hp=369c8c603a522b6dc46e8510c69b9cdbf9097151;hpb=32a464f1b66cd025f643dc15dc6851cb72bfb014;p=ghc-hetmet.git diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c index 369c8c6..b027115 100644 --- a/ghc/rts/Schedule.c +++ b/ghc/rts/Schedule.c @@ -1,5 +1,5 @@ /* --------------------------------------------------------------------------- - * $Id: Schedule.c,v 1.96 2001/06/04 16:26:54 simonmar Exp $ + * $Id: Schedule.c,v 1.108 2001/11/26 16:54:22 simonmar Exp $ * * (c) The GHC Team, 1998-2000 * @@ -74,6 +74,7 @@ //@node Includes, Variables and Data structures, Main scheduling code, Main scheduling code //@subsection Includes +#include "PosixSource.h" #include "Rts.h" #include "SchedAPI.h" #include "RtsUtils.h" @@ -81,7 +82,6 @@ #include "Storage.h" #include "StgRun.h" #include "StgStartup.h" -#include "GC.h" #include "Hooks.h" #include "Schedule.h" #include "StgMiscClosures.h" @@ -95,6 +95,11 @@ #include "Stats.h" #include "Itimer.h" #include "Prelude.h" +#ifdef PROFILING +#include "Proftimer.h" +#include "ProfHeap.h" +#include "RetainerProfile.h" +#endif #if defined(GRAN) || defined(PAR) # include "GranSimRts.h" # include "GranSim.h" @@ -181,7 +186,6 @@ StgTSO *all_threads; */ static StgTSO *suspended_ccalling_threads; -static void GetRoots(void); static StgTSO *threadStackOverflow(StgTSO *tso); /* KH: The following two flags are shared memory locations. There is no need @@ -225,13 +229,10 @@ StgThreadID next_thread_id = 1; * Locks required: sched_mutex. */ #ifdef SMP -//@cindex free_capabilities -//@cindex n_free_capabilities Capability *free_capabilities; /* Available capabilities for running threads */ nat n_free_capabilities; /* total number of available capabilities */ #else -//@cindex MainRegTable -Capability MainRegTable; /* for non-SMP, we have one global capability */ +Capability MainCapability; /* for non-SMP, we have one global capability */ #endif #if defined(GRAN) @@ -445,6 +446,7 @@ schedule( void ) pthread_cond_broadcast(&m->wakeup); break; case ThreadKilled: + if (m->ret) *(m->ret) = NULL; *prev = m->link; if (was_interrupted) { m->stat = Interrupted; @@ -459,7 +461,8 @@ schedule( void ) } } -#else +#else // not SMP + # if defined(PAR) /* in GUM do this only on the Main PE */ if (IAmMainThread) @@ -477,6 +480,7 @@ schedule( void ) m->stat = Success; return; } else { + if (m->ret) { *(m->ret) = NULL; }; if (was_interrupted) { m->stat = Interrupted; } else { @@ -525,7 +529,14 @@ schedule( void ) pthread_cond_signal(&thread_ready_cond); } } -#endif /* SMP */ +#endif // SMP + + /* check for signals each time around the scheduler */ +#ifndef mingw32_TARGET_OS + if (signals_pending()) { + startSignalHandlers(); + } +#endif /* Check whether any waiting threads need to be woken up. If the * run queue is empty, and there are no other tasks running, we @@ -544,13 +555,6 @@ schedule( void ) /* we can be interrupted while waiting for I/O... 
*/ if (interrupted) continue; - /* check for signals each time around the scheduler */ -#ifndef mingw32_TARGET_OS - if (signals_pending()) { - start_signal_handlers(); - } -#endif - /* * Detect deadlock: when we have no threads to run, there are no * threads waiting on I/O or sleeping, and all the other tasks are @@ -582,12 +586,14 @@ schedule( void ) StgMainThread *m = main_threads; #ifdef SMP for (; m != NULL; m = m->link) { + deleteThread(m->tso); m->ret = NULL; m->stat = Deadlock; pthread_cond_broadcast(&m->wakeup); } main_threads = NULL; #else + deleteThread(m->tso); m->ret = NULL; m->stat = Deadlock; main_threads = m->link; @@ -898,6 +904,9 @@ schedule( void ) */ ASSERT(run_queue_hd != END_TSO_QUEUE); t = POP_RUN_QUEUE(); + + // Sanity check the thread we're about to run. This can be + // expensive if there is lots of thread switching going on... IF_DEBUG(sanity,checkTSO(t)); #endif @@ -909,19 +918,23 @@ schedule( void ) free_capabilities = cap->link; n_free_capabilities--; #else - cap = &MainRegTable; + cap = &MainCapability; #endif - - cap->rCurrentTSO = t; + + cap->r.rCurrentTSO = t; /* context switches are now initiated by the timer signal, unless * the user specified "context switch as often as possible", with * +RTS -C0 */ - if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0 - && (run_queue_hd != END_TSO_QUEUE - || blocked_queue_hd != END_TSO_QUEUE - || sleeping_queue != END_TSO_QUEUE)) + if ( +#ifdef PROFILING + RtsFlags.ProfFlags.profileInterval == 0 || +#endif + (RtsFlags.ConcFlags.ctxtSwitchTicks == 0 + && (run_queue_hd != END_TSO_QUEUE + || blocked_queue_hd != END_TSO_QUEUE + || sleeping_queue != END_TSO_QUEUE))) context_switch = 1; else context_switch = 0; @@ -931,20 +944,24 @@ schedule( void ) IF_DEBUG(scheduler, sched_belch("-->> Running TSO %ld (%p) %s ...", t->id, t, whatNext_strs[t->what_next])); +#ifdef PROFILING + startHeapProfTimer(); +#endif + /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ /* Run the current thread */ - switch (cap->rCurrentTSO->what_next) { + switch (cap->r.rCurrentTSO->what_next) { case ThreadKilled: case ThreadComplete: /* Thread already finished, return to scheduler. */ ret = ThreadFinished; break; case ThreadEnterGHC: - ret = StgRun((StgFunPtr) stg_enterStackTop, cap); + ret = StgRun((StgFunPtr) stg_enterStackTop, &cap->r); break; case ThreadRunGHC: - ret = StgRun((StgFunPtr) stg_returnToStackTop, cap); + ret = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r); break; case ThreadEnterInterp: ret = interpretBCO(cap); @@ -956,6 +973,7 @@ schedule( void ) /* Costs for the scheduler are assigned to CCS_SYSTEM */ #ifdef PROFILING + stopHeapProfTimer(); CCCS = CCS_SYSTEM; #endif @@ -966,7 +984,7 @@ schedule( void ) #elif !defined(GRAN) && !defined(PAR) IF_DEBUG(scheduler,fprintf(stderr,"scheduler: ");); #endif - t = cap->rCurrentTSO; + t = cap->r.rCurrentTSO; #if defined(PAR) /* HACK 675: if the last thread didn't yield, make sure to print a @@ -979,14 +997,65 @@ schedule( void ) switch (ret) { case HeapOverflow: #if defined(GRAN) - IF_DEBUG(gran, - DumpGranEvent(GR_DESCHEDULE, t)); + IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t)); globalGranStats.tot_heapover++; #elif defined(PAR) - // IF_DEBUG(par, - //DumpGranEvent(GR_DESCHEDULE, t); globalParStats.tot_heapover++; #endif + + // did the task ask for a large block? + if (cap->r.rHpAlloc > BLOCK_SIZE_W) { + // if so, get one and push it on the front of the nursery. 
+ bdescr *bd; + nat blocks; + + blocks = (nat)BLOCK_ROUND_UP(cap->r.rHpAlloc * sizeof(W_)) / BLOCK_SIZE; + + IF_DEBUG(scheduler,belch("--<< thread %ld (%p; %s) stopped: requesting a large block (size %d)", + t->id, t, + whatNext_strs[t->what_next], blocks)); + + // don't do this if it would push us over the + // alloc_blocks_lim limit; we'll GC first. + if (alloc_blocks + blocks < alloc_blocks_lim) { + + alloc_blocks += blocks; + bd = allocGroup( blocks ); + + // link the new group into the list + bd->link = cap->r.rCurrentNursery; + bd->u.back = cap->r.rCurrentNursery->u.back; + if (cap->r.rCurrentNursery->u.back != NULL) { + cap->r.rCurrentNursery->u.back->link = bd; + } else { + ASSERT(g0s0->blocks == cap->r.rCurrentNursery && + g0s0->blocks == cap->r.rNursery); + cap->r.rNursery = g0s0->blocks = bd; + } + cap->r.rCurrentNursery->u.back = bd; + + // initialise it as a nursery block + bd->step = g0s0; + bd->gen_no = 0; + bd->flags = 0; + bd->free = bd->start; + + // don't forget to update the block count in g0s0. + g0s0->n_blocks += blocks; + ASSERT(countBlocks(g0s0->blocks) == g0s0->n_blocks); + + // now update the nursery to point to the new block + cap->r.rCurrentNursery = bd; + + // we might be unlucky and have another thread get on the + // run queue before us and steal the large block, but in that + // case the thread will just end up requesting another large + // block. + PUSH_ON_RUN_QUEUE(t); + break; + } + } + /* make all the running tasks block on a condition variable, * maybe set context_switch and wait till they all pile in, * then have them wait on a GC condition variable. @@ -1206,6 +1275,15 @@ schedule( void ) n_free_capabilities++; #endif +#ifdef PROFILING + if (RtsFlags.ProfFlags.profileInterval==0 || performHeapProfile) { + GarbageCollect(GetRoots, rtsTrue); + heapCensus(); + performHeapProfile = rtsFalse; + ready_to_gc = rtsFalse; // we already GC'd + } +#endif + #ifdef SMP if (ready_to_gc && n_free_capabilities == RtsFlags.ParFlags.nNodes) #else @@ -1236,24 +1314,20 @@ schedule( void ) G_CURR_THREADQ(0)); #endif /* GRAN */ } + #if defined(GRAN) next_thread: IF_GRAN_DEBUG(unused, print_eventq(EventHd)); event = get_next_event(); - #elif defined(PAR) next_thread: /* ToDo: wait for next message to arrive rather than busy wait */ - -#else /* GRAN */ - /* not any more - next_thread: - t = take_off_run_queue(END_TSO_QUEUE); - */ #endif /* GRAN */ + } /* end of while(1) */ + IF_PAR_DEBUG(verbose, belch("== Leaving schedule() after having received Finish")); } @@ -1304,21 +1378,25 @@ void deleteAllThreads ( void ) * ------------------------------------------------------------------------- */ StgInt -suspendThread( Capability *cap ) +suspendThread( StgRegTable *reg ) { nat tok; + Capability *cap; + + // assume that *reg is a pointer to the StgRegTable part of a Capability + cap = (Capability *)((void *)reg - sizeof(StgFunTable)); ACQUIRE_LOCK(&sched_mutex); IF_DEBUG(scheduler, - sched_belch("thread %d did a _ccall_gc", cap->rCurrentTSO->id)); + sched_belch("thread %d did a _ccall_gc", cap->r.rCurrentTSO->id)); - threadPaused(cap->rCurrentTSO); - cap->rCurrentTSO->link = suspended_ccalling_threads; - suspended_ccalling_threads = cap->rCurrentTSO; + threadPaused(cap->r.rCurrentTSO); + cap->r.rCurrentTSO->link = suspended_ccalling_threads; + suspended_ccalling_threads = cap->r.rCurrentTSO; /* Use the thread ID as the token; it should be unique */ - tok = cap->rCurrentTSO->id; + tok = cap->r.rCurrentTSO->id; #ifdef SMP cap->link = free_capabilities; @@ -1330,7 +1408,7 @@ 
suspendThread( Capability *cap )
   return tok;
 }
 
-Capability *
+StgRegTable *
 resumeThread( StgInt tok )
 {
   StgTSO *tso, **prev;
@@ -1362,13 +1440,13 @@ resumeThread( StgInt tok )
   free_capabilities = cap->link;
   n_free_capabilities--;
 #else
-  cap = &MainRegTable;
+  cap = &MainCapability;
 #endif
 
-  cap->rCurrentTSO = tso;
+  cap->r.rCurrentTSO = tso;
 
   RELEASE_LOCK(&sched_mutex);
-  return cap;
+  return &cap->r;
 }
 
 
@@ -1395,6 +1473,16 @@ int cmp_thread(const StgTSO *tso1, const StgTSO *tso2)
 }
 
 /* ---------------------------------------------------------------------------
+ * Fetching the ThreadID from an StgTSO.
+ *
+ * This is used in the implementation of Show for ThreadIds.
+ * ------------------------------------------------------------------------ */
+int rts_getThreadId(const StgTSO *tso)
+{
+  return tso->id;
+}
+
+/* ---------------------------------------------------------------------------
    Create a new thread.
 
    The new thread starts with the given stack size.  Before the
@@ -1724,7 +1812,15 @@ term_handler(int sig STG_UNUSED)
 }
 #endif
 
-//@cindex initScheduler
+static void
+initCapability( Capability *cap )
+{
+    cap->f.stgChk0         = (F_)__stg_chk_0;
+    cap->f.stgChk1         = (F_)__stg_chk_1;
+    cap->f.stgGCEnter1     = (F_)__stg_gc_enter_1;
+    cap->f.stgUpdatePAP    = (F_)__stg_update_PAP;
+}
+
 void 
 initScheduler(void)
 {
@@ -1781,6 +1877,7 @@ initScheduler(void)
     prev = NULL;
     for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
       cap = stgMallocBytes(sizeof(Capability), "initScheduler:capabilities");
+      initCapability(cap);
       cap->link = prev;
       prev = cap;
     }
@@ -1789,6 +1886,8 @@ initScheduler(void)
   }
   IF_DEBUG(scheduler,fprintf(stderr,"scheduler: Allocated %d capabilities\n",
			      n_free_capabilities););
+#else
+  initCapability(&MainCapability);
 #endif
 
 #if defined(SMP) || defined(PAR)
@@ -2093,7 +2192,8 @@ take_off_run_queue(StgTSO *tso) {
    KH @ 25/10/99
 */
 
-static void GetRoots(void)
+void
+GetRoots(evac_fn evac)
 {
   StgMainThread *m;
 
@@ -2102,16 +2202,16 @@ static void GetRoots(void)
     nat i;
     for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
       if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
-	  run_queue_hds[i]    = (StgTSO *)MarkRoot((StgClosure *)run_queue_hds[i]);
+	  evac((StgClosure **)&run_queue_hds[i]);
       if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
-	  run_queue_tls[i]    = (StgTSO *)MarkRoot((StgClosure *)run_queue_tls[i]);
+	  evac((StgClosure **)&run_queue_tls[i]);
       if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
-	  blocked_queue_hds[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hds[i]);
+	  evac((StgClosure **)&blocked_queue_hds[i]);
       if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
-	  blocked_queue_tls[i] = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tls[i]);
+	  evac((StgClosure **)&blocked_queue_tls[i]);
       if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
-	  ccalling_threadss[i] = (StgTSO *)MarkRoot((StgClosure *)ccalling_threadss[i]);
+	  evac((StgClosure **)&ccalling_threadss[i]);
     }
   }
@@ -2119,31 +2219,31 @@ static void GetRoots(void)
 #else /* !GRAN */
 
   if (run_queue_hd != END_TSO_QUEUE) {
-    ASSERT(run_queue_tl != END_TSO_QUEUE);
-    run_queue_hd      = (StgTSO *)MarkRoot((StgClosure *)run_queue_hd);
-    run_queue_tl      = (StgTSO *)MarkRoot((StgClosure *)run_queue_tl);
+      ASSERT(run_queue_tl != END_TSO_QUEUE);
+      evac((StgClosure **)&run_queue_hd);
+      evac((StgClosure **)&run_queue_tl);
   }
-  
+
   if (blocked_queue_hd != END_TSO_QUEUE) {
-    ASSERT(blocked_queue_tl != END_TSO_QUEUE);
-    blocked_queue_hd  = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_hd);
-    blocked_queue_tl  = (StgTSO *)MarkRoot((StgClosure *)blocked_queue_tl);
+      ASSERT(blocked_queue_tl != END_TSO_QUEUE);
+      evac((StgClosure **)&blocked_queue_hd);
+      evac((StgClosure **)&blocked_queue_tl);
   }
-  
+
   if (sleeping_queue != END_TSO_QUEUE) {
-    sleeping_queue = (StgTSO *)MarkRoot((StgClosure *)sleeping_queue);
+      evac((StgClosure **)&sleeping_queue);
   }
 #endif 
 
   for (m = main_threads; m != NULL; m = m->link) {
-    m->tso = (StgTSO *)MarkRoot((StgClosure *)m->tso);
+      evac((StgClosure **)&m->tso);
+  }
+  if (suspended_ccalling_threads != END_TSO_QUEUE) {
+      evac((StgClosure **)&suspended_ccalling_threads);
   }
-  if (suspended_ccalling_threads != END_TSO_QUEUE)
-    suspended_ccalling_threads = 
-      (StgTSO *)MarkRoot((StgClosure *)suspended_ccalling_threads);
 
 #if defined(SMP) || defined(PAR) || defined(GRAN)
-  markSparkQueue();
+  markSparkQueue(evac);
 #endif
 }
@@ -2160,7 +2260,7 @@ static void GetRoots(void)
    This needs to be protected by the GC condition variable above.  KH.
    -------------------------------------------------------------------------- */
 
-void (*extra_roots)(void);
+void (*extra_roots)(evac_fn);
 
 void
 performGC(void)
@@ -2175,17 +2275,16 @@ performMajorGC(void)
 }
 
 static void
-AllRoots(void)
+AllRoots(evac_fn evac)
 {
-  GetRoots();			/* the scheduler's roots */
-  extra_roots();		/* the user's roots */
+    GetRoots(evac);		// the scheduler's roots
+    extra_roots(evac);		// the user's roots
 }
 
 void
-performGCWithRoots(void (*get_roots)(void))
+performGCWithRoots(void (*get_roots)(evac_fn))
 {
   extra_roots = get_roots;
-
   GarbageCollect(AllRoots,rtsFalse);
 }
@@ -2248,7 +2347,7 @@ threadStackOverflow(StgTSO *tso)
   dest->stack_size = new_stack_size;
 
   /* and relocate the update frame list */
-  relocate_TSO(tso, dest);
+  relocate_stack(dest, diff);
 
   /* Mark the old TSO as relocated.  We have to check for relocated
    * TSOs in the garbage collector and any primops that deal with TSOs.
@@ -2922,7 +3021,7 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
     }
 
     while (1) {
-      int words = ((P_)su - (P_)sp) - 1;
+      nat words = ((P_)su - (P_)sp) - 1;
       nat i;
       StgAP_UPD * ap;
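
A note on the new large-block path in the HeapOverflow case above: when a thread's
allocation request exceeds a block, the scheduler allocates a fresh block group and
splices it into the nursery list just in front of rCurrentNursery, then simply
reschedules the thread. Below is a minimal sketch of that doubly-linked splice,
assuming a stripped-down bdescr with plain link/back fields (the real descriptor
keeps back inside a union, and link_before is a hypothetical helper, not an RTS
function):

#include <stddef.h>

typedef struct bdescr_ {
    struct bdescr_ *link;   /* next block in the nursery list */
    struct bdescr_ *back;   /* previous block in the nursery list */
} bdescr;

/* Splice 'bd' into the list immediately before 'current', updating the
 * list head when 'current' was the first block.  This mirrors the
 * Schedule.c code that pushes a freshly allocated group onto the nursery:
 * the current block stays reachable via bd->link, so allocation resumes
 * in the new group and falls through to the old nursery afterwards. */
static void
link_before(bdescr **list_head, bdescr *current, bdescr *bd)
{
    bd->link = current;
    bd->back = current->back;
    if (current->back != NULL) {
        current->back->link = bd;
    } else {
        *list_head = bd;        /* 'current' was the head of the list */
    }
    current->back = bd;
}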
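
suspendThread now takes an StgRegTable * and recovers the enclosing Capability by
subtracting the size of the StgFunTable that precedes the register table. A sketch
of that layout trick under assumed, simplified struct definitions (the real types
live in the RTS headers; offsetof is used here in place of the patch's raw
sizeof(StgFunTable) subtraction, and regTableToCapability is a hypothetical name):

#include <stddef.h>

/* Illustrative stand-ins: the real definitions live in the RTS headers. */
typedef struct { void *entries[4]; } StgFunTable;
typedef struct { void *rCurrentTSO; } StgRegTable;

typedef struct Capability_ {
    StgFunTable f;   /* must come first ...                      */
    StgRegTable r;   /* ... so 'r' sits at a fixed, known offset */
} Capability;

/* Recover the enclosing Capability from a pointer to its 'r' field. */
static Capability *
regTableToCapability(StgRegTable *reg)
{
    return (Capability *)((char *)reg - offsetof(Capability, r));
}

Keeping the register table embedded at a fixed offset lets compiled Haskell code
pass around only the StgRegTable it needs, while the scheduler can still get back
to the full Capability without a lookup.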
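
The GetRoots rewrite above replaces the MarkRoot style, in which each root was
passed in and a possibly relocated pointer returned and stored back, with an
evac_fn callback that updates each root in place, so the same root walker can
serve any collector. A self-contained sketch of the callback shape, with a toy
forwarding-pointer evacuator standing in for the real GC (toy_evac, get_roots
and the fwd field are illustrative only):

#include <stddef.h>

typedef struct StgClosure_ {
    struct StgClosure_ *fwd;   /* toy forwarding pointer: non-NULL if moved */
} StgClosure;

/* The callback type: may overwrite *root with the object's new address. */
typedef void (*evac_fn)(StgClosure **root);

static StgClosure *run_queue_hd;   /* one stand-in root */

/* The scheduler exposes a single walk over its roots; the GC decides what
 * "evacuate" means by supplying a suitable callback. */
static void
get_roots(evac_fn evac)
{
    if (run_queue_hd != NULL) {
        evac(&run_queue_hd);
    }
}

/* Toy evacuation: chase the forwarding pointer if the object moved. */
static void
toy_evac(StgClosure **root)
{
    if ((*root)->fwd != NULL) {
        *root = (*root)->fwd;
    }
}

int
main(void)
{
    StgClosure moved = { NULL };
    StgClosure old   = { &moved };   /* pretend the GC copied 'old' to 'moved' */
    run_queue_hd = &old;
    get_roots(toy_evac);             /* run_queue_hd now points at 'moved' */
    return run_queue_hd == &moved ? 0 : 1;
}

Passing the root's address lets the collector both read and overwrite it in one
call, which is what allows GarbageCollect(GetRoots, ...) to stay agnostic about
which queues the scheduler owns.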