X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSchedule.h;h=0e18168755b0c1df65f3c38bdd5500bfca629456;hb=dd56e9ab4544e83d27532a8d9058140bfe81825c;hp=2afedeec86331a19900676289030ba8c0ff69c85;hpb=ab0e778ccfde61aed4c22679b24d175fc6cc9bf3;p=ghc-hetmet.git

diff --git a/rts/Schedule.h b/rts/Schedule.h
index 2afedee..0e18168 100644
--- a/rts/Schedule.h
+++ b/rts/Schedule.h
@@ -12,13 +12,15 @@
 
 #include "OSThreads.h"
 #include "Capability.h"
+#include "EventLog.h"
 
 /* initScheduler(), exitScheduler()
  * Called from STG : no
  * Locks assumed : none
  */
 void initScheduler (void);
-void exitScheduler (void);
+void exitScheduler (rtsBool wait_foreign);
+void freeScheduler (void);
 
 // Place a new thread on the run queue of the current Capability
 void scheduleThread (Capability *cap, StgTSO *tso);
@@ -35,13 +37,7 @@ void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
  * Called from STG : yes
  * Locks assumed : none
  */
-#if defined(GRAN)
-void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
-#elif defined(PAR)
-void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
-#else
 void awakenBlockedQueue (Capability *cap, StgTSO *tso);
-#endif
 
 /* wakeUpRts()
  *
@@ -63,42 +59,20 @@ StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *excepti
 /* findRetryFrameHelper */
 StgWord findRetryFrameHelper (StgTSO *tso);
 
-/* GetRoots(evac_fn f)
- *
- * Call f() for each root known to the scheduler.
- *
- * Called from STG : NO
- * Locks assumed : ????
- */
-void GetRoots(evac_fn);
-
 /* workerStart()
  *
  * Entry point for a new worker task.
  * Called from STG : NO
  * Locks assumed : none
  */
-void workerStart(Task *task);
-
-#if defined(GRAN)
-void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
-void unlink_from_bq(StgTSO* tso, StgClosure* node);
-void initThread(StgTSO *tso, nat stack_size, StgInt pri);
-#elif defined(PAR)
-nat run_queue_len(void);
-void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
-void initThread(StgTSO *tso, nat stack_size);
-#else
+#if defined(THREADED_RTS)
+void OSThreadProcAttr workerStart(Task *task);
+#endif
+
 char *info_type(StgClosure *closure); // dummy
 char *info_type_by_ip(StgInfoTable *ip); // dummy
 void awaken_blocked_queue(StgTSO *q);
 void initThread(StgTSO *tso, nat stack_size);
-#endif
-
-/* Context switch flag.
- * Locks required : none (conflicts are harmless)
- */
-extern int RTS_VAR(context_switch);
 
 /* The state of the scheduler. This is used to control the sequence
  * of events during shutdown, and when the runtime is interrupted
@@ -108,7 +82,7 @@ extern int RTS_VAR(context_switch);
 #define SCHED_INTERRUPTING 1 /* ^C detected, before threads are deleted */
 #define SCHED_SHUTTING_DOWN 2 /* final shutdown */
 
-extern rtsBool RTS_VAR(sched_state);
+extern volatile StgWord RTS_VAR(sched_state);
 
 /*
  * flag that tracks whether we have done any execution in this time slice.
@@ -124,27 +98,18 @@ extern rtsBool RTS_VAR(sched_state);
  * INACTIVE to DONE_GC happens under sched_mutex. No lock required
  * to set it to ACTIVITY_YES.
  */
-extern nat recent_activity;
+extern volatile StgWord recent_activity;
 
 /* Thread queues.
  * Locks required : sched_mutex
  *
  * In GranSim we have one run/blocked_queue per PE.
  */
-#if defined(GRAN)
-// run_queue_hds defined in GranSim.h
-#else
 extern StgTSO *RTS_VAR(blackhole_queue);
 #if !defined(THREADED_RTS)
 extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
 extern StgTSO *RTS_VAR(sleeping_queue);
 #endif
-#endif
-
-/* Linked list of all threads.
- * Locks required : sched_mutex
- */
-extern StgTSO *RTS_VAR(all_threads);
 
 /* Set to rtsTrue if there are threads on the blackhole_queue, and
  * it is possible that one or more of them may be available to run.
@@ -155,6 +120,8 @@ extern StgTSO *RTS_VAR(all_threads);
  */
 extern rtsBool blackholes_need_checking;
 
+extern rtsBool heap_overflow;
+
 #if defined(THREADED_RTS)
 extern Mutex RTS_VAR(sched_mutex);
 #endif
@@ -167,6 +134,7 @@ void interruptStgRts (void);
 nat run_queue_len (void);
 
 void resurrectThreads (StgTSO *);
+void performPendingThrowTos (StgTSO *);
 
 void printAllThreads(void);
 
@@ -175,9 +143,6 @@ void printAllThreads(void);
 #ifdef DEBUG
 void print_bq (StgClosure *node);
 #endif
-#if defined(PAR)
-void print_bqe (StgBlockingQueueElement *bqe);
-#endif
 
 /* -----------------------------------------------------------------------------
  * Some convenient macros/inline functions...
@@ -194,23 +159,23 @@ void print_bqe (StgBlockingQueueElement *bqe);
 INLINE_HEADER void
 appendToRunQueue (Capability *cap, StgTSO *tso)
 {
-    ASSERT(tso->link == END_TSO_QUEUE);
+    ASSERT(tso->_link == END_TSO_QUEUE);
     if (cap->run_queue_hd == END_TSO_QUEUE) {
         cap->run_queue_hd = tso;
     } else {
-        cap->run_queue_tl->link = tso;
+        setTSOLink(cap, cap->run_queue_tl, tso);
     }
     cap->run_queue_tl = tso;
+    postEvent (cap, EVENT_THREAD_RUNNABLE, tso->id, 0);
 }
 
-/* Push a thread on the beginning of the run queue. Used for
- * newly awakened threads, so they get run as soon as possible.
+/* Push a thread on the beginning of the run queue.
  * ASSUMES: cap->running_task is the current task.
  */
 INLINE_HEADER void
 pushOnRunQueue (Capability *cap, StgTSO *tso)
 {
-    tso->link = cap->run_queue_hd;
+    setTSOLink(cap, tso, cap->run_queue_hd);
     cap->run_queue_hd = tso;
     if (cap->run_queue_tl == END_TSO_QUEUE) {
         cap->run_queue_tl = tso;
@@ -224,8 +189,8 @@ popRunQueue (Capability *cap)
 {
     StgTSO *t = cap->run_queue_hd;
     ASSERT(t != END_TSO_QUEUE);
-    cap->run_queue_hd = t->link;
-    t->link = END_TSO_QUEUE;
+    cap->run_queue_hd = t->_link;
+    t->_link = END_TSO_QUEUE; // no write barrier req'd
     if (cap->run_queue_hd == END_TSO_QUEUE) {
         cap->run_queue_tl = END_TSO_QUEUE;
     }
@@ -238,27 +203,32 @@ popRunQueue (Capability *cap)
 
 INLINE_HEADER void
 appendToBlockedQueue(StgTSO *tso)
 {
-    ASSERT(tso->link == END_TSO_QUEUE);
+    ASSERT(tso->_link == END_TSO_QUEUE);
     if (blocked_queue_hd == END_TSO_QUEUE) {
         blocked_queue_hd = tso;
     } else {
-        blocked_queue_tl->link = tso;
+        setTSOLink(&MainCapability, blocked_queue_tl, tso);
    }
     blocked_queue_tl = tso;
 }
 #endif
 
 #if defined(THREADED_RTS)
+// Assumes: my_cap is owned by the current Task. We hold
+// other_cap->lock, but we do not necessarily own other_cap; another
+// Task may be running on it.
 INLINE_HEADER void
-appendToWakeupQueue (Capability *cap, StgTSO *tso)
+appendToWakeupQueue (Capability *my_cap, Capability *other_cap, StgTSO *tso)
 {
-    ASSERT(tso->link == END_TSO_QUEUE);
-    if (cap->wakeup_queue_hd == END_TSO_QUEUE) {
-        cap->wakeup_queue_hd = tso;
+    ASSERT(tso->_link == END_TSO_QUEUE);
+    if (other_cap->wakeup_queue_hd == END_TSO_QUEUE) {
+        other_cap->wakeup_queue_hd = tso;
     } else {
-        cap->wakeup_queue_tl->link = tso;
+        // my_cap is passed to setTSOLink() because it may need to
+        // write to the mutable list.
+        setTSOLink(my_cap, other_cap->wakeup_queue_tl, tso);
     }
-    cap->wakeup_queue_tl = tso;
+    other_cap->wakeup_queue_tl = tso;
 }
 #endif
@@ -301,11 +271,5 @@ emptyThreadQueues(Capability *cap)
 
 #endif /* !IN_STG_CODE */
 
-INLINE_HEADER void
-dirtyTSO (StgTSO *tso)
-{
-    tso->flags |= TSO_DIRTY;
-}
-
 #endif /* SCHEDULE_H */
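
The queue-related hunks above all make the same move: a TSO's link field is no longer written directly, but through setTSOLink(), which (per the new comment in appendToWakeupQueue) may need to write to the capability's mutable list, while resetting a link to END_TSO_QUEUE needs no such barrier. The fragment below is a minimal usage sketch of how the inline run-queue helpers compose on a single Capability; it is not code from Schedule.c, and the function name requeueAtTail_sketch() and its control flow are illustrative assumptions. Only the helpers, fields, and event named in the diff are taken from the source.

#include "Schedule.h"   /* assumed include; provides the inline queue helpers */

/* Illustrative sketch only, not part of the patch. */
static void
requeueAtTail_sketch (Capability *cap)
{
    StgTSO *t;

    if (cap->run_queue_hd == END_TSO_QUEUE) {
        return;                    /* nothing runnable on this Capability */
    }

    /* popRunQueue() unlinks the head and resets t->_link to END_TSO_QUEUE;
     * per the comment added in the diff, that store needs no write barrier. */
    t = popRunQueue(cap);

    /* ... the scheduler would run t here; suppose it merely yielded ... */

    /* appendToRunQueue() re-links t at the tail through setTSOLink(), which
     * may record the old tail on the mutable list, and (after this patch)
     * also posts an EVENT_THREAD_RUNNABLE event for the thread. */
    appendToRunQueue(cap, t);
}

Presumably the barrier can be skipped when storing END_TSO_QUEUE because it is a static end-of-queue marker rather than a pointer into the heap that a generational collector would have to track.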