#define STOP_FRAME 44
#define CAF_BLACKHOLE 45
#define BLACKHOLE 46
-#define BLACKHOLE_BQ 47
#define SE_BLACKHOLE 48
#define SE_CAF_BLACKHOLE 49
#define MVAR 50
struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
} StgRBH;
-#else
-
-typedef struct StgBlockingQueue_ {
- StgHeader header;
- struct StgTSO_ *blocking_queue;
-} StgBlockingQueue;
-
#endif
#if defined(PAR)
#else
extern StgTSO *createThread(nat stack_size);
#endif
-#if defined(PAR) || defined(SMP)
-extern void taskStart(void);
-#endif
extern void scheduleThread(StgTSO *tso);
extern SchedulerStatus scheduleWaitThread(StgTSO *tso, /*out*/HaskellObj* ret,
Capability *initialCapability);
RTS_INFO(stg_CAF_ENTERED_info);
RTS_INFO(stg_BLACKHOLE_info);
RTS_INFO(stg_CAF_BLACKHOLE_info);
-RTS_INFO(stg_BLACKHOLE_BQ_info);
#ifdef TICKY_TICKY
RTS_INFO(stg_SE_BLACKHOLE_info);
RTS_INFO(stg_SE_CAF_BLACKHOLE_info);
RTS_ENTRY(stg_CAF_ENTERED_entry);
RTS_ENTRY(stg_BLACKHOLE_entry);
RTS_ENTRY(stg_CAF_BLACKHOLE_entry);
-RTS_ENTRY(stg_BLACKHOLE_BQ_entry);
#ifdef TICKY_TICKY
RTS_ENTRY(stg_SE_BLACKHOLE_entry);
RTS_ENTRY(stg_SE_CAF_BLACKHOLE_entry);
-------------------------------------------------------------------------- */
-/*
- * Storage manager mutex
- */
-#if defined(SMP)
-extern Mutex sm_mutex;
-#define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex)
-#define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex)
-#else
-#define ACQUIRE_SM_LOCK
-#define RELEASE_SM_LOCK
-#endif
-
/* ToDo: shouldn't recordMutable acquire some
* kind of lock in the SMP case? Or do we need per-processor
* mutable lists?
{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgSelector)); }
INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
-{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgBlockingQueue)); }
+{ return sizeofW(StgHeader)+MIN_UPD_SIZE; }
/* --------------------------------------------------------------------------
Sizes of closures
BLOCK_BEGIN \
DECLARE_IPTR(info); \
info = GET_INFO(updclosure); \
- AWAKEN_BQ(info,updclosure); \
- updateWithIndirection(GET_INFO(updclosure), ind_info, \
+ updateWithIndirection(ind_info, \
updclosure, \
heapptr, \
and_then); \
#if defined(PROFILING) || defined(TICKY_TICKY)
#define UPD_PERM_IND(updclosure, heapptr) \
BLOCK_BEGIN \
- DECLARE_IPTR(info); \
- info = GET_INFO(updclosure); \
- AWAKEN_BQ(info,updclosure); \
- updateWithPermIndirection(info, \
- updclosure, \
+ updateWithPermIndirection(updclosure, \
heapptr); \
BLOCK_END
#endif
# ifdef TICKY_TICKY
# define UPD_IND_NOLOCK(updclosure, heapptr) \
BLOCK_BEGIN \
- DECLARE_IPTR(info); \
- info = GET_INFO(updclosure); \
- AWAKEN_BQ_NOLOCK(info,updclosure); \
- updateWithPermIndirection(info, \
- updclosure, \
+ updateWithPermIndirection(updclosure, \
heapptr); \
BLOCK_END
# else
# define UPD_IND_NOLOCK(updclosure, heapptr) \
BLOCK_BEGIN \
- DECLARE_IPTR(info); \
- info = GET_INFO(updclosure); \
- AWAKEN_BQ_NOLOCK(info,updclosure); \
- updateWithIndirection(info, INFO_PTR(stg_IND_info), \
+ updateWithIndirection(INFO_PTR(stg_IND_info), \
updclosure, \
heapptr,); \
BLOCK_END
DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
}
-
-#else /* !GRAN && !PAR */
-
-#define DO_AWAKEN_BQ(closure) \
- FCALL awakenBlockedQueue(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
-
-#define AWAKEN_BQ(info,closure) \
- if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
- DO_AWAKEN_BQ(closure); \
- }
-
-#define AWAKEN_STATIC_BQ(info,closure) \
- if (info == INFO_PTR(stg_BLACKHOLE_BQ_STATIC_info)) { \
- DO_AWAKEN_BQ(closure); \
- }
-
-#ifdef RTS_SUPPORTS_THREADS
-#define DO_AWAKEN_BQ_NOLOCK(closure) \
- FCALL awakenBlockedQueueNoLock(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
-
-#define AWAKEN_BQ_NOLOCK(info,closure) \
- if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
- DO_AWAKEN_BQ_NOLOCK(closure); \
- }
-#endif
#endif /* GRAN || PAR */
/* -----------------------------------------------------------------------------
*/
#ifdef CMINUSMINUS
#define generation(n) (W_[generations] + n*SIZEOF_generation)
-#define updateWithIndirection(info, ind_info, p1, p2, and_then) \
+#define updateWithIndirection(ind_info, p1, p2, and_then) \
W_ bd; \
\
/* ASSERT( p1 != p2 && !closure_IND(p1) ); \
TICK_UPD_NEW_IND(); \
and_then; \
} else { \
- if (info != stg_BLACKHOLE_BQ_info) { \
- DEBUG_FILL_SLOP(p1); \
- foreign "C" recordMutableGen(p1 "ptr", \
+ DEBUG_FILL_SLOP(p1); \
+ foreign "C" recordMutableGen(p1 "ptr", \
generation(TO_W_(bdescr_gen_no(bd))) "ptr"); \
- } \
StgInd_indirectee(p1) = p2; \
SET_INFO(p1, stg_IND_OLDGEN_info); \
LDV_RECORD_CREATE(p1); \
and_then; \
}
#else
-#define updateWithIndirection(_info, ind_info, p1, p2, and_then) \
+#define updateWithIndirection(ind_info, p1, p2, and_then) \
{ \
bdescr *bd; \
\
TICK_UPD_NEW_IND(); \
and_then; \
} else { \
- if (_info != &stg_BLACKHOLE_BQ_info) { \
- DEBUG_FILL_SLOP(p1); \
- recordMutableGen(p1, &generations[bd->gen_no]); \
- } \
+ DEBUG_FILL_SLOP(p1); \
+ recordMutableGen(p1, &generations[bd->gen_no]); \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1, &stg_IND_OLDGEN_info); \
TICK_UPD_OLD_IND(); \
*/
#ifndef CMINUSMINUS
INLINE_HEADER void
-updateWithPermIndirection(const StgInfoTable *info,
- StgClosure *p1,
+updateWithPermIndirection(StgClosure *p1,
StgClosure *p2)
{
bdescr *bd;
LDV_RECORD_CREATE(p1);
TICK_UPD_NEW_PERM_IND(p1);
} else {
- if (info != &stg_BLACKHOLE_BQ_info) {
- recordMutableGen(p1, &generations[bd->gen_no]);
- }
+ recordMutableGen(p1, &generations[bd->gen_no]);
((StgInd *)p1)->indirectee = p2;
SET_INFO(p1, &stg_IND_OLDGEN_PERM_info);
/*
opt_struct_size(StgTSOGranInfo,GRAN);
opt_struct_size(StgTSODistInfo,DIST);
- closure_size(StgBlockingQueue);
- closure_field(StgBlockingQueue, blocking_queue);
-
closure_field(StgUpdateFrame, updatee);
closure_field(StgCatchFrame, handler);
Capability MainCapability; /* for non-SMP, we have one global capability */
#endif
-#if defined(RTS_SUPPORTS_THREADS)
-
nat rts_n_free_capabilities;
+#if defined(RTS_SUPPORTS_THREADS)
+
/* returning_worker_cond: when a worker thread returns from executing an
* external call, it needs to wait for an RTS Capability before passing
* on the result of the call to the Haskell thread that made it.
static rtsBool passingCapability = rtsFalse;
#endif
+#if defined(SMP)
+/*
+ * Free capability list.
+ */
+Capability *free_capabilities;
+#endif
+
#ifdef SMP
#define UNUSED_IF_NOT_SMP
#else
#endif
#if defined(RTS_USER_SIGNALS)
-#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted || signals_pending())
+#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted || blackholes_need_checking || signals_pending())
#else
-#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted)
+#define ANY_WORK_TO_DO() (!EMPTY_RUN_QUEUE() || interrupted || blackholes_need_checking)
#endif
/* ----------------------------------------------------------------------------
cap->f.stgGCFun = (F_)__stg_gc_fun;
}
+/* -----------------------------------------------------------------------------
+ * Function: initCapabilities_(nat)
+ *
+ * Purpose: upon startup, allocate and fill in table
+ * holding 'n' Capabilities. Only for SMP, since
+ * it is the only build that supports multiple
+ * capabilities within the RTS.
+ * -------------------------------------------------------------------------- */
#if defined(SMP)
-static void initCapabilities_(nat n);
-#endif
+static void
+initCapabilities_(nat n)
+{
+ nat i;
+ Capability *cap, *prev;
+ cap = NULL;
+ prev = NULL;
+ for (i = 0; i < n; i++) {
+ cap = stgMallocBytes(sizeof(Capability), "initCapabilities");
+ initCapability(cap);
+ cap->link = prev;
+ prev = cap;
+ }
+ free_capabilities = cap;
+ rts_n_free_capabilities = n;
+ IF_DEBUG(scheduler,
+ sched_belch("allocated %d capabilities", rts_n_free_capabilities));
+}
+#endif /* SMP */
/* ---------------------------------------------------------------------------
* Function: initCapabilities()
#if defined(RTS_SUPPORTS_THREADS)
initCondition(&returning_worker_cond);
initCondition(&thread_ready_cond);
- rts_n_free_capabilities = 1;
#endif
- return;
+ rts_n_free_capabilities = 1;
}
-#if defined(SMP)
-/* Free capability list. */
-static Capability *free_capabilities; /* Available capabilities for running threads */
-static Capability *returning_capabilities;
- /* Capabilities being passed to returning worker threads */
-#endif
-
/* ----------------------------------------------------------------------------
grabCapability( Capability** )
void
grabCapability( Capability** cap )
{
-#if !defined(SMP)
-#if defined(RTS_SUPPORTS_THREADS)
+#if defined(SMP)
+ ASSERT(rts_n_free_capabilities > 0);
+ *cap = free_capabilities;
+ free_capabilities = (*cap)->link;
+ rts_n_free_capabilities--;
+#else
+# if defined(RTS_SUPPORTS_THREADS)
ASSERT(rts_n_free_capabilities == 1);
rts_n_free_capabilities = 0;
-#endif
+# endif
*cap = &MainCapability;
handleSignalsInThisThread();
-#else
- *cap = free_capabilities;
- free_capabilities = (*cap)->link;
- rts_n_free_capabilities--;
#endif
#if defined(RTS_SUPPORTS_THREADS)
IF_DEBUG(scheduler, sched_belch("worker: got capability"));
{
// Precondition: sched_mutex is held.
#if defined(RTS_SUPPORTS_THREADS)
-#ifndef SMP
+#if !defined(SMP)
ASSERT(rts_n_free_capabilities == 0);
#endif
// Check to see whether a worker thread can be given
#if defined(SMP)
// SMP variant untested
- cap->link = returning_capabilities;
- returning_capabilities = cap;
+ cap->link = free_capabilities;
+ free_capabilities = cap;
#endif
rts_n_waiting_workers--;
context_switch = 1; // make sure it's our turn soon
waitCondition(&returning_worker_cond, pMutex);
#if defined(SMP)
- *pCap = returning_capabilities;
- returning_capabilities = (*pCap)->link;
+ *pCap = free_capabilities;
+ free_capabilities = (*pCap)->link;
+ ASSERT(pCap != NULL);
#else
*pCap = &MainCapability;
ASSERT(rts_n_free_capabilities == 0);
- handleSignalsInThisThread();
#endif
+ handleSignalsInThisThread();
} else {
grabCapability(pCap);
}
*pCap = NULL;
}
- // Post-condition: pMutex is assumed held, and either:
+ // Post-condition: either:
//
// 1. *pCap is NULL, in which case the current thread does not
// hold a capability now, or
startSchedulerTaskIfNecessary();
#endif
}
-
-/* ------------------------------------------------------------------------- */
-
-#if defined(SMP)
-/*
- * Function: initCapabilities_(nat)
- *
- * Purpose: upon startup, allocate and fill in table
- * holding 'n' Capabilities. Only for SMP, since
- * it is the only build that supports multiple
- * capabilities within the RTS.
- */
-static void
-initCapabilities_(nat n)
-{
- nat i;
- Capability *cap, *prev;
- cap = NULL;
- prev = NULL;
- for (i = 0; i < n; i++) {
- cap = stgMallocBytes(sizeof(Capability), "initCapabilities");
- initCapability(cap);
- cap->link = prev;
- prev = cap;
- }
- free_capabilities = cap;
- rts_n_free_capabilities = n;
- returning_capabilities = NULL;
- IF_DEBUG(scheduler,
- sched_belch("allocated %d capabilities", n_free_capabilities));
-}
-#endif /* SMP */
-
extern void passCapabilityToWorker( void );
extern nat rts_n_free_capabilities;
+
+extern Capability *free_capabilities;
+
/* number of worker threads waiting for a return capability
*/
extern nat rts_n_waiting_workers;
static inline rtsBool allFreeCapabilities (void)
{
+#if defined(SMP)
+ return (rts_n_free_capabilities == RTS_DEREF(RtsFlags).ParFlags.nNodes);
+#else
return (rts_n_free_capabilities == 1);
+#endif
}
#else // !RTS_SUPPORTS_THREADS
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Apply.h"
+#include "OSThreads.h"
#include "Storage.h"
#include "LdvProfile.h"
#include "Updates.h"
REGPARM1 static StgClosure *
evacuate(StgClosure *q)
{
+#if defined(PAR)
StgClosure *to;
+#endif
bdescr *bd = NULL;
step *stp;
const StgInfoTable *info;
case BLACKHOLE:
return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
- case BLACKHOLE_BQ:
- to = copy(q,BLACKHOLE_sizeW(),stp);
- return to;
-
case THUNK_SELECTOR:
{
StgClosure *p;
}
#if defined(PAR)
- case RBH: // cf. BLACKHOLE_BQ
+ case RBH:
{
//StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
to = copy(q,BLACKHOLE_sizeW(),stp);
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
- case BLACKHOLE_BQ:
#if defined(PAR)
case RBH:
case BLOCKED_FETCH:
p += BLACKHOLE_sizeW();
break;
- case BLACKHOLE_BQ:
- {
- StgBlockingQueue *bh = (StgBlockingQueue *)p;
- bh->blocking_queue =
- (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- failed_to_evac = rtsTrue;
- p += BLACKHOLE_sizeW();
- break;
- }
-
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
}
#if defined(PAR)
- case RBH: // cf. BLACKHOLE_BQ
+ case RBH:
{
#if 0
nat size, ptrs, nonptrs, vhs;
p += sizeofW(StgFetchMe);
break; // nothing to do in this case
- case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ case FETCH_ME_BQ:
{
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
case ARR_WORDS:
break;
- case BLACKHOLE_BQ:
- {
- StgBlockingQueue *bh = (StgBlockingQueue *)p;
- bh->blocking_queue =
- (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- failed_to_evac = rtsTrue;
- break;
- }
-
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
}
#if defined(PAR)
- case RBH: // cf. BLACKHOLE_BQ
+ case RBH:
{
#if 0
nat size, ptrs, nonptrs, vhs;
case FETCH_ME:
break; // nothing to do in this case
- case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ case FETCH_ME_BQ:
{
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
case BLACKHOLE:
break;
- case BLACKHOLE_BQ:
- {
- StgBlockingQueue *bh = (StgBlockingQueue *)p;
- evac_gen = 0; // repeatedly mutable
- bh->blocking_queue =
- (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- failed_to_evac = rtsTrue;
- break;
- }
-
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
}
#if defined(PAR)
- case RBH: // cf. BLACKHOLE_BQ
+ case RBH:
{
#if 0
nat size, ptrs, nonptrs, vhs;
case FETCH_ME:
break; // nothing to do in this case
- case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ case FETCH_ME_BQ:
{
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
{
StgClosure *frame;
StgRetInfoTable *info;
- StgBlockingQueue *bh;
+ StgClosure *bh;
StgPtr stack_end;
stack_end = &tso->stack[tso->stack_size];
switch (info->i.type) {
case UPDATE_FRAME:
- bh = (StgBlockingQueue *)((StgUpdateFrame *)frame)->updatee;
+ bh = ((StgUpdateFrame *)frame)->updatee;
/* if the thunk is already blackholed, it means we've also
* already blackholed the rest of the thunks on this stack,
return;
}
- if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
- bh->header.info != &stg_CAF_BLACKHOLE_info) {
+ if (bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
* screw us up if we don't check.
*/
if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
- // this wakes the threads up
UPD_IND_NOLOCK(upd->updatee, updatee);
}
// single update frame, or the topmost update frame in a series
else {
- StgBlockingQueue *bh = (StgBlockingQueue *)upd->updatee;
+ StgClosure *bh = upd->updatee;
// Do lazy black-holing
if (bh->header.info != &stg_BLACKHOLE_info &&
- bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
+#include "OSThreads.h"
#include "Storage.h"
#include "BlockAlloc.h"
#include "MBlock.h"
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
- case BLACKHOLE_BQ:
{
StgPtr end;
GC_GENERIC
}
-#ifdef SMP
-stg_gc_enter_1_hponly
-{
- Sp_adj(-1);
- Sp(0) = R1;
- R1 = HeapOverflow;
- SAVE_THREAD_STATE();
- TSO_what_next(CurrentTSO) = ThreadRunGHC::I16;
- jump StgReturn;
-}
-#endif
-
#if defined(GRAN)
/*
ToDo: merge the block and yield macros, calling something like BLOCK(N)
case FUN_1_1:
case FUN_0_2:
- case BLACKHOLE_BQ:
case BLACKHOLE:
case SE_BLACKHOLE:
case CAF_BLACKHOLE:
SymX(stable_ptr_table) \
SymX(stackOverflow) \
SymX(stg_CAF_BLACKHOLE_info) \
- SymX(stg_BLACKHOLE_BQ_info) \
SymX(awakenBlockedQueue) \
SymX(stg_CHARLIKE_closure) \
SymX(stg_EMPTY_MVAR_info) \
}
case CAF_BLACKHOLE:
- debugBelch("CAF_BH(");
- printPtr((StgPtr)stgCast(StgBlockingQueue*,obj)->blocking_queue);
- debugBelch(")\n");
+ debugBelch("CAF_BH");
break;
case BLACKHOLE:
debugBelch("BH\n");
break;
- case BLACKHOLE_BQ:
- debugBelch("BQ(");
- printPtr((StgPtr)stgCast(StgBlockingQueue*,obj)->blocking_queue);
- debugBelch(")\n");
- break;
-
case SE_BLACKHOLE:
debugBelch("SE_BH\n");
break;
, "STOP_FRAME"
, "BLACKHOLE"
- , "BLACKHOLE_BQ"
, "MVAR"
, "ARR_WORDS"
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
- case BLACKHOLE_BQ:
case CONSTR_INTLIKE:
case CONSTR_CHARLIKE:
case FUN_1_0:
case MUT_VAR:
*first_child = ((StgMutVar *)c)->var;
return;
- case BLACKHOLE_BQ:
- // blocking_queue must be TSO and the head of a linked list of TSOs.
- // Shoule it be a child? Seems to be yes.
- *first_child = (StgClosure *)((StgBlockingQueue *)c)->blocking_queue;
- return;
case THUNK_SELECTOR:
*first_child = ((StgSelector *)c)->selectee;
return;
case ARR_WORDS:
// one child (fixed), no SRT
case MUT_VAR:
- case BLACKHOLE_BQ:
case THUNK_SELECTOR:
case IND_PERM:
case IND_OLDGEN_PERM:
case BLACKHOLE:
case SE_BLACKHOLE:
case SE_CAF_BLACKHOLE:
- case BLACKHOLE_BQ:
// indirection
case IND_PERM:
case IND_OLDGEN_PERM:
case BLACKHOLE:
case SE_BLACKHOLE:
case SE_CAF_BLACKHOLE:
- case BLACKHOLE_BQ:
case IND_PERM:
case IND_OLDGEN:
case IND_OLDGEN_PERM:
#include "PosixSource.h"
#include "Rts.h"
+#include "OSThreads.h"
#include "Storage.h"
#include "RtsAPI.h"
#include "SchedAPI.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Prelude.h"
-#include "OSThreads.h"
#include "Schedule.h"
#include "Capability.h"
// b) wake the current worker thread from awaitEvent()
// (so that a thread started by rts_eval* will start immediately)
waitForReturnCapability(&sched_mutex,&rtsApiCapability);
+#else
+ grabCapability(&rtsApiCapability);
#endif
}
#include "RtsAPI.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
+#include "OSThreads.h"
#include "Storage.h" /* initStorage, exitStorage */
#include "Schedule.h" /* initScheduler */
#include "Stats.h" /* initStats */
return stg_max(sizeW_fromITBL(info), sizeofW(StgHeader) + MIN_UPD_SIZE);
}
- case BLACKHOLE_BQ:
- checkBQ(((StgBlockingQueue *)p)->blocking_queue, p);
- /* fall through to basic ptr check */
case FUN:
case FUN_1_0:
case FUN_0_1:
case MUT_ARR_PTRS:
case MUT_ARR_PTRS_FROZEN:
+ case MUT_ARR_PTRS_FROZEN0:
{
StgMutArrPtrs* a = (StgMutArrPtrs *)p;
nat i;
break;
case BlockedOnBlackHole:
checkClosureShallow(tso->block_info.closure);
- ASSERT(/* Can't be a BLACKHOLE because *this* closure is on its BQ */
- get_itbl(tso->block_info.closure)->type==BLACKHOLE_BQ ||
+ ASSERT(get_itbl(tso->block_info.closure)->type==BLACKHOLE ||
get_itbl(tso->block_info.closure)->type==RBH);
break;
case BlockedOnRead:
rtsBool end = rtsFalse;
StgInfoTable *info = get_itbl(closure);
- ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR
- || info->type == FETCH_ME_BQ || info->type == RBH);
+ ASSERT(info->type == MVAR || info->type == FETCH_ME_BQ || info->type == RBH);
do {
switch (get_itbl(bqe)->type) {
rtsBool end = rtsFalse;
StgInfoTable *info = get_itbl(closure);
- ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
+ ASSERT(info->type == MVAR);
do {
switch (get_itbl(bqe)->type) {
rtsBool end = rtsFalse;
StgInfoTable *info = get_itbl(closure);
- ASSERT(info->type == BLACKHOLE_BQ || info->type == MVAR);
+ ASSERT(info->type == MVAR);
do {
switch (get_itbl(bqe)->type) {
StgTSO *run_queue_tl = NULL;
StgTSO *blocked_queue_hd = NULL;
StgTSO *blocked_queue_tl = NULL;
+StgTSO *blackhole_queue = NULL;
StgTSO *sleeping_queue = NULL; /* perhaps replace with a hash table? */
#endif
+/* The blackhole_queue should be checked for threads to wake up. See
+ * Schedule.h for more thorough comment.
+ */
+rtsBool blackholes_need_checking = rtsFalse;
+
/* Linked list of all threads.
* Used for detecting garbage collected threads.
*/
static void scheduleHandleInterrupt(void);
static void scheduleStartSignalHandlers(void);
static void scheduleCheckBlockedThreads(void);
+static void scheduleCheckBlackHoles(void);
static void scheduleDetectDeadlock(void);
#if defined(GRAN)
static StgTSO *scheduleProcessEvent(rtsEvent *event);
static void scheduleDoGC(void);
static void unblockThread(StgTSO *tso);
+static rtsBool checkBlackHoles(void);
static SchedulerStatus waitThread_(/*out*/StgMainThread* m,
Capability *initialCapability
);
scheduleStartSignalHandlers();
+ // Only check the black holes here if we've nothing else to do.
+ // During normal execution, the black hole list only gets checked
+ // at GC time, to avoid repeatedly traversing this possibly long
+ // list each time around the scheduler.
+ if (EMPTY_RUN_QUEUE()) { scheduleCheckBlackHoles(); }
+
scheduleCheckBlockedThreads();
scheduleDetectDeadlock();
startHeapProfTimer();
#endif
- /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
- /* Run the current thread
- */
+ // ----------------------------------------------------------------------
+ // Run the current thread
+
prev_what_next = t->what_next;
errno = t->saved_errno;
barf("schedule: invalid what_next field");
}
+ // We have run some Haskell code: there might be blackhole-blocked
+ // threads to wake up now.
+ if ( blackhole_queue != END_TSO_QUEUE ) {
+ blackholes_need_checking = rtsTrue;
+ }
+
in_haskell = rtsFalse;
// The TSO might have moved, eg. if it re-entered the RTS and a GC
// And save the current errno in this thread.
t->saved_errno = errno;
- /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
+ // ----------------------------------------------------------------------
/* Costs for the scheduler are assigned to CCS_SYSTEM */
#if defined(PROFILING)
// We shouldn't be here...
barf("schedule: awaitEvent() in threaded RTS");
#endif
- awaitEvent( EMPTY_RUN_QUEUE() );
+ awaitEvent( EMPTY_RUN_QUEUE() && !blackholes_need_checking );
+ }
+}
+
+
+/* ----------------------------------------------------------------------------
+ * Check for threads blocked on BLACKHOLEs that can be woken up
+ * ASSUMES: sched_mutex
+ * ------------------------------------------------------------------------- */
+static void
+scheduleCheckBlackHoles( void )
+{
+ if ( blackholes_need_checking )
+ {
+ checkBlackHoles();
+ blackholes_need_checking = rtsFalse;
}
}
{
/*
* Detect deadlock: when we have no threads to run, there are no
- * threads waiting on I/O or sleeping, and all the other tasks are
- * waiting for work, we must have a deadlock of some description.
- *
- * We first try to find threads blocked on themselves (ie. black
- * holes), and generate NonTermination exceptions where necessary.
- *
- * If no threads are black holed, we have a deadlock situation, so
- * inform all the main threads.
+ * threads blocked, waiting for I/O, or sleeping, and all the
+ * other tasks are waiting for work, we must have a deadlock of
+ * some description.
*/
-#if !defined(PARALLEL_HASKELL) && !defined(RTS_SUPPORTS_THREADS)
if ( EMPTY_THREAD_QUEUES() )
{
+#if !defined(PARALLEL_HASKELL) && !defined(RTS_SUPPORTS_THREADS)
IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC..."));
// Garbage collection can release some new threads due to
barf("deadlock: main thread blocked in a strange way");
}
}
- }
#elif defined(RTS_SUPPORTS_THREADS)
// ToDo: add deadlock detection in threaded RTS
#elif defined(PARALLEL_HASKELL)
// ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
#endif
+ }
}
/* ----------------------------------------------------------------------------
}
}
+ // so this happens periodically:
+ scheduleCheckBlackHoles();
+
/* everybody back, start the GC.
* Could do it in this thread, or signal a condition var
* to do it in another thread. Either way, we need to
// being GC'd, and we don't want the "main thread has been GC'd" panic.
ASSERT(blocked_queue_hd == END_TSO_QUEUE);
+ ASSERT(blackhole_queue == END_TSO_QUEUE);
ASSERT(sleeping_queue == END_TSO_QUEUE);
}
blocked_queue_hds[i] = END_TSO_QUEUE;
blocked_queue_tls[i] = END_TSO_QUEUE;
ccalling_threadss[i] = END_TSO_QUEUE;
+ blackhole_queue[i] = END_TSO_QUEUE;
sleeping_queue = END_TSO_QUEUE;
}
#else
run_queue_tl = END_TSO_QUEUE;
blocked_queue_hd = END_TSO_QUEUE;
blocked_queue_tl = END_TSO_QUEUE;
+ blackhole_queue = END_TSO_QUEUE;
sleeping_queue = END_TSO_QUEUE;
#endif
}
#endif
+ if (blackhole_queue != END_TSO_QUEUE) {
+ evac((StgClosure **)&blackhole_queue);
+ }
+
if (suspended_ccalling_threads != END_TSO_QUEUE) {
evac((StgClosure **)&suspended_ccalling_threads);
}
}
case BlockedOnBlackHole:
- ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
{
- StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
-
- last = &bq->blocking_queue;
- for (t = bq->blocking_queue; t != END_TSO_QUEUE;
+ last = &blackhole_queue;
+ for (t = blackhole_queue; t != END_TSO_QUEUE;
last = &t->link, t = t->link) {
if (t == tso) {
*last = tso->link;
#endif
/* -----------------------------------------------------------------------------
+ * checkBlackHoles()
+ *
+ * Check the blackhole_queue for threads that can be woken up. We do
+ * this periodically: before every GC, and whenever the run queue is
+ * empty.
+ *
+ * An elegant solution might be to just wake up all the blocked
+ * threads with awakenBlockedQueue occasionally: they'll go back to
+ * sleep again if the object is still a BLACKHOLE. Unfortunately this
+ * doesn't give us a way to tell whether we've actually managed to
+ * wake up any threads, so we would be busy-waiting.
+ *
+ * -------------------------------------------------------------------------- */
+
+static rtsBool
+checkBlackHoles( void )
+{
+ StgTSO **prev, *t;
+ rtsBool any_woke_up = rtsFalse;
+ StgHalfWord type;
+
+ IF_DEBUG(scheduler, sched_belch("checking threads blocked on black holes"));
+
+ // ASSUMES: sched_mutex
+ prev = &blackhole_queue;
+ t = blackhole_queue;
+ while (t != END_TSO_QUEUE) {
+ ASSERT(t->why_blocked == BlockedOnBlackHole);
+ type = get_itbl(t->block_info.closure)->type;
+ if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
+ t = unblockOneLocked(t);
+ *prev = t;
+ any_woke_up = rtsTrue;
+ } else {
+ prev = &t->link;
+ t = t->link;
+ }
+ }
+
+ return any_woke_up;
+}
+
+/* -----------------------------------------------------------------------------
* raiseAsync()
*
* The following function implements the magic for raising an
} /* for */
debugBelch("\n");
}
-#else
-/*
- Nice and easy: only TSOs on the blocking queue
-*/
-void
-print_bq (StgClosure *node)
-{
- StgTSO *tso;
-
- ASSERT(node!=(StgClosure*)NULL); // sanity check
- for (tso = ((StgBlockingQueue*)node)->blocking_queue;
- tso != END_TSO_QUEUE;
- tso=tso->link) {
- ASSERT(tso!=NULL && tso!=END_TSO_QUEUE); // sanity check
- ASSERT(get_itbl(tso)->type == TSO); // guess what, sanity check
- debugBelch(" TSO %d (%p),", tso->id, tso);
- }
- debugBelch("\n");
-}
# endif
#if defined(PARALLEL_HASKELL)
#else
extern StgTSO *RTS_VAR(run_queue_hd), *RTS_VAR(run_queue_tl);
extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
+extern StgTSO *RTS_VAR(blackhole_queue);
extern StgTSO *RTS_VAR(sleeping_queue);
#endif
/* Linked list of all threads. */
extern StgTSO *RTS_VAR(all_threads);
+/* Set to rtsTrue if there are threads on the blackhole_queue, and
+ * it is possible that one or more of them may be available to run.
+ * This flag is set to rtsFalse after we've checked the queue, and
+ * set to rtsTrue just before we run some Haskell code. It is used
+ * to decide whether we should yield the Capability or not.
+ */
+extern rtsBool blackholes_need_checking;
+
#if defined(RTS_SUPPORTS_THREADS)
/* Schedule.c has detailed info on what these do */
extern Mutex RTS_VAR(sched_mutex);
SchedulerStatus stat;
StgClosure ** ret;
#if defined(RTS_SUPPORTS_THREADS)
-#if defined(THREADED_RTS)
Condition bound_thread_cond;
-#else
- Condition wakeup;
-#endif
#endif
struct StgMainThread_ *prev;
struct StgMainThread_ *link;
static void slide_spark_pool( StgSparkPool *pool );
-rtsBool
+void
initSparkPools( void )
{
Capability *cap;
pool->hd = pool->base;
pool->tl = pool->base;
}
- return rtsTrue; /* Qapla' */
}
/*
#elif defined(PAR) || defined(SMP)
StgClosure *findSpark( rtsBool );
-rtsBool initSparkPools( void );
+void initSparkPools( void );
void markSparkQueue( void );
#if defined(PAR)
StgTSO *activateSpark (rtsSpark spark) ;
#include "Rts.h"
#include "Hash.h"
#include "RtsUtils.h"
+#include "OSThreads.h"
#include "Storage.h"
#include "RtsAPI.h"
#include "RtsFlags.h"
#include "ParTicky.h" /* ToDo: move into Rts.h */
#include "Profiling.h"
#include "Storage.h"
+#include "Task.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
GC_tot_time += gc_time;
GCe_tot_time += gc_etime;
-#ifdef SMP
+#if defined(SMP)
{
nat i;
pthread_t me = pthread_self();
for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
- if (me == task_ids[i].id) {
- task_ids[i].gc_time += gc_time;
- task_ids[i].gc_etime += gc_etime;
+ if (me == taskTable[i].id) {
+ taskTable[i].gc_time += gc_time;
+ taskTable[i].gc_etime += gc_etime;
break;
}
}
stat_workerStop
Called under SMP when a worker thread finishes. We drop the timing
- stats for this thread into the task_ids struct for that thread.
+ stats for this thread into the taskTable struct for that thread.
-------------------------------------------------------------------------- */
#if defined(SMP)
getTimes();
for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
- if (task_ids[i].id == me) {
- task_ids[i].mut_time = CurrentUserTime - task_ids[i].gc_time;
- task_ids[i].mut_etime = CurrentElapsedTime
+ if (taskTable[i].id == me) {
+ taskTable[i].mut_time = CurrentUserTime - taskTable[i].gc_time;
+ taskTable[i].mut_etime = CurrentElapsedTime
- GCe_tot_time
- - task_ids[i].elapsedtimestart;
- if (task_ids[i].mut_time < 0.0) { task_ids[i].mut_time = 0.0; }
- if (task_ids[i].mut_etime < 0.0) { task_ids[i].mut_etime = 0.0; }
+ - taskTable[i].elapsedtimestart;
+ if (taskTable[i].mut_time < 0.0) { taskTable[i].mut_time = 0.0; }
+ if (taskTable[i].mut_etime < 0.0) { taskTable[i].mut_etime = 0.0; }
}
}
}
{ nat i;
MutUserTime = 0.0;
for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
- MutUserTime += task_ids[i].mut_time;
+ MutUserTime += taskTable[i].mut_time;
}
}
time = MutUserTime + GC_tot_time + InitUserTime + ExitUserTime;
statsPrintf(" Task %2d: MUT time: %6.2fs (%6.2fs elapsed)\n"
" GC time: %6.2fs (%6.2fs elapsed)\n\n",
i,
- TICK_TO_DBL(task_ids[i].mut_time),
- TICK_TO_DBL(task_ids[i].mut_etime),
- TICK_TO_DBL(task_ids[i].gc_time),
- TICK_TO_DBL(task_ids[i].gc_etime));
+ TICK_TO_DBL(taskTable[i].mut_time),
+ TICK_TO_DBL(taskTable[i].mut_etime),
+ TICK_TO_DBL(taskTable[i].gc_time),
+ TICK_TO_DBL(taskTable[i].gc_etime));
}
}
#endif
waiting for the evaluation of the closure to finish.
------------------------------------------------------------------------- */
-/* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
+/* Note: a BLACKHOLE must be big enough to be
* overwritten with an indirection/evacuee/catch. Thus we claim it
- * has 1 non-pointer word of payload (in addition to the pointer word
- * for the blocking queue in a BQ), which should be big enough for an
- * old-generation indirection.
+ * has 1 non-pointer word of payload.
*/
INFO_TABLE(stg_BLACKHOLE,0,1,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
{
/* Actually this is not necessary because R1 is about to be destroyed. */
LDV_ENTER(R1);
- /* Put ourselves on the blocking queue for this black hole */
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
- StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
+ /* Put ourselves on the blackhole queue */
+ StgTSO_link(CurrentTSO) = W_[blackhole_queue];
+ W_[blackhole_queue] = CurrentTSO;
/* jot down why and on what closure we are blocked */
StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
StgTSO_block_info(CurrentTSO) = R1;
- /* Change the BLACKHOLE into a BLACKHOLE_BQ */
-#ifdef PROFILING
- /* The size remains the same, so we call LDV_recordDead() -
- no need to fill slop. */
- foreign "C" LDV_recordDead(R1 "ptr", BYTES_TO_WDS(SIZEOF_StgBlockingQueue));
-#endif
- /*
- * Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
- */
- StgHeader_info(R1) = stg_BLACKHOLE_BQ_info;
-#ifdef PROFILING
- foreign "C" LDV_RECORD_CREATE(R1);
-#endif
-
- /* closure is mutable since something has just been added to its BQ */
- foreign "C" recordMutable(R1 "ptr");
-
- /* PAR: dumping of event now done in blockThread -- HWL */
-
/* stg_gen_block is too heavyweight, use a specialised one */
jump stg_block_1;
}
-INFO_TABLE(stg_BLACKHOLE_BQ,1,0,BLACKHOLE_BQ,"BLACKHOLE_BQ","BLACKHOLE_BQ")
-{
-#if defined(GRAN)
- /* Before overwriting TSO_LINK */
- STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);
-#endif
-
- TICK_ENT_BH();
- LDV_ENTER(R1);
-
- /* Put ourselves on the blocking queue for this black hole */
- StgTSO_link(CurrentTSO) = StgBlockingQueue_blocking_queue(R1);
- StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
-
- /* jot down why and on what closure we are blocked */
- StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
- StgTSO_block_info(CurrentTSO) = R1;
-
- /* PAR: dumping of event now done in blockThread -- HWL */
-
- /* stg_gen_block is too heavyweight, use a specialised one */
- jump stg_block_1;
-}
-
-/*
- Revertible black holes are needed in the parallel world, to handle
- negative acknowledgements of messages containing updatable closures.
- The idea is that when the original message is transmitted, the closure
- is turned into a revertible black hole...an object which acts like a
- black hole when local threads try to enter it, but which can be reverted
- back to the original closure if necessary.
-
- It's actually a lot like a blocking queue (BQ) entry, because revertible
- black holes are initially set up with an empty blocking queue.
-*/
-
#if defined(PAR) || defined(GRAN)
INFO_TABLE(stg_RBH,1,1,RBH,"RBH","RBH")
TICK_ENT_BH();
LDV_ENTER(R1);
- /* Put ourselves on the blocking queue for this black hole */
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
- StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
+ /* Put ourselves on the blackhole queue */
+ StgTSO_link(CurrentTSO) = W_[blackhole_queue];
+ W_[blackhole_queue] = CurrentTSO;
/* jot down why and on what closure we are blocked */
StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
StgTSO_block_info(CurrentTSO) = R1;
- /* Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC */
- StgHeader_info(R1) = stg_BLACKHOLE_BQ_info;
-
- /* closure is mutable since something has just been added to its BQ */
- foreign "C" recordMutable(R1 "ptr");
-
- /* PAR: dumping of event now done in blockThread -- HWL */
-
/* stg_gen_block is too heavyweight, use a specialised one */
jump stg_block_1;
}
#ifdef EAGER_BLACKHOLING
-INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,1,SE_BLACKHOLE,,IF_,"SE_BLACKHOLE","SE_BLACKHOLE");
-IF_(stg_SE_BLACKHOLE_entry)
-{
- STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1);
- STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
-}
+INFO_TABLE(stg_SE_BLACKHOLE,0,1,SE_BLACKHOLE,"SE_BLACKHOLE","SE_BLACKHOLE")
+{ foreign "C" barf("SE_BLACKHOLE object entered!"); }
-INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,1,SE_CAF_BLACKHOLE,,IF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
-IF_(stg_SE_CAF_BLACKHOLE_entry)
-{
- STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1);
- STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
-}
+INFO_TABLE(stg_SE_CAF_BLACKHOLE,0,1,SE_CAF_BLACKHOLE,"SE_CAF_BLACKHOLE","SE_CAF_BLACKHOLE")
+{ foreign "C" barf("SE_CAF_BLACKHOLE object entered!"); }
#endif
/* ----------------------------------------------------------------------------
stg_init
{
W_ next;
- Sp = W_[MainCapability + OFFSET_Capability_r + OFFSET_StgRegTable_rSp];
+ Sp = W_[BaseReg + OFFSET_StgRegTable_rSp];
next = W_[Sp];
Sp_adj(1);
jump next;
#include "Weak.h"
#include "Sanity.h"
#include "Arena.h"
-
+#include "OSThreads.h"
+#include "Capability.h"
#include "Storage.h"
#include "Schedule.h"
-#include "OSThreads.h"
-
#include "RetainerProfile.h" // for counting memory blocks (memInventory)
#include <stdlib.h>
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void stgDeallocForGMP (void *ptr, size_t size);
+/*
+ * Storage manager mutex: serialises access to the storage manager's
+ * shared state on SMP builds.  On non-SMP builds the ACQUIRE/RELEASE
+ * macros compile away to nothing.
+ */
+#if defined(SMP)
+extern Mutex sm_mutex;
+#define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex)
+#define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex)
+#else
+#define ACQUIRE_SM_LOCK
+#define RELEASE_SM_LOCK
+#endif
+
void
initStorage( void )
{
{
#ifdef SMP
Capability *cap;
- bdescr *bd;
g0s0->blocks = NULL;
g0s0->n_blocks = 0;
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
cap->r.rCurrentNursery = cap->r.rNursery;
- /* Set the back links to be equal to the Capability,
- * so we can do slightly better informed locking.
- */
- for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
- bd->u.back = (bdescr *)cap;
- }
}
#else /* SMP */
g0s0->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
Capability *cap;
/* All tasks must be stopped */
- ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);
+ ASSERT(rts_n_free_capabilities == RtsFlags.ParFlags.nNodes);
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
for (bd = cap->r.rNursery; bd; bd = bd->link) {
/* ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes); */
allocated =
- n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W
+ rts_n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W
+ allocated_bytes();
for (cap = free_capabilities; cap != NULL; cap = cap->link) {
#include "RtsFlags.h"
#include "Schedule.h"
+#if HAVE_SIGNAL_H
+#include <signal.h>
+#endif
+
/* There's not all that much code that is shared between the
* SMP and threads version of the 'task manager.' A sign
* that the code ought to be structured differently..(Maybe ToDo).
* accessed with the RTS lock in hand.
*/
#if defined(SMP)
-static TaskInfo* taskTable;
+TaskInfo* taskTable;
#endif
/* upper bound / the number of tasks created. */
static nat maxTasks;
/* number of tasks currently created */
static nat taskCount;
+static nat awaitDeath;
#if defined(SMP)
void
}
}
-void
+rtsBool
startTask ( void (*taskStart)(void) )
{
int r;
taskTable[taskCount].elapsedtimestart = stat_getElapsedTime();
IF_DEBUG(scheduler,debugBelch("scheduler: Started task: %ld\n",tid););
- return;
+ return rtsTrue;
}
void
-stopTaskManager ()
+stopTaskManager (void)
{
nat i;
OSThreadId tid = osThreadId();
#endif
/* Send 'em all a SIGHUP. That should shut 'em up. */
- await_death = maxCount - 1;
- for (i = 0; i < maxCount; i++) {
+ awaitDeath = taskCount==0 ? 0 : taskCount-1;
+ for (i = 0; i < taskCount; i++) {
/* don't cancel the thread running this piece of code. */
if ( taskTable[i].id != tid ) {
pthread_kill(taskTable[i].id,SIGTERM);
}
}
- while (await_death > 0) {
+ while (awaitDeath > 0) {
sched_yield();
}
}
+/* Forking is not supported by the SMP task manager; this aborts
+   unconditionally. */
void
-resetTaskManagerAfterFork ()
+resetTaskManagerAfterFork (void)
{
  barf("resetTaskManagerAfterFork not implemented for SMP");
}
return rtsFalse;
}
-
r = createOSThread(&tid,taskStart);
if (r != 0) {
barf("startTask: Can't create new task");
double gc_etime;
} TaskInfo;
-extern TaskInfo *taskIds;
+extern TaskInfo *taskTable;
extern void startTaskManager ( nat maxTasks, void (*taskStart)(void) );
extern void stopTaskManager ( void );