X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FThreads.c;h=dcb916a92bd4173f326507eed85292eb003fad5f;hb=209e093599d0d4db5487d124895d817c55b7c052;hp=28820c8d440e8e1d59cb2daec2d180e6e674889c;hpb=a2a67cd520b9841114d69a87a423dabcb3b4368e;p=ghc-hetmet.git

diff --git a/rts/Threads.c b/rts/Threads.c
index 28820c8..dcb916a 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -9,11 +9,22 @@
 #include "PosixSource.h"
 #include "Rts.h"
 
+#include "Capability.h"
+#include "Updates.h"
 #include "Threads.h"
 #include "STM.h"
 #include "Schedule.h"
 #include "Trace.h"
 #include "ThreadLabels.h"
+#include "Updates.h"
+#include "Messages.h"
+#include "RaiseAsync.h"
+#include "Prelude.h"
+#include "Printer.h"
+#include "sm/Sanity.h"
+#include "sm/Storage.h"
+
+#include <string.h>
 
 /* Next thread ID to allocate.
  * LOCK: sched_mutex
@@ -49,67 +60,78 @@ StgTSO *
 createThread(Capability *cap, nat size)
 {
     StgTSO *tso;
+    StgStack *stack;
     nat stack_size;
 
     /* sched_mutex is *not* required */
 
-    /* First check whether we should create a thread at all */
-
-    // ToDo: check whether size = stack_size - TSO_STRUCT_SIZEW
 
     /* catch ridiculously small stack sizes */
-    if (size < MIN_STACK_WORDS + TSO_STRUCT_SIZEW) {
-        size = MIN_STACK_WORDS + TSO_STRUCT_SIZEW;
+    if (size < MIN_STACK_WORDS + sizeofW(StgStack)) {
+        size = MIN_STACK_WORDS + sizeofW(StgStack);
     }
 
-    size = round_to_mblocks(size);
-    tso = (StgTSO *)allocateLocal(cap, size);
-
-    stack_size = size - TSO_STRUCT_SIZEW;
-    TICK_ALLOC_TSO(stack_size, 0);
-
+    /* The size argument we are given includes all the per-thread
+     * overheads:
+     *
+     *    - The TSO structure
+     *    - The STACK header
+     *
+     * This is so that we can use a nice round power of 2 for the
+     * default stack size (e.g. 1k), and if we're allocating lots of
+     * threads back-to-back they'll fit nicely in a block.  It's a bit
+     * of a benchmark hack, but it doesn't do any harm.
+     */
+    stack_size = round_to_mblocks(size - sizeofW(StgTSO));
+    stack = (StgStack *)allocate(cap, stack_size);
+    TICK_ALLOC_STACK(stack_size);
+    SET_HDR(stack, &stg_STACK_info, CCS_SYSTEM);
+    stack->stack_size   = stack_size - sizeofW(StgStack);
+    stack->sp           = stack->stack + stack->stack_size;
+    stack->dirty        = 1;
+
+    tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
+    TICK_ALLOC_TSO();
     SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);
 
     // Always start with the compiled code evaluator
     tso->what_next = ThreadRunGHC;
-
     tso->why_blocked  = NotBlocked;
-    tso->blocked_exceptions = END_TSO_QUEUE;
-    tso->flags = TSO_DIRTY;
-
+    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
+    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
+    tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
+    tso->flags = 0;
+    tso->dirty = 1;
+    tso->_link = END_TSO_QUEUE;
+
     tso->saved_errno = 0;
     tso->bound = NULL;
     tso->cap = cap;
-    tso->stack_size     = stack_size;
-    tso->max_stack_size = round_to_mblocks(RtsFlags.GcFlags.maxStkSize)
-                          - TSO_STRUCT_SIZEW;
-    tso->sp             = (P_)&(tso->stack) + stack_size;
+
+    tso->stackobj       = stack;
+    tso->tot_stack_size = stack->stack_size;
 
     tso->trec = NO_TREC;
-
+
 #ifdef PROFILING
     tso->prof.CCCS = CCS_MAIN;
 #endif
 
-    /* put a stop frame on the stack */
-    tso->sp -= sizeofW(StgStopFrame);
-    SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
-    tso->_link = END_TSO_QUEUE;
-
+    // put a stop frame on the stack
+    stack->sp -= sizeofW(StgStopFrame);
+    SET_HDR((StgClosure*)stack->sp,
+            (StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
+
     /* Link the new thread on the global thread list.
      */
     ACQUIRE_LOCK(&sched_mutex);
     tso->id = next_thread_id++;  // while we have the mutex
-    tso->global_link = g0s0->threads;
-    g0s0->threads = tso;
+    tso->global_link = g0->threads;
+    g0->threads = tso;
     RELEASE_LOCK(&sched_mutex);
 
-    postEvent (cap, EVENT_CREATE_THREAD, tso->id, 0);
+    // ToDo: report the stack size in the event?
+    traceEventCreateThread(cap, tso);
 
-    debugTrace(DEBUG_sched,
-               "created thread %ld, stack size = %lx words",
-               (long)tso->id, (long)tso->stack_size);
-
     return tso;
 }
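The upshot of the new allocation scheme is that the size argument to createThread is the whole per-thread footprint, TSO and STACK headers included. A sketch of a typical call site, modelled on the RtsAPI callers (scheduleThread and RtsFlags.GcFlags.initialStkSize are existing RTS names; error handling is omitted):

    /* Sketch only, not part of the patch: create and schedule a thread
     * with the default initial stack (+RTS -ki).  initialStkSize covers
     * the TSO and STACK overheads by design, so threads created
     * back-to-back pack densely into blocks. */
    StgTSO *t = createThread(cap, RtsFlags.GcFlags.initialStkSize);
    scheduleThread(cap, t);
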
@@ -147,7 +169,7 @@ rts_getThreadId(StgPtr tso)
   Fails fatally if the TSO is not on the queue.
   -------------------------------------------------------------------------- */
 
-void
+rtsBool // returns True if we modified queue
 removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
 {
     StgTSO *t, *prev;
@@ -157,123 +179,272 @@ removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
         if (t == tso) {
             if (prev) {
                 setTSOLink(cap,prev,t->_link);
+                t->_link = END_TSO_QUEUE;
+                return rtsFalse;
             } else {
                 *queue = t->_link;
+                t->_link = END_TSO_QUEUE;
+                return rtsTrue;
             }
-            return;
         }
     }
     barf("removeThreadFromQueue: not found");
 }
 
-void
+rtsBool // returns True if we modified head or tail
 removeThreadFromDeQueue (Capability *cap,
                          StgTSO **head, StgTSO **tail, StgTSO *tso)
 {
     StgTSO *t, *prev;
+    rtsBool flag = rtsFalse;
 
     prev = NULL;
     for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
         if (t == tso) {
             if (prev) {
                 setTSOLink(cap,prev,t->_link);
+                flag = rtsFalse;
             } else {
                 *head = t->_link;
+                flag = rtsTrue;
             }
-            if (*tail == tso) {
+            t->_link = END_TSO_QUEUE;
+            if (*tail == tso) {
                 if (prev) {
                     *tail = prev;
                 } else {
                     *tail = END_TSO_QUEUE;
                 }
-            }
-            return;
+                return rtsTrue;
+            } else {
+                return flag;
+            }
         }
     }
     barf("removeThreadFromMVarQueue: not found");
 }
 
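Both removers now report whether they wrote through the caller's pointers, which is what a caller needs in order to know that the owning structure was mutated. A hedged sketch of the intended use (mvar and tso are assumed to be in scope; the comment spells out the contract):

    /* Sketch, not patch code: rtsTrue from removeThreadFromDeQueue
     * means *head or *tail was written, i.e. the owning structure
     * (an MVar, say) was itself mutated. */
    if (removeThreadFromDeQueue(cap, &mvar->head, &mvar->tail, tso)) {
        /* mvar's head/tail changed: treat the MVar as modified */
    }
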
+/* ----------------------------------------------------------------------------
+   tryWakeupThread()
+
+   Attempt to wake up a thread.  tryWakeupThread is idempotent: it is
+   always safe to call it too many times, but it is not safe in
+   general to omit a call.
+
+   ------------------------------------------------------------------------- */
+
 void
-removeThreadFromMVarQueue (Capability *cap, StgMVar *mvar, StgTSO *tso)
+tryWakeupThread (Capability *cap, StgTSO *tso)
 {
-    removeThreadFromDeQueue (cap, &mvar->head, &mvar->tail, tso);
+    traceEventThreadWakeup (cap, tso, tso->cap->no);
+
+#ifdef THREADED_RTS
+    if (tso->cap != cap)
+    {
+        MessageWakeup *msg;
+        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
+        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
+        msg->tso = tso;
+        sendMessage(cap, tso->cap, (Message*)msg);
+        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
+                      (lnat)tso->id, tso->cap->no);
+        return;
+    }
+#endif
+
+    switch (tso->why_blocked)
+    {
+    case BlockedOnMVar:
+    {
+        if (tso->_link == END_TSO_QUEUE) {
+            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
+            goto unblock;
+        } else {
+            return;
+        }
+    }
+
+    case BlockedOnMsgThrowTo:
+    {
+        const StgInfoTable *i;
+
+        i = lockClosure(tso->block_info.closure);
+        unlockClosure(tso->block_info.closure, i);
+        if (i != &stg_MSG_NULL_info) {
+            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
+                          (lnat)tso->id, tso->block_info.throwto->header.info);
+            return;
+        }
+
+        // remove the block frame from the stack
+        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
+        tso->stackobj->sp += 3;
+        goto unblock;
+    }
+
+    case BlockedOnBlackHole:
+    case BlockedOnSTM:
+    case ThreadMigrating:
+        goto unblock;
+
+    default:
+        // otherwise, do nothing
+        return;
+    }
+
+unblock:
+    // just run the thread now, if the BH is not really available,
+    // we'll block again.
+    tso->why_blocked = NotBlocked;
+    appendToRunQueue(cap,tso);
+
+    // We used to set the context switch flag here, which would
+    // trigger a context switch a short time in the future (at the end
+    // of the current nursery block).  The idea is that we have just
+    // woken up a thread, so we may need to load-balance and migrate
+    // threads to other CPUs.  On the other hand, setting the context
+    // switch flag here unfairly penalises the current thread by
+    // yielding its time slice too early.
+    //
+    // The synthetic benchmark nofib/smp/chan can be used to show the
+    // difference quite clearly.
+
+    // cap->context_switch = 1;
 }
 
 /* ----------------------------------------------------------------------------
-   unblockOne()
-
-   unblock a single thread.
+   migrateThread
    ------------------------------------------------------------------------- */
 
-StgTSO *
-unblockOne (Capability *cap, StgTSO *tso)
+void
+migrateThread (Capability *from, StgTSO *tso, Capability *to)
 {
-    return unblockOne_(cap,tso,rtsTrue); // allow migration
+    traceEventMigrateThread (from, tso, to->no);
+    // ThreadMigrating tells the target cap that it needs to be added to
+    // the run queue when it receives the MSG_TRY_WAKEUP.
+    tso->why_blocked = ThreadMigrating;
+    tso->cap = to;
+    tryWakeupThread(from, tso);
 }
 
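migrateThread shows how small the load-balancing path has become: retarget tso->cap and let the MSG_TRY_WAKEUP machinery above do the cross-capability work. A sketch of a scheduler-side use (popRunQueue is the run-queue helper from Schedule.h; free_cap is an illustrative name for some other idle capability):

    /* Sketch: push a runnable thread to another capability.  The
     * target adds it to its own run queue on receipt of the message. */
    StgTSO *t = popRunQueue(cap);
    migrateThread(cap, t, free_cap);
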
-StgTSO *
-unblockOne_ (Capability *cap, StgTSO *tso,
-             rtsBool allow_migrate USED_IF_THREADS)
-{
-    StgTSO *next;
-
-    // NO, might be a WHITEHOLE: ASSERT(get_itbl(tso)->type == TSO);
-    ASSERT(tso->why_blocked != NotBlocked);
-
-    tso->why_blocked = NotBlocked;
-    next = tso->_link;
-    tso->_link = END_TSO_QUEUE;
-
-#if defined(THREADED_RTS)
-    if (tso->cap == cap || (!tsoLocked(tso) &&
-                            allow_migrate &&
-                            RtsFlags.ParFlags.wakeupMigrate)) {
-        // We are waking up this thread on the current Capability, which
-        // might involve migrating it from the Capability it was last on.
-        if (tso->bound) {
-            ASSERT(tso->bound->cap == tso->cap);
-            tso->bound->cap = cap;
-        }
+/* ----------------------------------------------------------------------------
+   awakenBlockedQueue
 
-        tso->cap = cap;
-        appendToRunQueue(cap,tso);
+   wakes up all the threads on the specified queue.
+   ------------------------------------------------------------------------- */
 
-        // context-switch soonish so we can migrate the new thread if
-        // necessary.  NB. not contextSwitchCapability(cap), which would
-        // force a context switch immediately.
-        cap->context_switch = 1;
-    } else {
-        // we'll try to wake it up on the Capability it was last on.
-        wakeupThreadOnCapability(cap, tso->cap, tso);
-    }
-#else
-    appendToRunQueue(cap,tso);
+void
+wakeBlockingQueue(Capability *cap, StgBlockingQueue *bq)
+{
+    MessageBlackHole *msg;
+    const StgInfoTable *i;
+
+    ASSERT(bq->header.info == &stg_BLOCKING_QUEUE_DIRTY_info  ||
+           bq->header.info == &stg_BLOCKING_QUEUE_CLEAN_info  );
+
+    for (msg = bq->queue; msg != (MessageBlackHole*)END_TSO_QUEUE;
+         msg = msg->link) {
+        i = msg->header.info;
+        if (i != &stg_IND_info) {
+            ASSERT(i == &stg_MSG_BLACKHOLE_info);
+            tryWakeupThread(cap,msg->tso);
+        }
+    }
 
-    // context-switch soonish so we can migrate the new thread if
-    // necessary.  NB. not contextSwitchCapability(cap), which would
-    // force a context switch immediately.
-    cap->context_switch = 1;
+    // overwrite the BQ with an indirection so it will be
+    // collected at the next GC.
+#if defined(DEBUG) && !defined(THREADED_RTS)
+    // XXX FILL_SLOP, but not if THREADED_RTS because in that case
+    // another thread might be looking at this BLOCKING_QUEUE and
+    // checking the owner field at the same time.
+    bq->bh = 0; bq->queue = 0; bq->owner = 0;
 #endif
+    OVERWRITE_INFO(bq, &stg_IND_info);
+}
 
-    postEvent (cap, EVENT_THREAD_WAKEUP, tso->id, tso->cap->no);
+// If we update a closure that we know we BLACKHOLE'd, and the closure
+// no longer points to the current TSO as its owner, then there may be
+// an orphaned BLOCKING_QUEUE closure with blocked threads attached to
+// it.  We therefore traverse the BLOCKING_QUEUEs attached to the
+// current TSO to see if any can now be woken up.
+void
+checkBlockingQueues (Capability *cap, StgTSO *tso)
+{
+    StgBlockingQueue *bq, *next;
+    StgClosure *p;
 
-    debugTrace(DEBUG_sched, "waking up thread %ld on cap %d",
-               (long)tso->id, tso->cap->no);
+    debugTraceCap(DEBUG_sched, cap,
+                  "collision occurred; checking blocking queues for thread %ld",
+                  (lnat)tso->id);
+
+    for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) {
+        next = bq->link;
 
-    return next;
+        if (bq->header.info == &stg_IND_info) {
+            // ToDo: could short it out right here, to avoid
+            // traversing this IND multiple times.
+            continue;
+        }
+
+        p = bq->bh;
+
+        if (p->header.info != &stg_BLACKHOLE_info ||
+            ((StgInd *)p)->indirectee != (StgClosure*)bq)
+        {
+            wakeBlockingQueue(cap,bq);
+        }
+    }
 }
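The test checkBlockingQueues applies to each queue is simply "does the black hole still point back at this queue?". Pulled out as a hypothetical helper (not RTS code; names as in the loop above):

    /* Hypothetical helper, equivalent to the test in the loop above:
     * a BLOCKING_QUEUE is still attached iff its black hole's
     * indirectee points back at it; anything else means another
     * thread updated the thunk, so the queue is orphaned and its
     * waiters must be woken by hand. */
    static rtsBool bqOrphaned (StgBlockingQueue *bq)
    {
        StgClosure *p = bq->bh;
        return (p->header.info != &stg_BLACKHOLE_info ||
                ((StgInd *)p)->indirectee != (StgClosure *)bq);
    }
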
 
 /* ----------------------------------------------------------------------------
-   awakenBlockedQueue
+   updateThunk
 
-   wakes up all the threads on the specified queue.
+   Update a thunk with a value.  In order to do this, we need to know
+   which TSO owns (or is evaluating) the thunk, in case we need to
+   awaken any threads that are blocked on it.
    ------------------------------------------------------------------------- */
 
 void
-awakenBlockedQueue(Capability *cap, StgTSO *tso)
+updateThunk (Capability *cap, StgTSO *tso, StgClosure *thunk, StgClosure *val)
 {
-    while (tso != END_TSO_QUEUE) {
-        tso = unblockOne(cap,tso);
+    StgClosure *v;
+    StgTSO *owner;
+    const StgInfoTable *i;
+
+    i = thunk->header.info;
+    if (i != &stg_BLACKHOLE_info &&
+        i != &stg_CAF_BLACKHOLE_info &&
+        i != &__stg_EAGER_BLACKHOLE_info &&
+        i != &stg_WHITEHOLE_info) {
+        updateWithIndirection(cap, thunk, val);
+        return;
+    }
+
+    v = ((StgInd*)thunk)->indirectee;
+
+    updateWithIndirection(cap, thunk, val);
+
+    i = v->header.info;
+    if (i == &stg_TSO_info) {
+        owner = (StgTSO*)v;
+        if (owner != tso) {
+            checkBlockingQueues(cap, tso);
+        }
+        return;
+    }
+
+    if (i != &stg_BLOCKING_QUEUE_CLEAN_info &&
+        i != &stg_BLOCKING_QUEUE_DIRTY_info) {
+        checkBlockingQueues(cap, tso);
+        return;
+    }
+
+    owner = ((StgBlockingQueue*)v)->owner;
+
+    if (owner != tso) {
+        checkBlockingQueues(cap, tso);
+    } else {
+        wakeBlockingQueue(cap, (StgBlockingQueue*)v);
     }
 }
 
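The dispatch at the end of updateThunk is easier to see in isolation. A hypothetical condensation (notifyAfterUpdate is not a real RTS function; v is the indirectee sampled before the update, exactly as above):

    /* Hypothetical helper: who to wake after updating a black hole
     * whose indirectee was v. */
    static void notifyAfterUpdate (Capability *cap, StgTSO *tso, StgClosure *v)
    {
        const StgInfoTable *i = v->header.info;
        if (i == &stg_TSO_info) {
            // still owned by a TSO: only a lost race needs fixing up
            if ((StgTSO*)v != tso) checkBlockingQueues(cap, tso);
        } else if (i == &stg_BLOCKING_QUEUE_CLEAN_info ||
                   i == &stg_BLOCKING_QUEUE_DIRTY_info) {
            if (((StgBlockingQueue*)v)->owner != tso) {
                checkBlockingQueues(cap, tso);   // lost a race
            } else {
                wakeBlockingQueue(cap, (StgBlockingQueue*)v);
            }
        } else {
            checkBlockingQueues(cap, tso);       // someone else updated it
        }
    }
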
@@ -305,6 +476,222 @@ isThreadBound(StgTSO* tso USED_IF_THREADS)
     return rtsFalse;
 }
 
+/* -----------------------------------------------------------------------------
+   Stack overflow
+
+   If the thread has reached its maximum stack size, then raise the
+   StackOverflow exception in the offending thread.  Otherwise
+   relocate the TSO into a larger chunk of memory and adjust its stack
+   size appropriately.
+   -------------------------------------------------------------------------- */
+
+void
+threadStackOverflow (Capability *cap, StgTSO *tso)
+{
+    StgStack *new_stack, *old_stack;
+    StgUnderflowFrame *frame;
+    lnat chunk_size;
+
+    IF_DEBUG(sanity,checkTSO(tso));
+
+    if (tso->tot_stack_size >= RtsFlags.GcFlags.maxStkSize
+        && !(tso->flags & TSO_BLOCKEX)) {
+        // NB. never raise a StackOverflow exception if the thread is
+        // inside Control.Exception.block.  It is impractical to protect
+        // against stack overflow exceptions, since virtually anything
+        // can raise one (even 'catch'), so this is the only sensible
+        // thing to do here.  See bug #767.
+        //
+
+        if (tso->flags & TSO_SQUEEZED) {
+            return;
+        }
+        // #3677: In a stack overflow situation, stack squeezing may
+        // reduce the stack size, but we don't know whether it has been
+        // reduced enough for the stack check to succeed if we try
+        // again.  Fortunately stack squeezing is idempotent, so all we
+        // need to do is record whether *any* squeezing happened.  If we
+        // are at the stack's absolute -K limit, and stack squeezing
+        // happened, then we try running the thread again.  The
+        // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
+        // squeezing happened or not.
+
+        debugTrace(DEBUG_gc,
+                   "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
+                   (long)tso->id, tso, (long)tso->stackobj->stack_size,
+                   RtsFlags.GcFlags.maxStkSize);
+        IF_DEBUG(gc,
+                 /* If we're debugging, just print out the top of the stack */
+                 printStackChunk(tso->stackobj->sp,
+                                 stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
+                                         tso->stackobj->sp+64)));
+
+        // Send this thread the StackOverflow exception
+        throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
+    }
+
+
+    // We also want to avoid enlarging the stack if squeezing has
+    // already released some of it.  However, we don't want to get into
+    // a pathological situation where a thread has a nearly full stack
+    // (near its current limit, but not near the absolute -K limit),
+    // keeps allocating a little bit, squeezing removes a little bit,
+    // and then it runs again.  So to avoid this, if we squeezed *and*
+    // there is still less than BLOCK_SIZE_W words free, then we enlarge
+    // the stack anyway.
+    if ((tso->flags & TSO_SQUEEZED) &&
+        ((W_)(tso->stackobj->sp - tso->stackobj->stack) >= BLOCK_SIZE_W)) {
+        return;
+    }
+
+    old_stack = tso->stackobj;
+
+    // If we used less than half of the previous stack chunk, then we
+    // must have failed a stack check for a large amount of stack.  In
+    // this case we allocate a double-sized chunk to try to
+    // accommodate the large stack request.  If that also fails, the
+    // next chunk will be 4x normal size, and so on.
+    //
+    // It would be better to have the mutator tell us how much stack
+    // was needed, as we do with heap allocations, but this works for
+    // now.
+    //
+    if (old_stack->sp > old_stack->stack + old_stack->stack_size / 2)
+    {
+        chunk_size = 2 * (old_stack->stack_size + sizeofW(StgStack));
+    }
+    else
+    {
+        chunk_size = RtsFlags.GcFlags.stkChunkSize;
+    }
+
+    debugTraceCap(DEBUG_sched, cap,
+                  "allocating new stack chunk of size %d bytes",
+                  chunk_size * sizeof(W_));
+
+    new_stack = (StgStack*) allocate(cap, chunk_size);
+    SET_HDR(new_stack, &stg_STACK_info, CCS_SYSTEM);
+    TICK_ALLOC_STACK(chunk_size);
+
+    new_stack->dirty = 0; // begin clean, we'll mark it dirty below
+    new_stack->stack_size = chunk_size - sizeofW(StgStack);
+    new_stack->sp = new_stack->stack + new_stack->stack_size;
+
+    tso->tot_stack_size += new_stack->stack_size;
+
+    new_stack->sp -= sizeofW(StgUnderflowFrame);
+    frame = (StgUnderflowFrame*)new_stack->sp;
+    frame->info = &stg_stack_underflow_frame_info;
+    frame->next_chunk  = old_stack;
+
+    {
+        StgWord *sp;
+        nat chunk_words, size;
+
+        // find the boundary of the chunk of old stack we're going to
+        // copy to the new stack.  We skip over stack frames until we
+        // reach the smaller of
+        //
+        //   * the chunk buffer size (+RTS -kb)
+        //   * the end of the old stack
+        //
+        for (sp = old_stack->sp;
+             sp < stg_min(old_stack->sp + RtsFlags.GcFlags.stkChunkBufferSize,
                          old_stack->stack + old_stack->stack_size); )
+        {
+            size = stack_frame_sizeW((StgClosure*)sp);
+
+            // if including this frame would exceed the size of the
+            // new stack (taking into account the underflow frame),
+            // then stop at the previous frame.
+            if (sp + size > old_stack->stack + (new_stack->stack_size -
+                                                sizeofW(StgUnderflowFrame))) {
+                break;
+            }
+            sp += size;
+        }
+
+        // copy the stack chunk between tso->sp and sp to
+        // new_tso->sp + (tso->sp - sp)
+        chunk_words = sp - old_stack->sp;
+
+        memcpy(/* dest   */ new_stack->sp - chunk_words,
+               /* source */ old_stack->sp,
+               /* size   */ chunk_words * sizeof(W_));
+
+        old_stack->sp += chunk_words;
+        new_stack->sp -= chunk_words;
+    }
+
+    // if the old stack chunk is now empty, discard it.  With the
+    // default settings, -ki1k -kb1k, this means the first stack chunk
+    // will be discarded after the first overflow, being replaced by a
+    // non-moving 32k chunk.
+    if (old_stack->sp == old_stack->stack + old_stack->stack_size) {
+        frame->next_chunk = new_stack;
+    }
+
+    tso->stackobj = new_stack;
+
+    // we're about to run it, better mark it dirty
+    dirty_STACK(cap, new_stack);
+
+    IF_DEBUG(sanity,checkTSO(tso));
+    // IF_DEBUG(scheduler,printTSO(new_tso));
+}
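The chunk-size choice is the one heuristic in threadStackOverflow: a failed stack check with more than half the old chunk unused means a single very large frame, so the next chunk doubles; otherwise the configured +RTS -kc size is used. The same policy in standalone form (illustrative names, not RTS code):

    #include <stddef.h>

    /* Toy model of the sizing above.  Stacks grow downward, so
     * "used less than half" corresponds to sp still above the midpoint. */
    static size_t next_chunk_words (size_t old_chunk_words,
                                    size_t used_words,
                                    size_t default_chunk_words)
    {
        if (used_words < old_chunk_words / 2) {
            return 2 * old_chunk_words;   /* big frame: grow geometrically */
        }
        return default_chunk_words;       /* plain overflow: fixed chunk */
    }
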
+
+
+/* ---------------------------------------------------------------------------
+   Stack underflow - called from the stg_stack_underflow_frame
+   ------------------------------------------------------------------------ */
+
+nat // returns offset to the return address
+threadStackUnderflow (Capability *cap, StgTSO *tso)
+{
+    StgStack *new_stack, *old_stack;
+    StgUnderflowFrame *frame;
+    nat retvals;
+
+    debugTraceCap(DEBUG_sched, cap, "stack underflow");
+
+    old_stack = tso->stackobj;
+
+    frame = (StgUnderflowFrame*)(old_stack->stack + old_stack->stack_size
+                                 - sizeofW(StgUnderflowFrame));
+    ASSERT(frame->info == &stg_stack_underflow_frame_info);
+
+    new_stack = (StgStack*)frame->next_chunk;
+    tso->stackobj = new_stack;
+
+    retvals = (P_)frame - old_stack->sp;
+    if (retvals != 0)
+    {
+        // we have some return values to copy to the old stack
+        if ((nat)(new_stack->sp - new_stack->stack) < retvals)
+        {
+            barf("threadStackUnderflow: not enough space for return values");
+        }
+
+        new_stack->sp -= retvals;
+
+        memcpy(/* dest */ new_stack->sp,
+               /* src  */ old_stack->sp,
+               /* size */ retvals * sizeof(W_));
+    }
+
+    // empty the old stack.  The GC may still visit this object
+    // because it is on the mutable list.
+    old_stack->sp = old_stack->stack + old_stack->stack_size;
+
+    // restore the stack parameters, and update tot_stack_size
+    tso->tot_stack_size -= old_stack->stack_size;
+
+    // we're about to run it, better mark it dirty
+    dirty_STACK(cap, new_stack);
+
+    return retvals;
+}
+
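The subtle step above is the return-value copy: words pushed above the underflow frame must follow execution down onto the previous chunk. The two chunks are distinct heap objects, so the ranges cannot overlap and plain memcpy suffices. The same step standalone (toy types and names):

    #include <string.h>

    typedef unsigned long W;   /* stand-in for the RTS word type */

    /* Move 'retvals' words from the emptied chunk's sp down onto the
     * previous chunk, returning that chunk's new stack pointer. */
    static W *copy_return_values (W *new_sp, const W *old_sp, size_t retvals)
    {
        new_sp -= retvals;
        memcpy(new_sp, old_sp, retvals * sizeof(W));
        return new_sp;
    }
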
 /* ----------------------------------------------------------------------------
  * Debugging: why is a thread blocked
  * ------------------------------------------------------------------------- */
@@ -331,21 +718,24 @@ printThreadBlockage(StgTSO *tso)
   case BlockedOnMVar:
     debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
     break;
-  case BlockedOnException:
-    debugBelch("is blocked on delivering an exception to thread %lu",
-               (unsigned long)tso->block_info.tso->id);
-    break;
   case BlockedOnBlackHole:
-      debugBelch("is blocked on a black hole");
+      debugBelch("is blocked on a black hole %p",
+                 ((StgBlockingQueue*)tso->block_info.bh->bh));
+      break;
+  case BlockedOnMsgThrowTo:
+    debugBelch("is blocked on a throwto message");
     break;
   case NotBlocked:
     debugBelch("is not blocked");
     break;
+  case ThreadMigrating:
+    debugBelch("is runnable, but not on the run queue");
+    break;
   case BlockedOnCCall:
     debugBelch("is blocked on an external call");
     break;
-  case BlockedOnCCall_NoUnblockExc:
-    debugBelch("is blocked on an external call (exceptions were already blocked)");
+  case BlockedOnCCall_Interruptible:
+    debugBelch("is blocked on an external call (but may be interrupted)");
     break;
   case BlockedOnSTM:
     debugBelch("is blocked on an STM operation");
@@ -356,6 +746,7 @@ printThreadBlockage(StgTSO *tso)
   }
 }
 
+
 void
 printThreadStatus(StgTSO *t)
 {
@@ -364,10 +755,7 @@ printThreadStatus(StgTSO *t)
     void *label = lookupThreadLabel(t->id);
     if (label) debugBelch("[\"%s\"] ",(char *)label);
   }
-  if (t->what_next == ThreadRelocated) {
-    debugBelch("has been relocated...\n");
-  } else {
-    switch (t->what_next) {
+    switch (t->what_next) {
       case ThreadKilled:
         debugBelch("has been killed");
         break;
@@ -377,37 +765,20 @@ printThreadStatus(StgTSO *t)
       default:
         printThreadBlockage(t);
     }
-    if (t->flags & TSO_DIRTY) {
+    if (t->dirty) {
       debugBelch(" (TSO_DIRTY)");
-    } else if (t->flags & TSO_LINK_DIRTY) {
-      debugBelch(" (TSO_LINK_DIRTY)");
     }
     debugBelch("\n");
-  }
 }
 
 void
 printAllThreads(void)
 {
   StgTSO *t, *next;
-  nat i, s;
+  nat i, g;
   Capability *cap;
 
-# if defined(GRAN)
-  char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
-  ullong_format_string(TIME_ON_PROC(CurrentProc),
-                       time_string, rtsFalse/*no commas!*/);
-
-  debugBelch("all threads at [%s]:\n", time_string);
-# elif defined(PARALLEL_HASKELL)
-  char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
-  ullong_format_string(CURRENT_TIME,
-                       time_string, rtsFalse/*no commas!*/);
-
-  debugBelch("all threads at [%s]:\n", time_string);
-# else
   debugBelch("all threads:\n");
-# endif
 
   for (i = 0; i < n_capabilities; i++) {
       cap = &capabilities[i];
@@ -418,16 +789,12 @@ printAllThreads(void)
   }
 
   debugBelch("other threads:\n");
-  for (s = 0; s < total_steps; s++) {
-      for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
+  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+    for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
       if (t->why_blocked != NotBlocked) {
           printThreadStatus(t);
      }
-      if (t->what_next == ThreadRelocated) {
-          next = t->_link;
-      } else {
-          next = t->global_link;
-      }
+      next = t->global_link;
    }
  }
 }