/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2003-2006
 *
 * A Capability represents the token required to execute STG code,
 * and all the state an OS thread/task needs to run Haskell code:
 * its STG registers, a pointer to its TSO, a nursery etc. During
 * STG execution, a pointer to the capability is kept in a
 * register (BaseReg; actually it is a pointer to cap->r).
 *
 * Only in a THREADED_RTS build will there be multiple capabilities,
 * for non-threaded builds there is only one global capability, namely
 * MainCapability.
 *
 * --------------------------------------------------------------------------*/
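/* A note on getting back from BaseReg to the Capability: since BaseReg
 * points at cap->r rather than at the Capability itself, the Capability
 * is recovered by subtracting the offset of the r field.  A minimal
 * sketch of the idea (the real definition lives in Capability.h; the
 * names used here are from memory and may differ):
 *
 *     INLINE_HEADER Capability *
 *     regTableToCapability (StgRegTable *reg)
 *     {
 *         return (Capability *)((void *)
 *             ((char *)reg - STG_FIELD_OFFSET(Capability, r)));
 *     }
 */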
#include "PosixSource.h"
#include "Rts.h"
#include "RtsUtils.h"
#include "RtsFlags.h"
#include "STM.h"
#include "OSThreads.h"
#include "Capability.h"
#include "Schedule.h"
#include "Sparks.h"
#include "Trace.h"
// one global capability, this is the Capability for non-threaded
// builds, and for +RTS -N1
Capability MainCapability;

nat n_capabilities;
Capability *capabilities = NULL;
// Holds the Capability which last became free.  This is used so that
// an in-call has a chance of quickly finding a free Capability.
// Maintaining a global free list of Capabilities would require global
// locking, so we don't do that.
Capability *last_free_capability;

/* GC indicator, in scope for the scheduler, init'ed to false */
volatile StgWord waiting_for_gc = 0;
#if defined(THREADED_RTS)
STATIC_INLINE rtsBool
globalWorkToDo (void)
{
    return blackholes_need_checking
        || sched_state >= SCHED_INTERRUPTING
        ;
}
#endif
#if defined(THREADED_RTS)
StgClosure *
findSpark (Capability *cap)
{
    Capability *robbed;
    StgClosurePtr spark;
    rtsBool retry;
    nat i = 0;

    if (!emptyRunQueue(cap)) {
        // If there are other threads, don't try to run any new
        // sparks: sparks might be speculative, we don't want to take
        // resources away from the main computation.
        return NULL;
    }

    do {
        retry = rtsFalse;

        // first try to get a spark from our own pool.
        // We should be using reclaimSpark(), because it works without
        // needing any atomic instructions:
        //   spark = reclaimSpark(cap->sparks);
        // However, measurements show that this makes at least one benchmark
        // slower (prsa) and doesn't affect the others.
        spark = tryStealSpark(cap);
        if (spark != NULL) {
            cap->sparks_converted++;
            return spark;
        }

        if (n_capabilities == 1) { return NULL; } // makes no sense...

        debugTrace(DEBUG_sched,
                   "cap %d: Trying to steal work from other capabilities",
                   cap->no);

        /* visit capabilities 0..n-1 in sequence until a theft succeeds. We
           could start at a random place instead of 0 as well. */
        for ( i=0 ; i < n_capabilities ; i++ ) {
            robbed = &capabilities[i];
            if (cap == robbed)  // ourselves...
                continue;

            if (emptySparkPoolCap(robbed)) // nothing to steal here
                continue;

            spark = tryStealSpark(robbed);
            if (spark == NULL && !emptySparkPoolCap(robbed)) {
                // we conflicted with another thread while trying to steal;
                // try again later.
                retry = rtsTrue;
            }

            if (spark != NULL) {
                debugTrace(DEBUG_sched,
                           "cap %d: Stole a spark from capability %d",
                           cap->no, robbed->no);
                cap->sparks_converted++;
                return spark;
            }
            // otherwise: no success, try next one
        }
    } while (retry);

    debugTrace(DEBUG_sched, "No sparks stolen");
    return NULL;
}
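/* Usage sketch (illustrative, not part of this file): the scheduler
 * calls findSpark() when its run queue is empty, and turns any spark it
 * gets back into a thread.  Roughly, assuming a createSparkThread()
 * helper as in Sparks.c:
 *
 *     StgClosure *spark = findSpark(cap);
 *     if (spark != NULL) {
 *         createSparkThread(cap, spark); // evaluate the spark in a new thread
 *     }
 */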
// Returns True if any spark pool is non-empty at this moment in time.
// The result is only valid for an instant, of course, so in a sense
// is immediately invalid, and should not be relied upon for
// correctness.
rtsBool
anySparks (void)
{
    nat i;

    for (i=0; i < n_capabilities; i++) {
        if (!emptySparkPoolCap(&capabilities[i])) {
            return rtsTrue;
        }
    }
    return rtsFalse;
}
#endif
/* -----------------------------------------------------------------------------
 * Manage the returning_tasks lists.
 *
 * These functions require cap->lock
 * -------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
STATIC_INLINE void
newReturningTask (Capability *cap, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->return_link == NULL);
    if (cap->returning_tasks_hd) {
        ASSERT(cap->returning_tasks_tl->return_link == NULL);
        cap->returning_tasks_tl->return_link = task;
    } else {
        cap->returning_tasks_hd = task;
    }
    cap->returning_tasks_tl = task;
}
STATIC_INLINE Task *
popReturningTask (Capability *cap)
{
    Task *task;
    ASSERT_LOCK_HELD(&cap->lock);
    task = cap->returning_tasks_hd;
    ASSERT(task);
    cap->returning_tasks_hd = task->return_link;
    if (!cap->returning_tasks_hd) {
        cap->returning_tasks_tl = NULL;
    }
    task->return_link = NULL;
    return task;
}
#endif
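/* The returning_tasks list is a singly-linked FIFO threaded through
 * task->return_link:
 *
 *     returning_tasks_hd -> task -> task -> task <- returning_tasks_tl
 *
 * newReturningTask() appends at the tail, popReturningTask() removes from
 * the head, so in-calls are serviced in arrival order.
 */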
/* ----------------------------------------------------------------------------
 * The Capability is initially marked not free.
 * ------------------------------------------------------------------------- */

static void
initCapability( Capability *cap, nat i )
{
    nat g;

    cap->no = i;
    cap->in_haskell = rtsFalse;

    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;
#if defined(THREADED_RTS)
    initMutex(&cap->lock);
    cap->running_task       = NULL; // indicates cap is free
    cap->spare_workers      = NULL;
    cap->suspended_ccalling_tasks = NULL;
    cap->returning_tasks_hd = NULL;
    cap->returning_tasks_tl = NULL;
    cap->wakeup_queue_hd    = END_TSO_QUEUE;
    cap->wakeup_queue_tl    = END_TSO_QUEUE;
    cap->sparks_created     = 0;
    cap->sparks_converted   = 0;
    cap->sparks_pruned      = 0;
#endif

    cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
    cap->f.stgGCEnter1     = (F_)__stg_gc_enter_1;
    cap->f.stgGCFun        = (F_)__stg_gc_fun;

    cap->mut_lists = stgMallocBytes(sizeof(bdescr *) *
                                    RtsFlags.GcFlags.generations,
                                    "initCapability");

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
    }

    cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
    cap->free_trec_chunks = END_STM_CHUNK_LIST;
    cap->free_trec_headers = NO_TREC;
    cap->transaction_tokens = 0;
    cap->context_switch = 0;
}
/* ---------------------------------------------------------------------------
 * Function:  initCapabilities()
 *
 * Purpose:   set up the Capability handling. For the THREADED_RTS build,
 *            we keep a table of them, the size of which is
 *            controlled by the user via the RTS flag -N.
 *
 * ------------------------------------------------------------------------- */
void
initCapabilities( void )
{
#if defined(THREADED_RTS)
    nat i;

#ifndef REG_Base
    // We can't support multiple CPUs if BaseReg is not a register
    if (RtsFlags.ParFlags.nNodes > 1) {
        errorBelch("warning: multiple CPUs not supported in this build, reverting to 1");
        RtsFlags.ParFlags.nNodes = 1;
    }
#endif

    n_capabilities = RtsFlags.ParFlags.nNodes;

    if (n_capabilities == 1) {
        capabilities = &MainCapability;
        // THREADED_RTS must work on builds that don't have a mutable
        // BaseReg (eg. unregisterised), so in this case
        // capabilities[0] must coincide with &MainCapability.
    } else {
        capabilities = stgMallocBytes(n_capabilities * sizeof(Capability),
                                      "initCapabilities");
    }

    for (i = 0; i < n_capabilities; i++) {
        initCapability(&capabilities[i], i);
    }

    debugTrace(DEBUG_sched, "allocated %d capabilities", n_capabilities);

#else /* !THREADED_RTS */

    n_capabilities = 1;
    capabilities = &MainCapability;
    initCapability(&MainCapability, 0);

#endif

    // There are no free capabilities to begin with.  We will start
    // a worker Task on each Capability, which will quickly put the
    // Capability on the free list when it finds nothing to do.
    last_free_capability = &capabilities[0];
}
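/* Example (illustrative): a program linked against the threaded RTS and
 * started as
 *
 *     ./prog +RTS -N4 -RTS
 *
 * reaches this point with RtsFlags.ParFlags.nNodes == 4, so a table of
 * four Capabilities is allocated and each is passed to initCapability().
 */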
/* ----------------------------------------------------------------------------
 * setContextSwitches: cause all capabilities to context switch as
 * soon as possible.
 * ------------------------------------------------------------------------- */

void setContextSwitches(void)
{
    nat i;
    for (i=0; i < n_capabilities; i++) {
        capabilities[i].context_switch = 1;
    }
}
/* ----------------------------------------------------------------------------
 * Give a Capability to a Task.  The task must currently be sleeping
 * on its condition variable.
 *
 * Requires cap->lock (modifies cap->running_task).
 *
 * When migrating a Task, the migrator must take task->lock before
 * modifying task->cap, to synchronise with the waking up Task.
 * Additionally, the migrator should own the Capability (when
 * migrating the run queue), or cap->lock (when migrating
 * returning_workers).
 *
 * ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
STATIC_INLINE void
giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
{
    ASSERT_LOCK_HELD(&cap->lock);
    ASSERT(task->cap == cap);
    trace(TRACE_sched | DEBUG_sched,
          "passing capability %d to %s %p",
          cap->no, task->tso ? "bound task" : "worker",
          (void *)task->id);
    ACQUIRE_LOCK(&task->lock);
    task->wakeup = rtsTrue;
    // the wakeup flag is needed because signalCondition() doesn't
    // flag the condition if the thread is already running, but we want
    // it to be sticky.
    signalCondition(&task->cond);
    RELEASE_LOCK(&task->lock);
}
#endif
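/* The receiving side of this handshake appears in
 * waitForReturnCapability() and yieldCapability() below: the woken Task
 * re-checks the sticky wakeup flag under its own lock, so a signal sent
 * before the Task starts waiting is not lost:
 *
 *     ACQUIRE_LOCK(&task->lock);
 *     if (!task->wakeup) waitCondition(&task->cond, &task->lock);
 *     task->wakeup = rtsFalse;
 *     RELEASE_LOCK(&task->lock);
 */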
/* ----------------------------------------------------------------------------
 * Function:  releaseCapability(Capability*)
 *
 * Purpose:   Letting go of a capability. Causes a
 *            'returning worker' thread or a 'waiting worker'
 *            to wake up, in that order.
 * ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
void
releaseCapability_ (Capability* cap,
                    rtsBool always_wakeup)
{
    Task *task;

    task = cap->running_task;

    ASSERT_PARTIAL_CAPABILITY_INVARIANTS(cap,task);

    cap->running_task = NULL;

    // Check to see whether a worker thread can be given
    // the go-ahead to return the result of an external call..
    if (cap->returning_tasks_hd != NULL) {
        giveCapabilityToTask(cap,cap->returning_tasks_hd);
        // The Task pops itself from the queue (see waitForReturnCapability())
        return;
    }

    /* if waiting_for_gc was the reason to release the cap: the thread
       comes from yieldCap->releaseAndQueueWorker.  Unconditionally set
       the cap free and return (see default after the if-protected other
       special cases).  The thread will wait on its cond.var and
       re-acquire the same cap after GC (the GC-triggering cap calls
       releaseCap and enters the spare_workers case).
    */
    if (waiting_for_gc) {
        last_free_capability = cap; // needed?
        trace(TRACE_sched | DEBUG_sched,
              "GC pending, set capability %d free", cap->no);
        return;
    }

    // If the next thread on the run queue is a bound thread,
    // give this Capability to the appropriate Task.
    if (!emptyRunQueue(cap) && cap->run_queue_hd->bound) {
        // Make sure we're not about to try to wake ourselves up
        ASSERT(task != cap->run_queue_hd->bound);
        task = cap->run_queue_hd->bound;
        giveCapabilityToTask(cap,task);
        return;
    }

    if (!cap->spare_workers) {
        // Create a worker thread if we don't have one.  If the system
        // is interrupted, we only create a worker task if there
        // are threads that need to be completed.  If the system is
        // shutting down, we never create a new worker.
        if (sched_state < SCHED_SHUTTING_DOWN || !emptyRunQueue(cap)) {
            debugTrace(DEBUG_sched,
                       "starting new worker on capability %d", cap->no);
            startWorkerTask(cap, workerStart);
            return;
        }
    }

    // If we have an unbound thread on the run queue, or if there's
    // anything else to do, give the Capability to a worker thread.
    if (always_wakeup ||
        !emptyRunQueue(cap) || !emptyWakeupQueue(cap) ||
        !emptySparkPoolCap(cap) || globalWorkToDo()) {
        if (cap->spare_workers) {
            giveCapabilityToTask(cap,cap->spare_workers);
            // The worker Task pops itself from the queue;
            return;
        }
    }

    last_free_capability = cap;
    trace(TRACE_sched | DEBUG_sched, "freeing capability %d", cap->no);
}
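/* Summary of the decision order above: (1) a returning in-call always wins;
 * (2) a pending GC parks the capability so the GC-triggering capability can
 * collect it; (3) a bound thread at the head of the run queue gets its own
 * Task; (4) otherwise a worker is started or woken if there is work to do;
 * (5) failing all that, the capability is marked free.
 */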
void
releaseCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsFalse);
    RELEASE_LOCK(&cap->lock);
}

void
releaseAndWakeupCapability (Capability* cap USED_IF_THREADS)
{
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, rtsTrue);
    RELEASE_LOCK(&cap->lock);
}
static void
releaseCapabilityAndQueueWorker (Capability* cap USED_IF_THREADS)
{
    Task *task;

    ACQUIRE_LOCK(&cap->lock);

    task = cap->running_task;

    // If the current task is a worker, save it on the spare_workers
    // list of this Capability.  A worker can mark itself as stopped,
    // in which case it is not replaced on the spare_worker queue.
    // This happens when the system is shutting down (see
    // Schedule.c:workerStart()).
    // Also, be careful to check that this task hasn't just exited
    // Haskell to do a foreign call (task->suspended_tso).
    if (!isBoundTask(task) && !task->stopped && !task->suspended_tso) {
        task->next = cap->spare_workers;
        cap->spare_workers = task;
    }
    // Bound tasks just float around attached to their TSOs.

    releaseCapability_(cap,rtsFalse);

    RELEASE_LOCK(&cap->lock);
}
#endif
/* ----------------------------------------------------------------------------
 * waitForReturnCapability( Capability **pCap, Task *task )
 *
 * Purpose:  when an OS thread returns from an external call,
 * it calls waitForReturnCapability() (via Schedule.resumeThread())
 * to wait for permission to enter the RTS & communicate the
 * result of the external call back to the Haskell thread that
 * made it.
 *
 * ------------------------------------------------------------------------- */
void
waitForReturnCapability (Capability **pCap, Task *task)
{
#if !defined(THREADED_RTS)

    MainCapability.running_task = task;
    task->cap = &MainCapability;
    *pCap = &MainCapability;

#else
    Capability *cap = *pCap;

    if (cap == NULL) {
        // Try last_free_capability first
        cap = last_free_capability;
        if (!cap->running_task) {
            // it's free -- use it
        } else {
            nat i;
            // otherwise, search for a free capability
            for (i = 0; i < n_capabilities; i++) {
                cap = &capabilities[i];
                if (!cap->running_task) {
                    break;
                }
            }
            if (cap->running_task) {
                // Can't find a free one, use last_free_capability.
                cap = last_free_capability;
            }
        }

        // record the Capability as the one this Task is now associated with.
        task->cap = cap;

    } else {
        ASSERT(task->cap == cap);
    }

    ACQUIRE_LOCK(&cap->lock);

    debugTrace(DEBUG_sched, "returning; I want capability %d", cap->no);

    if (!cap->running_task) {
        // It's free; just grab it
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
    } else {
        newReturningTask(cap,task);
        RELEASE_LOCK(&cap->lock);

        for (;;) {
            ACQUIRE_LOCK(&task->lock);
            // task->lock held, cap->lock not held
            if (!task->wakeup) waitCondition(&task->cond, &task->lock);
            cap = task->cap;
            task->wakeup = rtsFalse;
            RELEASE_LOCK(&task->lock);

            // now check whether we should wake up...
            ACQUIRE_LOCK(&cap->lock);
            if (cap->running_task == NULL) {
                if (cap->returning_tasks_hd != task) {
                    giveCapabilityToTask(cap,cap->returning_tasks_hd);
                    RELEASE_LOCK(&cap->lock);
                    continue;
                }
                cap->running_task = task;
                popReturningTask(cap);
                RELEASE_LOCK(&cap->lock);
                break;
            }
            RELEASE_LOCK(&cap->lock);
        }
    }

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);

    *pCap = cap;
#endif
}
#if defined(THREADED_RTS)
/* ----------------------------------------------------------------------------
 * yieldCapability
 * ------------------------------------------------------------------------- */
void
yieldCapability (Capability** pCap, Task *task)
{
    Capability *cap = *pCap;

    debugTrace(DEBUG_sched, "giving up capability %d", cap->no);

    // We must now release the capability and wait to be woken up
    // again.
    task->wakeup = rtsFalse;
    releaseCapabilityAndQueueWorker(cap);

    for (;;) {
        ACQUIRE_LOCK(&task->lock);
        // task->lock held, cap->lock not held
        if (!task->wakeup) waitCondition(&task->cond, &task->lock);
        cap = task->cap;
        task->wakeup = rtsFalse;
        RELEASE_LOCK(&task->lock);

        debugTrace(DEBUG_sched, "woken up on capability %d", cap->no);

        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task != NULL) {
            debugTrace(DEBUG_sched,
                       "capability %d is owned by another task", cap->no);
            RELEASE_LOCK(&cap->lock);
            continue;
        }

        if (task->tso == NULL) {
            ASSERT(cap->spare_workers != NULL);
            // if we're not at the front of the queue, release it
            // again.  This is unlikely to happen.
            if (cap->spare_workers != task) {
                giveCapabilityToTask(cap,cap->spare_workers);
                RELEASE_LOCK(&cap->lock);
                continue;
            }
            cap->spare_workers = task->next;
            task->next = NULL;
        }
        cap->running_task = task;
        RELEASE_LOCK(&cap->lock);
        break;
    }

    trace(TRACE_sched | DEBUG_sched, "resuming capability %d", cap->no);
    ASSERT(cap->running_task == task);

    *pCap = cap;

    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    return;
}
/* ----------------------------------------------------------------------------
 * Wake up a thread on a Capability.
 *
 * This is used when the current Task is running on a Capability and
 * wishes to wake up a thread on a different Capability.
 * ------------------------------------------------------------------------- */
void
wakeupThreadOnCapability (Capability *my_cap,
                          Capability *other_cap,
                          StgTSO *tso)
{
    ACQUIRE_LOCK(&other_cap->lock);

    // ASSUMES: other_cap->lock is held (we just took it above)
    if (tso->bound) {
        ASSERT(tso->bound->cap == tso->cap);
        tso->bound->cap = other_cap;
    }
    tso->cap = other_cap;

    ASSERT(tso->bound ? tso->bound->cap == other_cap : 1);

    if (other_cap->running_task == NULL) {
        // nobody is running this Capability, we can add our thread
        // directly onto the run queue and start up a Task to run it.

        other_cap->running_task = myTask();
            // precond for releaseCapability_() and appendToRunQueue()

        appendToRunQueue(other_cap,tso);

        trace(TRACE_sched, "resuming capability %d", other_cap->no);
        releaseCapability_(other_cap,rtsFalse);
    } else {
        appendToWakeupQueue(my_cap,other_cap,tso);
        other_cap->context_switch = 1;
        // someone is running on this Capability, so it cannot be
        // freed without first checking the wakeup queue (see
        // releaseCapability_).
    }

    RELEASE_LOCK(&other_cap->lock);
}
/* ----------------------------------------------------------------------------
 * prodCapabilities
 *
 * Used to indicate that the interrupted flag is now set, or some
 * other global condition that might require waking up a Task on each
 * Capability.
 * ------------------------------------------------------------------------- */
static void
prodCapabilities(rtsBool all)
{
    nat i;
    Capability *cap;
    Task *task;

    for (i=0; i < n_capabilities; i++) {
        cap = &capabilities[i];
        ACQUIRE_LOCK(&cap->lock);
        if (!cap->running_task) {
            if (cap->spare_workers) {
                trace(TRACE_sched, "resuming capability %d", cap->no);
                task = cap->spare_workers;
                ASSERT(!task->stopped);
                giveCapabilityToTask(cap,task);
                if (!all) {
                    RELEASE_LOCK(&cap->lock);
                    return;
                }
            }
        }
        RELEASE_LOCK(&cap->lock);
    }
    return;
}
void
prodAllCapabilities (void)
{
    prodCapabilities(rtsTrue);
}
/* ----------------------------------------------------------------------------
 * prodOneCapability
 *
 * Like prodAllCapabilities, but we only require a single Task to wake
 * up in order to service some global event, such as checking for
 * deadlock after some idle time has passed.
 * ------------------------------------------------------------------------- */

void
prodOneCapability (void)
{
    prodCapabilities(rtsFalse);
}
/* ----------------------------------------------------------------------------
 * shutdownCapability
 *
 * At shutdown time, we want to let everything exit as cleanly as
 * possible.  For each capability, we let its run queue drain, and
 * allow the workers to stop.
 *
 * This function should be called when interrupted and
 * shutting_down_scheduler = rtsTrue, thus any worker that wakes up
 * will exit the scheduler and call taskStop(), and any bound thread
 * that wakes up will return to its caller.  Runnable threads are
 * killed.
 *
 * ------------------------------------------------------------------------- */
void
shutdownCapability (Capability *cap, Task *task, rtsBool safe)
{
    nat i;

    task->cap = cap;

    // Loop indefinitely until all the workers have exited and there
    // are no Haskell threads left.  We used to bail out after 50
    // iterations of this loop, but that occasionally left a worker
    // running which caused problems later (the closeMutex() below
    // isn't safe, for one thing).

    for (i = 0; /* i < 50 */; i++) {
        ASSERT(sched_state == SCHED_SHUTTING_DOWN);

        debugTrace(DEBUG_sched,
                   "shutting down capability %d, attempt %d", cap->no, i);
        ACQUIRE_LOCK(&cap->lock);
        if (cap->running_task) {
            RELEASE_LOCK(&cap->lock);
            debugTrace(DEBUG_sched, "not owner, yielding");
            yieldThread();
            continue;
        }
        cap->running_task = task;

        if (cap->spare_workers) {
            // Look for workers that have died without removing
            // themselves from the list; this could happen if the OS
            // summarily killed the thread, for example.  This
            // actually happens on Windows when the system is
            // terminating the program, and the RTS is running in a
            // DLL.
            Task *t, *prev, *next;
            prev = NULL;
            for (t = cap->spare_workers; t != NULL; t = next) {
                next = t->next;
                if (!osThreadIsAlive(t->id)) {
                    debugTrace(DEBUG_sched,
                               "worker thread %p has died unexpectedly", (void *)t->id);
                    if (prev == NULL) {
                        cap->spare_workers = t->next;
                    } else {
                        prev->next = t->next;
                    }
                } else {
                    prev = t;
                }
            }
        }

        if (!emptyRunQueue(cap) || cap->spare_workers) {
            debugTrace(DEBUG_sched,
                       "runnable threads or workers still alive, yielding");
            releaseCapability_(cap,rtsFalse); // this will wake up a worker
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        // If "safe", then busy-wait for any threads currently doing
        // foreign calls.  If we're about to unload this DLL, for
        // example, we need to be sure that there are no OS threads
        // that will try to return to code that has been unloaded.
        // We can be a bit more relaxed when this is a standalone
        // program that is about to terminate, and let safe=false.
        if (cap->suspended_ccalling_tasks && safe) {
            debugTrace(DEBUG_sched,
                       "thread(s) are involved in foreign calls, yielding");
            cap->running_task = NULL;
            RELEASE_LOCK(&cap->lock);
            yieldThread();
            continue;
        }

        debugTrace(DEBUG_sched, "capability %d is stopped.", cap->no);
        RELEASE_LOCK(&cap->lock);
        break;
    }
    // we now have the Capability; its run queue and spare workers
    // list are both empty.

    // ToDo: we can't drop this mutex, because there might still be
    // threads performing foreign calls that will eventually try to
    // return via resumeThread() and attempt to grab cap->lock.
    // closeMutex(&cap->lock);
}
/* ----------------------------------------------------------------------------
 * tryGrabCapability
 *
 * Attempt to gain control of a Capability if it is free.
 *
 * ------------------------------------------------------------------------- */
rtsBool
tryGrabCapability (Capability *cap, Task *task)
{
    if (cap->running_task != NULL) return rtsFalse;
    ACQUIRE_LOCK(&cap->lock);
    if (cap->running_task != NULL) {
        RELEASE_LOCK(&cap->lock);
        return rtsFalse;
    }
    task->cap = cap;
    cap->running_task = task;
    RELEASE_LOCK(&cap->lock);
    return rtsTrue;
}
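/* Design note: the unlocked read of cap->running_task above is a cheap
 * fast path -- callers treat rtsFalse as purely advisory.  The check is
 * repeated under cap->lock before the capability is claimed, so the
 * unlocked read can never hand the same capability to two tasks.
 */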
#endif /* THREADED_RTS */
void
freeCapability (Capability *cap)
{
    stgFree(cap->mut_lists);
#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
    freeSparkPool(cap->sparks);
#endif
}
void
freeCapabilities (void)
{
#if defined(THREADED_RTS)
    nat i;
    for (i=0; i < n_capabilities; i++) {
        freeCapability(&capabilities[i]);
    }
#else
    freeCapability(&MainCapability);
#endif
}
/* ---------------------------------------------------------------------------
   Mark everything directly reachable from the Capabilities.  When
   using multiple GC threads, each GC thread marks all Capabilities
   for which (c `mod` n == 0), for Capability c and thread n.
   ------------------------------------------------------------------------ */
void
markSomeCapabilities (evac_fn evac, void *user, nat i0, nat delta,
                      rtsBool prune_sparks USED_IF_THREADS)
{
    nat i;
    Capability *cap;
    Task *task;

    // Each GC thread is responsible for following roots from the
    // Capability of the same number.  There will usually be the same
    // or fewer Capabilities as GC threads, but just in case there
    // are more, we mark every Capability whose number is the GC
    // thread's index plus a multiple of the number of GC threads.
    for (i = i0; i < n_capabilities; i += delta) {
        cap = &capabilities[i];
        evac(user, (StgClosure **)(void *)&cap->run_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->run_queue_tl);
#if defined(THREADED_RTS)
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_hd);
        evac(user, (StgClosure **)(void *)&cap->wakeup_queue_tl);
#endif
        for (task = cap->suspended_ccalling_tasks; task != NULL;
             task = task->next) {
            debugTrace(DEBUG_sched,
                       "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
            evac(user, (StgClosure **)(void *)&task->suspended_tso);
        }

#if defined(THREADED_RTS)
        if (prune_sparks) {
            pruneSparkQueue (evac, user, cap);
        } else {
            traverseSparkQueue (evac, user, cap);
        }
#endif
    }

#if !defined(THREADED_RTS)
    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
    evac(user, (StgClosure **)(void *)&sleeping_queue);
#endif
}
void
markCapabilities (evac_fn evac, void *user)
{
    markSomeCapabilities(evac, user, 0, 1, rtsFalse);
}
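/* Usage sketch (illustrative): a parallel GC with N marking threads could
 * have thread t call
 *
 *     markSomeCapabilities(evac, user, t, N, rtsTrue);
 *
 * partitioning the Capabilities among GC threads by index modulo N.
 * markCapabilities() above is just the single-threaded case i0 = 0,
 * delta = 1, with spark pruning turned off.
 */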