+/* -----------------------------------------------------------------------------
+ resurrectThreads is called after garbage collection on the list of
+ threads found to be garbage. Each of these threads will be woken
+ up and sent a signal: BlockedOnDeadMVar if the thread was blocked
+ on an MVar, or NonTermination if the thread was blocked on a Black
+ Hole.
+ -------------------------------------------------------------------------- */
+
+void
+resurrectThreads( StgTSO *threads )
+{
+ StgTSO *tso, *next;
+
+ /* 'threads' is the list of garbage TSOs found by the GC, linked via
+ * global_link. Each one is re-attached to all_threads (so it is live
+ * again) and then woken with an asynchronous exception. */
+ for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
+ next = tso->global_link;
+ tso->global_link = all_threads;
+ all_threads = tso;
+ IF_DEBUG(scheduler, sched_belch("resurrecting thread %d", tso->id));
+
+ switch (tso->why_blocked) {
+ case BlockedOnMVar:
+ case BlockedOnException:
+ /* NOTE(review): threads blocked delivering an exception also get
+ * BlockedOnDeadMVar here -- presumably because their target is
+ * dead too; confirm against the header comment above. */
+ raiseAsync(tso,(StgClosure *)BlockedOnDeadMVar_closure);
+ break;
+ case BlockedOnBlackHole:
+ raiseAsync(tso,(StgClosure *)NonTermination_closure);
+ break;
+ case NotBlocked:
+ /* This might happen if the thread was blocked on a black hole
+ * belonging to a thread that we've just woken up (raiseAsync
+ * can wake up threads, remember...).
+ */
+ continue;
+ default:
+ barf("resurrectThreads: thread blocked in a strange way");
+ }
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Blackhole detection: if we reach a deadlock, test whether any
+ * threads are blocked on themselves. Any threads which are found to
+ * be self-blocked get sent a NonTermination exception.
+ *
+ * This is only done in a deadlock situation in order to avoid
+ * performance overhead in the normal case.
+ * -------------------------------------------------------------------------- */
+
+static void
+detectBlackHoles( void )
+{
+ /* For every thread blocked on a black hole, scan its chain of update
+ * frames: if some frame's updatee is the very closure the thread is
+ * blocked on, the thread is blocked on itself and gets a
+ * NonTermination exception (see block comment above). */
+ StgTSO *t = all_threads;
+ StgUpdateFrame *frame;
+ StgClosure *blocked_on;
+
+ for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
+
+ /* follow forwarding links left behind when a TSO was relocated,
+ * so we examine the thread's current incarnation */
+ while (t->what_next == ThreadRelocated) {
+ t = t->link;
+ ASSERT(get_itbl(t)->type == TSO);
+ }
+
+ if (t->why_blocked != BlockedOnBlackHole) {
+ continue;
+ }
+
+ blocked_on = t->block_info.closure;
+
+ /* walk the update-frame chain starting at t->su.
+ * NB: 'continue' inside the switch advances this inner frame loop,
+ * not the outer thread loop. */
+ for (frame = t->su; ; frame = frame->link) {
+ switch (get_itbl(frame)->type) {
+
+ case UPDATE_FRAME:
+ if (frame->updatee == blocked_on) {
+ /* We are blocking on one of our own computations, so
+ * send this thread the NonTermination exception.
+ */
+ IF_DEBUG(scheduler,
+ sched_belch("thread %d is blocked on itself", t->id));
+ raiseAsync(t, (StgClosure *)NonTermination_closure);
+ goto done;
+ }
+ else {
+ continue;
+ }
+
+ case CATCH_FRAME:
+ case SEQ_FRAME:
+ /* uninteresting frame: keep walking */
+ continue;
+
+ case STOP_FRAME:
+ /* bottom of the stack: give up on this thread */
+ break;
+ }
+ /* reached only via the STOP_FRAME (or unknown-type) break above:
+ * terminate the frame loop */
+ break;
+ }
+
+ done: ;
+ }
+}
+
+//@node Debugging Routines, Index, Exception Handling Routines, Main scheduling code
+//@subsection Debugging Routines
+
+/* -----------------------------------------------------------------------------
+ Debugging: why is a thread blocked
+ -------------------------------------------------------------------------- */
+
+#ifdef DEBUG
+
+void
+printThreadBlockage(StgTSO *tso)
+{
+ /* Debugging: print to stderr (no trailing newline) a human-readable
+ * description of why 'tso' is blocked, keyed on tso->why_blocked. */
+ switch (tso->why_blocked) {
+ case BlockedOnRead:
+ fprintf(stderr,"is blocked on read from fd %d", tso->block_info.fd);
+ break;
+ case BlockedOnWrite:
+ fprintf(stderr,"is blocked on write to fd %d", tso->block_info.fd);
+ break;
+ case BlockedOnDelay:
+ /* NOTE(review): assumes block_info.target is int-sized -- confirm */
+ fprintf(stderr,"is blocked until %d", tso->block_info.target);
+ break;
+ case BlockedOnMVar:
+ fprintf(stderr,"is blocked on an MVar");
+ break;
+ case BlockedOnException:
+ fprintf(stderr,"is blocked on delivering an exception to thread %d",
+ tso->block_info.tso->id);
+ break;
+ case BlockedOnBlackHole:
+ fprintf(stderr,"is blocked on a black hole");
+ break;
+ case NotBlocked:
+ fprintf(stderr,"is not blocked");
+ break;
+#if defined(PAR)
+ case BlockedOnGA:
+ fprintf(stderr,"is blocked on global address; local FM_BQ is %p (%s)",
+ tso->block_info.closure, info_type(tso->block_info.closure));
+ break;
+ case BlockedOnGA_NoSend:
+ fprintf(stderr,"is blocked on global address (no send); local FM_BQ is %p (%s)",
+ tso->block_info.closure, info_type(tso->block_info.closure));
+ break;
+#endif
+ default:
+ /* fix: 'tso' is a pointer -- printing it via %d is a
+ * format/argument mismatch (undefined behaviour in the printf
+ * family); use %p with a void* cast instead. */
+ barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%p)",
+ tso->why_blocked, tso->id, (void *)tso);
+ }
+}
+
+void
+printThreadStatus(StgTSO *tso)
+{
+ /* Debugging: print the thread's lifecycle state to stderr; for a
+ * still-live thread, defer to printThreadBlockage for the details. */
+ if (tso->what_next == ThreadKilled) {
+ fprintf(stderr,"has been killed");
+ } else if (tso->what_next == ThreadComplete) {
+ fprintf(stderr,"has completed");
+ } else {
+ printThreadBlockage(tso);
+ }
+}
+
+void
+printAllThreads(void)
+{
+ /* Debugging: print a banner, then one "thread <id> <status>" line per
+ * thread on the global all_threads list. */
+ StgTSO *t;
+
+# if defined(GRAN)
+ /* GranSim: stamp the banner with the current processor's time */
+ char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
+ ullong_format_string(TIME_ON_PROC(CurrentProc),
+ time_string, rtsFalse/*no commas!*/);
+
+ sched_belch("all threads at [%s]:", time_string);
+# elif defined(PAR)
+ /* GUM: stamp the banner with the current wall-clock time */
+ char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
+ ullong_format_string(CURRENT_TIME,
+ time_string, rtsFalse/*no commas!*/);
+
+ sched_belch("all threads at [%s]:", time_string);
+# else
+ sched_belch("all threads:");
+# endif
+
+ for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
+ fprintf(stderr, "\tthread %d ", t->id);
+ printThreadStatus(t);
+ fprintf(stderr,"\n");
+ }
+}
+
+/*
+ Print a whole blocking queue attached to node (debugging only).
+*/
+//@cindex print_bq
+# if defined(PAR)
+void
+print_bq (StgClosure *node)
+{
+ /* Print the blocking queue attached to 'node' (debugging only):
+ * a header identifying the closure, then the queue via print_bqe.
+ *
+ * fix: the NULL sanity check now runs _before_ node is dereferenced;
+ * previously it followed the fprintf/get_itbl calls, so it could
+ * never catch a NULL argument. Also dropped the unused locals
+ * bqe, tso and end. */
+ ASSERT(node!=(StgClosure*)NULL); // sanity check
+
+ fprintf(stderr,"## BQ of closure %p (%s): ",
+ node, info_type(node));
+
+ /* should cover all closures that may have a blocking queue */
+ ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
+ get_itbl(node)->type == FETCH_ME_BQ ||
+ get_itbl(node)->type == RBH ||
+ get_itbl(node)->type == MVAR);
+
+ print_bqe(((StgBlockingQueue*)node)->blocking_queue);
+}
+
+/*
+ Print a whole blocking queue starting with the element bqe.
+*/
+void
+print_bqe (StgBlockingQueueElement *bqe)
+{
+ /* Print every element of the blocking queue starting at 'bqe',
+ * comma-separated on one stderr line, then a newline. */
+ rtsBool end;
+
+ /*
+ NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
+ */
+ /* The loop's update expression (comma operator) computes 'end' for the
+ * *next* iteration before advancing bqe, so the terminating CONSTR
+ * element is still printed but never dereferenced via ->link again. */
+ for (end = (bqe==END_BQ_QUEUE);
+ !end; // iterate until bqe points to a CONSTR
+ end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE),
+ bqe = end ? END_BQ_QUEUE : bqe->link) {
+ ASSERT(bqe != END_BQ_QUEUE); // sanity check
+ ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check
+ /* types of closures that may appear in a blocking queue */
+ ASSERT(get_itbl(bqe)->type == TSO ||
+ get_itbl(bqe)->type == BLOCKED_FETCH ||
+ get_itbl(bqe)->type == CONSTR);
+ /* only BQs of an RBH end with an RBH_Save closure */
+ //ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
+
+ switch (get_itbl(bqe)->type) {
+ case TSO:
+ fprintf(stderr," TSO %u (%x),",
+ ((StgTSO *)bqe)->id, ((StgTSO *)bqe));
+ break;
+ case BLOCKED_FETCH:
+ fprintf(stderr," BF (node=%p, ga=((%x, %d, %x)),",
+ ((StgBlockedFetch *)bqe)->node,
+ ((StgBlockedFetch *)bqe)->ga.payload.gc.gtid,
+ ((StgBlockedFetch *)bqe)->ga.payload.gc.slot,
+ ((StgBlockedFetch *)bqe)->ga.weight);
+ break;
+ case CONSTR:
+ /* identify which RBH_Save closure ends the queue, if any */
+ fprintf(stderr," %s (IP %p),",
+ (get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" :
+ get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" :
+ get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" :
+ "RBH_Save_?"), get_itbl(bqe));
+ break;
+ default:
+ barf("Unexpected closure type %s in blocking queue", // of %p (%s)",
+ info_type((StgClosure *)bqe)); // , node, info_type(node));
+ break;
+ }
+ } /* for */
+ fputc('\n', stderr);
+}
+# elif defined(GRAN)
+void
+print_bq (StgClosure *node)
+{
+ /* GranSim variant: print the blocking queue attached to 'node',
+ * annotating each element with the PE it lives on.
+ *
+ * fix: the NULL sanity check now runs _before_ node is dereferenced;
+ * previously it followed the get_itbl calls, so it could never catch
+ * a NULL argument. */
+ StgBlockingQueueElement *bqe;
+ PEs node_loc, tso_loc;
+ rtsBool end;
+
+ ASSERT(node!=(StgClosure*)NULL); // sanity check
+
+ /* should cover all closures that may have a blocking queue */
+ ASSERT(get_itbl(node)->type == BLACKHOLE_BQ ||
+ get_itbl(node)->type == FETCH_ME_BQ ||
+ get_itbl(node)->type == RBH);
+
+ node_loc = where_is(node);
+
+ fprintf(stderr,"## BQ of closure %p (%s) on [PE %d]: ",
+ node, info_type(node), node_loc);
+
+ /*
+ NB: In a parallel setup a BQ of an RBH must end with an RBH_Save closure;
+ */
+ /* comma-operator update: 'end' is recomputed before bqe advances, so
+ * the terminating CONSTR is printed but its ->link never followed */
+ for (bqe = ((StgBlockingQueue*)node)->blocking_queue, end = (bqe==END_BQ_QUEUE);
+ !end; // iterate until bqe points to a CONSTR
+ end = (get_itbl(bqe)->type == CONSTR) || (bqe->link==END_BQ_QUEUE), bqe = end ? END_BQ_QUEUE : bqe->link) {
+ ASSERT(bqe != END_BQ_QUEUE); // sanity check
+ ASSERT(bqe != (StgBlockingQueueElement *)NULL); // sanity check
+ /* types of closures that may appear in a blocking queue */
+ ASSERT(get_itbl(bqe)->type == TSO ||
+ get_itbl(bqe)->type == CONSTR);
+ /* only BQs of an RBH end with an RBH_Save closure */
+ ASSERT(get_itbl(bqe)->type != CONSTR || get_itbl(node)->type == RBH);
+
+ tso_loc = where_is((StgClosure *)bqe);
+ switch (get_itbl(bqe)->type) {
+ case TSO:
+ fprintf(stderr," TSO %d (%p) on [PE %d],",
+ ((StgTSO *)bqe)->id, (StgTSO *)bqe, tso_loc);
+ break;
+ case CONSTR:
+ /* identify which RBH_Save closure ends the queue, if any */
+ fprintf(stderr," %s (IP %p),",
+ (get_itbl(bqe) == &stg_RBH_Save_0_info ? "RBH_Save_0" :
+ get_itbl(bqe) == &stg_RBH_Save_1_info ? "RBH_Save_1" :
+ get_itbl(bqe) == &stg_RBH_Save_2_info ? "RBH_Save_2" :
+ "RBH_Save_?"), get_itbl(bqe));
+ break;
+ default:
+ barf("Unexpected closure type %s in blocking queue of %p (%s)",
+ info_type((StgClosure *)bqe), node, info_type(node));
+ break;
+ }
+ } /* for */
+ fputc('\n', stderr);
+}
+#else
+/*
+ Nice and easy: only TSOs on the blocking queue
+*/
+void
+print_bq (StgClosure *node)
+{
+ /* Sequential variant: the blocking queue holds only TSOs, so just
+ * walk the ->link chain printing each thread's id and address. */
+ StgTSO *t;
+
+ ASSERT(node!=(StgClosure*)NULL); // sanity check
+ t = ((StgBlockingQueue*)node)->blocking_queue;
+ while (t != END_TSO_QUEUE) {
+ ASSERT(t!=NULL && t!=END_TSO_QUEUE); // sanity check
+ ASSERT(get_itbl(t)->type == TSO); // guess what, sanity check
+ fprintf(stderr," TSO %d (%p),", t->id, t);
+ t = t->link;
+ }
+ fputc('\n', stderr);
+}
+# endif
+
+#if defined(PAR)
+static nat
+run_queue_len(void)
+{
+ /* Return the number of TSOs currently on the run queue
+ * (run_queue_hd, linked via ->link).
+ *
+ * fix: the empty loop body needs an explicit ';'. Without it,
+ * 'return i;' parsed as the loop body, so the function returned 0
+ * for any non-empty queue and fell off the end (undefined
+ * behaviour) for an empty one. */
+ nat i;
+ StgTSO *tso;
+
+ for (i=0, tso=run_queue_hd;
+ tso != END_TSO_QUEUE;
+ i++, tso=tso->link)
+ ; /* nothing */
+
+ return i;
+}
+#endif
+
+static void
+sched_belch(char *s, ...)
+{
+ /* printf-style debug message to stderr, prefixed so scheduler output
+ * is identifiable (per-task under SMP, "== " under PAR) and
+ * terminated with a newline. */
+ va_list ap;
+ va_start(ap,s);
+#ifdef SMP
+ fprintf(stderr, "scheduler (task %ld): ", pthread_self());
+#elif defined(PAR)
+ fprintf(stderr, "== ");
+#else
+ fprintf(stderr, "scheduler: ");
+#endif
+ vfprintf(stderr, s, ap);
+ fprintf(stderr, "\n");
+ va_end(ap); /* fix: every va_start must be paired with va_end */
+}
+
+#endif /* DEBUG */
+
+
+//@node Index, , Debugging Routines, Main scheduling code
+//@subsection Index
+
+//@index
+//* MainRegTable:: @cindex\s-+MainRegTable
+//* StgMainThread:: @cindex\s-+StgMainThread
+//* awaken_blocked_queue:: @cindex\s-+awaken_blocked_queue
+//* blocked_queue_hd:: @cindex\s-+blocked_queue_hd
+//* blocked_queue_tl:: @cindex\s-+blocked_queue_tl
+//* context_switch:: @cindex\s-+context_switch
+//* createThread:: @cindex\s-+createThread
+//* free_capabilities:: @cindex\s-+free_capabilities
+//* gc_pending_cond:: @cindex\s-+gc_pending_cond
+//* initScheduler:: @cindex\s-+initScheduler
+//* interrupted:: @cindex\s-+interrupted
+//* n_free_capabilities:: @cindex\s-+n_free_capabilities
+//* next_thread_id:: @cindex\s-+next_thread_id
+//* print_bq:: @cindex\s-+print_bq
+//* run_queue_hd:: @cindex\s-+run_queue_hd
+//* run_queue_tl:: @cindex\s-+run_queue_tl
+//* sched_mutex:: @cindex\s-+sched_mutex
+//* schedule:: @cindex\s-+schedule
+//* take_off_run_queue:: @cindex\s-+take_off_run_queue
+//* task_ids:: @cindex\s-+task_ids
+//* term_mutex:: @cindex\s-+term_mutex
+//* thread_ready_cond:: @cindex\s-+thread_ready_cond
+//@end index