BLOCK_BUT_FIRST(stg_block_putmvar_finally);
}
+// code fragment executed just before we return to the scheduler
+// (installed via BLOCK_BUT_FIRST by stg_block_blackhole below)
+stg_block_blackhole_finally
+{
+#if defined(SMP)
+ // The last thing we do is release sched_lock, which is
+ // preventing other threads from accessing blackhole_queue and
+ // picking up this thread before we are finished with it.
+ // This pairs with the ACQUIRE_LOCK taken before the TSO was
+ // put on the blackhole queue.
+ foreign "C" RELEASE_LOCK(sched_mutex "ptr");
+#endif
+ jump StgReturn; // hand control back to the scheduler
+}
+
+// Block the current thread on a blackhole: push a 2-word stg_enter
+// frame for the closure in R1 so it is re-entered when the thread is
+// woken, then block, running stg_block_blackhole_finally last (which
+// releases sched_mutex on SMP — see above).
+stg_block_blackhole
+{
+ Sp_adj(-2); // make room for the 2-word frame
+ Sp(1) = R1; // the closure to enter on wakeup
+ Sp(0) = stg_enter_info;
+ BLOCK_BUT_FIRST(stg_block_blackhole_finally);
+}
+
#ifdef mingw32_HOST_OS
INFO_TABLE_RET( stg_block_async, 0/*framesize*/, 0/*bitmap*/, RET_SMALL )
{
#if defined(SMP)
foreign "C" ACQUIRE_LOCK(sched_mutex "ptr");
+ // released in stg_block_blackhole_finally, AFTER the scheduler has
+ // finished with this TSO — not here, where another thread could
+ // still pick the TSO up before we are done blocking
#endif
/* Put ourselves on the blackhole queue */
StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
StgTSO_block_info(CurrentTSO) = R1;
-#if defined(SMP)
- foreign "C" RELEASE_LOCK(sched_mutex "ptr");
-#endif
-
- /* stg_gen_block is too heavyweight, use a specialised one */
- jump stg_block_1;
+ jump stg_block_blackhole; // pushes the enter frame for R1 and blocks
+ // NOTE(review): this frame is named stg_block_async (mingw32) but the
+ // body blocks on a blackhole — confirm the hunk is against the
+ // intended function
}
#if defined(PAR) || defined(GRAN)
#if defined(SMP)
foreign "C" ACQUIRE_LOCK(sched_mutex "ptr");
+ // released in stg_block_blackhole_finally
#endif
/* Put ourselves on the blackhole queue */
StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
StgTSO_block_info(CurrentTSO) = R1;
-#if defined(SMP)
- foreign "C" RELEASE_LOCK(sched_mutex "ptr");
-#endif
-
- /* stg_gen_block is too heavyweight, use a specialised one */
- jump stg_block_1;
+ jump stg_block_blackhole;
}
#ifdef EAGER_BLACKHOLING