#include "Cmm.h"
+#ifdef __PIC__
+import pthread_mutex_unlock;
+#endif
+import EnterCriticalSection;
+import LeaveCriticalSection;
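+
+/* These imports are presumably needed because lock operations (such
+ * as the RELEASE_LOCK in stg_block_blackhole_finally below) are now
+ * made directly from Cmm: Enter/LeaveCriticalSection on Windows, and
+ * pthread_mutex_unlock elsewhere, which must be imported explicitly
+ * when compiling position-independent code. */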
+
/* Stack/Heap Check Failure
* ------------------------
*
* On discovering that a stack or heap check has failed, we do the following:
*
- * - If the context_switch flag is set, indicating that there are more
- * threads waiting to run, we yield to the scheduler
+ * - If HpLim==0, indicating that we should context-switch, we yield
+ * to the scheduler (return ThreadYielding).
+ *
+ * Note that we must leave no slop in the heap (this is a requirement
+ * for LDV profiling, at least), so if we just had a heap-check
+ * failure, then we must retract Hp by HpAlloc. How do we know
+ * whether there was a heap-check failure? HpLim might be zero, and
+ * yet we got here as a result of a stack-check failure. Hence, we
+ * require that HpAlloc is only non-zero if there was a heap-check
+ * failure, otherwise it is zero, so we can always safely subtract
+ * HpAlloc from Hp.
+ *
+ * This is why HpAlloc is zeroed in LOAD_THREAD_STATE().
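+ *
+ * For example (with illustrative numbers): suppose a heap check
+ * fails with HpAlloc = 200 bytes, and meanwhile another Capability
+ * has requested a context switch by zeroing our HpLim.  We retract
+ * Hp by 200 (leaving no slop), notice that HpLim == 0, and return
+ * ThreadYielding; the allocation is simply re-attempted when the
+ * thread is next scheduled.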
+ *
+ * - If the context_switch flag is set (the backup plan if setting HpLim
+ * to 0 didn't trigger a context switch), we yield to the scheduler
* (return ThreadYielding).
*
* - If Hp > HpLim, we've had a heap check failure. This means we've
DEBUG_ONLY(foreign "C" heapCheckFail()); \
if (Hp > HpLim) { \
Hp = Hp - HpAlloc/*in bytes*/; \
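+        /* A zero HpLim is the signal that a context switch was      \
+           requested (see the note above); Hp has already been       \
+           retracted, and LOAD_THREAD_STATE() will zero HpAlloc      \
+           when the thread resumes. */                               \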
+ if (HpLim == 0) { \
+ R1 = ThreadYielding; \
+ goto sched; \
+ } \
if (HpAlloc <= BLOCK_SIZE \
&& bdescr_link(CurrentNursery) != NULL) { \
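+            /* Hp was retracted above, so zero HpAlloc here to keep  \
+               the invariant that HpAlloc is non-zero only           \
+               immediately after a heap-check failure. */            \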
+ HpAlloc = 0; \
CLOSE_NURSERY(); \
CurrentNursery = bdescr_link(CurrentNursery); \
OPEN_NURSERY(); \
- if (CInt[context_switch] != 0 :: CInt) { \
+ if (Capability_context_switch(MyCapability()) != 0 :: CInt) { \
R1 = ThreadYielding; \
goto sched; \
} else { \
There are canned sequences for 'n' pointer values in registers.
-------------------------------------------------------------------------- */
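+// P_ declares a pointer-typed word: it replaces the older
+// '"ptr" W_' annotation and marks the stack slot as holding a
+// heap pointer for the GC's benefit.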
-INFO_TABLE_RET( stg_enter, RET_SMALL, "ptr" W_ unused)
+INFO_TABLE_RET( stg_enter, RET_SMALL, P_ unused)
{
R1 = Sp(1);
Sp_adj(2);
/*-- R1 is boxed/unpointed -------------------------------------------------- */
-INFO_TABLE_RET( stg_gc_unpt_r1, RET_SMALL, "ptr" W_ unused)
+INFO_TABLE_RET( stg_gc_unpt_r1, RET_SMALL, P_ unused)
{
R1 = Sp(1);
Sp_adj(2);
/*-- Unboxed tuple return, one pointer (unregisterised build only) ---------- */
-INFO_TABLE_RET( stg_ut_1_0_unreg, RET_SMALL, "ptr" W_ unused )
+INFO_TABLE_RET( stg_ut_1_0_unreg, RET_SMALL, P_ unused )
{
Sp_adj(1);
// one ptr is on the stack (Sp(0))
*
* -------------------------------------------------------------------------- */
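+// Pushed on the stack when takeMVar blocks on an empty MVar:
+// Sp(1) holds the MVar, and the takeMVar is retried when the
+// thread is woken up.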
-INFO_TABLE_RET( stg_block_takemvar, RET_SMALL, "ptr" W_ unused )
+INFO_TABLE_RET( stg_block_takemvar, RET_SMALL, P_ unused )
{
R1 = Sp(1);
Sp_adj(2);
stg_block_takemvar_finally
{
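+    // Blocking mutated the MVar (a TSO was queued on it), so it is
+    // marked dirty, telling the GC that it may now point to younger
+    // generations.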
#ifdef THREADED_RTS
- unlockClosure(R3, stg_EMPTY_MVAR_info);
+ unlockClosure(R3, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(R3, stg_MVAR_DIRTY_info);
#endif
jump StgReturn;
}
BLOCK_BUT_FIRST(stg_block_takemvar_finally);
}
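+// The analogous frame for a blocked putMVar: Sp(1) holds the MVar
+// and Sp(2) the value to put; the putMVar is retried on wakeup.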
-INFO_TABLE_RET( stg_block_putmvar, RET_SMALL, "ptr" W_ unused1, "ptr" W_ unused2 )
+INFO_TABLE_RET( stg_block_putmvar, RET_SMALL, P_ unused1, P_ unused2 )
{
R2 = Sp(2);
R1 = Sp(1);
stg_block_putmvar_finally
{
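+    // As with takeMVar above: the MVar's queue was mutated, so mark
+    // it dirty for the generational GC.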
#ifdef THREADED_RTS
- unlockClosure(R3, stg_FULL_MVAR_info);
+ unlockClosure(R3, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(R3, stg_MVAR_DIRTY_info);
#endif
jump StgReturn;
}
// The last thing we do is release sched_mutex, which is
// preventing other threads from accessing blackhole_queue and
// picking up this thread before we are finished with it.
- foreign "C" RELEASE_LOCK(sched_mutex "ptr");
+ RELEASE_LOCK(sched_mutex "ptr");
#endif
jump StgReturn;
}
BLOCK_BUT_FIRST(stg_block_blackhole_finally);
}
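+// Frame for a blocked throwTo: Sp(1) appears to hold the target TSO
+// and Sp(2) the exception; the throw is retried when the thread is
+// woken.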
-INFO_TABLE_RET( stg_block_throwto, RET_SMALL, "ptr" W_ unused, "ptr" W_ unused )
+INFO_TABLE_RET( stg_block_throwto, RET_SMALL, P_ unused, P_ unused )
{
R2 = Sp(2);
R1 = Sp(1);