/* -----------------------------------------------------------------------------
- * $Id: HeapStackCheck.hc,v 1.11 2000/01/13 14:34:03 hwloidl Exp $
+ * $Id: HeapStackCheck.hc,v 1.26 2002/03/02 17:43:44 sof Exp $
*
* (c) The GHC Team, 1998-1999
*
*
* ---------------------------------------------------------------------------*/
+#include "Stg.h"
#include "Rts.h"
#include "Storage.h" /* for CurrentTSO */
#include "StgRun.h" /* for StgReturn and register saving */
#include "Schedule.h" /* for context_switch */
-#include "HeapStackCheck.h"
/* Stack/Heap Check Failure
* ------------------------
*
* - If the context_switch flag is set, indicating that there are more
* threads waiting to run, we yield to the scheduler
- * (return ThreadYeilding).
+ * (return ThreadYielding).
*
* - If Hp > HpLim, we've had a heap check failure. This means we've
* come to the end of the current heap block, so we try to chain
* ThreadRunGHC thread.
*/
-
#define GC_GENERIC \
if (Hp > HpLim) { \
- if (ExtendNursery(Hp,HpLim)) { \
+ Hp -= HpAlloc; \
+ if (HpAlloc <= BLOCK_SIZE_W && ExtendNursery(Hp,HpLim)) {\
if (context_switch) { \
R1.i = ThreadYielding; \
} else { \
R1.i = StackOverflow; \
} \
SaveThreadState(); \
- CurrentTSO->whatNext = ThreadRunGHC; \
+ CurrentTSO->what_next = ThreadRunGHC; \
JMP_(StgReturn);
#define GC_ENTER \
if (Hp > HpLim) { \
- if (ExtendNursery(Hp,HpLim)) { \
+ Hp -= HpAlloc; \
+ if (HpAlloc <= BLOCK_SIZE_W && ExtendNursery(Hp,HpLim)) {\
if (context_switch) { \
R1.i = ThreadYielding; \
} else { \
R1.i = StackOverflow; \
} \
SaveThreadState(); \
- CurrentTSO->whatNext = ThreadEnterGHC; \
+ CurrentTSO->what_next = ThreadEnterGHC; \
JMP_(StgReturn);
#define HP_GENERIC \
SaveThreadState(); \
- CurrentTSO->whatNext = ThreadRunGHC; \
+ CurrentTSO->what_next = ThreadRunGHC; \
R1.i = HeapOverflow; \
JMP_(StgReturn);
#define STK_GENERIC \
SaveThreadState(); \
- CurrentTSO->whatNext = ThreadRunGHC; \
+ CurrentTSO->what_next = ThreadRunGHC; \
R1.i = StackOverflow; \
JMP_(StgReturn);
#define YIELD_GENERIC \
SaveThreadState(); \
- CurrentTSO->whatNext = ThreadRunGHC; \
+ CurrentTSO->what_next = ThreadRunGHC; \
R1.i = ThreadYielding; \
JMP_(StgReturn);
-#define YIELD_TO_HUGS \
+#define YIELD_TO_INTERPRETER \
SaveThreadState(); \
- CurrentTSO->whatNext = ThreadEnterHugs; \
+ CurrentTSO->what_next = ThreadEnterInterp; \
R1.i = ThreadYielding; \
JMP_(StgReturn);
#define BLOCK_GENERIC \
SaveThreadState(); \
- CurrentTSO->whatNext = ThreadRunGHC; \
+ CurrentTSO->what_next = ThreadRunGHC; \
R1.i = ThreadBlocked; \
JMP_(StgReturn);
#define BLOCK_ENTER \
SaveThreadState(); \
- CurrentTSO->whatNext = ThreadEnterGHC;\
+ CurrentTSO->what_next = ThreadEnterGHC;\
R1.i = ThreadBlocked; \
JMP_(StgReturn);
There are canned sequences for 'n' pointer values in registers.
-------------------------------------------------------------------------- */
-EXTFUN(stg_gc_enter_1)
+EXTFUN(__stg_gc_enter_1)
{
FB_
Sp -= 1;
Sp[0] = R1.w;
R1.i = HeapOverflow;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
JMP_(StgReturn);
FE_
}
{
FB_
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadYielding;
JMP_(StgReturn);
FE_
Sp -= 1;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadYielding;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadYielding;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadYielding;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadYielding;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadYielding;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadYielding;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadYielding;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadYielding;
JMP_(StgReturn);
FE_
Sp -= 1;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadBlocked;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadBlocked;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadBlocked;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadBlocked;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadBlocked;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadBlocked;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadBlocked;
JMP_(StgReturn);
FE_
Sp[1] = R2.w;
Sp[0] = R1.w;
SaveThreadState();
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadBlocked;
JMP_(StgReturn);
FE_
EXTFUN(par_jump)
{
FB_
- CurrentTSO->whatNext = ThreadEnterGHC;
+ CurrentTSO->what_next = ThreadEnterGHC;
R1.i = ThreadBlocked;
JMP_(StgReturn);
FE_
cases are covered below.
-------------------------------------------------------------------------- */
-/*-- No regsiters live (probably a void return) ----------------------------- */
+/*-- No registers live (probably a void return) ----------------------------- */
/* If we change the policy for thread startup to *not* remove the
* return address from the stack, we can get rid of this little
* function/info table...
*/
-INFO_TABLE_SRT_BITMAP(stg_gc_noregs_ret_info, stg_gc_noregs_ret, 0/*BITMAP*/,
+INFO_TABLE_SRT_BITMAP(stg_gc_noregs_info, stg_gc_noregs_ret, 0/*BITMAP*/,
0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
RET_SMALL,, EF_, 0, 0);
{
FB_
Sp -= 1;
- Sp[0] = (W_)&stg_gc_noregs_ret_info;
+ Sp[0] = (W_)&stg_gc_noregs_info;
GC_GENERIC
FE_
}
/*-- R1 is boxed/unpointed -------------------------------------------------- */
-INFO_TABLE_SRT_BITMAP(stg_gc_unpt_r1_info, stg_gc_unpt_r1_entry, 0/*BITMAP*/,
+INFO_TABLE_SRT_BITMAP(stg_gc_unpt_r1_info, stg_gc_unpt_r1_ret, 0/*BITMAP*/,
0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
RET_SMALL,, EF_, 0, 0);
-EXTFUN(stg_gc_unpt_r1_entry)
+EXTFUN(stg_gc_unpt_r1_ret)
{
FB_
R1.w = Sp[0];
FE_
}
+/*-- Unboxed tuple return (unregisterised build only)------------------ */
+
+/* Return point used when returning an unboxed tuple in an unregisterised
+ * build: the component (presumably one pointer, from the _1_0 naming --
+ * confirm against the code generator) has already been pushed on the
+ * stack at Sp[0] instead of being passed in R1, so all we do is jump to
+ * the real return address sitting above it at Sp[1].
+ */
+INFO_TABLE_SRT_BITMAP(stg_ut_1_0_unreg_info, stg_ut_1_0_unreg_ret, 0/*BITMAP*/,
+ 0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
+ RET_SMALL,, EF_, 0, 0);
+
+EXTFUN(stg_ut_1_0_unreg_ret)
+{
+ FB_
+ /* R1 is on the stack (*Sp) */
+ JMP_(ENTRY_CODE(Sp[1]));
+ FE_
+}
+
+
/*-- R1 is unboxed -------------------------------------------------- */
-INFO_TABLE_SRT_BITMAP(stg_gc_unbx_r1_info, stg_gc_unbx_r1_entry, 1/*BITMAP*/,
+INFO_TABLE_SRT_BITMAP(stg_gc_unbx_r1_info, stg_gc_unbx_r1_ret, 1/*BITMAP*/,
0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
RET_SMALL,, EF_, 0, 0);
/* the 1 is a bitmap - i.e. 1 non-pointer word on the stack. */
-EXTFUN(stg_gc_unbx_r1_entry)
+EXTFUN(stg_gc_unbx_r1_ret)
{
FB_
R1.w = Sp[0];
/*-- F1 contains a float ------------------------------------------------- */
-INFO_TABLE_SRT_BITMAP(stg_gc_f1_info, stg_gc_f1_entry, 1/*BITMAP*/,
+INFO_TABLE_SRT_BITMAP(stg_gc_f1_info, stg_gc_f1_ret, 1/*BITMAP*/,
0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
RET_SMALL,, EF_, 0, 0);
-EXTFUN(stg_gc_f1_entry)
+EXTFUN(stg_gc_f1_ret)
{
FB_
F1 = PK_FLT(Sp);
# define DBL_BITMAP 3
#endif
-INFO_TABLE_SRT_BITMAP(stg_gc_d1_info, stg_gc_d1_entry, DBL_BITMAP,
+INFO_TABLE_SRT_BITMAP(stg_gc_d1_info, stg_gc_d1_ret, DBL_BITMAP,
0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
RET_SMALL,, EF_, 0, 0);
-EXTFUN(stg_gc_d1_entry)
+EXTFUN(stg_gc_d1_ret)
{
FB_
D1 = PK_DBL(Sp);
FE_
}
+
+/*-- L1 contains an int64 ------------------------------------------------- */
+
+/* we support int64s of either 1 or 2 words in size */
+
+/* Stack bitmap for the saved Int64: marks its word(s) as non-pointers
+ * (1 = one non-pointer word on 64-bit, 3 = two non-pointer words on
+ * 32-bit, following the DBL_BITMAP convention above).
+ */
+#if SIZEOF_VOID_P == 8
+# define LLI_BITMAP 1
+#else
+# define LLI_BITMAP 3
+#endif
+
+INFO_TABLE_SRT_BITMAP(stg_gc_l1_info, stg_gc_l1_ret, LLI_BITMAP,
+ 0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
+ RET_SMALL,, EF_, 0, 0);
+
+/* Resumption point after a GC with a live Int64 in L1: reload L1 from
+ * the stack, pop it, and jump to the return address now at Sp[0].
+ */
+EXTFUN(stg_gc_l1_ret)
+{
+ FB_
+ L1 = PK_Int64(Sp);
+ Sp += sizeofW(StgWord64);
+ JMP_(ENTRY_CODE(Sp[0]));
+ FE_
+}
+
+/* Heap-check failure with a live Int64 in L1: save L1 on the stack under
+ * the stg_gc_l1_info frame, then take the generic GC exit back to the
+ * scheduler (GC_GENERIC saves the thread state and returns via StgReturn).
+ */
+EXTFUN(stg_gc_l1)
+{
+ FB_
+ Sp -= 1 + sizeofW(StgWord64);
+ ASSIGN_Int64(Sp+1,L1);
+ Sp[0] = (W_)&stg_gc_l1_info;
+ GC_GENERIC
+ FE_
+}
+
/* -----------------------------------------------------------------------------
Heap checks for unboxed tuple case alternatives
/*---- R1 contains a pointer: ------ */
-INFO_TABLE_SRT_BITMAP(stg_gc_ut_1_0_info, stg_gc_ut_1_0_entry, 1/*BITMAP*/,
+INFO_TABLE_SRT_BITMAP(stg_gc_ut_1_0_info, stg_gc_ut_1_0_ret, 1/*BITMAP*/,
0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
RET_SMALL,, EF_, 0, 0);
-EXTFUN(stg_gc_ut_1_0_entry)
+EXTFUN(stg_gc_ut_1_0_ret)
{
FB_
R1.w = Sp[1];
/*---- R1 contains a non-pointer: ------ */
-INFO_TABLE_SRT_BITMAP(stg_gc_ut_0_1_info, stg_gc_ut_0_1_entry, 3/*BITMAP*/,
+INFO_TABLE_SRT_BITMAP(stg_gc_ut_0_1_info, stg_gc_ut_0_1_ret, 3/*BITMAP*/,
0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
RET_SMALL,, EF_, 0, 0);
-EXTFUN(stg_gc_ut_0_1_entry)
+EXTFUN(stg_gc_ut_0_1_ret)
{
FB_
R1.w = Sp[1];
/*- 0 Regs -------------------------------------------------------------------*/
-EXTFUN(stg_chk_0)
+EXTFUN(__stg_chk_0)
{
FB_
Sp -= 1;
/*- 1 Reg --------------------------------------------------------------------*/
-EXTFUN(stg_chk_1)
+EXTFUN(__stg_chk_1)
{
FB_
Sp -= 2;
{
FB_
Sp--;
- Sp[0] = (W_)&stg_gc_noregs_ret_info;
+ Sp[0] = (W_)&stg_gc_noregs_info;
YIELD_GENERIC;
FE_
}
-FN_(stg_yield_to_Hugs)
+FN_(stg_yield_to_interpreter)
{
FB_
/* No need to save everything - no live registers */
- YIELD_TO_HUGS
+ YIELD_TO_INTERPRETER
FE_
}
{
FB_
Sp--;
- Sp[0] = (W_)&stg_gc_noregs_ret_info;
+ Sp[0] = (W_)&stg_gc_noregs_info;
BLOCK_GENERIC;
FE_
}
BLOCK_ENTER;
FE_
}
+
+/* -----------------------------------------------------------------------------
+ * takeMVar/putMVar-specific blocks
+ *
+ * Stack layout for a thread blocked in takeMVar:
+ *
+ * ret. addr
+ * ptr to MVar (R1)
+ * stg_block_takemvar_info
+ *
+ * Stack layout for a thread blocked in putMVar:
+ *
+ * ret. addr
+ * ptr to Value (R2)
+ * ptr to MVar (R1)
+ * stg_block_putmvar_info
+ *
+ * See PrimOps.hc for a description of the workings of take/putMVar.
+ *
+ * -------------------------------------------------------------------------- */
+
+INFO_TABLE_SRT_BITMAP(stg_block_takemvar_info, stg_block_takemvar_ret,
+ 0/*BITMAP*/, 0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
+ RET_SMALL,, IF_, 0, 0);
+
+/* Resumption point when the blocked thread is woken: reload the MVar
+ * pointer into R1, pop the frame, and retry the takeMVar primop.
+ */
+IF_(stg_block_takemvar_ret)
+{
+ FB_
+ R1.w = Sp[0];
+ Sp++;
+ JMP_(takeMVarzh_fast);
+ FE_
+}
+
+/* Entered when takeMVar must block: push the MVar pointer (R1) and the
+ * resumption info pointer (see stack layout above), then return to the
+ * scheduler as blocked via BLOCK_GENERIC.
+ */
+FN_(stg_block_takemvar)
+{
+ FB_
+ Sp -= 2;
+ Sp[1] = R1.w;
+ Sp[0] = (W_)&stg_block_takemvar_info;
+ BLOCK_GENERIC;
+ FE_
+}
+
+INFO_TABLE_SRT_BITMAP(stg_block_putmvar_info, stg_block_putmvar_ret,
+ 0/*BITMAP*/, 0/*SRT*/, 0/*SRT_OFF*/, 0/*SRT_LEN*/,
+ RET_SMALL,, IF_, 0, 0);
+
+/* Resumption point when the blocked thread is woken: reload the value
+ * (R2) and the MVar pointer (R1) from the stack, pop the frame, and
+ * retry the putMVar primop.
+ */
+IF_(stg_block_putmvar_ret)
+{
+ FB_
+ R2.w = Sp[1];
+ R1.w = Sp[0];
+ Sp += 2;
+ JMP_(putMVarzh_fast);
+ FE_
+}
+
+/* Entered when putMVar must block: push the value (R2), the MVar
+ * pointer (R1) and the resumption info pointer (see the stack-layout
+ * comment at the top of this section), then return to the scheduler as
+ * blocked via BLOCK_GENERIC.
+ */
+FN_(stg_block_putmvar)
+{
+ FB_
+ Sp -= 3;
+ Sp[2] = R2.w;
+ Sp[1] = R1.w;
+ Sp[0] = (W_)&stg_block_putmvar_info;
+ BLOCK_GENERIC;
+ FE_
+}