#define BLACKHOLE 42
#define SE_BLACKHOLE 43
#define SE_CAF_BLACKHOLE 44
-#define MVAR 45
-#define ARR_WORDS 46
-#define MUT_ARR_PTRS_CLEAN 47
-#define MUT_ARR_PTRS_DIRTY 48
-#define MUT_ARR_PTRS_FROZEN0 49
-#define MUT_ARR_PTRS_FROZEN 50
-#define MUT_VAR_CLEAN 51
-#define MUT_VAR_DIRTY 52
-#define WEAK 53
-#define STABLE_NAME 54
-#define TSO 55
-#define BLOCKED_FETCH 56
-#define FETCH_ME 57
-#define FETCH_ME_BQ 58
-#define RBH 59
-#define EVACUATED 60
-#define REMOTE_REF 61
-#define TVAR_WATCH_QUEUE 62
-#define INVARIANT_CHECK_QUEUE 63
-#define ATOMIC_INVARIANT 64
-#define TVAR 65
-#define TREC_CHUNK 66
-#define TREC_HEADER 67
-#define ATOMICALLY_FRAME 68
-#define CATCH_RETRY_FRAME 69
-#define CATCH_STM_FRAME 70
-#define N_CLOSURE_TYPES 71
+#define MVAR_CLEAN 45
+#define MVAR_DIRTY 46
+#define ARR_WORDS 47
+#define MUT_ARR_PTRS_CLEAN 48
+#define MUT_ARR_PTRS_DIRTY 49
+#define MUT_ARR_PTRS_FROZEN0 50
+#define MUT_ARR_PTRS_FROZEN 51
+#define MUT_VAR_CLEAN 52
+#define MUT_VAR_DIRTY 53
+#define WEAK 54
+#define STABLE_NAME 55
+#define TSO 56
+#define BLOCKED_FETCH 57
+#define FETCH_ME 58
+#define FETCH_ME_BQ 59
+#define RBH 60
+#define EVACUATED 61
+#define REMOTE_REF 62
+#define TVAR_WATCH_QUEUE 63
+#define INVARIANT_CHECK_QUEUE 64
+#define ATOMIC_INVARIANT 65
+#define TVAR 66
+#define TREC_CHUNK 67
+#define TREC_HEADER 68
+#define ATOMICALLY_FRAME 69
+#define CATCH_RETRY_FRAME 70
+#define CATCH_STM_FRAME 71
+#define N_CLOSURE_TYPES 72
#endif /* CLOSURETYPES_H */
extern HsInt64 getAllocations( void );
extern void revertCAFs( void );
extern void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
+extern void dirty_MVAR(StgRegTable *reg, StgClosure *p);
+
+extern void dirty_TSO(StgClosure *tso);
#endif /* RTSEXTERNAL_H */
RTS_INFO(stg_WEAK_info);
RTS_INFO(stg_DEAD_WEAK_info);
RTS_INFO(stg_STABLE_NAME_info);
-RTS_INFO(stg_FULL_MVAR_info);
-RTS_INFO(stg_EMPTY_MVAR_info);
+RTS_INFO(stg_MVAR_CLEAN_info);
+RTS_INFO(stg_MVAR_DIRTY_info);
RTS_INFO(stg_TSO_info);
RTS_INFO(stg_ARR_WORDS_info);
RTS_INFO(stg_MUT_ARR_WORDS_info);
/* BLACKHOLE = */ ( _NS| _UPT ),
/* SE_BLACKHOLE = */ ( _NS| _UPT ),
/* SE_CAF_BLACKHOLE = */ ( _NS| _UPT ),
-/* MVAR = */ (_HNF| _NS| _MUT|_UPT ),
+/* MVAR_CLEAN = */ (_HNF| _NS| _MUT|_UPT ),
+/* MVAR_DIRTY = */ (_HNF| _NS| _MUT|_UPT ),
/* ARR_WORDS = */ (_HNF| _NS| _UPT ),
/* MUT_ARR_PTRS_CLEAN = */ (_HNF| _NS| _MUT|_UPT ),
/* MUT_ARR_PTRS_DIRTY = */ (_HNF| _NS| _MUT|_UPT ),
/* CATCH_STM_FRAME = */ ( _BTM )
};
-#if N_CLOSURE_TYPES != 71
+#if N_CLOSURE_TYPES != 72
#error Closure types changed: update ClosureFlags.c!
#endif
stg_block_takemvar_finally
{
#ifdef THREADED_RTS
- unlockClosure(R3, stg_EMPTY_MVAR_info);
+ unlockClosure(R3, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(R3, stg_MVAR_DIRTY_info);
#endif
jump StgReturn;
}
stg_block_putmvar_finally
{
#ifdef THREADED_RTS
- unlockClosure(R3, stg_FULL_MVAR_info);
+ unlockClosure(R3, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(R3, stg_MVAR_DIRTY_info);
#endif
jump StgReturn;
}
'inherently used' cases: do nothing.
*/
case TSO:
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
SymX(stg_CAF_BLACKHOLE_info) \
SymX(awakenBlockedQueue) \
SymX(stg_CHARLIKE_closure) \
- SymX(stg_EMPTY_MVAR_info) \
+ SymX(stg_MVAR_CLEAN_info) \
+ SymX(stg_MVAR_DIRTY_info) \
SymX(stg_IND_STATIC_info) \
SymX(stg_INTLIKE_closure) \
SymX(stg_MUT_ARR_PTRS_DIRTY_info) \
{
/* args: R1 = MVar closure */
- if (GET_INFO(R1) == stg_EMPTY_MVAR_info) {
+ if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
RET_N(1);
} else {
RET_N(0);
ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, newMVarzh_fast );
mvar = Hp - SIZEOF_StgMVar + WDS(1);
- SET_HDR(mvar,stg_EMPTY_MVAR_info,W_[CCCS]);
+ SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
+ // MVARs start dirty: generation 0 has no mutable list
StgMVar_head(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#else
info = GET_INFO(mvar);
#endif
+
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
+ }
/* If the MVar is empty, put ourselves on its blocking queue,
* and wait until we're woken up.
*/
- if (info == stg_EMPTY_MVAR_info) {
+ if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
}
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
RET_P(val);
}
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
RET_P(val);
info = GET_INFO(mvar);
#endif
- if (info == stg_EMPTY_MVAR_info) {
+ if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, info);
#endif
/* HACK: we need a pointer to pass back,
* so we abuse NO_FINALIZER_closure
RET_NP(0, stg_NO_FINALIZER_closure);
}
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
+ }
+
/* we got the value... */
val = StgMVar_value(mvar);
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
else
/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
info = GET_INFO(mvar);
#endif
- if (info == stg_FULL_MVAR_info) {
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
+ }
+
+ if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
}
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
StgMVar_value(mvar) = R2;
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_FULL_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
info = GET_INFO(mvar);
#endif
- if (info == stg_FULL_MVAR_info) {
+ if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, info);
#endif
RET_N(0);
}
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
+ }
+
if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
/* There are takeMVar(s) waiting: wake up the first one
}
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
else
StgMVar_value(mvar) = R2;
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_FULL_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
debugBelch("MUT_ARR_PTRS_FROZEN(size=%lu)\n", (lnat)((StgMutArrPtrs *)obj)->ptrs);
break;
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
StgMVar* mv = (StgMVar*)obj;
debugBelch("MVAR(head=%p, tail=%p, value=%p)\n", mv->head, mv->tail, mv->value);
"BLACKHOLE",
"SE_BLACKHOLE",
"SE_CAF_BLACKHOLE",
- "MVAR",
+ "MVAR_CLEAN",
+ "MVAR_DIRTY",
"ARR_WORDS",
"MUT_ARR_PTRS_CLEAN",
"MUT_ARR_PTRS_DIRTY",
size = bco_sizeW((StgBCO *)p);
break;
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
case WEAK:
case STABLE_NAME:
case MUT_VAR_CLEAN:
// ASSUMPTION: tso->block_info must always point to a
// closure. In the threaded RTS it does.
- if (get_itbl(mvar)->type != MVAR) goto retry;
+ switch (get_itbl(mvar)->type) {
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
+ break;
+ default:
+ goto retry;
+ }
info = lockClosure((StgClosure *)mvar);
// three children (fixed), no SRT
// need to push a stackElement
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
// head must be TSO and the head of a linked list of TSOs.
// Shoule it be a child? Seems to be yes.
*first_child = (StgClosure *)((StgMVar *)c)->head;
// three children (fixed), no SRT
// need to push a stackElement
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
if (se->info.next.step == 2) {
*c = (StgClosure *)((StgMVar *)se->c)->tail;
se->info.next.step++; // move to the next step
case TSO:
// mutable objects
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case MUT_ARR_PTRS_CLEAN:
info = get_itbl(p);
switch (info->type) {
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
StgMVar *mvar = (StgMVar *)p;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
and entry code for each type.
------------------------------------------------------------------------- */
-INFO_TABLE(stg_FULL_MVAR,3,0,MVAR,"MVAR","MVAR")
-{ foreign "C" barf("FULL_MVAR object entered!") never returns; }
+INFO_TABLE(stg_MVAR_CLEAN,3,0,MVAR_CLEAN,"MVAR","MVAR")
+{ foreign "C" barf("MVAR object entered!") never returns; }
-INFO_TABLE(stg_EMPTY_MVAR,3,0,MVAR,"MVAR","MVAR")
-{ foreign "C" barf("EMPTY_MVAR object entered!") never returns; }
+INFO_TABLE(stg_MVAR_DIRTY,3,0,MVAR_DIRTY,"MVAR","MVAR")
+{ foreign "C" barf("MVAR object entered!") never returns; }
/* -----------------------------------------------------------------------------
STM
return p + sizeofW(StgWeak);
}
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
StgMVar *mvar = (StgMVar *)p;
thread_(&mvar->head);
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
return copy(q,sizeW_fromITBL(info),stp);
case CONSTR_0_1:
#ifdef DEBUG
nat mutlist_MUTVARS,
mutlist_MUTARRS,
+ mutlist_MVARS,
mutlist_OTHERS;
#endif
copied += mut_list_size;
debugTrace(DEBUG_gc,
- "mut_list_size: %lu (%d vars, %d arrays, %d others)",
+ "mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d others)",
(unsigned long)(mut_list_size * sizeof(W_)),
- mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
+ mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS);
}
for (s = 0; s < generations[g].n_steps; s++) {
extern lnat new_scavd_blocks; // ditto, but depth-first blocks
#ifdef DEBUG
-extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS;
+extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS;
#endif
StgClosure * isAlive(StgClosure *p);
q = p;
switch (info->type) {
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
+ rtsBool saved_eager_promotion = eager_promotion;
+
StgMVar *mvar = ((StgMVar *)p);
- evac_gen = 0;
+ eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable.
+ eager_promotion = saved_eager_promotion;
+
+ if (failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
p += sizeofW(StgMVar);
break;
}
q = p;
switch (info->type) {
- case MVAR:
- {
- StgMVar *mvar = ((StgMVar *)p);
- evac_gen = 0;
- mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
- mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
- mvar->value = evacuate((StgClosure *)mvar->value);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable.
- break;
- }
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
+ {
+ rtsBool saved_eager_promotion = eager_promotion;
+
+ StgMVar *mvar = ((StgMVar *)p);
+ eager_promotion = rtsFalse;
+ mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
+ mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
+ mvar->value = evacuate((StgClosure *)mvar->value);
+ eager_promotion = saved_eager_promotion;
+
+ if (failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
+ break;
+ }
case FUN_2_0:
scavenge_fun_srt(info);
switch (info->type) {
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
+ rtsBool saved_eager_promotion = eager_promotion;
+
StgMVar *mvar = ((StgMVar *)p);
- evac_gen = 0;
+ eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable.
+ eager_promotion = saved_eager_promotion;
+
+ if (failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
break;
}
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
mutlist_MUTARRS++; break;
+ case MVAR_CLEAN:
+ barf("MVAR_CLEAN on mutable list");
+ case MVAR_DIRTY:
+ mutlist_MVARS++; break;
default:
mutlist_OTHERS++; break;
}
}
/* -----------------------------------------------------------------------------
+ Write Barriers
+ -------------------------------------------------------------------------- */
+
+/*
This is the write barrier for MUT_VARs, a.k.a. IORefs. A
MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
and is put on the mutable list.
- -------------------------------------------------------------------------- */
-
+*/
void
dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
{
}
}
+/*
+  This is the write barrier for MVARs. An MVAR_CLEAN object is not
+ on the mutable list; a MVAR_DIRTY is. When written to, a
+ MVAR_CLEAN turns into a MVAR_DIRTY and is put on the mutable list.
+  The check for MVAR_CLEAN is inlined at the call site for speed;
+ this really does make a difference on concurrency-heavy benchmarks
+  such as Chameneos and cheap-concurrency.
+*/
+void
+dirty_MVAR(StgRegTable *reg, StgClosure *p)
+{
+ Capability *cap = regTableToCapability(reg);
+ bdescr *bd;
+ bd = Bdescr((StgPtr)p);
+ if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
+}
+
/* -----------------------------------------------------------------------------
Allocation functions for GMP.