1 /* -----------------------------------------------------------------------------
2 * $Id: StgMiscClosures.hc,v 1.64 2001/02/11 17:51:08 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
6 * Entry code for various built-in closure types.
8 * ---------------------------------------------------------------------------*/
13 #include "StgMiscClosures.h"
14 #include "HeapStackCheck.h" /* for stg_gen_yield */
16 #include "StoragePriv.h"
17 #include "Profiling.h"
21 #if defined(GRAN) || defined(PAR)
22 # include "GranSimRts.h" /* for DumpRawGranEvent */
23 # include "StgRun.h" /* for StgReturn and register saving */
30 /* ToDo: make the printing of panics more win32-friendly, i.e.,
31 * pop up some lovely message boxes (as well).
/* DUMP_ERRMSG(msg): emit a fatal-error message on stderr via the STG
 * foreign-call convention (STGCALL2 saves/restores STG registers). */
33 #define DUMP_ERRMSG(msg) STGCALL2(fprintf,stderr,msg)
36 Template for the entry code of non-enterable closures.
/* NON_ENTERABLE_ENTRY_CODE(type): expands to the entry point
 * stg_<type>_entry for closure types that must never be entered.
 * Entering one is a fatal RTS error: report it and shut down. */
39 #define NON_ENTERABLE_ENTRY_CODE(type) \
40 STGFUN(stg_##type##_entry) \
43 DUMP_ERRMSG(#type " object entered!\n"); \
44 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
50 /* -----------------------------------------------------------------------------
51 Support for the bytecode interpreter.
52 -------------------------------------------------------------------------- */
54 /* 9 bits of return code for constructors created by the interpreter. */
/* Direct return for an interpreter-built constructor: R1 points at the
 * constructor; propagate its tag and return through the info pointer
 * on top of the stack. */
55 FN_(stg_interp_constr_entry)
57 /* R1 points at the constructor */
59 /* STGCALL2(fprintf,stderr,"stg_interp_constr_entry (direct return)!\n"); */
60 /* Pointless, since SET_TAG doesn't do anything */
61 SET_TAG( GET_TAG(GET_INFO(R1.cl)));
62 JMP_(ENTRY_CODE((P_)(*Sp)));
/* Vectored returns for constructors with tags 1..8: jump directly to
 * the corresponding slot (0..7) of the return vector on top of the
 * stack. */
66 FN_(stg_interp_constr1_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),0)); FE_ }
67 FN_(stg_interp_constr2_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),1)); FE_ }
68 FN_(stg_interp_constr3_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),2)); FE_ }
69 FN_(stg_interp_constr4_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),3)); FE_ }
70 FN_(stg_interp_constr5_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),4)); FE_ }
71 FN_(stg_interp_constr6_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),5)); FE_ }
72 FN_(stg_interp_constr7_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),6)); FE_ }
73 FN_(stg_interp_constr8_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),7)); FE_ }
75 /* Some info tables to be used when compiled code returns a value to
76 the interpreter, i.e. the interpreter pushes one of these onto the
77 stack before entering a value. What the code does is to
78 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
79 the interpreter's convention (returned value is on top of stack),
80 and then cause the scheduler to enter the interpreter.
82 On entry, the stack (growing down) looks like this:
84 ptr to BCO holding return continuation
85 ptr to one of these info tables.
87 The info table code, both direct and vectored, must:
88 * push R1/F1/D1 on the stack, and its tag if necessary
89 * push the BCO (so it's now on the stack twice)
90 * Yield, ie, go to the scheduler.
92 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
93 directly to the bytecode interpreter. That pops the top element
94 (the BCO, containing the return continuation), and interprets it.
95 Net result: return continuation gets interpreted, with the
99 ptr to the info table just jumped thru
102 which is just what we want -- the "standard" return layout for the
105 Don't ask me how unboxed tuple returns are supposed to work. We
106 haven't got a good story about that yet.
109 /* When the returned value is in R1 and it is a pointer, so doesn't
/* Compiled-code-to-interpreter return point, pointer case: push the
 * returned pointer (R1.p) and then the BCO continuation onto the
 * stack, and yield to the scheduler so the interpreter resumes with
 * the BCO on top of stack. */
111 #define STG_CtoI_RET_R1p_Template(label) \
116 bco = ((StgPtr*)Sp)[1]; \
118 ((StgPtr*)Sp)[0] = R1.p; \
120 ((StgPtr*)Sp)[0] = bco; \
121 JMP_(stg_yield_to_interpreter); \
125 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_entry);
126 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_0_entry);
127 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_1_entry);
128 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_2_entry);
129 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_3_entry);
130 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_4_entry);
131 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_5_entry);
132 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_6_entry);
133 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_7_entry);
/* One polymorphic info table covering the direct and 8 vectored
 * entries above. */
135 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1p,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
139 /* When the returned value is in R1 and it isn't a pointer. */
/* As the R1p case, but for a non-pointer returned in R1: push the raw
 * word, then a tag word of 1 (one non-pointer word follows), then the
 * BCO continuation, and yield to the interpreter. */
140 #define STG_CtoI_RET_R1n_Template(label) \
145 bco = ((StgPtr*)Sp)[1]; \
147 ((StgPtr*)Sp)[0] = (StgPtr)R1.i; \
149 ((StgPtr*)Sp)[0] = (StgPtr)1; /* tag */ \
151 ((StgPtr*)Sp)[0] = bco; \
152 JMP_(stg_yield_to_interpreter); \
156 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_entry);
157 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_0_entry);
158 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_1_entry);
159 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_2_entry);
160 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_3_entry);
161 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_4_entry);
162 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_5_entry);
163 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_6_entry);
164 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_7_entry);
/* One polymorphic info table covering the direct and 8 vectored
 * entries above. */
166 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1n,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
170 /* When the returned value is in F1 ... */
/* Compiled-to-interpreter return point for a float returned in F1:
 * make room for the float, store it, push a tag of sizeofW(StgFloat)
 * words, then the BCO continuation, and yield to the interpreter. */
171 #define STG_CtoI_RET_F1_Template(label) \
176 bco = ((StgPtr*)Sp)[1]; \
177 Sp -= sizeofW(StgFloat); \
178 ASSIGN_FLT((W_*)Sp, F1); \
180 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgFloat); \
182 ((StgPtr*)Sp)[0] = bco; \
183 JMP_(stg_yield_to_interpreter); \
187 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_entry);
188 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_0_entry);
189 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_1_entry);
190 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_2_entry);
191 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_3_entry);
192 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_4_entry);
193 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_5_entry);
194 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_6_entry);
195 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_7_entry);
/* One polymorphic info table covering the direct and 8 vectored
 * entries above. */
197 VEC_POLY_INFO_TABLE(stg_ctoi_ret_F1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
200 /* When the returned value is in D1 ... */
/* As the F1 case, but for a double returned in D1: the tag pushed is
 * sizeofW(StgDouble) words. */
201 #define STG_CtoI_RET_D1_Template(label) \
206 bco = ((StgPtr*)Sp)[1]; \
207 Sp -= sizeofW(StgDouble); \
208 ASSIGN_DBL((W_*)Sp, D1); \
210 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgDouble); \
212 ((StgPtr*)Sp)[0] = bco; \
213 JMP_(stg_yield_to_interpreter); \
217 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_entry);
218 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_0_entry);
219 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_1_entry);
220 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_2_entry);
221 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_3_entry);
222 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_4_entry);
223 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_5_entry);
224 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_6_entry);
225 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_7_entry);
/* One polymorphic info table covering the direct and 8 vectored
 * entries above. */
227 VEC_POLY_INFO_TABLE(stg_ctoi_ret_D1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
229 /* The other way round: when the interpreter returns a value to
230 compiled code. The stack looks like this:
232 return info table (pushed by compiled code)
233 return value (pushed by interpreter)
235 If the value is ptr-rep'd, the interpreter simply returns to the
236 scheduler, instructing it to ThreadEnterGHC.
238 Otherwise (unboxed return value), we replace the top stack word,
239 which must be the tag, with stg_gc_unbx_r1_info (or f1_info or d1_info),
240 and return to the scheduler, instructing it to ThreadRunGHC.
242 No supporting code needed!
246 /* Entering a BCO. Heave it on the stack and defer to the
   scheduler: the interpreter, not compiled code, must run it. */
248 INFO_TABLE(stg_BCO_info,stg_BCO_entry,4,0,BCO,,EF_,"BCO","BCO");
249 STGFUN(stg_BCO_entry) {
253 JMP_(stg_yield_to_interpreter);
258 /* -----------------------------------------------------------------------------
259 Entry code for an indirection.
260 -------------------------------------------------------------------------- */
/* IND: plain indirection.  Follow the indirectee and enter it. */
262 INFO_TABLE(stg_IND_info,stg_IND_entry,1,0,IND,,EF_,0,0);
263 STGFUN(stg_IND_entry)
266 TICK_ENT_IND(Node); /* tick */
268 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
270 JMP_(ENTRY_CODE(*R1.p));
/* IND_STATIC: indirection from a static closure; same follow-and-enter
 * behaviour as IND. */
274 INFO_TABLE(stg_IND_STATIC_info,stg_IND_STATIC_entry,1,0,IND_STATIC,,EF_,0,0);
275 STGFUN(stg_IND_STATIC_entry)
278 TICK_ENT_IND(Node); /* tick */
279 R1.p = (P_) ((StgIndStatic*)R1.p)->indirectee;
281 JMP_(ENTRY_CODE(*R1.p));
/* IND_PERM: "permanent" indirection kept alive for profiling (cost
 * centres) or ticky-ticky statistics; under ticky-only builds it
 * demotes itself to a plain IND on first entry. */
285 INFO_TABLE(stg_IND_PERM_info,stg_IND_PERM_entry,1,1,IND_PERM,,EF_,"IND_PERM","IND_PERM");
286 STGFUN(stg_IND_PERM_entry)
289 /* Don't add INDs to granularity cost */
290 /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
292 #if defined(TICKY_TICKY) && !defined(PROFILING)
293 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
294 TICK_ENT_PERM_IND(R1.p); /* tick */
297 /* Enter PAP cost centre -- lexical scoping only */
298 ENTER_CCS_PAP_CL(R1.cl);
300 /* For ticky-ticky, change the perm_ind to a normal ind on first
301 * entry, so the number of ent_perm_inds is the number of *thunks*
302 * entered again, not the number of subsequent entries.
304 * Since this screws up cost centres, we die if profiling and
305 * ticky_ticky are on at the same time. KSW 1999-01.
310 # error Profiling and ticky-ticky do not mix at present!
311 # endif /* PROFILING */
312 SET_INFO((StgInd*)R1.p,&stg_IND_info);
313 #endif /* TICKY_TICKY */
315 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
317 /* Dont: TICK_ENT_VIA_NODE(); for ticky-ticky; as above */
319 #if defined(TICKY_TICKY) && !defined(PROFILING)
323 JMP_(ENTRY_CODE(*R1.p));
/* IND_OLDGEN: old-generation indirection (has an extra word for the
 * mutable list link); otherwise follow-and-enter like IND. */
327 INFO_TABLE(stg_IND_OLDGEN_info,stg_IND_OLDGEN_entry,1,1,IND_OLDGEN,,EF_,0,0);
328 STGFUN(stg_IND_OLDGEN_entry)
331 TICK_ENT_IND(Node); /* tick */
333 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
335 JMP_(ENTRY_CODE(*R1.p));
/* IND_OLDGEN_PERM: old-generation analogue of IND_PERM; demotes to
 * IND_OLDGEN under ticky-only builds. */
339 INFO_TABLE(stg_IND_OLDGEN_PERM_info,stg_IND_OLDGEN_PERM_entry,1,1,IND_OLDGEN_PERM,,EF_,0,0);
340 STGFUN(stg_IND_OLDGEN_PERM_entry)
343 /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
345 #if defined(TICKY_TICKY) && !defined(PROFILING)
346 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
347 TICK_ENT_PERM_IND(R1.p); /* tick */
350 /* Enter PAP cost centre -- lexical scoping only */
351 ENTER_CCS_PAP_CL(R1.cl);
353 /* see comment in IND_PERM */
356 # error Profiling and ticky-ticky do not mix at present!
357 # endif /* PROFILING */
358 SET_INFO((StgInd*)R1.p,&stg_IND_OLDGEN_info);
359 #endif /* TICKY_TICKY */
361 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
363 JMP_(ENTRY_CODE(*R1.p));
367 /* -----------------------------------------------------------------------------
368 Entry code for a black hole.
370 Entering a black hole normally causes a cyclic data dependency, but
371 in the concurrent world, black holes are synchronization points,
372 and they are turned into blocking queues when there are threads
373 waiting for the evaluation of the closure to finish.
374 -------------------------------------------------------------------------- */
376 /* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
377 * overwritten with an indirection/evacuee/catch. Thus we claim it
378 * has 1 non-pointer word of payload (in addition to the pointer word
379 * for the blocking queue in a BQ), which should be big enough for an
380 * old-generation indirection.
/* Entering a BLACKHOLE: the thunk is already under evaluation, so this
 * thread must block.  Queue the current TSO on the hole and upgrade
 * the closure to a BLACKHOLE_BQ. */
383 INFO_TABLE(stg_BLACKHOLE_info, stg_BLACKHOLE_entry,0,2,BLACKHOLE,,EF_,"BLACKHOLE","BLACKHOLE");
384 STGFUN(stg_BLACKHOLE_entry)
388 /* Before overwriting TSO_LINK */
389 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* SMP path: if the hole belongs to another capability, try to claim it
 * by atomically swapping WHITEHOLE for its BLACKHOLE header; the other
 * branch re-enters via stg_gc_enter_1_hponly. */
394 bdescr *bd = Bdescr(R1.p);
395 if (bd->back != (bdescr *)BaseReg) {
396 if (bd->gen->no >= 1 || bd->step->no >= 1) {
397 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
399 EXTFUN_RTS(stg_gc_enter_1_hponly);
400 JMP_(stg_gc_enter_1_hponly);
407 /* Put ourselves on the blocking queue for this black hole */
408 #if defined(GRAN) || defined(PAR)
409 /* in fact, only difference is the type of the end-of-queue marker! */
410 CurrentTSO->link = END_BQ_QUEUE;
411 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
413 CurrentTSO->link = END_TSO_QUEUE;
414 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
416 /* jot down why and on what closure we are blocked */
417 CurrentTSO->why_blocked = BlockedOnBlackHole;
418 CurrentTSO->block_info.closure = R1.cl;
419 /* closure is mutable since something has just been added to its BQ */
420 recordMutable((StgMutClosure *)R1.cl);
421 /* Change the BLACKHOLE into a BLACKHOLE_BQ */
422 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
424 /* PAR: dumping of event now done in blockThread -- HWL */
426 /* stg_gen_block is too heavyweight, use a specialised one */
/* Entering a BLACKHOLE_BQ: like BLACKHOLE, but a blocking queue
 * already exists, so just prepend the current TSO to it and block. */
432 INFO_TABLE(stg_BLACKHOLE_BQ_info, stg_BLACKHOLE_BQ_entry,1,1,BLACKHOLE_BQ,,EF_,"BLACKHOLE","BLACKHOLE");
433 STGFUN(stg_BLACKHOLE_BQ_entry)
437 /* Before overwriting TSO_LINK */
438 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
443 bdescr *bd = Bdescr(R1.p);
444 if (bd->back != (bdescr *)BaseReg) {
445 if (bd->gen->no >= 1 || bd->step->no >= 1) {
/* NOTE(review): this CMPXCHG compares the header against
 * &stg_BLACKHOLE_info, but a closure entered through this entry code
 * has header stg_BLACKHOLE_BQ_info, so the swap can seemingly never
 * succeed -- looks like a copy-paste from stg_BLACKHOLE_entry; should
 * presumably be &stg_BLACKHOLE_BQ_info.  Confirm against the SMP
 * design before changing. */
446 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
448 EXTFUN_RTS(stg_gc_enter_1_hponly);
449 JMP_(stg_gc_enter_1_hponly);
457 /* Put ourselves on the blocking queue for this black hole */
458 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
459 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
460 /* jot down why and on what closure we are blocked */
461 CurrentTSO->why_blocked = BlockedOnBlackHole;
462 CurrentTSO->block_info.closure = R1.cl;
464 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
467 /* PAR: dumping of event now done in blockThread -- HWL */
469 /* stg_gen_block is too heavyweight, use a specialised one */
475 Revertible black holes are needed in the parallel world, to handle
476 negative acknowledgements of messages containing updatable closures.
477 The idea is that when the original message is transmitted, the closure
478 is turned into a revertible black hole...an object which acts like a
479 black hole when local threads try to enter it, but which can be reverted
480 back to the original closure if necessary.
482 It's actually a lot like a blocking queue (BQ) entry, because revertible
483 black holes are initially set up with an empty blocking queue.
486 #if defined(PAR) || defined(GRAN)
/* RBH (revertible black hole): behaves like a BLACKHOLE_BQ for local
 * threads, but can be reverted to the original closure on a negative
 * acknowledgement (parallel RTS only). */
488 INFO_TABLE(stg_RBH_info, stg_RBH_entry,1,1,RBH,,EF_,0,0);
489 STGFUN(stg_RBH_entry)
493 /* mainly statistics gathering for GranSim simulation */
494 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
497 /* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
498 /* Put ourselves on the blocking queue for this black hole */
499 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
500 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
501 /* jot down why and on what closure we are blocked */
502 CurrentTSO->why_blocked = BlockedOnBlackHole;
503 CurrentTSO->block_info.closure = R1.cl;
505 /* PAR: dumping of event now done in blockThread -- HWL */
507 /* stg_gen_block is too heavyweight, use a specialised one */
/* RBH_Save_n: constructors holding the n pointer words displaced when
 * a closure is converted to an RBH; never entered. */
512 INFO_TABLE(stg_RBH_Save_0_info, stg_RBH_Save_0_entry,0,2,CONSTR,,EF_,0,0);
513 NON_ENTERABLE_ENTRY_CODE(RBH_Save_0);
515 INFO_TABLE(stg_RBH_Save_1_info, stg_RBH_Save_1_entry,1,1,CONSTR,,EF_,0,0);
516 NON_ENTERABLE_ENTRY_CODE(RBH_Save_1);
518 INFO_TABLE(stg_RBH_Save_2_info, stg_RBH_Save_2_entry,2,0,CONSTR,,EF_,0,0);
519 NON_ENTERABLE_ENTRY_CODE(RBH_Save_2);
520 #endif /* defined(PAR) || defined(GRAN) */
522 /* identical to BLACKHOLEs except for the infotag */
/* CAF_BLACKHOLE: black hole for a CAF under evaluation; entry code is
 * the same block-and-enqueue logic as BLACKHOLE, differing only in the
 * info tag compared in the SMP claim. */
523 INFO_TABLE(stg_CAF_BLACKHOLE_info, stg_CAF_BLACKHOLE_entry,0,2,CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
524 STGFUN(stg_CAF_BLACKHOLE_entry)
528 /* mainly statistics gathering for GranSim simulation */
529 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
534 bdescr *bd = Bdescr(R1.p);
535 if (bd->back != (bdescr *)BaseReg) {
536 if (bd->gen->no >= 1 || bd->step->no >= 1) {
537 CMPXCHG(R1.cl->header.info, &stg_CAF_BLACKHOLE_info, &stg_WHITEHOLE_info);
539 EXTFUN_RTS(stg_gc_enter_1_hponly);
540 JMP_(stg_gc_enter_1_hponly);
548 /* Put ourselves on the blocking queue for this black hole */
549 #if defined(GRAN) || defined(PAR)
550 /* in fact, only difference is the type of the end-of-queue marker! */
551 CurrentTSO->link = END_BQ_QUEUE;
552 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
554 CurrentTSO->link = END_TSO_QUEUE;
555 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
557 /* jot down why and on what closure we are blocked */
558 CurrentTSO->why_blocked = BlockedOnBlackHole;
559 CurrentTSO->block_info.closure = R1.cl;
560 /* closure is mutable since something has just been added to its BQ */
561 recordMutable((StgMutClosure *)R1.cl);
562 /* Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC */
563 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
565 /* PAR: dumping of event now done in blockThread -- HWL */
567 /* stg_gen_block is too heavyweight, use a specialised one */
/* SE_BLACKHOLE: entering one is always a fatal error -- report the
 * address and shut the RTS down. */
573 INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,EF_,0,0);
574 STGFUN(stg_SE_BLACKHOLE_entry)
577 STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1.p);
578 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
582 INFO_TABLE(SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,EF_,0,0);
583 STGFUN(stg_SE_CAF_BLACKHOLE_entry)
586 STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1.p);
587 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* WHITEHOLE: transient marker used while a closure is being claimed
 * (see the CMPXCHG uses above); entering one simply re-enters the
 * closure via its (by then restored) entry code. */
593 INFO_TABLE(stg_WHITEHOLE_info, stg_WHITEHOLE_entry,0,2,CONSTR_NOCAF_STATIC,,EF_,0,0);
594 STGFUN(stg_WHITEHOLE_entry)
597 JMP_(GET_ENTRY(R1.cl));
602 /* -----------------------------------------------------------------------------
603 Some static info tables for things that don't get entered, and
604 therefore don't need entry code (i.e. boxed but unpointed objects)
605 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
606 -------------------------------------------------------------------------- */
/* TSO: thread state objects are never entered. */
608 INFO_TABLE(stg_TSO_info, stg_TSO_entry, 0,0,TSO,,EF_,"TSO","TSO");
609 NON_ENTERABLE_ENTRY_CODE(TSO);
611 /* -----------------------------------------------------------------------------
612 Evacuees are left behind by the garbage collector. Any attempt to enter
614 -------------------------------------------------------------------------- */
/* EVACUATED: forwarding stub left by the GC; never entered. */
616 INFO_TABLE(stg_EVACUATED_info,stg_EVACUATED_entry,1,0,EVACUATED,,EF_,0,0);
617 NON_ENTERABLE_ENTRY_CODE(EVACUATED);
619 /* -----------------------------------------------------------------------------
622 Live weak pointers have a special closure type. Dead ones are just
623 nullary constructors (although they live on the heap - we overwrite
624 live weak pointers with dead ones).
625 -------------------------------------------------------------------------- */
/* WEAK: live weak pointer (4 non-pointer-counted words of payload);
 * never entered. */
627 INFO_TABLE(stg_WEAK_info,stg_WEAK_entry,0,4,WEAK,,EF_,"WEAK","WEAK");
628 NON_ENTERABLE_ENTRY_CODE(WEAK);
/* DEAD_WEAK: a weak pointer after its key has died; overwrites a live
 * WEAK in place.  Never entered. */
630 INFO_TABLE_CONSTR(stg_DEAD_WEAK_info,stg_DEAD_WEAK_entry,0,1,0,CONSTR,,EF_,"DEAD_WEAK","DEAD_WEAK");
631 NON_ENTERABLE_ENTRY_CODE(DEAD_WEAK);
633 /* -----------------------------------------------------------------------------
636 This is a static nullary constructor (like []) that we use to mark an empty
637 finalizer in a weak pointer object.
638 -------------------------------------------------------------------------- */
640 INFO_TABLE_CONSTR(stg_NO_FINALIZER_info,stg_NO_FINALIZER_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
641 NON_ENTERABLE_ENTRY_CODE(NO_FINALIZER);
643 SET_STATIC_HDR(stg_NO_FINALIZER_closure,stg_NO_FINALIZER_info,0/*CC*/,,EI_)
646 /* -----------------------------------------------------------------------------
647 Foreign Objects are unlifted and therefore never entered.
648 -------------------------------------------------------------------------- */
/* FOREIGN: foreign object wrapper; unlifted, never entered. */
650 INFO_TABLE(stg_FOREIGN_info,stg_FOREIGN_entry,0,1,FOREIGN,,EF_,"FOREIGN","FOREIGN");
651 NON_ENTERABLE_ENTRY_CODE(FOREIGN);
653 /* -----------------------------------------------------------------------------
654 Stable Names are unlifted too.
655 -------------------------------------------------------------------------- */
657 INFO_TABLE(stg_STABLE_NAME_info,stg_STABLE_NAME_entry,0,1,STABLE_NAME,,EF_,"STABLE_NAME","STABLE_NAME");
658 NON_ENTERABLE_ENTRY_CODE(STABLE_NAME);
660 /* -----------------------------------------------------------------------------
663 There are two kinds of these: full and empty. We need an info table
664 and entry code for each type.
665 -------------------------------------------------------------------------- */
/* MVARs share the MVAR closure type; full vs empty is distinguished by
 * which of these two info tables the closure carries.  Never entered. */
667 INFO_TABLE(stg_FULL_MVAR_info,stg_FULL_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
668 NON_ENTERABLE_ENTRY_CODE(FULL_MVAR);
670 INFO_TABLE(stg_EMPTY_MVAR_info,stg_EMPTY_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
671 NON_ENTERABLE_ENTRY_CODE(EMPTY_MVAR);
673 /* -----------------------------------------------------------------------------
676 This is a static nullary constructor (like []) that we use to mark the
677 end of a linked TSO queue.
678 -------------------------------------------------------------------------- */
/* END_TSO_QUEUE: static nullary end-of-queue marker for TSO lists. */
680 INFO_TABLE_CONSTR(stg_END_TSO_QUEUE_info,stg_END_TSO_QUEUE_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
681 NON_ENTERABLE_ENTRY_CODE(END_TSO_QUEUE);
683 SET_STATIC_HDR(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE_info,0/*CC*/,,EI_)
686 /* -----------------------------------------------------------------------------
689 Mutable lists (used by the garbage collector) consist of a chain of
690 StgMutClosures connected through their mut_link fields, ending in
691 an END_MUT_LIST closure.
692 -------------------------------------------------------------------------- */
694 INFO_TABLE_CONSTR(stg_END_MUT_LIST_info,stg_END_MUT_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
695 NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST);
697 SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,EI_)
/* MUT_CONS: cell used to thread closures onto the mutable list. */
700 INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_VAR, , EF_, 0, 0);
701 NON_ENTERABLE_ENTRY_CODE(MUT_CONS);
703 /* -----------------------------------------------------------------------------
705 -------------------------------------------------------------------------- */
/* Exception-blocking list: end marker plus cons cell, mirroring the
 * mutable-list closures above. */
707 INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST_info,stg_END_EXCEPTION_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
708 NON_ENTERABLE_ENTRY_CODE(END_EXCEPTION_LIST);
710 SET_STATIC_HDR(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST_info,0/*CC*/,,EI_)
713 INFO_TABLE(stg_EXCEPTION_CONS_info, stg_EXCEPTION_CONS_entry, 1, 1, CONSTR, , EF_, 0, 0);
714 NON_ENTERABLE_ENTRY_CODE(EXCEPTION_CONS);
716 /* -----------------------------------------------------------------------------
719 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
720 pointers (StgArrPtrs). They all have a similar layout:
722 ___________________________
723 | Info | No. of | data....
725 ---------------------------
727 These are *unpointed* objects: i.e. they cannot be entered.
729 -------------------------------------------------------------------------- */
/* ArrayInfo(type): stamps out an info table for an array closure type;
 * arrays are unpointed and therefore also get NON_ENTERABLE entry
 * code. */
731 #define ArrayInfo(type) \
732 INFO_TABLE(stg_##type##_info, stg_##type##_entry, 0, 0, type, , EF_,"" # type "","" # type "");
734 ArrayInfo(ARR_WORDS);
735 NON_ENTERABLE_ENTRY_CODE(ARR_WORDS);
736 ArrayInfo(MUT_ARR_PTRS);
737 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS);
738 ArrayInfo(MUT_ARR_PTRS_FROZEN);
739 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS_FROZEN);
743 /* -----------------------------------------------------------------------------
745 -------------------------------------------------------------------------- */
/* MUT_VAR: mutable variable (IORef/STRef representation); never
 * entered. */
747 INFO_TABLE(stg_MUT_VAR_info, stg_MUT_VAR_entry, 1, 1, MUT_VAR, , EF_, "MUT_VAR", "MUT_VAR");
748 NON_ENTERABLE_ENTRY_CODE(MUT_VAR);
750 /* -----------------------------------------------------------------------------
751 Standard Error Entry.
753 This is used for filling in vector-table entries that can never happen,
755 -------------------------------------------------------------------------- */
756 /* No longer used; we use NULL, because a) it never happens, right? and b)
757 Windows doesn't like DLL entry points being used as static initialisers
758 STGFUN(stg_error_entry) \
761 DUMP_ERRMSG("fatal: stg_error_entry"); \
762 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
767 /* -----------------------------------------------------------------------------
770 Entering this closure will just return to the address on the top of the
771 stack. Useful for getting a thread in a canonical form where we can
772 just enter the top stack word to start the thread. (see deleteThread)
773 * -------------------------------------------------------------------------- */
/* dummy_ret: entering this closure just returns through the address on
 * top of the stack -- used to put a thread into a canonical
 * enter-top-of-stack form (see deleteThread). */
775 INFO_TABLE(stg_dummy_ret_info, stg_dummy_ret_entry, 0, 0, CONSTR_NOCAF_STATIC, , EF_, 0, 0);
776 STGFUN(stg_dummy_ret_entry)
782 JMP_(ENTRY_CODE(ret_addr));
785 SET_STATIC_HDR(stg_dummy_ret_closure,stg_dummy_ret_info,CCS_DONT_CARE,,EI_)
788 /* -----------------------------------------------------------------------------
789 Strict IO application - performing an IO action and entering its result.
791 rts_evalIO() lets you perform Haskell IO actions from outside of Haskell-land,
792 returning back to you their result. Want this result to be evaluated to WHNF
793 by that time, so that we can easily get at the int/char/whatever using the
794 various get{Ty} functions provided by the RTS API.
796 forceIO takes care of this, performing the IO action and entering the
797 result that comes back.
799 * -------------------------------------------------------------------------- */
/* forceIO return continuation: after the IO action completes, enter
 * its result so rts_evalIO callers get a WHNF value.
 * NOTE(review): stg_forceIO_ret_info/stg_forceIO_ret_entry appears
 * twice below; presumably the two versions sit in alternate
 * configuration branches (the #if lines are not visible here) --
 * confirm against the full source. */
802 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
803 STGFUN(stg_forceIO_ret_entry)
807 Sp -= sizeofW(StgSeqFrame);
809 JMP_(GET_ENTRY(R1.cl));
812 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
813 STGFUN(stg_forceIO_ret_entry)
817 rval = (StgClosure *)Sp[0];
819 Sp -= sizeofW(StgSeqFrame);
822 JMP_(GET_ENTRY(R1.cl));
/* forceIO: wraps an IO action so that its result is entered (forced to
 * WHNF) before control returns -- used by rts_evalIO. */
826 INFO_TABLE(stg_forceIO_info,stg_forceIO_entry,1,0,FUN_STATIC,,EF_,0,0);
827 FN_(stg_forceIO_entry)
830 /* Sp[0] contains the IO action we want to perform */
832 /* Replace it with the return continuation that enters the result. */
833 Sp[0] = (W_)&stg_forceIO_ret_info;
835 /* Push the RealWorld# tag and enter */
836 Sp[0] =(W_)REALWORLD_TAG;
837 JMP_(GET_ENTRY(R1.cl));
840 SET_STATIC_HDR(stg_forceIO_closure,stg_forceIO_info,CCS_DONT_CARE,,EI_)
844 /* -----------------------------------------------------------------------------
845 CHARLIKE and INTLIKE closures.
847 These are static representations of Chars and small Ints, so that
848 we can remove dynamic Chars and Ints during garbage collection and
849 replace them with references to the static objects.
850 -------------------------------------------------------------------------- */
852 #if defined(INTERPRETER) || defined(ENABLE_WIN32_DLL_SUPPORT)
854 * When sticking the RTS in a DLL, we delay populating the
855 * Charlike and Intlike tables until load-time, which is only
856 * when we've got the real addresses to the C# and I# closures.
/* Placeholder info tables, patched at load time with the real C#/I#
 * info (DLL / interpreter builds only). */
859 static INFO_TBL_CONST StgInfoTable czh_static_info;
860 static INFO_TBL_CONST StgInfoTable izh_static_info;
861 #define Char_hash_static_info czh_static_info
862 #define Int_hash_static_info izh_static_info
/* Otherwise refer directly to the Prelude's C#/I# static info
 * tables. */
864 #define Char_hash_static_info PrelBase_Czh_static_info
865 #define Int_hash_static_info PrelBase_Izh_static_info
/* CHARLIKE_HDR(n)/INTLIKE_HDR(n): build one static C#/I# closure with
 * payload n, for the preallocated CHARLIKE/INTLIKE tables below. */
868 #define CHARLIKE_HDR(n) \
870 STATIC_HDR(Char_hash_static_info, /* C# */ \
875 #define INTLIKE_HDR(n) \
877 STATIC_HDR(Int_hash_static_info, /* I# */ \
882 /* put these in the *data* section, since the garbage collector relies
883 * on the fact that static closures live in the data section.
886 /* end the name with _closure, to convince the mangler this is a closure */
888 StgIntCharlikeClosure stg_CHARLIKE_closure[] = {
1147 StgIntCharlikeClosure stg_INTLIKE_closure[] = {
1148 INTLIKE_HDR(-16), /* MIN_INTLIKE == -16 */
1180 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */