1 /* -----------------------------------------------------------------------------
2 * $Id: StgMiscClosures.hc,v 1.83 2003/01/08 12:37:45 simonmar Exp $
4 * (c) The GHC Team, 1998-2002
6 * Entry code for various built-in closure types.
8 * ---------------------------------------------------------------------------*/
14 #include "StgMiscClosures.h"
16 #include "StoragePriv.h"
17 #include "Profiling.h"
21 #if defined(GRAN) || defined(PAR)
22 # include "GranSimRts.h" /* for DumpRawGranEvent */
23 # include "StgRun.h" /* for StgReturn and register saving */
30 /* ToDo: make the printing of panics more win32-friendly, i.e.,
31 * pop up some lovely message boxes (as well).
/* Emit an error message to stderr via an out-of-line C call. */
33 #define DUMP_ERRMSG(msg) STGCALL2(fprintf,stderr,msg)
36 Template for the entry code of non-enterable closures.
/* Expands to an entry point stg_<type>_entry whose only action is to
 * call barf() with a diagnostic: entering such a closure is always a
 * bug in the RTS or generated code. */
39 #define NON_ENTERABLE_ENTRY_CODE(type) \
40 IF_(stg_##type##_entry) \
43 STGCALL1(barf, #type " object entered!"); \
48 /* -----------------------------------------------------------------------------
49 Support for the bytecode interpreter.
50 -------------------------------------------------------------------------- */
52 /* 9 bits of return code for constructors created by the interpreter. */
/* Direct (non-vectored) return for a constructor built by the
 * interpreter: set the constructor tag from the closure's info table,
 * then jump to the return address on top of the stack. */
53 FN_(stg_interp_constr_entry)
55 /* R1 points at the constructor */
57 /* STGCALL2(fprintf,stderr,"stg_interp_constr_entry (direct return)!\n"); */
58 /* Pointless, since SET_TAG doesn't do anything */
59 SET_TAG( GET_TAG(GET_INFO(R1.cl)));
60 JMP_(ENTRY_CODE((P_)(*Sp)));
/* Vectored returns for constructors with tags 1..8: jump through the
 * corresponding slot of the return vector addressed by the info
 * pointer on top of the stack. */
64 FN_(stg_interp_constr1_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),0)); FE_ }
65 FN_(stg_interp_constr2_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),1)); FE_ }
66 FN_(stg_interp_constr3_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),2)); FE_ }
67 FN_(stg_interp_constr4_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),3)); FE_ }
68 FN_(stg_interp_constr5_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),4)); FE_ }
69 FN_(stg_interp_constr6_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),5)); FE_ }
70 FN_(stg_interp_constr7_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),6)); FE_ }
71 FN_(stg_interp_constr8_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),7)); FE_ }
73 /* Some info tables to be used when compiled code returns a value to
74 the interpreter, i.e. the interpreter pushes one of these onto the
75 stack before entering a value. What the code does is to
76 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
77 the interpreter's convention (returned value is on top of stack),
78 and then cause the scheduler to enter the interpreter.
80 On entry, the stack (growing down) looks like this:
82 ptr to BCO holding return continuation
83 ptr to one of these info tables.
85 The info table code, both direct and vectored, must:
86 * push R1/F1/D1 on the stack, and its tag if necessary
87 * push the BCO (so it's now on the stack twice)
88 * Yield, ie, go to the scheduler.
90 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
91 directly to the bytecode interpreter. That pops the top element
92 (the BCO, containing the return continuation), and interprets it.
93 Net result: return continuation gets interpreted, with the
97 ptr to the info table just jumped thru
100 which is just what we want -- the "standard" return layout for the
103 Don't ask me how unboxed tuple returns are supposed to work. We
104 haven't got a good story about that yet.
107 // When the returned value is a pointer in R1...
108 #define STG_CtoI_RET_R1p_Template(label) \
114 Sp[0] = (W_)&stg_enter_info; \
115 JMP_(stg_yield_to_interpreter); \
119 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_ret);
120 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_0_ret);
121 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_1_ret);
122 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_2_ret);
123 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_3_ret);
124 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_4_ret);
125 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_5_ret);
126 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_6_ret);
127 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_7_ret);
129 VEC_POLY_INFO_TABLE( stg_ctoi_ret_R1p, 0/* special layout */,
130 0/*srt*/, 0/*srt_off*/, 0/*srt_len*/,
133 // When the returned value is a pointer, but unlifted, in R1 ...
/* Return stub for an unlifted pointer in R1: push the matching
 * stg_gc_unpt_r1 info pointer and yield to the interpreter. */
134 INFO_TABLE_RET( stg_ctoi_ret_R1unpt_info, stg_ctoi_ret_R1unpt_entry,
135 0/* special layout */,
136 0/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, IF_, 0, 0);
137 IF_(stg_ctoi_ret_R1unpt_entry)
142 Sp[0] = (W_)&stg_gc_unpt_r1_info;
143 JMP_(stg_yield_to_interpreter);
/* Return stub for a non-pointer in R1: same shape, but tagged with
 * stg_gc_unbx_r1_info so the value is treated as unboxed. */
148 INFO_TABLE_RET( stg_ctoi_ret_R1n_info, stg_ctoi_ret_R1n_entry,
149 0/* special layout */,
150 0/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, IF_, 0, 0);
151 IF_(stg_ctoi_ret_R1n_entry)
156 Sp[0] = (W_)&stg_gc_unbx_r1_info;
157 JMP_(stg_yield_to_interpreter);
162 // When the returned value is in F1 ...
/* Return stub for a float in F1: store the float on the stack (via
 * ASSIGN_FLT) under a stg_gc_f1_info frame, then yield. */
163 INFO_TABLE_RET( stg_ctoi_ret_F1_info, stg_ctoi_ret_F1_entry,
164 0/* special layout */,
165 0/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, IF_, 0, 0);
166 IF_(stg_ctoi_ret_F1_entry)
170 ASSIGN_FLT(Sp+1, F1);
171 Sp[0] = (W_)&stg_gc_f1_info;
172 JMP_(stg_yield_to_interpreter);
/* Return stub for a double in D1: make room for the double plus the
 * frame header, store it via ASSIGN_DBL, then yield. */
177 INFO_TABLE_RET( stg_ctoi_ret_D1_info, stg_ctoi_ret_D1_entry,
178 0/* special layout */,
179 0/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, IF_, 0, 0);
180 IF_(stg_ctoi_ret_D1_entry)
183 Sp -= 1 + sizeofW(StgDouble);
184 ASSIGN_DBL(Sp+1, D1);
185 Sp[0] = (W_)&stg_gc_d1_info;
186 JMP_(stg_yield_to_interpreter);
190 // When the returned value is in L1 ...
/* Return stub for a 64-bit value in L1: make room for an StgInt64
 * plus the frame header, store it via ASSIGN_Word64, then yield. */
191 INFO_TABLE_RET( stg_ctoi_ret_L1_info, stg_ctoi_ret_L1_entry,
192 0/* special layout */,
193 0/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, IF_, 0, 0);
194 IF_(stg_ctoi_ret_L1_entry)
197 Sp -= 1 + sizeofW(StgInt64);
198 ASSIGN_Word64(Sp+1, L1);
199 Sp[0] = (W_)&stg_gc_l1_info;
200 JMP_(stg_yield_to_interpreter);
204 // When the returned value is a VoidRep ...
/* Nothing to save: just push the void frame and yield. */
205 INFO_TABLE_RET( stg_ctoi_ret_V_info, stg_ctoi_ret_V_entry,
206 0/* special layout */,
207 0/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, IF_, 0, 0);
208 IF_(stg_ctoi_ret_V_entry)
212 Sp[0] = (W_)&stg_gc_void_info;
213 JMP_(stg_yield_to_interpreter);
217 // Dummy info table pushed on the top of the stack when the interpreter
218 // should apply the BCO on the stack to its arguments, also on the stack.
219 INFO_TABLE_RET( stg_apply_interp_info, stg_apply_interp_entry,
220 0/* special layout */,
221 0/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, IF_, 0, 0);
/* Defensive entry point: this frame is meant to be consumed by the
 * scheduler/interpreter, not entered, so if control does arrive here
 * we simply hand off to the interpreter. */
222 IF_(stg_apply_interp_entry)
225 // Just in case we end up in here... (we shouldn't)
226 JMP_(stg_yield_to_interpreter);
230 /* -----------------------------------------------------------------------------
232 -------------------------------------------------------------------------- */
234 INFO_TABLE_FUN_GEN(stg_BCO_info,stg_BCO_entry,4,0,
236 ARG_BCO, 0/*dummy arity*/, 0/*dummy bitmap*/, NULL/*slow_apply*/,
237 BCO,,EF_,"BCO","BCO");
240 // entering a BCO means "apply it", same as a function
/* Push the apply-interp frame and let the interpreter do the
 * application. */
243 Sp[0] = (W_)&stg_apply_interp_info;
244 JMP_(stg_yield_to_interpreter);
248 /* -----------------------------------------------------------------------------
249 Entry code for an indirection.
250 -------------------------------------------------------------------------- */
/* Ordinary indirection: follow the indirectee and enter it. */
252 INFO_TABLE(stg_IND_info,stg_IND_entry,1,0,IND,,IF_,"IND","IND");
256 TICK_ENT_DYN_IND(Node); /* tick */
257 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
259 JMP_(GET_ENTRY(R1.cl));
/* Static indirection (left behind by CAF update): same behaviour,
 * different closure type and ticky counter. */
263 INFO_TABLE(stg_IND_STATIC_info,stg_IND_STATIC_entry,1,0,IND_STATIC,,IF_,"IND_STATIC","IND_STATIC");
264 IF_(stg_IND_STATIC_entry)
267 TICK_ENT_STATIC_IND(Node); /* tick */
268 R1.p = (P_) ((StgIndStatic*)R1.p)->indirectee;
270 JMP_(GET_ENTRY(R1.cl));
/* Permanent indirection: an indirection kept alive for profiling /
 * ticky-ticky purposes.  Follows the indirectee like stg_IND, but with
 * extra bookkeeping (LDV, cost centres, perm-ind ticks). */
274 INFO_TABLE(stg_IND_PERM_info,stg_IND_PERM_entry,1,1,IND_PERM,,IF_,"IND_PERM","IND_PERM");
275 IF_(stg_IND_PERM_entry)
278 /* Don't add INDs to granularity cost */
279 /* Dont: TICK_ENT_STATIC_IND(Node); for ticky-ticky; this ind is here only to help profiling */
281 #if defined(TICKY_TICKY) && !defined(PROFILING)
282 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
283 TICK_ENT_PERM_IND(R1.p); /* tick */
286 LDV_ENTER((StgInd *)R1.p);
288 /* Enter PAP cost centre -- lexical scoping only */
289 ENTER_CCS_PAP_CL(R1.cl);
291 /* For ticky-ticky, change the perm_ind to a normal ind on first
292 * entry, so the number of ent_perm_inds is the number of *thunks*
293 * entered again, not the number of subsequent entries.
295 * Since this screws up cost centres, we die if profiling and
296 * ticky_ticky are on at the same time. KSW 1999-01.
301 # error Profiling and ticky-ticky do not mix at present!
302 # endif /* PROFILING */
303 SET_INFO((StgInd*)R1.p,&stg_IND_info);
304 #endif /* TICKY_TICKY */
306 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
308 /* Dont: TICK_ENT_VIA_NODE(); for ticky-ticky; as above */
310 #if defined(TICKY_TICKY) && !defined(PROFILING)
/* Finally enter whatever the indirection pointed at. */
314 JMP_(GET_ENTRY(R1.cl));
/* Old-generation indirection (carries a mut_link word): follow the
 * indirectee and enter it. */
318 INFO_TABLE(stg_IND_OLDGEN_info,stg_IND_OLDGEN_entry,1,1,IND_OLDGEN,,IF_,"IND_OLDGEN","IND_OLDGEN");
319 IF_(stg_IND_OLDGEN_entry)
322 TICK_ENT_STATIC_IND(Node); /* tick */
323 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
325 JMP_(GET_ENTRY(R1.cl));
/* Old-generation permanent indirection: as stg_IND_PERM, with the
 * ticky case rewriting the closure to a plain IND_OLDGEN on first
 * entry. */
329 INFO_TABLE(stg_IND_OLDGEN_PERM_info,stg_IND_OLDGEN_PERM_entry,1,1,IND_OLDGEN_PERM,,IF_,"IND_OLDGEN_PERM","IND_OLDGEN_PERM");
330 IF_(stg_IND_OLDGEN_PERM_entry)
333 /* Dont: TICK_ENT_STATIC_IND(Node); for ticky-ticky; this ind is here only to help profiling */
335 #if defined(TICKY_TICKY) && !defined(PROFILING)
336 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
337 TICK_ENT_PERM_IND(R1.p); /* tick */
340 LDV_ENTER((StgInd *)R1.p);
342 /* Enter PAP cost centre -- lexical scoping only */
343 ENTER_CCS_PAP_CL(R1.cl);
345 /* see comment in IND_PERM */
348 # error Profiling and ticky-ticky do not mix at present!
349 # endif /* PROFILING */
350 SET_INFO((StgInd*)R1.p,&stg_IND_OLDGEN_info);
351 #endif /* TICKY_TICKY */
353 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
355 JMP_(GET_ENTRY(R1.cl));
359 /* -----------------------------------------------------------------------------
360 Entry code for a black hole.
362 Entering a black hole normally causes a cyclic data dependency, but
363 in the concurrent world, black holes are synchronization points,
364 and they are turned into blocking queues when there are threads
365 waiting for the evaluation of the closure to finish.
366 -------------------------------------------------------------------------- */
368 /* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
369 * overwritten with an indirection/evacuee/catch. Thus we claim it
370 * has 1 non-pointer word of payload (in addition to the pointer word
371 * for the blocking queue in a BQ), which should be big enough for an
372 * old-generation indirection.
/* Entering an empty BLACKHOLE: block the current TSO on it and turn
 * the closure into a BLACKHOLE_BQ holding the blocking queue. */
375 INFO_TABLE(stg_BLACKHOLE_info, stg_BLACKHOLE_entry,0,2,BLACKHOLE,,IF_,"BLACKHOLE","BLACKHOLE");
376 IF_(stg_BLACKHOLE_entry)
380 /* Before overwriting TSO_LINK */
381 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* (SMP path) If the black hole lives outside our own allocation area,
 * try to claim it with CMPXCHG; on failure retreat to the GC entry
 * stub so the thread can retry. */
386 bdescr *bd = Bdescr(R1.p);
387 if (bd->u.back != (bdescr *)BaseReg) {
388 if (bd->gen_no >= 1 || bd->step->no >= 1) {
389 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
391 EXTFUN_RTS(stg_gc_enter_1_hponly);
392 JMP_(stg_gc_enter_1_hponly);
399 // Actually this is not necessary because R1.p is about to be destroyed.
400 LDV_ENTER((StgClosure *)R1.p);
402 /* Put ourselves on the blocking queue for this black hole */
403 #if defined(GRAN) || defined(PAR)
404 // in fact, only difference is the type of the end-of-queue marker!
405 CurrentTSO->link = END_BQ_QUEUE;
406 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
408 CurrentTSO->link = END_TSO_QUEUE;
409 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
411 // jot down why and on what closure we are blocked
412 CurrentTSO->why_blocked = BlockedOnBlackHole;
413 CurrentTSO->block_info.closure = R1.cl;
415 /* Change the BLACKHOLE into a BLACKHOLE_BQ */
418 // The size remains the same, so we call LDV_recordDead() - no need to fill slop.
419 LDV_recordDead((StgClosure *)R1.p, BLACKHOLE_sizeW());
422 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
424 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
426 LDV_recordCreate((StgClosure *)R1.p);
429 // closure is mutable since something has just been added to its BQ
430 recordMutable((StgMutClosure *)R1.cl);
432 // PAR: dumping of event now done in blockThread -- HWL
434 // stg_gen_block is too heavyweight, use a specialised one
/* Entering a BLACKHOLE that already has a blocking queue: prepend the
 * current TSO to the queue and block.  Note the info-table name
 * strings deliberately say "BLACKHOLE" so the two flavours are
 * indistinguishable to profiling output. */
439 INFO_TABLE(stg_BLACKHOLE_BQ_info, stg_BLACKHOLE_BQ_entry,1,1,BLACKHOLE_BQ,,IF_,"BLACKHOLE","BLACKHOLE");
440 IF_(stg_BLACKHOLE_BQ_entry)
444 /* Before overwriting TSO_LINK */
445 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* (SMP path) Same CMPXCHG-based claim/retry dance as stg_BLACKHOLE. */
450 bdescr *bd = Bdescr(R1.p);
451 if (bd->u.back != (bdescr *)BaseReg) {
452 if (bd->gen_no >= 1 || bd->step->no >= 1) {
453 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
455 EXTFUN_RTS(stg_gc_enter_1_hponly);
456 JMP_(stg_gc_enter_1_hponly);
463 LDV_ENTER((StgClosure *)R1.p);
465 /* Put ourselves on the blocking queue for this black hole */
466 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
467 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
468 /* jot down why and on what closure we are blocked */
469 CurrentTSO->why_blocked = BlockedOnBlackHole;
470 CurrentTSO->block_info.closure = R1.cl;
472 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
475 /* PAR: dumping of event now done in blockThread -- HWL */
477 /* stg_gen_block is too heavyweight, use a specialised one */
483 Revertible black holes are needed in the parallel world, to handle
484 negative acknowledgements of messages containing updatable closures.
485 The idea is that when the original message is transmitted, the closure
486 is turned into a revertible black hole...an object which acts like a
487 black hole when local threads try to enter it, but which can be reverted
488 back to the original closure if necessary.
490 It's actually a lot like a blocking queue (BQ) entry, because revertible
491 black holes are initially set up with an empty blocking queue.
494 #if defined(PAR) || defined(GRAN)
/* Revertible black hole: blocks entering threads exactly like a
 * BLACKHOLE_BQ, but can be reverted to the original closure on a
 * negative acknowledgement (see comment above). */
496 INFO_TABLE(stg_RBH_info, stg_RBH_entry,1,1,RBH,,IF_,"RBH","RBH");
501 /* mainly statistics gathering for GranSim simulation */
502 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
505 /* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
506 /* Put ourselves on the blocking queue for this black hole */
507 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
508 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
509 /* jot down why and on what closure we are blocked */
510 CurrentTSO->why_blocked = BlockedOnBlackHole;
511 CurrentTSO->block_info.closure = R1.cl;
513 /* PAR: dumping of event now done in blockThread -- HWL */
515 /* stg_gen_block is too heavyweight, use a specialised one */
/* RBH_Save_N closures hold the N saved payload words of a reverted
 * closure; they are plain data and must never be entered. */
520 INFO_TABLE(stg_RBH_Save_0_info, stg_RBH_Save_0_entry,0,2,CONSTR,,IF_,"RBH_Save_0","RBH_Save_0");
521 NON_ENTERABLE_ENTRY_CODE(RBH_Save_0);
523 INFO_TABLE(stg_RBH_Save_1_info, stg_RBH_Save_1_entry,1,1,CONSTR,,IF_,"RBH_Save_1","RBH_Save_1");
524 NON_ENTERABLE_ENTRY_CODE(RBH_Save_1);
526 INFO_TABLE(stg_RBH_Save_2_info, stg_RBH_Save_2_entry,2,0,CONSTR,,IF_,"RBH_Save_2","RBH_Save_2");
527 NON_ENTERABLE_ENTRY_CODE(RBH_Save_2);
528 #endif /* defined(PAR) || defined(GRAN) */
530 /* identical to BLACKHOLEs except for the infotag */
531 INFO_TABLE(stg_CAF_BLACKHOLE_info, stg_CAF_BLACKHOLE_entry,0,2,CAF_BLACKHOLE,,IF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
/* Entering a CAF black hole: behaves like stg_BLACKHOLE_entry, ending
 * by rewriting the closure to a BLACKHOLE_BQ. */
532 IF_(stg_CAF_BLACKHOLE_entry)
536 /* mainly statistics gathering for GranSim simulation */
537 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* (SMP path) CMPXCHG claim/retry as in stg_BLACKHOLE, but against the
 * CAF_BLACKHOLE info pointer. */
542 bdescr *bd = Bdescr(R1.p);
543 if (bd->u.back != (bdescr *)BaseReg) {
544 if (bd->gen_no >= 1 || bd->step->no >= 1) {
545 CMPXCHG(R1.cl->header.info, &stg_CAF_BLACKHOLE_info, &stg_WHITEHOLE_info);
547 EXTFUN_RTS(stg_gc_enter_1_hponly);
548 JMP_(stg_gc_enter_1_hponly);
555 LDV_ENTER((StgClosure *)R1.p);
557 // Put ourselves on the blocking queue for this black hole
558 #if defined(GRAN) || defined(PAR)
559 // in fact, only difference is the type of the end-of-queue marker!
560 CurrentTSO->link = END_BQ_QUEUE;
561 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
563 CurrentTSO->link = END_TSO_QUEUE;
564 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
566 // jot down why and on what closure we are blocked
567 CurrentTSO->why_blocked = BlockedOnBlackHole;
568 CurrentTSO->block_info.closure = R1.cl;
570 // Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC
571 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
573 // closure is mutable since something has just been added to its BQ
574 recordMutable((StgMutClosure *)R1.cl);
576 // PAR: dumping of event now done in blockThread -- HWL
578 // stg_gen_block is too heavyweight, use a specialised one
/* Entering an SE_BLACKHOLE is fatal: report the closure address on
 * stderr and shut the Haskell runtime down with a failure exit code. */
584 INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,IF_,"SE_BLACKHOLE","SE_BLACKHOLE");
585 IF_(stg_SE_BLACKHOLE_entry)
588 STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1.p);
589 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* Entering an SE_CAF_BLACKHOLE is fatal, exactly as for SE_BLACKHOLE:
 * report the closure address and shut the runtime down.
 *
 * Fixes: the info table referenced SE_CAF_BLACKHOLE_entry (missing
 * the stg_ prefix) although the entry point defined below is
 * stg_SE_CAF_BLACKHOLE_entry -- an undefined symbol.  The name
 * strings also said "CAF_BLACKHOLE"; made consistent with the
 * SE_CAF_BLACKHOLE closure type, matching the sibling tables. */
593 INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, stg_SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,IF_,"SE_CAF_BLACKHOLE","SE_CAF_BLACKHOLE");
594 IF_(stg_SE_CAF_BLACKHOLE_entry)
597 STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1.p);
598 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* WHITEHOLE: temporary marker installed via CMPXCHG (see the black
 * hole entries above).  Entering one simply re-enters the closure,
 * effectively retrying until the info pointer changes. */
604 INFO_TABLE(stg_WHITEHOLE_info, stg_WHITEHOLE_entry,0,2,CONSTR_NOCAF_STATIC,,IF_,"WHITEHOLE","WHITEHOLE");
605 IF_(stg_WHITEHOLE_entry)
608 JMP_(GET_ENTRY(R1.cl));
613 /* -----------------------------------------------------------------------------
614 Some static info tables for things that don't get entered, and
615 therefore don't need entry code (i.e. boxed but unpointed objects)
616 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
617 -------------------------------------------------------------------------- */
/* Thread State Objects are never entered. */
619 INFO_TABLE(stg_TSO_info, stg_TSO_entry, 0,0,TSO,,IF_,"TSO","TSO");
620 NON_ENTERABLE_ENTRY_CODE(TSO);
622 /* -----------------------------------------------------------------------------
623 Evacuees are left behind by the garbage collector. Any attempt to enter
625 -------------------------------------------------------------------------- */
627 INFO_TABLE(stg_EVACUATED_info,stg_EVACUATED_entry,1,0,EVACUATED,,IF_,"EVACUATED","EVACUATED");
628 NON_ENTERABLE_ENTRY_CODE(EVACUATED);
630 /* -----------------------------------------------------------------------------
633 Live weak pointers have a special closure type. Dead ones are just
634 nullary constructors (although they live on the heap - we overwrite
635 live weak pointers with dead ones).
636 -------------------------------------------------------------------------- */
638 INFO_TABLE(stg_WEAK_info,stg_WEAK_entry,0,4,WEAK,,IF_,"WEAK","WEAK");
639 NON_ENTERABLE_ENTRY_CODE(WEAK);
641 // It's important when turning an existing WEAK into a DEAD_WEAK
642 // (which is what finalizeWeak# does) that we don't lose the link
643 // field and break the linked list of weak pointers. Hence, we give
644 // DEAD_WEAK 4 non-pointer fields, the same as WEAK.
646 INFO_TABLE_CONSTR(stg_DEAD_WEAK_info,stg_DEAD_WEAK_entry,0,4,0,CONSTR,,IF_,"DEAD_WEAK","DEAD_WEAK");
647 NON_ENTERABLE_ENTRY_CODE(DEAD_WEAK);
649 /* -----------------------------------------------------------------------------
652 This is a static nullary constructor (like []) that we use to mark an empty
653 finalizer in a weak pointer object.
654 -------------------------------------------------------------------------- */
656 INFO_TABLE_CONSTR(stg_NO_FINALIZER_info,stg_NO_FINALIZER_entry,0,0,0,CONSTR_NOCAF_STATIC,,IF_,"NO_FINALIZER","NO_FINALIZER");
657 NON_ENTERABLE_ENTRY_CODE(NO_FINALIZER);
/* The single shared static NO_FINALIZER closure. */
659 SET_STATIC_HDR(stg_NO_FINALIZER_closure,stg_NO_FINALIZER_info,0/*CC*/,,extern const StgInfoTable)
662 /* -----------------------------------------------------------------------------
663 Foreign Objects are unlifted and therefore never entered.
664 -------------------------------------------------------------------------- */
666 INFO_TABLE(stg_FOREIGN_info,stg_FOREIGN_entry,0,1,FOREIGN,,IF_,"FOREIGN","FOREIGN");
667 NON_ENTERABLE_ENTRY_CODE(FOREIGN);
669 /* -----------------------------------------------------------------------------
670 Stable Names are unlifted too.
671 -------------------------------------------------------------------------- */
673 INFO_TABLE(stg_STABLE_NAME_info,stg_STABLE_NAME_entry,0,1,STABLE_NAME,,IF_,"STABLE_NAME","STABLE_NAME");
674 NON_ENTERABLE_ENTRY_CODE(STABLE_NAME);
676 /* -----------------------------------------------------------------------------
679 There are two kinds of these: full and empty. We need an info table
680 and entry code for each type.
681 -------------------------------------------------------------------------- */
/* Full and empty MVars share the MVAR closure type (4 pointer words);
 * only the info pointer distinguishes them.  Never entered. */
683 INFO_TABLE(stg_FULL_MVAR_info,stg_FULL_MVAR_entry,4,0,MVAR,,IF_,"MVAR","MVAR");
684 NON_ENTERABLE_ENTRY_CODE(FULL_MVAR);
686 INFO_TABLE(stg_EMPTY_MVAR_info,stg_EMPTY_MVAR_entry,4,0,MVAR,,IF_,"MVAR","MVAR");
687 NON_ENTERABLE_ENTRY_CODE(EMPTY_MVAR);
689 /* -----------------------------------------------------------------------------
692 This is a static nullary constructor (like []) that we use to mark the
693 end of a linked TSO queue.
694 -------------------------------------------------------------------------- */
/* Static end-of-queue marker for linked TSO queues. */
696 INFO_TABLE_CONSTR(stg_END_TSO_QUEUE_info,stg_END_TSO_QUEUE_entry,0,0,0,CONSTR_NOCAF_STATIC,,IF_,"END_TSO_QUEUE","END_TSO_QUEUE");
697 NON_ENTERABLE_ENTRY_CODE(END_TSO_QUEUE);
699 SET_STATIC_HDR(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE_info,0/*CC*/,,extern const StgInfoTable)
702 /* -----------------------------------------------------------------------------
705 Mutable lists (used by the garbage collector) consist of a chain of
706 StgMutClosures connected through their mut_link fields, ending in
707 an END_MUT_LIST closure.
708 -------------------------------------------------------------------------- */
710 INFO_TABLE_CONSTR(stg_END_MUT_LIST_info,stg_END_MUT_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,IF_,"END_MUT_LIST","END_MUT_LIST");
711 NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST);
713 SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,extern const StgInfoTable)
/* Cell used to link an arbitrary closure onto a mutable list. */
716 INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_CONS, , IF_, "MUT_CONS", "MUT_CONS");
717 NON_ENTERABLE_ENTRY_CODE(MUT_CONS);
719 /* -----------------------------------------------------------------------------
721 -------------------------------------------------------------------------- */
/* End marker and cons cell for the per-thread blocked-exceptions list;
 * same shape as the mutable-list markers above. */
723 INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST_info,stg_END_EXCEPTION_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,IF_,"END_EXCEPTION_LIST","END_EXCEPTION_LIST");
724 NON_ENTERABLE_ENTRY_CODE(END_EXCEPTION_LIST);
726 SET_STATIC_HDR(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST_info,0/*CC*/,,extern const StgInfoTable)
729 INFO_TABLE(stg_EXCEPTION_CONS_info, stg_EXCEPTION_CONS_entry, 1, 1, CONSTR, , IF_, "EXCEPTION_CONS", "EXCEPTION_CONS");
730 NON_ENTERABLE_ENTRY_CODE(EXCEPTION_CONS);
732 /* -----------------------------------------------------------------------------
735 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
736 pointers (StgArrPtrs). They all have a similar layout:
738 ___________________________
739 | Info | No. of | data....
741 ---------------------------
743 These are *unpointed* objects: i.e. they cannot be entered.
745 -------------------------------------------------------------------------- */
/* Expands to the info table for an array closure type; the matching
 * NON_ENTERABLE_ENTRY_CODE supplies the (never-reached) entry code. */
747 #define ArrayInfo(type) \
748 INFO_TABLE(stg_##type##_info, stg_##type##_entry, 0, 0, type, , IF_,"" # type "","" # type "");
750 ArrayInfo(ARR_WORDS);
751 NON_ENTERABLE_ENTRY_CODE(ARR_WORDS);
752 ArrayInfo(MUT_ARR_PTRS);
753 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS);
754 ArrayInfo(MUT_ARR_PTRS_FROZEN);
755 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS_FROZEN);
759 /* -----------------------------------------------------------------------------
761 -------------------------------------------------------------------------- */
/* Mutable variables (MutVar#): unpointed, never entered. */
763 INFO_TABLE(stg_MUT_VAR_info, stg_MUT_VAR_entry, 1, 1, MUT_VAR, , IF_, "MUT_VAR", "MUT_VAR");
764 NON_ENTERABLE_ENTRY_CODE(MUT_VAR);
766 /* -----------------------------------------------------------------------------
769 Entering this closure will just return to the address on the top of the
770 stack. Useful for getting a thread in a canonical form where we can
771 just enter the top stack word to start the thread. (see deleteThread)
772 * -------------------------------------------------------------------------- */
774 INFO_TABLE( stg_dummy_ret_info, stg_dummy_ret_entry,
775 0, 0, CONSTR_NOCAF_STATIC, , EF_, "DUMMY_RET", "DUMMY_RET");
/* Entering the dummy return closure just returns to the address on
 * top of the stack (see the comment above re: deleteThread). */
777 STGFUN(stg_dummy_ret_entry)
780 JMP_(ENTRY_CODE(Sp[0]));
783 SET_STATIC_HDR(stg_dummy_ret_closure,stg_dummy_ret_info,CCS_DONT_CARE,,extern const StgInfoTable)
786 /* -----------------------------------------------------------------------------
787 CHARLIKE and INTLIKE closures.
789 These are static representations of Chars and small Ints, so that
790 we can remove dynamic Chars and Ints during garbage collection and
791 replace them with references to the static objects.
792 -------------------------------------------------------------------------- */
794 #if defined(INTERPRETER) || defined(ENABLE_WIN32_DLL_SUPPORT)
796 * When sticking the RTS in a DLL, we delay populating the
797 * Charlike and Intlike tables until load-time, which is only
798 * when we've got the real addresses to the C# and I# closures.
/* Placeholder info tables, patched at load time (see comment above). */
801 static INFO_TBL_CONST StgInfoTable czh_static_info;
802 static INFO_TBL_CONST StgInfoTable izh_static_info;
803 #define Char_hash_static_info czh_static_info
804 #define Int_hash_static_info izh_static_info
/* Statically-linked case: use the real C#/I# info tables directly. */
806 #define Char_hash_static_info GHCziBase_Czh_static_info
807 #define Int_hash_static_info GHCziBase_Izh_static_info
/* Build one static C# / I# closure header for the tables below. */
810 #define CHARLIKE_HDR(n) \
812 STATIC_HDR(Char_hash_static_info, /* C# */ \
817 #define INTLIKE_HDR(n) \
819 STATIC_HDR(Int_hash_static_info, /* I# */ \
824 /* put these in the *data* section, since the garbage collector relies
825 * on the fact that static closures live in the data section.
828 /* end the name with _closure, to convince the mangler this is a closure */
/* Table of preallocated static C# closures (element bodies elided here). */
830 StgIntCharlikeClosure stg_CHARLIKE_closure[] = {
/* Table of preallocated static I# closures covering
 * MIN_INTLIKE..MAX_INTLIKE, i.e. -16..16 per the comments below. */
1089 StgIntCharlikeClosure stg_INTLIKE_closure[] = {
1090 INTLIKE_HDR(-16), /* MIN_INTLIKE == -16 */
1122 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */