1 /* ----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
5 * Entry code for various built-in closure types.
7 * This file is written in a subset of C--, extended with various
8 * features specific to GHC. It is compiled by GHC directly. For the
9 * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
11 * --------------------------------------------------------------------------*/
15 /* ----------------------------------------------------------------------------
16 Support for the bytecode interpreter.
17 ------------------------------------------------------------------------- */
19 /* 9 bits of return code for constructors created by the interpreter. */
/* Direct (non-vectored) return for a constructor built by the bytecode
   interpreter: simply return to the frame on top of the stack.
   (Restored the entry braces, which were dropped from this copy.) */
stg_interp_constr_entry
{
  /* R1 points at the constructor */
  jump %ENTRY_CODE(Sp(0));
}
/* Vectored returns for interpreter-built constructors: entry i jumps
   through return-vector slot i-1 of the frame on top of the stack.
   As with stg_interp_constr_entry above, R1 points at the constructor. */
stg_interp_constr1_entry { jump %RET_VEC(Sp(0),0); }
stg_interp_constr2_entry { jump %RET_VEC(Sp(0),1); }
stg_interp_constr3_entry { jump %RET_VEC(Sp(0),2); }
stg_interp_constr4_entry { jump %RET_VEC(Sp(0),3); }
stg_interp_constr5_entry { jump %RET_VEC(Sp(0),4); }
stg_interp_constr6_entry { jump %RET_VEC(Sp(0),5); }
stg_interp_constr7_entry { jump %RET_VEC(Sp(0),6); }
stg_interp_constr8_entry { jump %RET_VEC(Sp(0),7); }
35 /* Some info tables to be used when compiled code returns a value to
36 the interpreter, i.e. the interpreter pushes one of these onto the
37 stack before entering a value. What the code does is to
38 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
39 the interpreter's convention (returned value is on top of stack),
40 and then cause the scheduler to enter the interpreter.
42 On entry, the stack (growing down) looks like this:
44 ptr to BCO holding return continuation
45 ptr to one of these info tables.
47 The info table code, both direct and vectored, must:
48 * push R1/F1/D1 on the stack, and its tag if necessary
49 * push the BCO (so it's now on the stack twice)
50 * Yield, ie, go to the scheduler.
52 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
53 directly to the bytecode interpreter. That pops the top element
54 (the BCO, containing the return continuation), and interprets it.
   Net result: return continuation gets interpreted, with the
   following stack:

      ptr to BCO holding return continuation
      ptr to the info table just jumped thru
      saved value (R1/F1/D1 etc.)

   which is just what we want -- the "standard" return layout for the
   interpreter.
   Don't ask me how unboxed tuple returns are supposed to work.  We
   haven't got a good story about that yet.
   ------------------------------------------------------------------------- */
/* Return a lifted pointer (in R1) from compiled code to the interpreter:
   save the value under an stg_enter_info frame and yield, so the scheduler
   re-enters the interpreter (see the big comment above).  All eight
   vectored slots point back at this same code.
   NOTE(review): the opening brace, the Sp adjustment and the push of R1
   (cf. stg_ctoi_F1 below), and the closing brace appear to have been
   dropped from this copy of the file -- restore before compiling. */
INFO_TABLE_RET( stg_ctoi_R1p,
                0/*size*/, 0/*bitmap*/, /* special layout! */
                RET_LBL(stg_ctoi_R1p),
                RET_LBL(stg_ctoi_R1p),
                RET_LBL(stg_ctoi_R1p),
                RET_LBL(stg_ctoi_R1p),
                RET_LBL(stg_ctoi_R1p),
                RET_LBL(stg_ctoi_R1p),
                RET_LBL(stg_ctoi_R1p),
                RET_LBL(stg_ctoi_R1p))
    Sp(0) = stg_enter_info;
    jump stg_yield_to_interpreter;
/* The vector table for stg_ctoi_R1p above has exactly 8 entries; if the
   RTS-wide maximum number of vectored returns changes, it must be updated
   to match.  (Restored the dropped #endif terminating this guard.) */
#if MAX_VECTORED_RTN != 8
#error MAX_VECTORED_RTN has changed: please modify stg_ctoi_R1p too.
#endif
/*
 * When the returned value is a pointer, but unlifted, in R1 ...
 *
 * Push a frame whose info table (stg_gc_unpt_r1_info) records that the
 * saved word is an unlifted pointer, then yield to the interpreter.
 * NOTE(review): entry braces and the Sp-adjust/R1-push statements appear
 * to have been dropped from this copy (compare stg_ctoi_F1 below).
 */
INFO_TABLE_RET( stg_ctoi_R1unpt,
                0/*size*/, 0/*bitmap*/, /* special layout! */
    Sp(0) = stg_gc_unpt_r1_info;
    jump stg_yield_to_interpreter;
/*
 * When the returned value is a non-pointer in R1 ...
 *
 * Same shape as stg_ctoi_R1unpt, but the frame (stg_gc_unbx_r1_info)
 * marks the saved word as a non-pointer.
 * NOTE(review): entry braces and the Sp-adjust/R1-push statements appear
 * to have been dropped from this copy (compare stg_ctoi_F1 below).
 */
INFO_TABLE_RET( stg_ctoi_R1n,
                0/*size*/, 0/*bitmap*/, /* special layout! */
    Sp(0) = stg_gc_unbx_r1_info;
    jump stg_yield_to_interpreter;
/*
 * When the returned value is in F1
 *
 * Store F1 into the stack slot above the frame word, push
 * stg_gc_f1_info, and yield to the interpreter.
 * NOTE(review): the opening brace and the Sp adjustment that makes room
 * for the float appear to have been dropped from this copy, along with
 * the closing brace.
 */
INFO_TABLE_RET( stg_ctoi_F1,
                0/*size*/, 0/*bitmap*/, /* special layout! */
    F_[Sp + WDS(1)] = F1;
    Sp(0) = stg_gc_f1_info;
    jump stg_yield_to_interpreter;
/*
 * When the returned value is in D1
 *
 * As stg_ctoi_F1, but a double needs SIZEOF_DOUBLE bytes of stack.
 * NOTE(review): entry braces appear dropped from this copy.
 */
INFO_TABLE_RET( stg_ctoi_D1,
                0/*size*/, 0/*bitmap*/, /* special layout! */
    /* NOTE(review): as written this is an expression with no effect --
       it looks like a mangled stack adjustment (something like
       "Sp = Sp - WDS(1) - SIZEOF_DOUBLE").  Confirm against the
       original source before compiling. */
    Sp_adj(-1) - SIZEOF_DOUBLE;
    D_[Sp + WDS(1)] = D1;
    Sp(0) = stg_gc_d1_info;
    jump stg_yield_to_interpreter;
/*
 * When the returned value is in L1
 *
 * As stg_ctoi_F1/D1, for a long (64-bit) value.
 * NOTE(review): the opening brace and the Sp adjustment making room for
 * the long appear to have been dropped from this copy, along with the
 * closing brace.
 */
INFO_TABLE_RET( stg_ctoi_L1,
                0/*size*/, 0/*bitmap*/, /* special layout! */
    L_[Sp + WDS(1)] = L1;
    Sp(0) = stg_gc_l1_info;
    jump stg_yield_to_interpreter;
/*
 * When the returned value is a void
 *
 * Nothing to save; just push the void-return frame and yield.
 * NOTE(review): entry braces (and probably a one-word Sp adjustment)
 * appear to have been dropped from this copy of the file.
 */
INFO_TABLE_RET( stg_ctoi_V,
                0/*size*/, 0/*bitmap*/, /* special layout! */
    Sp(0) = stg_gc_void_info;
    jump stg_yield_to_interpreter;
/*
 * Dummy info table pushed on the top of the stack when the interpreter
 * should apply the BCO on the stack to its arguments, also on the
 * stack.
 *
 * NOTE(review): entry braces appear to have been dropped from this copy.
 */
INFO_TABLE_RET( stg_apply_interp,
                0/*size*/, 0/*bitmap*/, /* special layout! */
    /* Just in case we end up in here... (we shouldn't) */
    jump stg_yield_to_interpreter;
181 /* ----------------------------------------------------------------------------
183 ------------------------------------------------------------------------- */
/* Entering a BCO: set up an stg_apply_interp frame and yield; the actual
   application is done by the bytecode interpreter.
   NOTE(review): the opening brace and (presumably) the Sp adjustment and
   push of R1 appear to have been dropped from this copy, along with the
   closing brace. */
INFO_TABLE_FUN( stg_BCO, 4, 0, BCO, "BCO", "BCO", ARG_BCO )
    /* entering a BCO means "apply it", same as a function */
    Sp(0) = stg_apply_interp_info;
    jump stg_yield_to_interpreter;
194 /* ----------------------------------------------------------------------------
195 Info tables for indirections.
197 SPECIALISED INDIRECTIONS: we have a specialised indirection for each
198 kind of return (direct, vectored 0-7), so that we can avoid entering
199 the object when we know what kind of return it will do. The update
200 code (Updates.hc) updates objects with the appropriate kind of
201 indirection. We only do this for young-gen indirections.
202 ------------------------------------------------------------------------- */
/* Plain dynamic indirection: replace R1 with the indirectee.
   NOTE(review): the entry braces and the final statements (enter the
   indirectee) appear to have been dropped from this copy of the file. */
INFO_TABLE(stg_IND,1,0,IND,"IND","IND")
    TICK_ENT_DYN_IND(); /* tick */
    R1 = StgInd_indirectee(R1);
/* Template for the specialised indirections described in the section
   comment above: `label` is the info-table name, `ret` the return
   expression used once the indirectee is known.
   NOTE(review): the macro's continuation lines containing the body
   braces and the final `jump ret;` appear to have been dropped from
   this copy of the file -- as written the #define is unterminated. */
#define IND_SPEC(label,ret) \
INFO_TABLE(label,1,0,IND,"IND","IND") \
    TICK_ENT_DYN_IND(); /* tick */ \
    R1 = StgInd_indirectee(R1); \
    TICK_ENT_VIA_NODE(); \
/* One specialised indirection per return convention: a direct-return
   variant plus one for each of the 8 vectored-return slots. */
IND_SPEC(stg_IND_direct, %ENTRY_CODE(Sp(0)))
IND_SPEC(stg_IND_0, %RET_VEC(Sp(0),0))
IND_SPEC(stg_IND_1, %RET_VEC(Sp(0),1))
IND_SPEC(stg_IND_2, %RET_VEC(Sp(0),2))
IND_SPEC(stg_IND_3, %RET_VEC(Sp(0),3))
IND_SPEC(stg_IND_4, %RET_VEC(Sp(0),4))
IND_SPEC(stg_IND_5, %RET_VEC(Sp(0),5))
IND_SPEC(stg_IND_6, %RET_VEC(Sp(0),6))
IND_SPEC(stg_IND_7, %RET_VEC(Sp(0),7))
/* Static (top-level) indirection: like stg_IND, but ticked as a static
   indirection entry.
   NOTE(review): entry braces and the final enter-the-indirectee
   statements appear to have been dropped from this copy of the file. */
INFO_TABLE(stg_IND_STATIC,1,0,IND_STATIC,"IND_STATIC","IND_STATIC")
    TICK_ENT_STATIC_IND(); /* tick */
    R1 = StgInd_indirectee(R1);
/* Permanent indirection: left in place (not shorted out) purely so that
   profiling/ticky can observe entries through it.
   NOTE(review): this copy of the file is missing several lines in this
   block: the entry braces, the `#endif` / `#ifdef PROFILING` pair that
   should separate the ticky tick from the ENTER_CCS_PAP_CL call, the
   `#if defined(PROFILING)` opening the error guard before `# error`,
   and the tail of the block after the final `#if`.  Restore from the
   original before compiling. */
INFO_TABLE(stg_IND_PERM,1,1,IND_PERM,"IND_PERM","IND_PERM")
    /* Don't add INDs to granularity cost */
    /* Don't: TICK_ENT_STATIC_IND(Node); for ticky-ticky; this ind is
       here only to help profiling */
#if defined(TICKY_TICKY) && !defined(PROFILING)
    /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than
       being extra */
    /* Enter PAP cost centre */
    ENTER_CCS_PAP_CL(R1);
    /* For ticky-ticky, change the perm_ind to a normal ind on first
     * entry, so the number of ent_perm_inds is the number of *thunks*
     * entered again, not the number of subsequent entries.
     *
     * Since this screws up cost centres, we die if profiling and
     * ticky_ticky are on at the same time. KSW 1999-01. */
#  error Profiling and ticky-ticky do not mix at present!
#  endif /* PROFILING */
    StgHeader_info(R1) = stg_IND_info;
#endif /* TICKY_TICKY */
    R1 = StgInd_indirectee(R1);
#if defined(TICKY_TICKY) && !defined(PROFILING)
/* Old-generation indirection (1 ptr + 1 extra word so it is big enough
   for the old-gen write-barrier bookkeeping).
   NOTE(review): entry braces and the final enter-the-indirectee
   statements appear to have been dropped from this copy of the file. */
INFO_TABLE(stg_IND_OLDGEN,1,1,IND_OLDGEN,"IND_OLDGEN","IND_OLDGEN")
    TICK_ENT_STATIC_IND(); /* tick */
    R1 = StgInd_indirectee(R1);
/* Old-generation permanent indirection: the profiling/ticky variant of
   stg_IND_OLDGEN, analogous to stg_IND_PERM above.
   NOTE(review): this copy is missing the entry braces, the `#endif` /
   `#ifdef PROFILING` pair between the ticky tick and ENTER_CCS_PAP_CL,
   the `#if defined(PROFILING)` before `# error`, and the tail of the
   block (final tick and jump).  Restore before compiling. */
INFO_TABLE(stg_IND_OLDGEN_PERM,1,1,IND_OLDGEN_PERM,"IND_OLDGEN_PERM","IND_OLDGEN_PERM")
    /* Don't: TICK_ENT_STATIC_IND(Node); for ticky-ticky;
       this ind is here only to help profiling */
#if defined(TICKY_TICKY) && !defined(PROFILING)
    /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND,
       rather than being extra */
    TICK_ENT_PERM_IND(R1); /* tick */
    /* Enter PAP cost centre -- lexical scoping only */
    ENTER_CCS_PAP_CL(R1);
    /* see comment in IND_PERM */
#  error Profiling and ticky-ticky do not mix at present!
#  endif /* PROFILING */
    StgHeader_info(R1) = stg_IND_OLDGEN_info;
#endif /* TICKY_TICKY */
    R1 = StgInd_indirectee(R1);
319 /* ----------------------------------------------------------------------------
322 Entering a black hole normally causes a cyclic data dependency, but
323 in the concurrent world, black holes are synchronization points,
324 and they are turned into blocking queues when there are threads
325 waiting for the evaluation of the closure to finish.
326 ------------------------------------------------------------------------- */
328 /* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
329 * overwritten with an indirection/evacuee/catch. Thus we claim it
330 * has 1 non-pointer word of payload (in addition to the pointer word
331 * for the blocking queue in a BQ), which should be big enough for an
 * old-generation indirection.
 */
/* BLACKHOLE entry: a thread has entered a thunk that is already under
   evaluation.  Queue the current TSO on the closure, record the reason
   for blocking, upgrade the BLACKHOLE to a BLACKHOLE_BQ, and block.
   NOTE(review): this copy of the file appears to be missing the entry
   braces, the preprocessor guards (GRAN around GranSimBlock, PROFILING
   around the LDV calls), and the final specialised block/jump -- restore
   from the original before compiling. */
INFO_TABLE(stg_BLACKHOLE,0,2,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
    /* Before overwriting TSO_LINK */
    STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);
    /* Actually this is not necessary because R1 is about to be destroyed. */

    /* Put ourselves on the blocking queue for this black hole */
    StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
    StgBlockingQueue_blocking_queue(R1) = CurrentTSO;

    /* jot down why and on what closure we are blocked */
    StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
    StgTSO_block_info(CurrentTSO) = R1;

    /* Change the BLACKHOLE into a BLACKHOLE_BQ */
    /* The size remains the same, so we call LDV_recordDead() -
       no need to fill slop. */
    foreign "C" LDV_recordDead(R1 "ptr", BYTES_TO_WDS(SIZEOF_StgBlockingQueue));
    /* Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()? */
    StgHeader_info(R1) = stg_BLACKHOLE_BQ_info;
    foreign "C" LDV_RECORD_CREATE(R1);

    /* closure is mutable since something has just been added to its BQ */
    foreign "C" recordMutable(R1 "ptr");

    /* PAR: dumping of event now done in blockThread -- HWL */

    /* stg_gen_block is too heavyweight, use a specialised one */
/* BLACKHOLE_BQ entry: like stg_BLACKHOLE, but a blocking queue already
   exists on the closure, so we just prepend the current TSO and block.
   NOTE(review): entry braces, the GRAN guard around GranSimBlock, and
   the final specialised block/jump appear to have been dropped from
   this copy of the file. */
INFO_TABLE(stg_BLACKHOLE_BQ,1,1,BLACKHOLE_BQ,"BLACKHOLE_BQ","BLACKHOLE_BQ")
    /* Before overwriting TSO_LINK */
    STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);

    /* Put ourselves on the blocking queue for this black hole */
    StgTSO_link(CurrentTSO) = StgBlockingQueue_blocking_queue(R1);
    StgBlockingQueue_blocking_queue(R1) = CurrentTSO;

    /* jot down why and on what closure we are blocked */
    StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
    StgTSO_block_info(CurrentTSO) = R1;

    /* PAR: dumping of event now done in blockThread -- HWL */

    /* stg_gen_block is too heavyweight, use a specialised one */
/* ----------------------------------------------------------------------------
   Revertible black holes are needed in the parallel world, to handle
   negative acknowledgements of messages containing updatable closures.
   The idea is that when the original message is transmitted, the closure
   is turned into a revertible black hole...an object which acts like a
   black hole when local threads try to enter it, but which can be reverted
   back to the original closure if necessary.

   It's actually a lot like a blocking queue (BQ) entry, because revertible
   black holes are initially set up with an empty blocking queue.
   ------------------------------------------------------------------------- */
413 #if defined(PAR) || defined(GRAN)
/* Revertible black hole entry: locally this behaves exactly like a
   BLACKHOLE_BQ (see above) -- queue the current TSO and block -- but the
   parallel runtime can later revert the closure to its original form.
   Fixes: the TSO field accessors were written TSO_link / TSO_why_blocked /
   TSO_block_info, inconsistent with the StgTSO_* accessors used by the
   BLACKHOLE_BQ entry this code explicitly mirrors; also restored the
   entry braces, the GRAN guard, and the final specialised block jump,
   which were dropped from this copy. */
INFO_TABLE(stg_RBH,1,1,RBH,"RBH","RBH")
{
#if defined(GRAN)
    /* mainly statistics gathering for GranSim simulation */
    STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);
#endif

    /* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
    /* Put ourselves on the blocking queue for this black hole */
    StgTSO_link(CurrentTSO) = StgBlockingQueue_blocking_queue(R1);
    StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
    /* jot down why and on what closure we are blocked */
    StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
    StgTSO_block_info(CurrentTSO) = R1;

    /* PAR: dumping of event now done in blockThread -- HWL */

    /* stg_gen_block is too heavyweight, use a specialised one */
    jump stg_block_1;
}
/* Filler constructor (0 ptrs, 2 nonptrs) -- presumably holds words saved
   when a closure is turned into an RBH, so it can be reverted; TODO
   confirm against the RBH reversion code.  Never legitimately entered. */
INFO_TABLE(stg_RBH_Save_0,0,2,CONSTR,"RBH_Save_0","RBH_Save_0")
{ foreign "C" barf("RBH_Save_0 object entered!"); }
/* As RBH_Save_0, but 1 ptr + 1 nonptr.  Never legitimately entered.
   (Removed the stray ';' after the INFO_TABLE header: RBH_Save_0 above
   has none, and the semicolon detaches the entry body.) */
INFO_TABLE(stg_RBH_Save_1,1,1,CONSTR,"RBH_Save_1","RBH_Save_1")
{ foreign "C" barf("RBH_Save_1 object entered!"); }
/* As RBH_Save_0, but 2 ptrs.  Never legitimately entered.
   (Removed the stray ';' after the INFO_TABLE header, as for
   RBH_Save_1.) */
INFO_TABLE(stg_RBH_Save_2,2,0,CONSTR,"RBH_Save_2","RBH_Save_2")
{ foreign "C" barf("RBH_Save_2 object entered!"); }
445 #endif /* defined(PAR) || defined(GRAN) */
/* identical to BLACKHOLEs except for the infotag */
/* CAF_BLACKHOLE entry: same blocking protocol as stg_BLACKHOLE above --
   queue the current TSO, convert to a BLACKHOLE_BQ, and block.
   NOTE(review): entry braces, the GRAN guard around GranSimBlock, and
   the final specialised block/jump appear to have been dropped from
   this copy of the file. */
INFO_TABLE(stg_CAF_BLACKHOLE,0,2,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE")
    /* mainly statistics gathering for GranSim simulation */
    STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);

    /* Put ourselves on the blocking queue for this black hole */
    StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
    StgBlockingQueue_blocking_queue(R1) = CurrentTSO;

    /* jot down why and on what closure we are blocked */
    StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
    StgTSO_block_info(CurrentTSO) = R1;

    /* Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC */
    StgHeader_info(R1) = stg_BLACKHOLE_BQ_info;

    /* closure is mutable since something has just been added to its BQ */
    foreign "C" recordMutable(R1 "ptr");

    /* PAR: dumping of event now done in blockThread -- HWL */

    /* stg_gen_block is too heavyweight, use a specialised one */
478 #ifdef EAGER_BLACKHOLING
/* Eagerly-blackholed thunk entered: always indicates a bug, so report
   it and abort rather than deadlock.  (Restored the entry-body braces,
   which were dropped from this copy.) */
INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,IF_,"SE_BLACKHOLE","SE_BLACKHOLE");
IF_(stg_SE_BLACKHOLE_entry)
{
    STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1);
    STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
}
/* Same as SE_BLACKHOLE, for an eagerly-blackholed CAF.
   Fix: the info table referenced SE_CAF_BLACKHOLE_entry, but the entry
   defined below is stg_SE_CAF_BLACKHOLE_entry -- the unprefixed symbol
   is undefined.  Also restored the entry-body braces and the #endif
   closing the enclosing #ifdef EAGER_BLACKHOLING, both dropped from
   this copy.
   NOTE(review): the closure-name strings still read "CAF_BLACKHOLE";
   possibly a copy-paste slip for "SE_CAF_BLACKHOLE" -- confirm intent
   before changing them. */
INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, stg_SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,IF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
IF_(stg_SE_CAF_BLACKHOLE_entry)
{
    STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1);
    STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
}
#endif /* EAGER_BLACKHOLING */
494 /* ----------------------------------------------------------------------------
495 Some static info tables for things that don't get entered, and
496 therefore don't need entry code (i.e. boxed but unpointed objects)
497 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
498 ------------------------------------------------------------------------- */
/* Thread State Objects are never entered; entry indicates a bug. */
INFO_TABLE(stg_TSO, 0,0,TSO, "TSO", "TSO")
{ foreign "C" barf("TSO object entered!"); }
503 /* ----------------------------------------------------------------------------
504 Evacuees are left behind by the garbage collector. Any attempt to enter
506 ------------------------------------------------------------------------- */
/* Forwarding pointer left by the GC; must never be entered by the
   mutator. */
INFO_TABLE(stg_EVACUATED,1,0,EVACUATED,"EVACUATED","EVACUATED")
{ foreign "C" barf("EVACUATED object entered!"); }
511 /* ----------------------------------------------------------------------------
514 Live weak pointers have a special closure type. Dead ones are just
515 nullary constructors (although they live on the heap - we overwrite
516 live weak pointers with dead ones).
517 ------------------------------------------------------------------------- */
/* Live weak pointer: boxed but unpointed (never entered); 4 payload
   words. */
INFO_TABLE(stg_WEAK,0,4,WEAK,"WEAK","WEAK")
{ foreign "C" barf("WEAK object entered!"); }
/*
 * It's important when turning an existing WEAK into a DEAD_WEAK
 * (which is what finalizeWeak# does) that we don't lose the link
 * field and break the linked list of weak pointers. Hence, we give
 * DEAD_WEAK 4 non-pointer fields, the same as WEAK.
 */
/* Dead weak pointer: same size as WEAK (see note above) so overwriting
   in place preserves the weak-pointer list.  Never entered. */
INFO_TABLE_CONSTR(stg_DEAD_WEAK,0,4,0,CONSTR,"DEAD_WEAK","DEAD_WEAK")
{ foreign "C" barf("DEAD_WEAK object entered!"); }
531 /* ----------------------------------------------------------------------------
534 This is a static nullary constructor (like []) that we use to mark an empty
535 finalizer in a weak pointer object.
536 ------------------------------------------------------------------------- */
/* Static nullary constructor marking "no finalizer" in a weak pointer;
   never entered.  The CLOSURE line defines the single shared static
   closure. */
INFO_TABLE_CONSTR(stg_NO_FINALIZER,0,0,0,CONSTR_NOCAF_STATIC,"NO_FINALIZER","NO_FINALIZER")
{ foreign "C" barf("NO_FINALIZER object entered!"); }

CLOSURE(stg_NO_FINALIZER_closure,stg_NO_FINALIZER);
543 /* ----------------------------------------------------------------------------
544 Foreign Objects are unlifted and therefore never entered.
545 ------------------------------------------------------------------------- */
/* Foreign objects are unlifted and never entered; 1 non-pointer word. */
INFO_TABLE(stg_FOREIGN,0,1,FOREIGN,"FOREIGN","FOREIGN")
{ foreign "C" barf("FOREIGN object entered!"); }
550 /* ----------------------------------------------------------------------------
551 Stable Names are unlifted too.
552 ------------------------------------------------------------------------- */
/* Stable names are unlifted and never entered; 1 non-pointer word. */
INFO_TABLE(stg_STABLE_NAME,0,1,STABLE_NAME,"STABLE_NAME","STABLE_NAME")
{ foreign "C" barf("STABLE_NAME object entered!"); }
557 /* ----------------------------------------------------------------------------
560 There are two kinds of these: full and empty. We need an info table
561 and entry code for each type.
562 ------------------------------------------------------------------------- */
/* MVars: two info tables (full and empty) over the same MVAR layout
   (4 pointer words).  Unlifted; never entered. */
INFO_TABLE(stg_FULL_MVAR,4,0,MVAR,"MVAR","MVAR")
{ foreign "C" barf("FULL_MVAR object entered!"); }

INFO_TABLE(stg_EMPTY_MVAR,4,0,MVAR,"MVAR","MVAR")
{ foreign "C" barf("EMPTY_MVAR object entered!"); }
570 /* ----------------------------------------------------------------------------
573 This is a static nullary constructor (like []) that we use to mark the
574 end of a linked TSO queue.
575 ------------------------------------------------------------------------- */
/* Static nullary constructor marking the end of a linked TSO queue;
   never entered.  The CLOSURE line defines the shared static closure. */
INFO_TABLE_CONSTR(stg_END_TSO_QUEUE,0,0,0,CONSTR_NOCAF_STATIC,"END_TSO_QUEUE","END_TSO_QUEUE")
{ foreign "C" barf("END_TSO_QUEUE object entered!"); }

CLOSURE(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE);
582 /* ----------------------------------------------------------------------------
585 Mutable lists (used by the garbage collector) consist of a chain of
586 StgMutClosures connected through their mut_link fields, ending in
587 an END_MUT_LIST closure.
588 ------------------------------------------------------------------------- */
/* Terminator of the GC's mutable list, plus the MUT_CONS cell (1 ptr +
   1 nonptr) used to link closures onto it.  Neither is ever entered. */
INFO_TABLE_CONSTR(stg_END_MUT_LIST,0,0,0,CONSTR_NOCAF_STATIC,"END_MUT_LIST","END_MUT_LIST")
{ foreign "C" barf("END_MUT_LIST object entered!"); }

CLOSURE(stg_END_MUT_LIST_closure,stg_END_MUT_LIST);

INFO_TABLE(stg_MUT_CONS, 1, 1, MUT_CONS, "MUT_CONS", "MUT_CONS")
{ foreign "C" barf("MUT_CONS object entered!"); }
598 /* ----------------------------------------------------------------------------
600 ------------------------------------------------------------------------- */
/* Terminator and cons cell for exception-handler lists; analogous to
   the mutable-list pair above.  Neither is ever entered. */
INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST,0,0,0,CONSTR_NOCAF_STATIC,"END_EXCEPTION_LIST","END_EXCEPTION_LIST")
{ foreign "C" barf("END_EXCEPTION_LIST object entered!"); }

CLOSURE(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST);

INFO_TABLE(stg_EXCEPTION_CONS,1,1,CONSTR,"EXCEPTION_CONS","EXCEPTION_CONS")
{ foreign "C" barf("EXCEPTION_CONS object entered!"); }
610 /* ----------------------------------------------------------------------------
613 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
614 pointers (StgArrPtrs). They all have a similar layout:
    ___________________________
   | Info | No. of | data....
   |  Ptr | Words  |
    ---------------------------
621 These are *unpointed* objects: i.e. they cannot be entered.
623 ------------------------------------------------------------------------- */
/* Arrays (see section comment above): raw-word arrays, mutable pointer
   arrays, and frozen (immutable) pointer arrays.  All unpointed --
   never entered. */
INFO_TABLE(stg_ARR_WORDS, 0, 0, ARR_WORDS, "ARR_WORDS", "ARR_WORDS")
{ foreign "C" barf("ARR_WORDS object entered!"); }

INFO_TABLE(stg_MUT_ARR_PTRS, 0, 0, MUT_ARR_PTRS, "MUT_ARR_PTRS", "MUT_ARR_PTRS")
{ foreign "C" barf("MUT_ARR_PTRS object entered!"); }

INFO_TABLE(stg_MUT_ARR_PTRS_FROZEN, 0, 0, MUT_ARR_PTRS_FROZEN, "MUT_ARR_PTRS_FROZEN", "MUT_ARR_PTRS_FROZEN")
{ foreign "C" barf("MUT_ARR_PTRS_FROZEN object entered!"); }
634 /* ----------------------------------------------------------------------------
636 ------------------------------------------------------------------------- */
/* Mutable variable (1 ptr + 1 nonptr); unpointed, never entered. */
INFO_TABLE(stg_MUT_VAR, 1, 1, MUT_VAR, "MUT_VAR", "MUT_VAR")
{ foreign "C" barf("MUT_VAR object entered!"); }
641 /* ----------------------------------------------------------------------------
644 Entering this closure will just return to the address on the top of the
645 stack. Useful for getting a thread in a canonical form where we can
646 just enter the top stack word to start the thread. (see deleteThread)
647 * ------------------------------------------------------------------------- */
/* DUMMY_RET: entering it just returns to the frame on top of the stack
   (see the section comment above -- used to put a thread in canonical
   form).  (Restored the entry braces, dropped from this copy.) */
INFO_TABLE( stg_dummy_ret, 0, 0, CONSTR_NOCAF_STATIC, "DUMMY_RET", "DUMMY_RET")
{
    jump %ENTRY_CODE(Sp(0));
}
CLOSURE(stg_dummy_ret_closure,stg_dummy_ret);
655 /* ----------------------------------------------------------------------------
656 CHARLIKE and INTLIKE closures.
658 These are static representations of Chars and small Ints, so that
659 we can remove dynamic Chars and Ints during garbage collection and
660 replace them with references to the static objects.
661 ------------------------------------------------------------------------- */
/* Choose where the static C# / I# info pointers come from.
 * When sticking the RTS in a DLL, we delay populating the
 * Charlike and Intlike tables until load-time, which is only
 * when we've got the real addresses to the C# and I# closures;
 * otherwise we refer to the base-package static closures directly.
 * (Restored the #else/#endif separating the two sets of definitions
 * of Char_hash_static_info / Int_hash_static_info, which otherwise
 * collide as duplicate #defines.) */
#if defined(ENABLE_WIN32_DLL_SUPPORT)
static INFO_TBL_CONST StgInfoTable czh_static_info;
static INFO_TBL_CONST StgInfoTable izh_static_info;
#define Char_hash_static_info czh_static_info
#define Int_hash_static_info izh_static_info
#else
#define Char_hash_static_info GHCziBase_Czh_static
#define Int_hash_static_info GHCziBase_Izh_static
#endif
/* Closure-header builders for the static CHARLIKE/INTLIKE tables below,
   using whichever C# / I# info pointer was selected above. */
#define CHARLIKE_HDR(n) CLOSURE(Char_hash_static_info, n)
#define INTLIKE_HDR(n) CLOSURE(Int_hash_static_info, n)

/* put these in the *data* section, since the garbage collector relies
 * on the fact that static closures live in the data section.
 */

/* end the name with _closure, to convince the mangler this is a closure */
690 stg_CHARLIKE_closure:
951 INTLIKE_HDR(-16) /* MIN_INTLIKE == -16 */
983 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */