1 /* -----------------------------------------------------------------------------
2 * $Id: StgMiscClosures.hc,v 1.61 2001/01/29 17:23:41 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
6 * Entry code for various built-in closure types.
8 * ---------------------------------------------------------------------------*/
13 #include "StgMiscClosures.h"
14 #include "HeapStackCheck.h" /* for stg_gen_yield */
16 #include "StoragePriv.h"
17 #include "Profiling.h"
21 #if defined(GRAN) || defined(PAR)
22 # include "GranSimRts.h" /* for DumpRawGranEvent */
23 # include "StgRun.h" /* for StgReturn and register saving */
30 /* ToDo: make the printing of panics more win32-friendly, i.e.,
31 * pop up some lovely message boxes (as well).
33 #define DUMP_ERRMSG(msg) STGCALL2(fprintf,stderr,msg)
36 Template for the entry code of non-enterable closures.
/* Entry-code template for closure types that must never be entered
 * (TSOs, weak pointers, arrays, ...).  Entering one indicates heap
 * corruption or a compiler bug, so the generated stg_<type>_entry
 * prints a diagnostic naming the type and shuts the program down. */
#define NON_ENTERABLE_ENTRY_CODE(type) \
STGFUN(stg_##type##_entry) \
DUMP_ERRMSG(#type " object entered!\n"); \
STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
50 /* -----------------------------------------------------------------------------
51 Support for the bytecode interpreter.
52 -------------------------------------------------------------------------- */
56 /* 9 bits of return code for constructors created by the interpreter. */
/* Direct (non-vectored) return of a constructor built by the bytecode
 * interpreter.  R1 points at the constructor; we propagate its tag and
 * return through the info pointer sitting on top of the stack. */
FN_(stg_interp_constr_entry)
/* R1 points at the constructor */
/* STGCALL2(fprintf,stderr,"stg_interp_constr_entry (direct return)!\n"); */
/* Pointless, since SET_TAG doesn't do anything */
SET_TAG( GET_TAG(GET_INFO(R1.cl)));
JMP_(ENTRY_CODE((P_)(*Sp)));
/* Vectored returns for interpreter-built constructors: constructor
 * family member N returns through vector slot N-1 of the return
 * address on top of the stack. */
FN_(stg_interp_constr1_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),0)); FE_ }
FN_(stg_interp_constr2_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),1)); FE_ }
FN_(stg_interp_constr3_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),2)); FE_ }
FN_(stg_interp_constr4_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),3)); FE_ }
FN_(stg_interp_constr5_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),4)); FE_ }
FN_(stg_interp_constr6_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),5)); FE_ }
FN_(stg_interp_constr7_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),6)); FE_ }
FN_(stg_interp_constr8_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),7)); FE_ }
77 /* Some info tables to be used when compiled code returns a value to
78 the interpreter, i.e. the interpreter pushes one of these onto the
79 stack before entering a value. What the code does is to
80 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
81 the interpreter's convention (returned value is on top of stack),
82 and then cause the scheduler to enter the interpreter.
84 On entry, the stack (growing down) looks like this:
86 ptr to BCO holding return continuation
87 ptr to one of these info tables.
89 The info table code, both direct and vectored, must:
90 * push R1/F1/D1 on the stack, and its tag if necessary
91 * push the BCO (so it's now on the stack twice)
92 * Yield, ie, go to the scheduler.
94 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
95 directly to the bytecode interpreter. That pops the top element
96 (the BCO, containing the return continuation), and interprets it.
97 Net result: return continuation gets interpreted, with the
101 ptr to the info table just jumped thru
104 which is just what we want -- the "standard" return layout for the
107 Don't ask me how unboxed tuple returns are supposed to work. We
108 haven't got a good story about that yet.
111 /* When the returned value is in R1 and it is a pointer, so doesn't
/* Compiled code returns a *pointer* in R1 to the interpreter: grab the
 * BCO from the stack, push the returned pointer (no tag needed for a
 * pointer), re-push the BCO so the scheduler finds it on top, then
 * yield to the interpreter.  One copy per return-vector slot. */
#define STG_CtoI_RET_R1p_Template(label) \
bco = ((StgPtr*)Sp)[1]; \
((StgPtr*)Sp)[0] = R1.p; \
((StgPtr*)Sp)[0] = bco; \
JMP_(stg_yield_to_interpreter); \
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_0_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_1_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_2_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_3_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_4_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_5_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_6_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_7_entry);
VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1p,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
141 /* When the returned value is in R1 and it isn't a pointer. */
142 #define STG_CtoI_RET_R1n_Template(label) \
147 bco = ((StgPtr*)Sp)[1]; \
149 ((StgPtr*)Sp)[0] = (StgPtr)R1.i; \
151 ((StgPtr*)Sp)[0] = (StgPtr)1; /* tag */ \
153 ((StgPtr*)Sp)[0] = bco; \
154 JMP_(stg_yield_to_interpreter); \
158 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_entry);
159 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_0_entry);
160 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_1_entry);
161 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_2_entry);
162 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_3_entry);
163 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_4_entry);
164 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_5_entry);
165 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_6_entry);
166 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_7_entry);
168 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1n,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
172 /* When the returned value is in F1 ... */
173 #define STG_CtoI_RET_F1_Template(label) \
178 bco = ((StgPtr*)Sp)[1]; \
179 Sp -= sizeofW(StgFloat); \
180 ASSIGN_FLT((W_*)Sp, F1); \
182 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgFloat); \
184 ((StgPtr*)Sp)[0] = bco; \
185 JMP_(stg_yield_to_interpreter); \
189 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_entry);
190 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_0_entry);
191 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_1_entry);
192 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_2_entry);
193 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_3_entry);
194 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_4_entry);
195 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_5_entry);
196 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_6_entry);
197 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_7_entry);
199 VEC_POLY_INFO_TABLE(stg_ctoi_ret_F1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
202 /* When the returned value is in D1 ... */
203 #define STG_CtoI_RET_D1_Template(label) \
208 bco = ((StgPtr*)Sp)[1]; \
209 Sp -= sizeofW(StgDouble); \
210 ASSIGN_DBL((W_*)Sp, D1); \
212 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgDouble); \
214 ((StgPtr*)Sp)[0] = bco; \
215 JMP_(stg_yield_to_interpreter); \
219 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_entry);
220 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_0_entry);
221 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_1_entry);
222 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_2_entry);
223 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_3_entry);
224 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_4_entry);
225 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_5_entry);
226 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_6_entry);
227 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_7_entry);
229 VEC_POLY_INFO_TABLE(stg_ctoi_ret_D1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
231 /* The other way round: when the interpreter returns a value to
232 compiled code. The stack looks like this:
234 return info table (pushed by compiled code)
235 return value (pushed by interpreter)
237 If the value is ptr-rep'd, the interpreter simply returns to the
238 scheduler, instructing it to ThreadEnterGHC.
240 Otherwise (unboxed return value), we replace the top stack word,
241 which must be the tag, with stg_gc_unbx_r1_info (or f1_info or d1_info),
242 and return to the scheduler, instructing it to ThreadRunGHC.
244 No supporting code needed!
248 /* Entering a BCO. Heave it on the stack and defer to the
/* Entering a BCO: leave it for the scheduler to hand over to the
 * bytecode interpreter (the BCO ends up on the stack and we yield). */
INFO_TABLE(stg_BCO_info,stg_BCO_entry,4,0,BCO,,EF_,"BCO","BCO");
STGFUN(stg_BCO_entry) {
JMP_(stg_yield_to_interpreter);
262 /* -----------------------------------------------------------------------------
263 Entry code for an indirection.
264 -------------------------------------------------------------------------- */
/* Plain indirection: follow the indirectee and enter it. */
INFO_TABLE(stg_IND_info,stg_IND_entry,1,0,IND,,EF_,0,0);
STGFUN(stg_IND_entry)
TICK_ENT_IND(Node); /* tick */
R1.p = (P_) ((StgInd*)R1.p)->indirectee;
JMP_(ENTRY_CODE(*R1.p));
/* Static indirection (a CAF that has been updated): same behaviour as
 * stg_IND, but the closure lives in static space. */
INFO_TABLE(stg_IND_STATIC_info,stg_IND_STATIC_entry,1,0,IND_STATIC,,EF_,0,0);
STGFUN(stg_IND_STATIC_entry)
TICK_ENT_IND(Node); /* tick */
R1.p = (P_) ((StgIndStatic*)R1.p)->indirectee;
JMP_(ENTRY_CODE(*R1.p));
289 INFO_TABLE(stg_IND_PERM_info,stg_IND_PERM_entry,1,1,IND_PERM,,EF_,"IND_PERM","IND_PERM");
290 STGFUN(stg_IND_PERM_entry)
293 /* Don't add INDs to granularity cost */
294 /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
296 #if defined(TICKY_TICKY) && !defined(PROFILING)
297 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
298 TICK_ENT_PERM_IND(R1.p); /* tick */
301 /* Enter PAP cost centre -- lexical scoping only */
302 ENTER_CCS_PAP_CL(R1.cl);
304 /* For ticky-ticky, change the perm_ind to a normal ind on first
305 * entry, so the number of ent_perm_inds is the number of *thunks*
306 * entered again, not the number of subsequent entries.
308 * Since this screws up cost centres, we die if profiling and
309 * ticky_ticky are on at the same time. KSW 1999-01.
314 # error Profiling and ticky-ticky do not mix at present!
315 # endif /* PROFILING */
316 SET_INFO((StgInd*)R1.p,&IND_info);
317 #endif /* TICKY_TICKY */
319 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
321 /* Dont: TICK_ENT_VIA_NODE(); for ticky-ticky; as above */
323 #if defined(TICKY_TICKY) && !defined(PROFILING)
327 JMP_(ENTRY_CODE(*R1.p));
/* Old-generation indirection (carries a mut_link word): behaviourally
 * identical to stg_IND — follow the indirectee and enter it. */
INFO_TABLE(stg_IND_OLDGEN_info,stg_IND_OLDGEN_entry,1,1,IND_OLDGEN,,EF_,0,0);
STGFUN(stg_IND_OLDGEN_entry)
TICK_ENT_IND(Node); /* tick */
R1.p = (P_) ((StgInd*)R1.p)->indirectee;
JMP_(ENTRY_CODE(*R1.p));
343 INFO_TABLE(stg_IND_OLDGEN_PERM_info,stg_IND_OLDGEN_PERM_entry,1,1,IND_OLDGEN_PERM,,EF_,0,0);
344 STGFUN(stg_IND_OLDGEN_PERM_entry)
347 /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
349 #if defined(TICKY_TICKY) && !defined(PROFILING)
350 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
351 TICK_ENT_PERM_IND(R1.p); /* tick */
354 /* Enter PAP cost centre -- lexical scoping only */
355 ENTER_CCS_PAP_CL(R1.cl);
357 /* see comment in IND_PERM */
360 # error Profiling and ticky-ticky do not mix at present!
361 # endif /* PROFILING */
362 SET_INFO((StgInd*)R1.p,&IND_OLDGEN_info);
363 #endif /* TICKY_TICKY */
365 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
367 JMP_(ENTRY_CODE(*R1.p));
371 /* -----------------------------------------------------------------------------
372 Entry code for a black hole.
374 Entering a black hole normally causes a cyclic data dependency, but
375 in the concurrent world, black holes are synchronization points,
376 and they are turned into blocking queues when there are threads
377 waiting for the evaluation of the closure to finish.
378 -------------------------------------------------------------------------- */
380 /* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
381 * overwritten with an indirection/evacuee/catch. Thus we claim it
382 * has 1 non-pointer word of payload (in addition to the pointer word
383 * for the blocking queue in a BQ), which should be big enough for an
384 * old-generation indirection.
387 INFO_TABLE(stg_BLACKHOLE_info, stg_BLACKHOLE_entry,0,2,BLACKHOLE,,EF_,"BLACKHOLE","BLACKHOLE");
388 STGFUN(stg_BLACKHOLE_entry)
392 /* Before overwriting TSO_LINK */
393 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
398 bdescr *bd = Bdescr(R1.p);
399 if (bd->back != (bdescr *)BaseReg) {
400 if (bd->gen->no >= 1 || bd->step->no >= 1) {
401 CMPXCHG(R1.cl->header.info, &BLACKHOLE_info, &WHITEHOLE_info);
403 EXTFUN_RTS(stg_gc_enter_1_hponly);
404 JMP_(stg_gc_enter_1_hponly);
411 /* Put ourselves on the blocking queue for this black hole */
412 #if defined(GRAN) || defined(PAR)
413 /* in fact, only difference is the type of the end-of-queue marker! */
414 CurrentTSO->link = END_BQ_QUEUE;
415 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
417 CurrentTSO->link = END_TSO_QUEUE;
418 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
420 /* jot down why and on what closure we are blocked */
421 CurrentTSO->why_blocked = BlockedOnBlackHole;
422 CurrentTSO->block_info.closure = R1.cl;
423 /* closure is mutable since something has just been added to its BQ */
424 recordMutable((StgMutClosure *)R1.cl);
425 /* Change the BLACKHOLE into a BLACKHOLE_BQ */
426 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
428 /* PAR: dumping of event now done in blockThread -- HWL */
430 /* stg_gen_block is too heavyweight, use a specialised one */
436 INFO_TABLE(stg_BLACKHOLE_BQ_info, stg_BLACKHOLE_BQ_entry,1,1,BLACKHOLE_BQ,,EF_,"BLACKHOLE","BLACKHOLE");
437 STGFUN(stg_BLACKHOLE_BQ_entry)
441 /* Before overwriting TSO_LINK */
442 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
447 bdescr *bd = Bdescr(R1.p);
448 if (bd->back != (bdescr *)BaseReg) {
449 if (bd->gen->no >= 1 || bd->step->no >= 1) {
450 CMPXCHG(R1.cl->header.info, &BLACKHOLE_info, &WHITEHOLE_info);
452 EXTFUN_RTS(stg_gc_enter_1_hponly);
453 JMP_(stg_gc_enter_1_hponly);
461 /* Put ourselves on the blocking queue for this black hole */
462 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
463 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
464 /* jot down why and on what closure we are blocked */
465 CurrentTSO->why_blocked = BlockedOnBlackHole;
466 CurrentTSO->block_info.closure = R1.cl;
468 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
471 /* PAR: dumping of event now done in blockThread -- HWL */
473 /* stg_gen_block is too heavyweight, use a specialised one */
479 Revertible black holes are needed in the parallel world, to handle
480 negative acknowledgements of messages containing updatable closures.
481 The idea is that when the original message is transmitted, the closure
482 is turned into a revertible black hole...an object which acts like a
483 black hole when local threads try to enter it, but which can be reverted
484 back to the original closure if necessary.
486 It's actually a lot like a blocking queue (BQ) entry, because revertible
487 black holes are initially set up with an empty blocking queue.
/* Revertible black holes (parallel/GranSim builds only): behave like a
 * BLACKHOLE_BQ for local threads, but can be reverted to the original
 * closure on a negative acknowledgement.  The RBH_Save_N constructors
 * stash the words the RBH header overwrote, so reversion is possible;
 * they are never entered themselves. */
#if defined(PAR) || defined(GRAN)
INFO_TABLE(stg_RBH_info, stg_RBH_entry,1,1,RBH,,EF_,0,0);
STGFUN(stg_RBH_entry)
/* mainly statistics gathering for GranSim simulation */
STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
/* Put ourselves on the blocking queue for this black hole */
CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
/* jot down why and on what closure we are blocked */
CurrentTSO->why_blocked = BlockedOnBlackHole;
CurrentTSO->block_info.closure = R1.cl;
/* PAR: dumping of event now done in blockThread -- HWL */
/* stg_gen_block is too heavyweight, use a specialised one */
INFO_TABLE(stg_RBH_Save_0_info, stg_RBH_Save_0_entry,0,2,CONSTR,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(RBH_Save_0);
INFO_TABLE(stg_RBH_Save_1_info, stg_RBH_Save_1_entry,1,1,CONSTR,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(RBH_Save_1);
INFO_TABLE(stg_RBH_Save_2_info, stg_RBH_Save_2_entry,2,0,CONSTR,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(RBH_Save_2);
#endif /* defined(PAR) || defined(GRAN) */
526 /* identical to BLACKHOLEs except for the infotag */
527 INFO_TABLE(stg_CAF_BLACKHOLE_info, stg_CAF_BLACKHOLE_entry,0,2,CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
528 STGFUN(stg_CAF_BLACKHOLE_entry)
532 /* mainly statistics gathering for GranSim simulation */
533 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
538 bdescr *bd = Bdescr(R1.p);
539 if (bd->back != (bdescr *)BaseReg) {
540 if (bd->gen->no >= 1 || bd->step->no >= 1) {
541 CMPXCHG(R1.cl->header.info, &CAF_BLACKHOLE_info, &WHITEHOLE_info);
543 EXTFUN_RTS(stg_gc_enter_1_hponly);
544 JMP_(stg_gc_enter_1_hponly);
552 /* Put ourselves on the blocking queue for this black hole */
553 #if defined(GRAN) || defined(PAR)
554 /* in fact, only difference is the type of the end-of-queue marker! */
555 CurrentTSO->link = END_BQ_QUEUE;
556 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
558 CurrentTSO->link = END_TSO_QUEUE;
559 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
561 /* jot down why and on what closure we are blocked */
562 CurrentTSO->why_blocked = BlockedOnBlackHole;
563 CurrentTSO->block_info.closure = R1.cl;
564 /* closure is mutable since something has just been added to its BQ */
565 recordMutable((StgMutClosure *)R1.cl);
566 /* Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC */
567 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
569 /* PAR: dumping of event now done in blockThread -- HWL */
571 /* stg_gen_block is too heavyweight, use a specialised one */
/* "Slop-eliminated" black hole: entering one is always a bug, so print
 * a diagnostic with the closure address and abort the program. */
INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,EF_,0,0);
STGFUN(stg_SE_BLACKHOLE_entry)
STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1.p);
STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
586 INFO_TABLE(SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,EF_,0,0);
587 STGFUN(stg_SE_CAF_BLACKHOLE_entry)
590 STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1.p);
591 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* WHITEHOLE: used as a transient marker on a closure (this file swaps
 * it in via CMPXCHG in the black-hole entry code).  Entering one simply
 * re-enters the closure through its current info pointer. */
INFO_TABLE(stg_WHITEHOLE_info, stg_WHITEHOLE_entry,0,2,CONSTR_NOCAF_STATIC,,EF_,0,0);
STGFUN(stg_WHITEHOLE_entry)
JMP_(GET_ENTRY(R1.cl));
606 /* -----------------------------------------------------------------------------
607 Some static info tables for things that don't get entered, and
608 therefore don't need entry code (i.e. boxed but unpointed objects)
609 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
610 -------------------------------------------------------------------------- */
/* Thread State Objects are never entered. */
INFO_TABLE(stg_TSO_info, stg_TSO_entry, 0,0,TSO,,EF_,"TSO","TSO");
NON_ENTERABLE_ENTRY_CODE(TSO);
/* -----------------------------------------------------------------------------
Evacuees are left behind by the garbage collector. Any attempt to enter
-------------------------------------------------------------------------- */
INFO_TABLE(stg_EVACUATED_info,stg_EVACUATED_entry,1,0,EVACUATED,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(EVACUATED);
/* -----------------------------------------------------------------------------
Live weak pointers have a special closure type. Dead ones are just
nullary constructors (although they live on the heap - we overwrite
live weak pointers with dead ones).
-------------------------------------------------------------------------- */
INFO_TABLE(stg_WEAK_info,stg_WEAK_entry,0,4,WEAK,,EF_,"WEAK","WEAK");
NON_ENTERABLE_ENTRY_CODE(WEAK);
INFO_TABLE_CONSTR(stg_DEAD_WEAK_info,stg_DEAD_WEAK_entry,0,1,0,CONSTR,,EF_,"DEAD_WEAK","DEAD_WEAK");
NON_ENTERABLE_ENTRY_CODE(DEAD_WEAK);
/* -----------------------------------------------------------------------------
This is a static nullary constructor (like []) that we use to mark an empty
finalizer in a weak pointer object.
-------------------------------------------------------------------------- */
INFO_TABLE_CONSTR(stg_NO_FINALIZER_info,stg_NO_FINALIZER_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(NO_FINALIZER);
/* the static closure itself, referenced wherever an empty finalizer is needed */
SET_STATIC_HDR(stg_NO_FINALIZER_closure,stg_NO_FINALIZER_info,0/*CC*/,,EI_)
650 /* -----------------------------------------------------------------------------
651 Foreign Objects are unlifted and therefore never entered.
652 -------------------------------------------------------------------------- */
/* Foreign Objects are unlifted and therefore never entered. */
INFO_TABLE(stg_FOREIGN_info,stg_FOREIGN_entry,0,1,FOREIGN,,EF_,"FOREIGN","FOREIGN");
NON_ENTERABLE_ENTRY_CODE(FOREIGN);
/* -----------------------------------------------------------------------------
Stable Names are unlifted too.
-------------------------------------------------------------------------- */
INFO_TABLE(stg_STABLE_NAME_info,stg_STABLE_NAME_entry,0,1,STABLE_NAME,,EF_,"STABLE_NAME","STABLE_NAME");
NON_ENTERABLE_ENTRY_CODE(STABLE_NAME);
/* -----------------------------------------------------------------------------
MVars.  There are two kinds of these: full and empty. We need an info table
and entry code for each type.
-------------------------------------------------------------------------- */
INFO_TABLE(stg_FULL_MVAR_info,stg_FULL_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
NON_ENTERABLE_ENTRY_CODE(FULL_MVAR);
INFO_TABLE(stg_EMPTY_MVAR_info,stg_EMPTY_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
NON_ENTERABLE_ENTRY_CODE(EMPTY_MVAR);
/* -----------------------------------------------------------------------------
This is a static nullary constructor (like []) that we use to mark the
end of a linked TSO queue.
-------------------------------------------------------------------------- */
INFO_TABLE_CONSTR(stg_END_TSO_QUEUE_info,stg_END_TSO_QUEUE_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(END_TSO_QUEUE);
SET_STATIC_HDR(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE_info,0/*CC*/,,EI_)
690 /* -----------------------------------------------------------------------------
693 Mutable lists (used by the garbage collector) consist of a chain of
694 StgMutClosures connected through their mut_link fields, ending in
695 an END_MUT_LIST closure.
696 -------------------------------------------------------------------------- */
/* End-of-mutable-list marker: a static nullary constructor that closes
 * the GC's chain of StgMutClosures. */
INFO_TABLE_CONSTR(stg_END_MUT_LIST_info,stg_END_MUT_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST);
SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,EI_)
/* a cell in the mutable list itself */
INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_VAR, , EF_, 0, 0);
NON_ENTERABLE_ENTRY_CODE(MUT_CONS);
/* -----------------------------------------------------------------------------
Exception lists: end marker and cons cell, same pattern as above.
-------------------------------------------------------------------------- */
INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST_info,stg_END_EXCEPTION_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(END_EXCEPTION_LIST);
SET_STATIC_HDR(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST_info,0/*CC*/,,EI_)
INFO_TABLE(stg_EXCEPTION_CONS_info, stg_EXCEPTION_CONS_entry, 1, 1, CONSTR, , EF_, 0, 0);
NON_ENTERABLE_ENTRY_CODE(EXCEPTION_CONS);
720 /* -----------------------------------------------------------------------------
723 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
724 pointers (StgArrPtrs). They all have a similar layout:
726 ___________________________
727 | Info | No. of | data....
729 ---------------------------
731 These are *unpointed* objects: i.e. they cannot be entered.
733 -------------------------------------------------------------------------- */
/* Info tables for the array closure types.  Arrays are unpointed (never
 * entered), so each type also gets NON_ENTERABLE_ENTRY_CODE. */
#define ArrayInfo(type) \
INFO_TABLE(stg_##type##_info, stg_##type##_entry, 0, 0, type, , EF_,"" # type "","" # type "");
ArrayInfo(ARR_WORDS);
NON_ENTERABLE_ENTRY_CODE(ARR_WORDS);
ArrayInfo(MUT_ARR_PTRS);
NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS);
ArrayInfo(MUT_ARR_PTRS_FROZEN);
NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS_FROZEN);
747 /* -----------------------------------------------------------------------------
749 -------------------------------------------------------------------------- */
/* Mutable variables (MutVar#/IORef): unpointed, never entered. */
INFO_TABLE(stg_MUT_VAR_info, stg_MUT_VAR_entry, 1, 1, MUT_VAR, , EF_, "MUT_VAR", "MUT_VAR");
NON_ENTERABLE_ENTRY_CODE(MUT_VAR);
754 /* -----------------------------------------------------------------------------
755 Standard Error Entry.
757 This is used for filling in vector-table entries that can never happen,
759 -------------------------------------------------------------------------- */
760 /* No longer used; we use NULL, because a) it never happens, right? and b)
761 Windows doesn't like DLL entry points being used as static initialisers
762 STGFUN(stg_error_entry) \
765 DUMP_ERRMSG("fatal: stg_error_entry"); \
766 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
771 /* -----------------------------------------------------------------------------
774 Entering this closure will just return to the address on the top of the
775 stack. Useful for getting a thread in a canonical form where we can
776 just enter the top stack word to start the thread. (see deleteThread)
777 * -------------------------------------------------------------------------- */
/* Dummy return closure: entering it just returns to the address on top
 * of the stack, giving a canonical "enterable" thread state (see
 * deleteThread).
 * NOTE(review): ret_addr is presumably loaded from the stack in lines
 * elided from this view — confirm against the full source. */
INFO_TABLE(stg_dummy_ret_info, stg_dummy_ret_entry, 0, 0, CONSTR_NOCAF_STATIC, , EF_, 0, 0);
STGFUN(stg_dummy_ret_entry)
JMP_(ENTRY_CODE(ret_addr));
SET_STATIC_HDR(stg_dummy_ret_closure,stg_dummy_ret_info,CCS_DONT_CARE,,EI_)
792 /* -----------------------------------------------------------------------------
793 Strict IO application - performing an IO action and entering its result.
795 rts_evalIO() lets you perform Haskell IO actions from outside of Haskell-land,
796 returning back to you their result. Want this result to be evaluated to WHNF
797 by that time, so that we can easily get at the int/char/whatever using the
798 various get{Ty} functions provided by the RTS API.
800 forceIO takes care of this, performing the IO action and entering the
801 results that comes back.
803 * -------------------------------------------------------------------------- */
/* Return continuation for forceIO: pop the seq frame and enter the
 * result (here taken from R1, i.e. the register return convention).
 * NOTE(review): a second definition of the same symbols follows below
 * that reads the result from Sp[0]; the two are presumably alternative
 * branches of a register-vs-stack #ifdef elided from this view. */
INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
STGFUN(stg_forceIO_ret_entry)
Sp -= sizeofW(StgSeqFrame);
JMP_(GET_ENTRY(R1.cl));
/* Stack-return variant of the forceIO return continuation: fetch the
 * result from the top of the stack, pop the seq frame, and enter it.
 * NOTE(review): duplicates the symbols defined just above — presumably
 * the #else branch of an elided conditional; confirm before editing. */
INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
STGFUN(stg_forceIO_ret_entry)
rval = (StgClosure *)Sp[0];
Sp -= sizeofW(StgSeqFrame);
JMP_(GET_ENTRY(R1.cl));
/* forceIO: run the IO action on the stack and make sure its result is
 * evaluated to WHNF before handing it back through the RTS API
 * (rts_evalIO).  Installs stg_forceIO_ret as the return continuation,
 * then enters the action. */
INFO_TABLE(stg_forceIO_info,stg_forceIO_entry,1,0,FUN_STATIC,,EF_,0,0);
FN_(stg_forceIO_entry)
/* Sp[0] contains the IO action we want to perform */
/* Replace it with the return continuation that enters the result. */
Sp[0] = (W_)&stg_forceIO_ret_info;
/* Push the RealWorld# tag and enter */
/* NOTE(review): an Sp decrement presumably sits between these two
 * stores in the unelided source, otherwise the tag would overwrite
 * the continuation — confirm against the full file. */
Sp[0] =(W_)REALWORLD_TAG;
JMP_(GET_ENTRY(R1.cl));
SET_STATIC_HDR(stg_forceIO_closure,stg_forceIO_info,CCS_DONT_CARE,,EI_)
848 /* -----------------------------------------------------------------------------
849 CHARLIKE and INTLIKE closures.
851 These are static representations of Chars and small Ints, so that
852 we can remove dynamic Chars and Ints during garbage collection and
853 replace them with references to the static objects.
854 -------------------------------------------------------------------------- */
/* Static Char/Int closure headers.  In DLL/interpreter builds the C#
 * and I# info-table addresses are not known until load time, so local
 * placeholder tables are used; otherwise the PrelBase tables are
 * referenced directly. */
#if defined(INTERPRETER) || defined(ENABLE_WIN32_DLL_SUPPORT)
* When sticking the RTS in a DLL, we delay populating the
* Charlike and Intlike tables until load-time, which is only
* when we've got the real addresses to the C# and I# closures.
static INFO_TBL_CONST StgInfoTable czh_static_info;
static INFO_TBL_CONST StgInfoTable izh_static_info;
#define Char_hash_static_info czh_static_info
#define Int_hash_static_info izh_static_info
/* NOTE(review): the pair below presumably sits in an elided #else
 * branch (non-DLL build) — confirm against the full source. */
#define Char_hash_static_info PrelBase_Czh_static_info
#define Int_hash_static_info PrelBase_Izh_static_info
/* one static C#/I# closure header per table entry */
#define CHARLIKE_HDR(n) \
STATIC_HDR(Char_hash_static_info, /* C# */ \
#define INTLIKE_HDR(n) \
STATIC_HDR(Int_hash_static_info, /* I# */ \
886 /* put these in the *data* section, since the garbage collector relies
887 * on the fact that static closures live in the data section.
890 /* end the name with _closure, to convince the mangler this is a closure */
892 StgIntCharlikeClosure stg_CHARLIKE_closure[] = {
1151 StgIntCharlikeClosure stg_INTLIKE_closure[] = {
1152 INTLIKE_HDR(-16), /* MIN_INTLIKE == -16 */
1184 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */