1 /* -----------------------------------------------------------------------------
2 * $Id: StgMiscClosures.hc,v 1.68 2001/08/10 09:41:17 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
6 * Entry code for various built-in closure types.
8 * ---------------------------------------------------------------------------*/
14 #include "StgMiscClosures.h"
15 #include "HeapStackCheck.h" /* for stg_gen_yield */
17 #include "StoragePriv.h"
18 #include "Profiling.h"
22 #if defined(GRAN) || defined(PAR)
23 # include "GranSimRts.h" /* for DumpRawGranEvent */
24 # include "StgRun.h" /* for StgReturn and register saving */
31 /* ToDo: make the printing of panics more win32-friendly, i.e.,
32 * pop up some lovely message boxes (as well).
/* DUMP_ERRMSG(msg): report a fatal RTS error message on stderr via the
 * STGCALL foreign-call wrapper.  The message is passed as an argument
 * to a fixed "%s" format, so a message that happens to contain '%'
 * can never be misinterpreted as a printf format string (the previous
 * form passed msg directly as the format). */
34 #define DUMP_ERRMSG(msg) STGCALL3(fprintf,stderr,"%s",msg)
37 Template for the entry code of non-enterable closures.
/* NON_ENTERABLE_ENTRY_CODE(type) expands to the entry function
 * stg_<type>_entry for closure types that must never be entered.
 * Entering one is an RTS bug: the code prints "<type> object
 * entered!" and shuts the RTS down with EXIT_FAILURE.
 * NOTE(review): the brace/FB_/FE_ lines of the macro body appear to
 * be elided from this chunk of the file. */
40 #define NON_ENTERABLE_ENTRY_CODE(type) \
41 STGFUN(stg_##type##_entry) \
44 DUMP_ERRMSG(#type " object entered!\n"); \
45 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
51 /* -----------------------------------------------------------------------------
52 Support for the bytecode interpreter.
53 -------------------------------------------------------------------------- */
55 /* 9 bits of return code for constructors created by the interpreter. */
/* Direct (non-vectored) return for an interpreter-built constructor:
 * copy the constructor's tag out of its info table into the tag
 * register, then return through the frame on top of the stack.
 * NOTE(review): the FB_/FE_ brace lines appear to be elided from
 * this chunk. */
56 FN_(stg_interp_constr_entry)
58 /* R1 points at the constructor */
60 /* STGCALL2(fprintf,stderr,"stg_interp_constr_entry (direct return)!\n"); */
61 /* Pointless, since SET_TAG doesn't do anything */
62 SET_TAG( GET_TAG(GET_INFO(R1.cl)));
63 JMP_(ENTRY_CODE((P_)(*Sp)));
/* Vectored returns for interpreter-built constructors: a constructor
 * with tag N returns through slot N-1 of the return vector of the
 * frame on top of the stack. */
67 FN_(stg_interp_constr1_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),0)); FE_ }
68 FN_(stg_interp_constr2_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),1)); FE_ }
69 FN_(stg_interp_constr3_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),2)); FE_ }
70 FN_(stg_interp_constr4_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),3)); FE_ }
71 FN_(stg_interp_constr5_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),4)); FE_ }
72 FN_(stg_interp_constr6_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),5)); FE_ }
73 FN_(stg_interp_constr7_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),6)); FE_ }
74 FN_(stg_interp_constr8_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),7)); FE_ }
76 /* Some info tables to be used when compiled code returns a value to
77 the interpreter, i.e. the interpreter pushes one of these onto the
78 stack before entering a value. What the code does is to
79 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
80 the interpreter's convention (returned value is on top of stack),
81 and then cause the scheduler to enter the interpreter.
83 On entry, the stack (growing down) looks like this:
85 ptr to BCO holding return continuation
86 ptr to one of these info tables.
88 The info table code, both direct and vectored, must:
89 * push R1/F1/D1 on the stack, and its tag if necessary
90 * push the BCO (so it's now on the stack twice)
91 * Yield, ie, go to the scheduler.
93 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
94 directly to the bytecode interpreter. That pops the top element
95 (the BCO, containing the return continuation), and interprets it.
96 Net result: return continuation gets interpreted, with the
100 ptr to the info table just jumped thru
103 which is just what we want -- the "standard" return layout for the
106 Don't ask me how unboxed tuple returns are supposed to work. We
107 haven't got a good story about that yet.
/* Compiled-code-to-interpreter return adapter for a pointer result
 * in R1: fetch the BCO (the interpreter's return continuation) from
 * the stack, push the returned value and then the BCO again, and
 * yield so the scheduler resumes the interpreter on that BCO.
 * NOTE(review): the Sp-decrement lines between the stores, and the
 * FB_/FE_ brace lines, appear to be elided from this chunk -- each
 * store should be a push, not an overwrite of the same slot. */
110 /* When the returned value is in R1 and it is a pointer, so doesn't
112 #define STG_CtoI_RET_R1p_Template(label) \
117 bco = ((StgPtr*)Sp)[1]; \
119 ((StgPtr*)Sp)[0] = R1.p; \
121 ((StgPtr*)Sp)[0] = bco; \
122 JMP_(stg_yield_to_interpreter); \
126 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_entry);
127 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_0_entry);
128 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_1_entry);
129 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_2_entry);
130 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_3_entry);
131 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_4_entry);
132 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_5_entry);
133 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_6_entry);
134 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_7_entry);
136 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1p,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* As for the R1p template, but for a non-pointer result in R1: the
 * value is pushed together with a tag word of 1 (one-word unboxed
 * value) before the BCO is re-pushed and we yield to the scheduler.
 * NOTE(review): the Sp-decrement and FB_/FE_ lines appear to be
 * elided from this chunk -- the stores are pushes in the full
 * source. */
140 /* When the returned value is in R1 and it isn't a pointer. */
141 #define STG_CtoI_RET_R1n_Template(label) \
146 bco = ((StgPtr*)Sp)[1]; \
148 ((StgPtr*)Sp)[0] = (StgPtr)R1.i; \
150 ((StgPtr*)Sp)[0] = (StgPtr)1; /* tag */ \
152 ((StgPtr*)Sp)[0] = bco; \
153 JMP_(stg_yield_to_interpreter); \
157 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_entry);
158 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_0_entry);
159 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_1_entry);
160 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_2_entry);
161 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_3_entry);
162 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_4_entry);
163 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_5_entry);
164 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_6_entry);
165 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_7_entry);
167 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1n,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* Return adapter for a float result in F1: make room on the stack,
 * store the float with ASSIGN_FLT, push its size in words as the
 * tag, re-push the BCO and yield to the scheduler.
 * NOTE(review): some Sp-decrement and FB_/FE_ lines appear to be
 * elided from this chunk. */
171 /* When the returned value is in F1 ... */
172 #define STG_CtoI_RET_F1_Template(label) \
177 bco = ((StgPtr*)Sp)[1]; \
178 Sp -= sizeofW(StgFloat); \
179 ASSIGN_FLT((W_*)Sp, F1); \
181 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgFloat); \
183 ((StgPtr*)Sp)[0] = bco; \
184 JMP_(stg_yield_to_interpreter); \
188 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_entry);
189 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_0_entry);
190 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_1_entry);
191 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_2_entry);
192 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_3_entry);
193 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_4_entry);
194 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_5_entry);
195 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_6_entry);
196 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_7_entry);
198 VEC_POLY_INFO_TABLE(stg_ctoi_ret_F1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* Return adapter for a double result in D1: identical in shape to
 * the F1 template but using ASSIGN_DBL and sizeofW(StgDouble).
 * NOTE(review): some Sp-decrement and FB_/FE_ lines appear to be
 * elided from this chunk. */
201 /* When the returned value is in D1 ... */
202 #define STG_CtoI_RET_D1_Template(label) \
207 bco = ((StgPtr*)Sp)[1]; \
208 Sp -= sizeofW(StgDouble); \
209 ASSIGN_DBL((W_*)Sp, D1); \
211 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgDouble); \
213 ((StgPtr*)Sp)[0] = bco; \
214 JMP_(stg_yield_to_interpreter); \
218 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_entry);
219 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_0_entry);
220 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_1_entry);
221 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_2_entry);
222 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_3_entry);
223 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_4_entry);
224 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_5_entry);
225 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_6_entry);
226 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_7_entry);
228 VEC_POLY_INFO_TABLE(stg_ctoi_ret_D1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* Return adapter for a void result: no value to push, just a tag of
 * 0 (VoidRep) and the BCO, then yield to the scheduler.
 * NOTE(review): the Sp-decrement and FB_/FE_ lines appear to be
 * elided from this chunk. */
231 /* When the returned value is a VoidRep ... */
232 #define STG_CtoI_RET_V_Template(label) \
237 bco = ((StgPtr*)Sp)[1]; \
239 ((StgPtr*)Sp)[0] = 0; /* VoidRep tag */ \
241 ((StgPtr*)Sp)[0] = bco; \
242 JMP_(stg_yield_to_interpreter); \
246 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_entry);
247 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_0_entry);
248 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_1_entry);
249 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_2_entry);
250 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_3_entry);
251 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_4_entry);
252 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_5_entry);
253 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_6_entry);
254 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_7_entry);
256 VEC_POLY_INFO_TABLE(stg_ctoi_ret_V,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
259 /* The other way round: when the interpreter returns a value to
260 compiled code. The stack looks like this:
262 return info table (pushed by compiled code)
263 return value (pushed by interpreter)
265 If the value is ptr-rep'd, the interpreter simply returns to the
266 scheduler, instructing it to ThreadEnterGHC.
268 Otherwise (unboxed return value), we replace the top stack word,
269 which must be the tag, with stg_gc_unbx_r1_info (or f1_info or d1_info),
270 and return to the scheduler, instructing it to ThreadRunGHC.
272 No supporting code needed!
276 /* Entering a BCO. Heave it on the stack and defer to the
/* BCO entry: yield to the scheduler, which hands the thread over to
 * the bytecode interpreter.  NOTE(review): the stack pushes between
 * the STGFUN header and the JMP_, and the closing brace, appear to
 * be elided from this chunk. */
278 INFO_TABLE(stg_BCO_info,stg_BCO_entry,4,0,BCO,,EF_,"BCO","BCO");
279 STGFUN(stg_BCO_entry) {
283 JMP_(stg_yield_to_interpreter);
288 /* -----------------------------------------------------------------------------
289 Entry code for an indirection.
290 -------------------------------------------------------------------------- */
/* Indirection: replace R1 (Node) with the indirectee and enter it. */
292 INFO_TABLE(stg_IND_info,stg_IND_entry,1,0,IND,,EF_,0,0);
293 STGFUN(stg_IND_entry)
296 TICK_ENT_IND(Node); /* tick */
298 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
300 JMP_(ENTRY_CODE(*R1.p));
/* Static indirection (updated CAF): same as stg_IND_entry but the
 * indirectee is read through the StgIndStatic layout. */
304 INFO_TABLE(stg_IND_STATIC_info,stg_IND_STATIC_entry,1,0,IND_STATIC,,EF_,0,0);
305 STGFUN(stg_IND_STATIC_entry)
308 TICK_ENT_IND(Node); /* tick */
309 R1.p = (P_) ((StgIndStatic*)R1.p)->indirectee;
311 JMP_(ENTRY_CODE(*R1.p));
/* Permanent indirection: behaves like IND (enter the indirectee) but
 * exists to support profiling (enters the PAP cost centre) and
 * ticky-ticky statistics.  Under TICKY_TICKY && !PROFILING the
 * closure is retagged back to a plain IND on first entry, so only
 * first entries of each thunk are counted.
 * NOTE(review): several preprocessor lines of this function (e.g.
 * the #if around ENTER_CCS_PAP_CL and the #endif partners of the
 * trailing #if) appear to be elided from this chunk. */
315 INFO_TABLE(stg_IND_PERM_info,stg_IND_PERM_entry,1,1,IND_PERM,,EF_,"IND_PERM","IND_PERM");
316 STGFUN(stg_IND_PERM_entry)
319 /* Don't add INDs to granularity cost */
320 /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
322 #if defined(TICKY_TICKY) && !defined(PROFILING)
323 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
324 TICK_ENT_PERM_IND(R1.p); /* tick */
327 /* Enter PAP cost centre -- lexical scoping only */
328 ENTER_CCS_PAP_CL(R1.cl);
330 /* For ticky-ticky, change the perm_ind to a normal ind on first
331 * entry, so the number of ent_perm_inds is the number of *thunks*
332 * entered again, not the number of subsequent entries.
334 * Since this screws up cost centres, we die if profiling and
335 * ticky_ticky are on at the same time. KSW 1999-01.
340 # error Profiling and ticky-ticky do not mix at present!
341 # endif /* PROFILING */
342 SET_INFO((StgInd*)R1.p,&stg_IND_info);
343 #endif /* TICKY_TICKY */
345 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
347 /* Dont: TICK_ENT_VIA_NODE(); for ticky-ticky; as above */
349 #if defined(TICKY_TICKY) && !defined(PROFILING)
353 JMP_(ENTRY_CODE(*R1.p));
/* Old-generation indirection: same behaviour as stg_IND_entry; the
 * distinct closure type carries the extra word used by the GC. */
357 INFO_TABLE(stg_IND_OLDGEN_info,stg_IND_OLDGEN_entry,1,1,IND_OLDGEN,,EF_,0,0);
358 STGFUN(stg_IND_OLDGEN_entry)
361 TICK_ENT_IND(Node); /* tick */
363 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
365 JMP_(ENTRY_CODE(*R1.p));
/* Old-generation permanent indirection: the IND_OLDGEN analogue of
 * stg_IND_PERM_entry; under TICKY_TICKY && !PROFILING it is retagged
 * back to IND_OLDGEN on first entry.
 * NOTE(review): some preprocessor lines (the #if around
 * ENTER_CCS_PAP_CL and its partners) appear to be elided from this
 * chunk. */
369 INFO_TABLE(stg_IND_OLDGEN_PERM_info,stg_IND_OLDGEN_PERM_entry,1,1,IND_OLDGEN_PERM,,EF_,0,0);
370 STGFUN(stg_IND_OLDGEN_PERM_entry)
373 /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
375 #if defined(TICKY_TICKY) && !defined(PROFILING)
376 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
377 TICK_ENT_PERM_IND(R1.p); /* tick */
380 /* Enter PAP cost centre -- lexical scoping only */
381 ENTER_CCS_PAP_CL(R1.cl);
383 /* see comment in IND_PERM */
386 # error Profiling and ticky-ticky do not mix at present!
387 # endif /* PROFILING */
388 SET_INFO((StgInd*)R1.p,&stg_IND_OLDGEN_info);
389 #endif /* TICKY_TICKY */
391 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
393 JMP_(ENTRY_CODE(*R1.p));
397 /* -----------------------------------------------------------------------------
398 Entry code for a black hole.
400 Entering a black hole normally causes a cyclic data dependency, but
401 in the concurrent world, black holes are synchronization points,
402 and they are turned into blocking queues when there are threads
403 waiting for the evaluation of the closure to finish.
404 -------------------------------------------------------------------------- */
406 /* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
407 * overwritten with an indirection/evacuee/catch. Thus we claim it
408 * has 1 non-pointer word of payload (in addition to the pointer word
409 * for the blocking queue in a BQ), which should be big enough for an
410 * old-generation indirection.
/* Entering a BLACKHOLE: the thunk is already under evaluation, so
 * block the current TSO on it -- link the TSO into the closure's
 * blocking queue, record why we blocked, upgrade the closure to a
 * BLACKHOLE_BQ and yield to the scheduler.
 * NOTE(review): several preprocessor conditionals (the GRAN/PAR and
 * SMP sections) and the FB_/FE_ braces appear to be elided from this
 * chunk; the #if below has no visible #else/#endif partners. */
413 INFO_TABLE(stg_BLACKHOLE_info, stg_BLACKHOLE_entry,0,2,BLACKHOLE,,EF_,"BLACKHOLE","BLACKHOLE");
414 STGFUN(stg_BLACKHOLE_entry)
418 /* Before overwriting TSO_LINK */
419 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
424 bdescr *bd = Bdescr(R1.p);
425 if (bd->back != (bdescr *)BaseReg) {
426 if (bd->gen->no >= 1 || bd->step->no >= 1) {
/* presumably a compare-and-swap claiming the hole by writing a
 * WHITEHOLE over this closure's own info table -- verify CMPXCHG
 * semantics */
427 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
429 EXTFUN_RTS(stg_gc_enter_1_hponly);
430 JMP_(stg_gc_enter_1_hponly);
437 // Put ourselves on the blocking queue for this black hole
438 #if defined(GRAN) || defined(PAR)
439 // in fact, only difference is the type of the end-of-queue marker!
440 CurrentTSO->link = END_BQ_QUEUE;
441 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
443 CurrentTSO->link = END_TSO_QUEUE;
444 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
446 // jot down why and on what closure we are blocked
447 CurrentTSO->why_blocked = BlockedOnBlackHole;
448 CurrentTSO->block_info.closure = R1.cl;
450 // Change the BLACKHOLE into a BLACKHOLE_BQ now that it has waiters
451 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
453 // closure is mutable since something has just been added to its BQ
454 recordMutable((StgMutClosure *)R1.cl);
456 // PAR: dumping of event now done in blockThread -- HWL
458 // stg_gen_block is too heavyweight, use a specialised one
/* Entering a BLACKHOLE_BQ (a black hole that already has a blocking
 * queue): push the current TSO onto the front of the queue, record
 * the blocking reason, and yield to the scheduler.
 * NOTE(review): preprocessor conditionals (SMP/GRAN sections) and
 * the FB_/FE_ braces appear to be elided from this chunk. */
463 INFO_TABLE(stg_BLACKHOLE_BQ_info, stg_BLACKHOLE_BQ_entry,1,1,BLACKHOLE_BQ,,EF_,"BLACKHOLE","BLACKHOLE");
464 STGFUN(stg_BLACKHOLE_BQ_entry)
468 /* Before overwriting TSO_LINK */
469 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
474 bdescr *bd = Bdescr(R1.p);
475 if (bd->back != (bdescr *)BaseReg) {
476 if (bd->gen->no >= 1 || bd->step->no >= 1) {
/* Claim the hole by swapping a WHITEHOLE over its info table.  The
 * expected value must be this closure's *own* info table,
 * stg_BLACKHOLE_BQ_info -- the previous code compared against
 * stg_BLACKHOLE_info, which a closure reaching this entry point
 * cannot have, so the exchange could never succeed. */
477 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_BQ_info, &stg_WHITEHOLE_info);
479 EXTFUN_RTS(stg_gc_enter_1_hponly);
480 JMP_(stg_gc_enter_1_hponly);
488 /* Put ourselves on the blocking queue for this black hole */
489 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
490 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
491 /* jot down why and on what closure we are blocked */
492 CurrentTSO->why_blocked = BlockedOnBlackHole;
493 CurrentTSO->block_info.closure = R1.cl;
495 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
498 /* PAR: dumping of event now done in blockThread -- HWL */
500 /* stg_gen_block is too heavyweight, use a specialised one */
506 Revertible black holes are needed in the parallel world, to handle
507 negative acknowledgements of messages containing updatable closures.
508 The idea is that when the original message is transmitted, the closure
509 is turned into a revertible black hole...an object which acts like a
510 black hole when local threads try to enter it, but which can be reverted
511 back to the original closure if necessary.
513 It's actually a lot like a blocking queue (BQ) entry, because revertible
514 black holes are initially set up with an empty blocking queue.
517 #if defined(PAR) || defined(GRAN)
/* Revertible black hole: acts like a BLACKHOLE_BQ for local threads
 * (queue the TSO and block) but can be reverted to the original
 * closure on a negative acknowledgement -- see the comment above.
 * NOTE(review): the FB_/FE_ braces and some conditional lines appear
 * to be elided from this chunk. */
519 INFO_TABLE(stg_RBH_info, stg_RBH_entry,1,1,RBH,,EF_,0,0);
520 STGFUN(stg_RBH_entry)
524 /* mainly statistics gathering for GranSim simulation */
525 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
528 /* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
529 /* Put ourselves on the blocking queue for this black hole */
530 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
531 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
532 /* jot down why and on what closure we are blocked */
533 CurrentTSO->why_blocked = BlockedOnBlackHole;
534 CurrentTSO->block_info.closure = R1.cl;
536 /* PAR: dumping of event now done in blockThread -- HWL */
538 /* stg_gen_block is too heavyweight, use a specialised one */
/* RBH_Save_N closures hold the words displaced when a closure is
 * turned into an RBH; they are never entered. */
543 INFO_TABLE(stg_RBH_Save_0_info, stg_RBH_Save_0_entry,0,2,CONSTR,,EF_,0,0);
544 NON_ENTERABLE_ENTRY_CODE(RBH_Save_0);
546 INFO_TABLE(stg_RBH_Save_1_info, stg_RBH_Save_1_entry,1,1,CONSTR,,EF_,0,0);
547 NON_ENTERABLE_ENTRY_CODE(RBH_Save_1);
549 INFO_TABLE(stg_RBH_Save_2_info, stg_RBH_Save_2_entry,2,0,CONSTR,,EF_,0,0);
550 NON_ENTERABLE_ENTRY_CODE(RBH_Save_2);
551 #endif /* defined(PAR) || defined(GRAN) */
553 /* identical to BLACKHOLEs except for the infotag */
/* Entering a CAF_BLACKHOLE: as for stg_BLACKHOLE_entry, block the
 * current TSO on the closure and upgrade it to a BLACKHOLE_BQ.
 * NOTE(review): the GRAN/PAR/SMP preprocessor partners and FB_/FE_
 * braces appear to be elided from this chunk. */
554 INFO_TABLE(stg_CAF_BLACKHOLE_info, stg_CAF_BLACKHOLE_entry,0,2,CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
555 STGFUN(stg_CAF_BLACKHOLE_entry)
559 /* mainly statistics gathering for GranSim simulation */
560 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
565 bdescr *bd = Bdescr(R1.p);
566 if (bd->back != (bdescr *)BaseReg) {
/* bd->gen->no, not bd->gen_no: made consistent with the identical
 * tests in stg_BLACKHOLE_entry and stg_BLACKHOLE_BQ_entry above,
 * which reach the generation number through the bdescr's gen
 * pointer. */
567 if (bd->gen->no >= 1 || bd->step->no >= 1) {
568 CMPXCHG(R1.cl->header.info, &stg_CAF_BLACKHOLE_info, &stg_WHITEHOLE_info);
570 EXTFUN_RTS(stg_gc_enter_1_hponly);
571 JMP_(stg_gc_enter_1_hponly);
579 // Put ourselves on the blocking queue for this black hole
580 #if defined(GRAN) || defined(PAR)
581 // in fact, only difference is the type of the end-of-queue marker!
582 CurrentTSO->link = END_BQ_QUEUE;
583 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
585 CurrentTSO->link = END_TSO_QUEUE;
586 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
588 // jot down why and on what closure we are blocked
589 CurrentTSO->why_blocked = BlockedOnBlackHole;
590 CurrentTSO->block_info.closure = R1.cl;
592 // Change the CAF_BLACKHOLE into a BLACKHOLE_BQ
593 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
595 // closure is mutable since something has just been added to its BQ
596 recordMutable((StgMutClosure *)R1.cl);
598 // PAR: dumping of event now done in blockThread -- HWL
600 // stg_gen_block is too heavyweight, use a specialised one
/* Update-check ("SE") black hole: entering one indicates a detected
 * cyclic dependency, so report the address and abort the RTS.
 * NOTE(review): the FB_/FE_ brace lines appear to be elided from
 * this chunk. */
606 INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,EF_,0,0);
607 STGFUN(stg_SE_BLACKHOLE_entry)
610 STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1.p);
611 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* CAF variant of the SE black hole: report and abort, as above.
 * Fixed: the INFO_TABLE named the info table and entry code without
 * the stg_ prefix (SE_CAF_BLACKHOLE_info / SE_CAF_BLACKHOLE_entry),
 * mismatching both the STGFUN defined immediately below
 * (stg_SE_CAF_BLACKHOLE_entry) and the naming of every other
 * INFO_TABLE in this file. */
615 INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, stg_SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,EF_,0,0);
616 STGFUN(stg_SE_CAF_BLACKHOLE_entry)
619 STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1.p);
620 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* WHITEHOLE: a temporary "locked" state installed over a closure
 * (see the CMPXCHG calls above).  Entering one simply retries by
 * re-entering the closure via its (possibly updated) info table. */
626 INFO_TABLE(stg_WHITEHOLE_info, stg_WHITEHOLE_entry,0,2,CONSTR_NOCAF_STATIC,,EF_,0,0);
627 STGFUN(stg_WHITEHOLE_entry)
630 JMP_(GET_ENTRY(R1.cl));
635 /* -----------------------------------------------------------------------------
636 Some static info tables for things that don't get entered, and
637 therefore don't need entry code (i.e. boxed but unpointed objects)
638 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
639 -------------------------------------------------------------------------- */
/* Thread State Objects are never entered. */
641 INFO_TABLE(stg_TSO_info, stg_TSO_entry, 0,0,TSO,,EF_,"TSO","TSO");
642 NON_ENTERABLE_ENTRY_CODE(TSO);
644 /* -----------------------------------------------------------------------------
645 Evacuees are left behind by the garbage collector. Any attempt to enter
647 -------------------------------------------------------------------------- */
649 INFO_TABLE(stg_EVACUATED_info,stg_EVACUATED_entry,1,0,EVACUATED,,EF_,0,0);
650 NON_ENTERABLE_ENTRY_CODE(EVACUATED);
652 /* -----------------------------------------------------------------------------
655 Live weak pointers have a special closure type. Dead ones are just
656 nullary constructors (although they live on the heap - we overwrite
657 live weak pointers with dead ones).
658 -------------------------------------------------------------------------- */
660 INFO_TABLE(stg_WEAK_info,stg_WEAK_entry,0,4,WEAK,,EF_,"WEAK","WEAK");
661 NON_ENTERABLE_ENTRY_CODE(WEAK);
663 INFO_TABLE_CONSTR(stg_DEAD_WEAK_info,stg_DEAD_WEAK_entry,0,1,0,CONSTR,,EF_,"DEAD_WEAK","DEAD_WEAK");
664 NON_ENTERABLE_ENTRY_CODE(DEAD_WEAK);
666 /* -----------------------------------------------------------------------------
669 This is a static nullary constructor (like []) that we use to mark an empty
670 finalizer in a weak pointer object.
671 -------------------------------------------------------------------------- */
/* NO_FINALIZER: static nullary constructor marking an absent weak-
 * pointer finalizer; never entered. */
673 INFO_TABLE_CONSTR(stg_NO_FINALIZER_info,stg_NO_FINALIZER_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
674 NON_ENTERABLE_ENTRY_CODE(NO_FINALIZER);
676 SET_STATIC_HDR(stg_NO_FINALIZER_closure,stg_NO_FINALIZER_info,0/*CC*/,,EI_)
679 /* -----------------------------------------------------------------------------
680 Foreign Objects are unlifted and therefore never entered.
681 -------------------------------------------------------------------------- */
683 INFO_TABLE(stg_FOREIGN_info,stg_FOREIGN_entry,0,1,FOREIGN,,EF_,"FOREIGN","FOREIGN");
684 NON_ENTERABLE_ENTRY_CODE(FOREIGN);
686 /* -----------------------------------------------------------------------------
687 Stable Names are unlifted too.
688 -------------------------------------------------------------------------- */
690 INFO_TABLE(stg_STABLE_NAME_info,stg_STABLE_NAME_entry,0,1,STABLE_NAME,,EF_,"STABLE_NAME","STABLE_NAME");
691 NON_ENTERABLE_ENTRY_CODE(STABLE_NAME);
693 /* -----------------------------------------------------------------------------
696 There are two kinds of these: full and empty. We need an info table
697 and entry code for each type.
698 -------------------------------------------------------------------------- */
700 INFO_TABLE(stg_FULL_MVAR_info,stg_FULL_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
701 NON_ENTERABLE_ENTRY_CODE(FULL_MVAR);
703 INFO_TABLE(stg_EMPTY_MVAR_info,stg_EMPTY_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
704 NON_ENTERABLE_ENTRY_CODE(EMPTY_MVAR);
706 /* -----------------------------------------------------------------------------
709 This is a static nullary constructor (like []) that we use to mark the
710 end of a linked TSO queue.
711 -------------------------------------------------------------------------- */
/* END_TSO_QUEUE: static nullary constructor terminating linked TSO
 * queues; never entered. */
713 INFO_TABLE_CONSTR(stg_END_TSO_QUEUE_info,stg_END_TSO_QUEUE_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
714 NON_ENTERABLE_ENTRY_CODE(END_TSO_QUEUE);
716 SET_STATIC_HDR(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE_info,0/*CC*/,,EI_)
/* END_MUT_LIST / MUT_CONS: terminator and cons cell of the GC's
 * mutable lists (see the comment above); never entered. */
727 INFO_TABLE_CONSTR(stg_END_MUT_LIST_info,stg_END_MUT_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
728 NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST);
730 SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,EI_)
733 INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_CONS, , EF_, 0, 0);
734 NON_ENTERABLE_ENTRY_CODE(MUT_CONS);
736 /* -----------------------------------------------------------------------------
738 -------------------------------------------------------------------------- */
/* END_EXCEPTION_LIST / EXCEPTION_CONS: terminator and cons cell for
 * per-TSO blocked-exception lists; never entered. */
740 INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST_info,stg_END_EXCEPTION_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
741 NON_ENTERABLE_ENTRY_CODE(END_EXCEPTION_LIST);
743 SET_STATIC_HDR(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST_info,0/*CC*/,,EI_)
746 INFO_TABLE(stg_EXCEPTION_CONS_info, stg_EXCEPTION_CONS_entry, 1, 1, CONSTR, , EF_, 0, 0);
747 NON_ENTERABLE_ENTRY_CODE(EXCEPTION_CONS);
749 /* -----------------------------------------------------------------------------
752 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
753 pointers (StgArrPtrs). They all have a similar layout:
755 ___________________________
756 | Info | No. of | data....
758 ---------------------------
760 These are *unpointed* objects: i.e. they cannot be entered.
762 -------------------------------------------------------------------------- */
/* ArrayInfo(type): generate the info table for an array closure
 * type; arrays are unpointed and never entered, so each one is
 * paired with NON_ENTERABLE_ENTRY_CODE below. */
764 #define ArrayInfo(type) \
765 INFO_TABLE(stg_##type##_info, stg_##type##_entry, 0, 0, type, , EF_,"" # type "","" # type "");
767 ArrayInfo(ARR_WORDS);
768 NON_ENTERABLE_ENTRY_CODE(ARR_WORDS);
769 ArrayInfo(MUT_ARR_PTRS);
770 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS);
771 ArrayInfo(MUT_ARR_PTRS_FROZEN);
772 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS_FROZEN);
776 /* -----------------------------------------------------------------------------
778 -------------------------------------------------------------------------- */
/* Mutable variables (MutVar# / IORef) are never entered. */
780 INFO_TABLE(stg_MUT_VAR_info, stg_MUT_VAR_entry, 1, 1, MUT_VAR, , EF_, "MUT_VAR", "MUT_VAR");
781 NON_ENTERABLE_ENTRY_CODE(MUT_VAR);
783 /* -----------------------------------------------------------------------------
784 Standard Error Entry.
786 This is used for filling in vector-table entries that can never happen,
788 -------------------------------------------------------------------------- */
789 /* No longer used; we use NULL, because a) it never happens, right? and b)
790 Windows doesn't like DLL entry points being used as static initialisers
791 STGFUN(stg_error_entry) \
794 DUMP_ERRMSG("fatal: stg_error_entry"); \
795 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
800 /* -----------------------------------------------------------------------------
803 Entering this closure will just return to the address on the top of the
804 stack. Useful for getting a thread in a canonical form where we can
805 just enter the top stack word to start the thread. (see deleteThread)
806 * -------------------------------------------------------------------------- */
/* stg_dummy_ret: entering this closure just returns through the
 * address on top of the stack (used to put a thread into a canonical
 * "enter top-of-stack" form; see the comment above).
 * NOTE(review): the line binding ret_addr from the stack, and the
 * FB_/FE_ braces, appear to be elided from this chunk. */
808 INFO_TABLE(stg_dummy_ret_info, stg_dummy_ret_entry, 0, 0, CONSTR_NOCAF_STATIC, , EF_, 0, 0);
809 STGFUN(stg_dummy_ret_entry)
815 JMP_(ENTRY_CODE(ret_addr));
818 SET_STATIC_HDR(stg_dummy_ret_closure,stg_dummy_ret_info,CCS_DONT_CARE,,EI_)
821 /* -----------------------------------------------------------------------------
822 Strict IO application - performing an IO action and entering its result.
824 rts_evalIO() lets you perform Haskell IO actions from outside of Haskell-land,
825 returning back to you their result. Want this result to be evaluated to WHNF
826 by that time, so that we can easily get at the int/char/whatever using the
827 various get{Ty} functions provided by the RTS API.
829 forceIO takes care of this, performing the IO action and entering the
830 results that come back.
832 * -------------------------------------------------------------------------- */
/* Return frame pushed by forceIO: when the IO action returns its
 * result, pop the frame and enter the result closure so it reaches
 * WHNF before control returns through the RTS API.
 * NOTE(review): two variants of this frame appear below; the
 * preprocessor conditionals selecting between them (presumably on
 * whether R1 is pinned to a machine register -- verify) are elided
 * from this chunk, so as written the second definition would
 * redefine the first.  Lines loading R1 from the stack also appear
 * to be elided. */
835 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
836 STGFUN(stg_forceIO_ret_entry)
840 Sp -= sizeofW(StgSeqFrame);
842 JMP_(GET_ENTRY(R1.cl));
845 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
846 STGFUN(stg_forceIO_ret_entry)
850 rval = (StgClosure *)Sp[0];
852 Sp -= sizeofW(StgSeqFrame);
855 JMP_(GET_ENTRY(R1.cl));
/* stg_forceIO: wrapper used by rts_evalIO (see the comment above).
 * It replaces the IO action on the stack with the stg_forceIO_ret
 * return frame, pushes the RealWorld# token, and enters the action;
 * the frame then enters the action's result to force it to WHNF.
 * NOTE(review): the Sp decrement between the two Sp[0] stores, and
 * the FB_/FE_ braces, appear to be elided from this chunk -- without
 * it the second store would overwrite the first. */
859 INFO_TABLE(stg_forceIO_info,stg_forceIO_entry,1,0,FUN_STATIC,,EF_,0,0);
860 FN_(stg_forceIO_entry)
863 /* Sp[0] contains the IO action we want to perform */
865 /* Replace it with the return continuation that enters the result. */
866 Sp[0] = (W_)&stg_forceIO_ret_info;
868 /* Push the RealWorld# tag and enter */
869 Sp[0] =(W_)REALWORLD_TAG;
870 JMP_(GET_ENTRY(R1.cl));
873 SET_STATIC_HDR(stg_forceIO_closure,stg_forceIO_info,CCS_DONT_CARE,,EI_)
877 /* -----------------------------------------------------------------------------
878 CHARLIKE and INTLIKE closures.
880 These are static representations of Chars and small Ints, so that
881 we can remove dynamic Chars and Ints during garbage collection and
882 replace them with references to the static objects.
883 -------------------------------------------------------------------------- */
885 #if defined(INTERPRETER) || defined(ENABLE_WIN32_DLL_SUPPORT)
887 * When sticking the RTS in a DLL, we delay populating the
888 * Charlike and Intlike tables until load-time, which is only
889 * when we've got the real addresses to the C# and I# closures.
/* In the DLL/interpreter case the info tables start out blank and
 * are filled in at load time; otherwise the C# and I# info tables
 * are referenced directly.  NOTE(review): the #else/#endif of this
 * conditional appear to be elided from this chunk -- the second pair
 * of #defines below belongs to the non-DLL branch. */
892 static INFO_TBL_CONST StgInfoTable czh_static_info;
893 static INFO_TBL_CONST StgInfoTable izh_static_info;
894 #define Char_hash_static_info czh_static_info
895 #define Int_hash_static_info izh_static_info
897 #define Char_hash_static_info PrelBase_Czh_static_info
898 #define Int_hash_static_info PrelBase_Izh_static_info
/* CHARLIKE_HDR(n)/INTLIKE_HDR(n): build one static C#/I# closure
 * holding payload n.  NOTE(review): the payload lines of these
 * macros, and most of the table entries plus the closing braces of
 * both arrays, appear to be elided from this chunk. */
901 #define CHARLIKE_HDR(n) \
903 STATIC_HDR(Char_hash_static_info, /* C# */ \
908 #define INTLIKE_HDR(n) \
910 STATIC_HDR(Int_hash_static_info, /* I# */ \
915 /* put these in the *data* section, since the garbage collector relies
916 * on the fact that static closures live in the data section.
919 /* end the name with _closure, to convince the mangler this is a closure */
921 StgIntCharlikeClosure stg_CHARLIKE_closure[] = {
1180 StgIntCharlikeClosure stg_INTLIKE_closure[] = {
1181 INTLIKE_HDR(-16), /* MIN_INTLIKE == -16 */
1213 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */