1 /* -----------------------------------------------------------------------------
2 * $Id: StgMiscClosures.hc,v 1.70 2001/11/22 14:25:12 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
6 * Entry code for various built-in closure types.
8 * ---------------------------------------------------------------------------*/
14 #include "StgMiscClosures.h"
16 #include "StoragePriv.h"
17 #include "Profiling.h"
21 #if defined(GRAN) || defined(PAR)
22 # include "GranSimRts.h" /* for DumpRawGranEvent */
23 # include "StgRun.h" /* for StgReturn and register saving */
/* ToDo: make the printing of panics more win32-friendly, i.e.,
 * pop up some lovely message boxes (as well).
 */
/* DUMP_ERRMSG(msg): print a fatal-error message on stderr from STG code
   (STGCALL2 saves/restores the STG registers around the C call). */
#define DUMP_ERRMSG(msg) STGCALL2(fprintf,stderr,msg)

/* Template for the entry code of non-enterable closures: entering one is
   always an RTS bug, so report it and shut the program down.
   NOTE(review): the FB_/FE_ function brackets of this macro are not
   visible in this listing -- verify against the original file. */
#define NON_ENTERABLE_ENTRY_CODE(type) \
STGFUN(stg_##type##_entry) \
DUMP_ERRMSG(#type " object entered!\n"); \
STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
50 /* -----------------------------------------------------------------------------
51 Support for the bytecode interpreter.
52 -------------------------------------------------------------------------- */
/* 9 bits of return code for constructors created by the interpreter. */
/* Direct-return entry for an interpreter-built constructor: R1 points at
   the constructor; propagate its info-table tag and return through the
   frame on top of the stack. */
FN_(stg_interp_constr_entry)
/* R1 points at the constructor */
/* STGCALL2(fprintf,stderr,"stg_interp_constr_entry (direct return)!\n"); */
/* Pointless, since SET_TAG doesn't do anything */
SET_TAG( GET_TAG(GET_INFO(R1.cl)));
JMP_(ENTRY_CODE((P_)(*Sp)));
66 FN_(stg_interp_constr1_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),0)); FE_ }
67 FN_(stg_interp_constr2_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),1)); FE_ }
68 FN_(stg_interp_constr3_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),2)); FE_ }
69 FN_(stg_interp_constr4_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),3)); FE_ }
70 FN_(stg_interp_constr5_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),4)); FE_ }
71 FN_(stg_interp_constr6_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),5)); FE_ }
72 FN_(stg_interp_constr7_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),6)); FE_ }
73 FN_(stg_interp_constr8_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),7)); FE_ }
75 /* Some info tables to be used when compiled code returns a value to
76 the interpreter, i.e. the interpreter pushes one of these onto the
77 stack before entering a value. What the code does is to
78 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
79 the interpreter's convention (returned value is on top of stack),
80 and then cause the scheduler to enter the interpreter.
82 On entry, the stack (growing down) looks like this:
84 ptr to BCO holding return continuation
85 ptr to one of these info tables.
87 The info table code, both direct and vectored, must:
88 * push R1/F1/D1 on the stack, and its tag if necessary
89 * push the BCO (so it's now on the stack twice)
90 * Yield, ie, go to the scheduler.
92 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
93 directly to the bytecode interpreter. That pops the top element
94 (the BCO, containing the return continuation), and interprets it.
95 Net result: return continuation gets interpreted, with the
99 ptr to the info table just jumped thru
102 which is just what we want -- the "standard" return layout for the
105 Don't ask me how unboxed tuple returns are supposed to work. We
106 haven't got a good story about that yet.
/* When the returned value is in R1 and it is a pointer, so it doesn't
   need a tag: grab the BCO from Sp[1], push R1.p, push the BCO again,
   then yield to the scheduler, which runs the interpreter on the BCO
   (see the commentary above).
   NOTE(review): the FB_/FE_ brackets and the Sp decrements between the
   stores appear to be missing from this listing -- verify against the
   original file. */
#define STG_CtoI_RET_R1p_Template(label) \
bco = ((StgPtr*)Sp)[1]; \
((StgPtr*)Sp)[0] = R1.p; \
((StgPtr*)Sp)[0] = bco; \
JMP_(stg_yield_to_interpreter); \

/* One direct-return entry plus one entry per vectored-return slot. */
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_0_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_1_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_2_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_3_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_4_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_5_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_6_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_7_entry);
VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1p,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* When the returned value is in R1 and it isn't a pointer: push the
   unboxed value in R1.i plus a tag word (1), then the BCO, and yield to
   the scheduler / interpreter.
   NOTE(review): the FB_/FE_ brackets and the Sp decrements between the
   stores appear to be missing from this listing. */
#define STG_CtoI_RET_R1n_Template(label) \
bco = ((StgPtr*)Sp)[1]; \
((StgPtr*)Sp)[0] = (StgPtr)R1.i; \
((StgPtr*)Sp)[0] = (StgPtr)1; /* tag */ \
((StgPtr*)Sp)[0] = bco; \
JMP_(stg_yield_to_interpreter); \

/* One direct-return entry plus one entry per vectored-return slot. */
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_0_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_1_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_2_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_3_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_4_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_5_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_6_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_7_entry);
VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1n,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* When the returned value is in F1: make room for an StgFloat on the
   stack, store F1 there, push the size word as the tag, then the BCO,
   and yield to the scheduler / interpreter.
   NOTE(review): the FB_/FE_ brackets and some Sp decrements appear to be
   missing from this listing. */
#define STG_CtoI_RET_F1_Template(label) \
bco = ((StgPtr*)Sp)[1]; \
Sp -= sizeofW(StgFloat); \
ASSIGN_FLT((W_*)Sp, F1); \
((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgFloat); \
((StgPtr*)Sp)[0] = bco; \
JMP_(stg_yield_to_interpreter); \

/* One direct-return entry plus one entry per vectored-return slot. */
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_0_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_1_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_2_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_3_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_4_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_5_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_6_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_7_entry);
VEC_POLY_INFO_TABLE(stg_ctoi_ret_F1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* When the returned value is in D1: make room for an StgDouble on the
   stack, store D1 there, push the size word as the tag, then the BCO,
   and yield to the scheduler / interpreter.
   NOTE(review): the FB_/FE_ brackets and some Sp decrements appear to be
   missing from this listing. */
#define STG_CtoI_RET_D1_Template(label) \
bco = ((StgPtr*)Sp)[1]; \
Sp -= sizeofW(StgDouble); \
ASSIGN_DBL((W_*)Sp, D1); \
((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgDouble); \
((StgPtr*)Sp)[0] = bco; \
JMP_(stg_yield_to_interpreter); \

/* One direct-return entry plus one entry per vectored-return slot. */
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_0_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_1_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_2_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_3_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_4_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_5_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_6_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_7_entry);
VEC_POLY_INFO_TABLE(stg_ctoi_ret_D1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* When the returned value is a VoidRep: there is no payload, so just
   push the 0 tag and the BCO, and yield to the scheduler / interpreter.
   NOTE(review): the FB_/FE_ brackets and some Sp decrements appear to be
   missing from this listing. */
#define STG_CtoI_RET_V_Template(label) \
bco = ((StgPtr*)Sp)[1]; \
((StgPtr*)Sp)[0] = 0; /* VoidRep tag */ \
((StgPtr*)Sp)[0] = bco; \
JMP_(stg_yield_to_interpreter); \

/* One direct-return entry plus one entry per vectored-return slot. */
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_0_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_1_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_2_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_3_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_4_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_5_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_6_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_7_entry);
VEC_POLY_INFO_TABLE(stg_ctoi_ret_V,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
258 /* The other way round: when the interpreter returns a value to
259 compiled code. The stack looks like this:
261 return info table (pushed by compiled code)
262 return value (pushed by interpreter)
264 If the value is ptr-rep'd, the interpreter simply returns to the
265 scheduler, instructing it to ThreadEnterGHC.
267 Otherwise (unboxed return value), we replace the top stack word,
268 which must be the tag, with stg_gc_unbx_r1_info (or f1_info or d1_info),
269 and return to the scheduler, instructing it to ThreadRunGHC.
271 No supporting code needed!
/* Entering a BCO. Heave it on the stack and defer to the scheduler,
   which hands it to the bytecode interpreter. */
INFO_TABLE(stg_BCO_info,stg_BCO_entry,4,0,BCO,,EF_,"BCO","BCO");
STGFUN(stg_BCO_entry) {
/* NOTE(review): the statements that push the BCO onto the stack are not
   visible in this listing -- verify against the original file. */
JMP_(stg_yield_to_interpreter);
287 /* -----------------------------------------------------------------------------
288 Entry code for an indirection.
289 -------------------------------------------------------------------------- */
291 INFO_TABLE(stg_IND_info,stg_IND_entry,1,0,IND,,EF_,"IND","IND");
292 STGFUN(stg_IND_entry)
295 TICK_ENT_IND(Node); /* tick */
296 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
298 JMP_(ENTRY_CODE(*R1.p));
/* Static indirection (e.g. an updated CAF): same behaviour as stg_IND,
   but with the IND_STATIC closure type and layout. */
INFO_TABLE(stg_IND_STATIC_info,stg_IND_STATIC_entry,1,0,IND_STATIC,,EF_,"IND_STATIC","IND_STATIC");
STGFUN(stg_IND_STATIC_entry)
TICK_ENT_IND(Node); /* tick */
R1.p = (P_) ((StgIndStatic*)R1.p)->indirectee;
/* Enter the closure R1 now points at. */
JMP_(ENTRY_CODE(*R1.p));
/* "Permanent" indirection, kept around to help profiling/ticky-ticky:
   behaves like stg_IND (follow the indirectee and enter it) but with
   extra bookkeeping.
   NOTE(review): several preprocessor lines (e.g. the #if defined(PROFILING)
   guard around the # error, and the #endif matching the second
   TICKY_TICKY conditional) are missing from this listing -- verify
   against the original file. */
INFO_TABLE(stg_IND_PERM_info,stg_IND_PERM_entry,1,1,IND_PERM,,EF_,"IND_PERM","IND_PERM");
STGFUN(stg_IND_PERM_entry)
/* Don't add INDs to granularity cost */
/* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
#if defined(TICKY_TICKY) && !defined(PROFILING)
/* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
TICK_ENT_PERM_IND(R1.p); /* tick */
LDV_ENTER((StgInd *)R1.p);
/* Enter PAP cost centre -- lexical scoping only */
ENTER_CCS_PAP_CL(R1.cl);
/* For ticky-ticky, change the perm_ind to a normal ind on first
 * entry, so the number of ent_perm_inds is the number of *thunks*
 * entered again, not the number of subsequent entries.
 * Since this screws up cost centres, we die if profiling and
 * ticky_ticky are on at the same time. KSW 1999-01.
 */
# error Profiling and ticky-ticky do not mix at present!
# endif /* PROFILING */
SET_INFO((StgInd*)R1.p,&stg_IND_info);
#endif /* TICKY_TICKY */
R1.p = (P_) ((StgInd*)R1.p)->indirectee;
/* Dont: TICK_ENT_VIA_NODE(); for ticky-ticky; as above */
#if defined(TICKY_TICKY) && !defined(PROFILING)
JMP_(ENTRY_CODE(*R1.p));
/* Old-generation indirection (carries a mut_link word): same behaviour
   as stg_IND -- follow the indirectee and enter it. */
INFO_TABLE(stg_IND_OLDGEN_info,stg_IND_OLDGEN_entry,1,1,IND_OLDGEN,,EF_,"IND_OLDGEN","IND_OLDGEN");
STGFUN(stg_IND_OLDGEN_entry)
TICK_ENT_IND(Node); /* tick */
R1.p = (P_) ((StgInd*)R1.p)->indirectee;
/* Enter the closure R1 now points at. */
JMP_(ENTRY_CODE(*R1.p));
/* Old-generation counterpart of stg_IND_PERM: permanent indirection
   with profiling/ticky-ticky bookkeeping.
   NOTE(review): as in stg_IND_PERM, the #if defined(PROFILING) guard
   around the # error below is missing from this listing. */
INFO_TABLE(stg_IND_OLDGEN_PERM_info,stg_IND_OLDGEN_PERM_entry,1,1,IND_OLDGEN_PERM,,EF_,"IND_OLDGEN_PERM","IND_OLDGEN_PERM");
STGFUN(stg_IND_OLDGEN_PERM_entry)
/* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
#if defined(TICKY_TICKY) && !defined(PROFILING)
/* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
TICK_ENT_PERM_IND(R1.p); /* tick */
LDV_ENTER((StgInd *)R1.p);
/* Enter PAP cost centre -- lexical scoping only */
ENTER_CCS_PAP_CL(R1.cl);
/* see comment in IND_PERM */
# error Profiling and ticky-ticky do not mix at present!
# endif /* PROFILING */
SET_INFO((StgInd*)R1.p,&stg_IND_OLDGEN_info);
#endif /* TICKY_TICKY */
R1.p = (P_) ((StgInd*)R1.p)->indirectee;
/* Enter the closure R1 now points at. */
JMP_(ENTRY_CODE(*R1.p));
398 /* -----------------------------------------------------------------------------
399 Entry code for a black hole.
401 Entering a black hole normally causes a cyclic data dependency, but
402 in the concurrent world, black holes are synchronization points,
403 and they are turned into blocking queues when there are threads
404 waiting for the evaluation of the closure to finish.
405 -------------------------------------------------------------------------- */
/* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
 * overwritten with an indirection/evacuee/catch. Thus we claim it
 * has 1 non-pointer word of payload (in addition to the pointer word
 * for the blocking queue in a BQ), which should be big enough for an
 * old-generation indirection.
 */
/* Entering a black hole: block the current TSO on it and turn the
   BLACKHOLE into a BLACKHOLE_BQ carrying the blocking queue.
   NOTE(review): the #if/#else/#endif lines guarding the GRAN/PAR, SMP
   and PROFILING sections are missing from this listing -- verify
   against the original file. */
INFO_TABLE(stg_BLACKHOLE_info, stg_BLACKHOLE_entry,0,2,BLACKHOLE,,EF_,"BLACKHOLE","BLACKHOLE");
STGFUN(stg_BLACKHOLE_entry)
/* Before overwriting TSO_LINK */
STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* NOTE(review): presumably the SMP path -- try to claim the hole by
   atomically swapping in a WHITEHOLE, retrying via the GC entry point
   on failure; confirm against the original #ifdef structure. */
bdescr *bd = Bdescr(R1.p);
if (bd->back != (bdescr *)BaseReg) {
if (bd->gen->no >= 1 || bd->step->no >= 1) {
CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
EXTFUN_RTS(stg_gc_enter_1_hponly);
JMP_(stg_gc_enter_1_hponly);
// Actually this is not necessary because R1.p is about to be destroyed.
LDV_ENTER((StgClosure *)R1.p);
/* Put ourselves on the blocking queue for this black hole */
#if defined(GRAN) || defined(PAR)
// in fact, only difference is the type of the end-of-queue marker!
CurrentTSO->link = END_BQ_QUEUE;
((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
CurrentTSO->link = END_TSO_QUEUE;
((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
// jot down why and on what closure we are blocked
CurrentTSO->why_blocked = BlockedOnBlackHole;
CurrentTSO->block_info.closure = R1.cl;
/* Change the BLACKHOLE into a BLACKHOLE_BQ */
// The size remains the same, so we call LDV_recordDead() - no need to fill slop.
LDV_recordDead((StgClosure *)R1.p, BLACKHOLE_sizeW());
// Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
LDV_recordCreate((StgClosure *)R1.p);
// closure is mutable since something has just been added to its BQ
recordMutable((StgMutClosure *)R1.cl);
// PAR: dumping of event now done in blockThread -- HWL
// stg_gen_block is too heavyweight, use a specialised one
/* Entering a black hole that already has a blocking queue: prepend the
   current TSO to the queue and block. (Profiling description string is
   "BLACKHOLE", same as the plain black hole.)
   NOTE(review): the #if/#else/#endif lines guarding the GRAN/PAR, SMP
   and PROFILING sections are missing from this listing. */
INFO_TABLE(stg_BLACKHOLE_BQ_info, stg_BLACKHOLE_BQ_entry,1,1,BLACKHOLE_BQ,,EF_,"BLACKHOLE","BLACKHOLE");
STGFUN(stg_BLACKHOLE_BQ_entry)
/* Before overwriting TSO_LINK */
STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* NOTE(review): presumably the SMP claim-the-hole path, as in
   stg_BLACKHOLE_entry above -- confirm. */
bdescr *bd = Bdescr(R1.p);
if (bd->back != (bdescr *)BaseReg) {
if (bd->gen->no >= 1 || bd->step->no >= 1) {
CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
EXTFUN_RTS(stg_gc_enter_1_hponly);
JMP_(stg_gc_enter_1_hponly);
LDV_ENTER((StgClosure *)R1.p);
/* Put ourselves on the blocking queue for this black hole */
CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
/* jot down why and on what closure we are blocked */
CurrentTSO->why_blocked = BlockedOnBlackHole;
CurrentTSO->block_info.closure = R1.cl;
((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
/* PAR: dumping of event now done in blockThread -- HWL */
/* stg_gen_block is too heavyweight, use a specialised one */
522 Revertible black holes are needed in the parallel world, to handle
523 negative acknowledgements of messages containing updatable closures.
524 The idea is that when the original message is transmitted, the closure
525 is turned into a revertible black hole...an object which acts like a
526 black hole when local threads try to enter it, but which can be reverted
527 back to the original closure if necessary.
529 It's actually a lot like a blocking queue (BQ) entry, because revertible
530 black holes are initially set up with an empty blocking queue.
#if defined(PAR) || defined(GRAN)

/* Revertible black hole (RBH): blocks entering threads like a black
   hole, but can be reverted to the original closure on a negative
   acknowledgement (see the commentary above). */
INFO_TABLE(stg_RBH_info, stg_RBH_entry,1,1,RBH,,EF_,"RBH","RBH");
STGFUN(stg_RBH_entry)
/* mainly statistics gathering for GranSim simulation */
STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
/* Put ourselves on the blocking queue for this black hole */
CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
/* jot down why and on what closure we are blocked */
CurrentTSO->why_blocked = BlockedOnBlackHole;
CurrentTSO->block_info.closure = R1.cl;
/* PAR: dumping of event now done in blockThread -- HWL */
/* stg_gen_block is too heavyweight, use a specialised one */

/* RBH_Save_N closures: non-enterable constructors used by the RBH
   machinery (presumably to save the words displaced when a closure is
   turned into an RBH -- confirm against the RBH code). */
INFO_TABLE(stg_RBH_Save_0_info, stg_RBH_Save_0_entry,0,2,CONSTR,,EF_,"RBH_Save_0","RBH_Save_0");
NON_ENTERABLE_ENTRY_CODE(RBH_Save_0);
INFO_TABLE(stg_RBH_Save_1_info, stg_RBH_Save_1_entry,1,1,CONSTR,,EF_,"RBH_Save_1","RBH_Save_1");
NON_ENTERABLE_ENTRY_CODE(RBH_Save_1);
INFO_TABLE(stg_RBH_Save_2_info, stg_RBH_Save_2_entry,2,0,CONSTR,,EF_,"RBH_Save_2","RBH_Save_2");
NON_ENTERABLE_ENTRY_CODE(RBH_Save_2);
#endif /* defined(PAR) || defined(GRAN) */
/* identical to BLACKHOLEs except for the infotag */
/* Entering a CAF black hole: same protocol as stg_BLACKHOLE_entry, but
   ends up as a BLACKHOLE_BQ whose queue holds the blocked TSOs.
   NOTE(review): the #if/#else/#endif lines guarding the GRAN/PAR and
   SMP sections are missing from this listing.
   NOTE(review): this block tests bd->gen_no while stg_BLACKHOLE_entry
   and stg_BLACKHOLE_BQ_entry test bd->gen->no -- only one spelling can
   match the bdescr definition; confirm which and make them agree. */
INFO_TABLE(stg_CAF_BLACKHOLE_info, stg_CAF_BLACKHOLE_entry,0,2,CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
STGFUN(stg_CAF_BLACKHOLE_entry)
/* mainly statistics gathering for GranSim simulation */
STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
bdescr *bd = Bdescr(R1.p);
if (bd->back != (bdescr *)BaseReg) {
if (bd->gen_no >= 1 || bd->step->no >= 1) {
CMPXCHG(R1.cl->header.info, &stg_CAF_BLACKHOLE_info, &stg_WHITEHOLE_info);
EXTFUN_RTS(stg_gc_enter_1_hponly);
JMP_(stg_gc_enter_1_hponly);
LDV_ENTER((StgClosure *)R1.p);
// Put ourselves on the blocking queue for this black hole
#if defined(GRAN) || defined(PAR)
// in fact, only difference is the type of the end-of-queue marker!
CurrentTSO->link = END_BQ_QUEUE;
((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
CurrentTSO->link = END_TSO_QUEUE;
((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
// jot down why and on what closure we are blocked
CurrentTSO->why_blocked = BlockedOnBlackHole;
CurrentTSO->block_info.closure = R1.cl;
// Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC
((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
// closure is mutable since something has just been added to its BQ
recordMutable((StgMutClosure *)R1.cl);
// PAR: dumping of event now done in blockThread -- HWL
// stg_gen_block is too heavyweight, use a specialised one
/* SE_BLACKHOLE: entering one is always an error -- report the closure
   address on stderr and shut the program down. */
INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,EF_,"SE_BLACKHOLE","SE_BLACKHOLE");
STGFUN(stg_SE_BLACKHOLE_entry)
STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1.p);
STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
632 INFO_TABLE(SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
633 STGFUN(stg_SE_CAF_BLACKHOLE_entry)
636 STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1.p);
637 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* WHITEHOLE: presumably the temporary info table CMPXCHG'd into a
   closure while it is being claimed (see the black-hole entry code
   above) -- entering it simply re-enters the closure, spinning until
   the real info table is restored; confirm against the SMP code. */
INFO_TABLE(stg_WHITEHOLE_info, stg_WHITEHOLE_entry,0,2,CONSTR_NOCAF_STATIC,,EF_,"WHITEHOLE","WHITEHOLE");
STGFUN(stg_WHITEHOLE_entry)
JMP_(GET_ENTRY(R1.cl));
652 /* -----------------------------------------------------------------------------
653 Some static info tables for things that don't get entered, and
654 therefore don't need entry code (i.e. boxed but unpointed objects)
655 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
656 -------------------------------------------------------------------------- */
/* Thread State Objects are never entered. */
INFO_TABLE(stg_TSO_info, stg_TSO_entry, 0,0,TSO,,EF_,"TSO","TSO");
NON_ENTERABLE_ENTRY_CODE(TSO);

/* -----------------------------------------------------------------------------
   Evacuees are left behind by the garbage collector. Any attempt to enter
   one is an RTS bug.
   -------------------------------------------------------------------------- */

INFO_TABLE(stg_EVACUATED_info,stg_EVACUATED_entry,1,0,EVACUATED,,EF_,"EVACUATED","EVACUATED");
NON_ENTERABLE_ENTRY_CODE(EVACUATED);

/* -----------------------------------------------------------------------------
   Live weak pointers have a special closure type. Dead ones are just
   nullary constructors (although they live on the heap - we overwrite
   live weak pointers with dead ones).
   -------------------------------------------------------------------------- */

INFO_TABLE(stg_WEAK_info,stg_WEAK_entry,0,4,WEAK,,EF_,"WEAK","WEAK");
NON_ENTERABLE_ENTRY_CODE(WEAK);

// XXX! The garbage collector replaces a WEAK with a DEAD_WEAK
// in-place, which causes problems if the heap is scanned linearly
// after GC (certain kinds of profiling do this). So when profiling,
// we set the size of a DEAD_WEAK to 4 non-pointers, rather than its
// normal (smaller) size.
/* NOTE(review): the #ifdef PROFILING / #else / #endif lines selecting
   between these two definitions are missing from this listing. */
#define DEAD_WEAK_PAYLOAD_WORDS 4
#define DEAD_WEAK_PAYLOAD_WORDS 1

INFO_TABLE_CONSTR(stg_DEAD_WEAK_info,stg_DEAD_WEAK_entry,0,DEAD_WEAK_PAYLOAD_WORDS,0,CONSTR,,EF_,"DEAD_WEAK","DEAD_WEAK");
NON_ENTERABLE_ENTRY_CODE(DEAD_WEAK);
695 /* -----------------------------------------------------------------------------
698 This is a static nullary constructor (like []) that we use to mark an empty
699 finalizer in a weak pointer object.
700 -------------------------------------------------------------------------- */
702 INFO_TABLE_CONSTR(stg_NO_FINALIZER_info,stg_NO_FINALIZER_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"NO_FINALIZER","NO_FINALIZER");
703 NON_ENTERABLE_ENTRY_CODE(NO_FINALIZER);
705 SET_STATIC_HDR(stg_NO_FINALIZER_closure,stg_NO_FINALIZER_info,0/*CC*/,,EI_)
708 /* -----------------------------------------------------------------------------
709 Foreign Objects are unlifted and therefore never entered.
710 -------------------------------------------------------------------------- */
712 INFO_TABLE(stg_FOREIGN_info,stg_FOREIGN_entry,0,1,FOREIGN,,EF_,"FOREIGN","FOREIGN");
713 NON_ENTERABLE_ENTRY_CODE(FOREIGN);
715 /* -----------------------------------------------------------------------------
716 Stable Names are unlifted too.
717 -------------------------------------------------------------------------- */
719 INFO_TABLE(stg_STABLE_NAME_info,stg_STABLE_NAME_entry,0,1,STABLE_NAME,,EF_,"STABLE_NAME","STABLE_NAME");
720 NON_ENTERABLE_ENTRY_CODE(STABLE_NAME);
722 /* -----------------------------------------------------------------------------
725 There are two kinds of these: full and empty. We need an info table
726 and entry code for each type.
727 -------------------------------------------------------------------------- */
729 INFO_TABLE(stg_FULL_MVAR_info,stg_FULL_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
730 NON_ENTERABLE_ENTRY_CODE(FULL_MVAR);
732 INFO_TABLE(stg_EMPTY_MVAR_info,stg_EMPTY_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
733 NON_ENTERABLE_ENTRY_CODE(EMPTY_MVAR);
735 /* -----------------------------------------------------------------------------
738 This is a static nullary constructor (like []) that we use to mark the
739 end of a linked TSO queue.
740 -------------------------------------------------------------------------- */
742 INFO_TABLE_CONSTR(stg_END_TSO_QUEUE_info,stg_END_TSO_QUEUE_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"END_TSO_QUEUE","END_TSO_QUEUE");
743 NON_ENTERABLE_ENTRY_CODE(END_TSO_QUEUE);
745 SET_STATIC_HDR(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE_info,0/*CC*/,,EI_)
748 /* -----------------------------------------------------------------------------
751 Mutable lists (used by the garbage collector) consist of a chain of
752 StgMutClosures connected through their mut_link fields, ending in
753 an END_MUT_LIST closure.
754 -------------------------------------------------------------------------- */
756 INFO_TABLE_CONSTR(stg_END_MUT_LIST_info,stg_END_MUT_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"END_MUT_LIST","END_MUT_LIST");
757 NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST);
759 SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,EI_)
762 INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_CONS, , EF_, "MUT_CONS", "MUT_CONS");
763 NON_ENTERABLE_ENTRY_CODE(MUT_CONS);
765 /* -----------------------------------------------------------------------------
767 -------------------------------------------------------------------------- */
769 INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST_info,stg_END_EXCEPTION_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"END_EXCEPTION_LIST","END_EXCEPTION_LIST");
770 NON_ENTERABLE_ENTRY_CODE(END_EXCEPTION_LIST);
772 SET_STATIC_HDR(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST_info,0/*CC*/,,EI_)
775 INFO_TABLE(stg_EXCEPTION_CONS_info, stg_EXCEPTION_CONS_entry, 1, 1, CONSTR, , EF_, "EXCEPTION_CONS", "EXCEPTION_CONS");
776 NON_ENTERABLE_ENTRY_CODE(EXCEPTION_CONS);
778 /* -----------------------------------------------------------------------------
781 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
782 pointers (StgArrPtrs). They all have a similar layout:
784 ___________________________
785 | Info | No. of | data....
787 ---------------------------
789 These are *unpointed* objects: i.e. they cannot be entered.
791 -------------------------------------------------------------------------- */
793 #define ArrayInfo(type) \
794 INFO_TABLE(stg_##type##_info, stg_##type##_entry, 0, 0, type, , EF_,"" # type "","" # type "");
796 ArrayInfo(ARR_WORDS);
797 NON_ENTERABLE_ENTRY_CODE(ARR_WORDS);
798 ArrayInfo(MUT_ARR_PTRS);
799 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS);
800 ArrayInfo(MUT_ARR_PTRS_FROZEN);
801 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS_FROZEN);
805 /* -----------------------------------------------------------------------------
807 -------------------------------------------------------------------------- */
809 INFO_TABLE(stg_MUT_VAR_info, stg_MUT_VAR_entry, 1, 1, MUT_VAR, , EF_, "MUT_VAR", "MUT_VAR");
810 NON_ENTERABLE_ENTRY_CODE(MUT_VAR);
812 /* -----------------------------------------------------------------------------
813 Standard Error Entry.
815 This is used for filling in vector-table entries that can never happen,
817 -------------------------------------------------------------------------- */
818 /* No longer used; we use NULL, because a) it never happens, right? and b)
819 Windows doesn't like DLL entry points being used as static initialisers
820 STGFUN(stg_error_entry) \
823 DUMP_ERRMSG("fatal: stg_error_entry"); \
824 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
829 /* -----------------------------------------------------------------------------
832 Entering this closure will just return to the address on the top of the
833 stack. Useful for getting a thread in a canonical form where we can
834 just enter the top stack word to start the thread. (see deleteThread)
835 * -------------------------------------------------------------------------- */
/* stg_dummy_ret: entering it just returns to the address on top of the
   stack (see the commentary above; used e.g. by deleteThread to put a
   thread into a canonical form). */
INFO_TABLE(stg_dummy_ret_info, stg_dummy_ret_entry, 0, 0, CONSTR_NOCAF_STATIC, , EF_, "DUMMY_RET", "DUMMY_RET");
STGFUN(stg_dummy_ret_entry)
/* NOTE(review): the lines that load ret_addr from the stack are missing
   from this listing -- verify against the original file. */
JMP_(ENTRY_CODE(ret_addr));
SET_STATIC_HDR(stg_dummy_ret_closure,stg_dummy_ret_info,CCS_DONT_CARE,,EI_)
850 /* -----------------------------------------------------------------------------
851 Strict IO application - performing an IO action and entering its result.
853 rts_evalIO() lets you perform Haskell IO actions from outside of Haskell-land,
854 returning back to you their result. Want this result to be evaluated to WHNF
855 by that time, so that we can easily get at the int/char/whatever using the
856 various get{Ty} functions provided by the RTS API.
858 forceIO takes care of this, performing the IO action and entering the
859 results that comes back.
861 * -------------------------------------------------------------------------- */
/* Return frame pushed under an IO action by rts_evalIO so that the
   action's result is entered (forced to WHNF) before being handed back
   through the RTS API (see the commentary above).
   NOTE(review): two variants of stg_forceIO_ret are listed below; the
   preprocessor conditional selecting between them (presumably on the
   register-pinning configuration) is missing from this listing, as are
   the FB_/FE_ brackets and some intermediate statements. */
INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
STGFUN(stg_forceIO_ret_entry)
Sp -= sizeofW(StgSeqFrame);
JMP_(GET_ENTRY(R1.cl));
INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
STGFUN(stg_forceIO_ret_entry)
rval = (StgClosure *)Sp[0];
Sp -= sizeofW(StgSeqFrame);
JMP_(GET_ENTRY(R1.cl));

/* The stg_forceIO closure itself: performs the IO action on top of the
   stack and enters its result via stg_forceIO_ret above. */
INFO_TABLE(stg_forceIO_info,stg_forceIO_entry,1,0,FUN_STATIC,,EF_,"FORCE_IO","FORCE_IO");
FN_(stg_forceIO_entry)
/* Sp[0] contains the IO action we want to perform */
/* Replace it with the return continuation that enters the result. */
Sp[0] = (W_)&stg_forceIO_ret_info;
/* Push the RealWorld# tag and enter */
/* NOTE(review): an Sp decrement before this store appears to be missing
   from this listing. */
Sp[0] =(W_)REALWORLD_TAG;
JMP_(GET_ENTRY(R1.cl));
SET_STATIC_HDR(stg_forceIO_closure,stg_forceIO_info,CCS_DONT_CARE,,EI_)
906 /* -----------------------------------------------------------------------------
907 CHARLIKE and INTLIKE closures.
909 These are static representations of Chars and small Ints, so that
910 we can remove dynamic Chars and Ints during garbage collection and
911 replace them with references to the static objects.
912 -------------------------------------------------------------------------- */
#if defined(INTERPRETER) || defined(ENABLE_WIN32_DLL_SUPPORT)
/* When sticking the RTS in a DLL, we delay populating the
 * Charlike and Intlike tables until load-time, which is only
 * when we've got the real addresses to the C# and I# closures. */
static INFO_TBL_CONST StgInfoTable czh_static_info;
static INFO_TBL_CONST StgInfoTable izh_static_info;
#define Char_hash_static_info czh_static_info
#define Int_hash_static_info izh_static_info
/* NOTE(review): the #else/#endif lines separating the following
   alternative (direct PrelBase) definitions are missing from this
   listing. */
#define Char_hash_static_info PrelBase_Czh_static_info
#define Int_hash_static_info PrelBase_Izh_static_info

/* Static header for one entry of the CHARLIKE table (a C# closure).
   NOTE(review): the continuation lines of these two macros are missing
   from this listing. */
#define CHARLIKE_HDR(n) \
STATIC_HDR(Char_hash_static_info, /* C# */ \

/* Static header for one entry of the INTLIKE table (an I# closure). */
#define INTLIKE_HDR(n) \
STATIC_HDR(Int_hash_static_info, /* I# */ \
944 /* put these in the *data* section, since the garbage collector relies
945 * on the fact that static closures live in the data section.
948 /* end the name with _closure, to convince the mangler this is a closure */
950 StgIntCharlikeClosure stg_CHARLIKE_closure[] = {
1209 StgIntCharlikeClosure stg_INTLIKE_closure[] = {
1210 INTLIKE_HDR(-16), /* MIN_INTLIKE == -16 */
1242 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */