1 /* -----------------------------------------------------------------------------
2 * $Id: StgMiscClosures.hc,v 1.78 2002/09/17 12:32:40 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
6 * Entry code for various built-in closure types.
8 * ---------------------------------------------------------------------------*/
14 #include "StgMiscClosures.h"
16 #include "StoragePriv.h"
17 #include "Profiling.h"
21 #if defined(GRAN) || defined(PAR)
22 # include "GranSimRts.h" /* for DumpRawGranEvent */
23 # include "StgRun.h" /* for StgReturn and register saving */
30 /* ToDo: make the printing of panics more win32-friendly, i.e.,
31 * pop up some lovely message boxes (as well). */
/* Write an error message to stderr through the STG-safe C-call wrapper
 * (STGCALL2 saves/restores STG registers around the foreign call). */
33 #define DUMP_ERRMSG(msg) STGCALL2(fprintf,stderr,msg)
36 Template for the entry code of non-enterable closures.
/* Generates entry code for closure types that must never be entered
 * (TSOs, weak pointers, arrays, ...): entering one is an RTS bug, so the
 * generated function simply barf()s with the closure-type name.
 * NOTE(review): the function body's bracketing lines (FB_/FE_/closing
 * brace) are not visible in this fragment -- sampled view. */
39 #define NON_ENTERABLE_ENTRY_CODE(type) \
40 STGFUN(stg_##type##_entry) \
43 barf(#type " object entered!\n"); \
49 /* -----------------------------------------------------------------------------
50 Support for the bytecode interpreter.
51 -------------------------------------------------------------------------- */
/* Direct-return entry for constructors built by the bytecode interpreter:
 * set the return tag from the constructor's info table, then jump to the
 * return address on top of the stack. */
53 /* 9 bits of return code for constructors created by the interpreter. */
54 FN_(stg_interp_constr_entry)
56 /* R1 points at the constructor */
58 /* STGCALL2(fprintf,stderr,"stg_interp_constr_entry (direct return)!\n"); */
59 /* Pointless, since SET_TAG doesn't do anything */
60 SET_TAG( GET_TAG(GET_INFO(R1.cl)));
61 JMP_(ENTRY_CODE((P_)(*Sp)));
/* Vectored returns for interpreter-built constructors: constructor with
 * tag k returns through vector slot k-1 of the return address on the
 * top of the stack. */
65 FN_(stg_interp_constr1_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),0)); FE_ }
66 FN_(stg_interp_constr2_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),1)); FE_ }
67 FN_(stg_interp_constr3_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),2)); FE_ }
68 FN_(stg_interp_constr4_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),3)); FE_ }
69 FN_(stg_interp_constr5_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),4)); FE_ }
70 FN_(stg_interp_constr6_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),5)); FE_ }
71 FN_(stg_interp_constr7_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),6)); FE_ }
72 FN_(stg_interp_constr8_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),7)); FE_ }
74 /* Some info tables to be used when compiled code returns a value to
75 the interpreter, i.e. the interpreter pushes one of these onto the
76 stack before entering a value. What the code does is to
77 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
78 the interpreter's convention (returned value is on top of stack),
79 and then cause the scheduler to enter the interpreter.
81 On entry, the stack (growing down) looks like this:
83 ptr to BCO holding return continuation
84 ptr to one of these info tables.
86 The info table code, both direct and vectored, must:
87 * push R1/F1/D1 on the stack, and its tag if necessary
88 * push the BCO (so it's now on the stack twice)
89 * Yield, ie, go to the scheduler.
91 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
92 directly to the bytecode interpreter. That pops the top element
93 (the BCO, containing the return continuation), and interprets it.
94 Net result: return continuation gets interpreted, with the
98 ptr to the info table just jumped thru
101 which is just what we want -- the "standard" return layout for the
104 Don't ask me how unboxed tuple returns are supposed to work. We
105 haven't got a good story about that yet.
108 /* When the returned value is in R1 and it is a pointer, so doesn't
     need a tag pushed with it. */
/* Template for compiled-code-to-interpreter returns of a pointer in R1:
 * fetch the BCO return continuation from the stack, push R1, re-push the
 * BCO, and yield to the interpreter.  Instantiated once for the direct
 * return and once per vectored-return slot below. */
110 #define STG_CtoI_RET_R1p_Template(label) \
115 bco = ((StgPtr*)Sp)[1]; \
117 ((StgPtr*)Sp)[0] = R1.p; \
119 ((StgPtr*)Sp)[0] = bco; \
120 JMP_(stg_yield_to_interpreter); \
/* Direct entry plus the 8 vectored-return entries. */
124 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_entry);
125 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_0_entry);
126 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_1_entry);
127 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_2_entry);
128 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_3_entry);
129 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_4_entry);
130 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_5_entry);
131 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_6_entry);
132 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_7_entry);
/* Polymorphic (direct + vectored) RET_BCO info table tying the entries together. */
134 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1p,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* As R1p above, but for a non-pointer in R1: the value is pushed together
 * with a tag word of 1 (one non-pointer word) so the interpreter knows
 * the top of stack is unboxed. */
138 /* When the returned value is in R1 and it isn't a pointer. */
139 #define STG_CtoI_RET_R1n_Template(label) \
144 bco = ((StgPtr*)Sp)[1]; \
146 ((StgPtr*)Sp)[0] = (StgPtr)R1.i; \
148 ((StgPtr*)Sp)[0] = (StgPtr)1; /* tag */ \
150 ((StgPtr*)Sp)[0] = bco; \
151 JMP_(stg_yield_to_interpreter); \
/* Direct entry plus the 8 vectored-return entries. */
155 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_entry);
156 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_0_entry);
157 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_1_entry);
158 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_2_entry);
159 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_3_entry);
160 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_4_entry);
161 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_5_entry);
162 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_6_entry);
163 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_7_entry);
165 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1n,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* Compiled-to-interpreter return of a float in F1: make room on the
 * stack, store F1 there, push the size (in words) as the tag, re-push
 * the BCO, and yield to the interpreter. */
169 /* When the returned value is in F1 ... */
170 #define STG_CtoI_RET_F1_Template(label) \
175 bco = ((StgPtr*)Sp)[1]; \
176 Sp -= sizeofW(StgFloat); \
177 ASSIGN_FLT((W_*)Sp, F1); \
179 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgFloat); \
181 ((StgPtr*)Sp)[0] = bco; \
182 JMP_(stg_yield_to_interpreter); \
/* Direct entry plus the 8 vectored-return entries. */
186 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_entry);
187 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_0_entry);
188 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_1_entry);
189 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_2_entry);
190 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_3_entry);
191 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_4_entry);
192 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_5_entry);
193 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_6_entry);
194 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_7_entry);
196 VEC_POLY_INFO_TABLE(stg_ctoi_ret_F1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* As the F1 template, but for a double in D1 (sizeofW(StgDouble) words). */
199 /* When the returned value is in D1 ... */
200 #define STG_CtoI_RET_D1_Template(label) \
205 bco = ((StgPtr*)Sp)[1]; \
206 Sp -= sizeofW(StgDouble); \
207 ASSIGN_DBL((W_*)Sp, D1); \
209 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgDouble); \
211 ((StgPtr*)Sp)[0] = bco; \
212 JMP_(stg_yield_to_interpreter); \
/* Direct entry plus the 8 vectored-return entries. */
216 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_entry);
217 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_0_entry);
218 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_1_entry);
219 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_2_entry);
220 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_3_entry);
221 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_4_entry);
222 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_5_entry);
223 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_6_entry);
224 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_7_entry);
226 VEC_POLY_INFO_TABLE(stg_ctoi_ret_D1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
229 /* When the returned value is a VoidRep ... */
/* Void return: nothing in a register, so just push a 0 tag and the BCO,
 * then yield to the interpreter. */
230 #define STG_CtoI_RET_V_Template(label) \
235 bco = ((StgPtr*)Sp)[1]; \
237 ((StgPtr*)Sp)[0] = 0; /* VoidRep tag */ \
239 ((StgPtr*)Sp)[0] = bco; \
240 JMP_(stg_yield_to_interpreter); \
/* Direct entry plus the 8 vectored-return entries. */
244 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_entry);
245 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_0_entry);
246 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_1_entry);
247 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_2_entry);
248 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_3_entry);
249 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_4_entry);
250 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_5_entry);
251 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_6_entry);
252 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_7_entry);
254 VEC_POLY_INFO_TABLE(stg_ctoi_ret_V,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
257 /* The other way round: when the interpreter returns a value to
258 compiled code. The stack looks like this:
260 return info table (pushed by compiled code)
261 return value (pushed by interpreter)
263 If the value is ptr-rep'd, the interpreter simply returns to the
264 scheduler, instructing it to ThreadEnterGHC.
266 Otherwise (unboxed return value), we replace the top stack word,
267 which must be the tag, with stg_gc_unbx_r1_info (or f1_info or d1_info),
268 and return to the scheduler, instructing it to ThreadRunGHC.
270 No supporting code needed!
274 /* Entering a BCO.  Heave it on the stack and defer to the
   interpreter (via the scheduler's yield-to-interpreter path). */
/* BCO closure: entering one hands control to the bytecode interpreter.
 * NOTE(review): the push of the BCO onto the stack happens on lines not
 * visible in this fragment, before the jump. */
276 INFO_TABLE(stg_BCO_info,stg_BCO_entry,4,0,BCO,,EF_,"BCO","BCO");
277 STGFUN(stg_BCO_entry) {
281 JMP_(stg_yield_to_interpreter);
286 /* -----------------------------------------------------------------------------
287 Entry code for an indirection.
288 -------------------------------------------------------------------------- */
/* Dynamic indirection: follow the indirectee and enter it. */
290 INFO_TABLE(stg_IND_info,stg_IND_entry,1,0,IND,,EF_,"IND","IND");
291 STGFUN(stg_IND_entry)
294 TICK_ENT_DYN_IND(Node); /* tick */
295 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
297 JMP_(GET_ENTRY(R1.cl));
/* Static indirection (e.g. an updated CAF): same follow-and-enter as IND,
 * but ticked as a static indirection. */
301 INFO_TABLE(stg_IND_STATIC_info,stg_IND_STATIC_entry,1,0,IND_STATIC,,EF_,"IND_STATIC","IND_STATIC");
302 STGFUN(stg_IND_STATIC_entry)
305 TICK_ENT_STATIC_IND(Node); /* tick */
306 R1.p = (P_) ((StgIndStatic*)R1.p)->indirectee;
308 JMP_(GET_ENTRY(R1.cl));
/* Permanent indirection: exists only to support profiling/ticky-ticky
 * accounting; otherwise behaves like IND (follow indirectee and enter).
 * Under TICKY_TICKY (without PROFILING) it demotes itself to a plain IND
 * after the first entry so only first entries are counted. */
312 INFO_TABLE(stg_IND_PERM_info,stg_IND_PERM_entry,1,1,IND_PERM,,EF_,"IND_PERM","IND_PERM");
313 STGFUN(stg_IND_PERM_entry)
316 /* Don't add INDs to granularity cost */
317 /* Dont: TICK_ENT_STATIC_IND(Node); for ticky-ticky; this ind is here only to help profiling */
319 #if defined(TICKY_TICKY) && !defined(PROFILING)
320 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
321 TICK_ENT_PERM_IND(R1.p); /* tick */
324 LDV_ENTER((StgInd *)R1.p);
326 /* Enter PAP cost centre -- lexical scoping only */
327 ENTER_CCS_PAP_CL(R1.cl);
329 /* For ticky-ticky, change the perm_ind to a normal ind on first
330 * entry, so the number of ent_perm_inds is the number of *thunks*
331 * entered again, not the number of subsequent entries.
333 * Since this screws up cost centres, we die if profiling and
334 * ticky_ticky are on at the same time. KSW 1999-01.
339 # error Profiling and ticky-ticky do not mix at present!
340 # endif /* PROFILING */
341 SET_INFO((StgInd*)R1.p,&stg_IND_info);
342 #endif /* TICKY_TICKY */
/* Follow the indirection and enter the target closure. */
344 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
346 /* Dont: TICK_ENT_VIA_NODE(); for ticky-ticky; as above */
348 #if defined(TICKY_TICKY) && !defined(PROFILING)
352 JMP_(GET_ENTRY(R1.cl));
/* Old-generation indirection (carries a mut_link word): follow and enter. */
356 INFO_TABLE(stg_IND_OLDGEN_info,stg_IND_OLDGEN_entry,1,1,IND_OLDGEN,,EF_,"IND_OLDGEN","IND_OLDGEN");
357 STGFUN(stg_IND_OLDGEN_entry)
360 TICK_ENT_STATIC_IND(Node); /* tick */
361 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
363 JMP_(GET_ENTRY(R1.cl));
/* Old-generation permanent indirection: the profiling/ticky variant of
 * IND_OLDGEN, mirroring IND_PERM above (demotes itself to IND_OLDGEN
 * after the first ticky-counted entry). */
367 INFO_TABLE(stg_IND_OLDGEN_PERM_info,stg_IND_OLDGEN_PERM_entry,1,1,IND_OLDGEN_PERM,,EF_,"IND_OLDGEN_PERM","IND_OLDGEN_PERM");
368 STGFUN(stg_IND_OLDGEN_PERM_entry)
371 /* Dont: TICK_ENT_STATIC_IND(Node); for ticky-ticky; this ind is here only to help profiling */
373 #if defined(TICKY_TICKY) && !defined(PROFILING)
374 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
375 TICK_ENT_PERM_IND(R1.p); /* tick */
378 LDV_ENTER((StgInd *)R1.p);
380 /* Enter PAP cost centre -- lexical scoping only */
381 ENTER_CCS_PAP_CL(R1.cl);
383 /* see comment in IND_PERM */
386 # error Profiling and ticky-ticky do not mix at present!
387 # endif /* PROFILING */
388 SET_INFO((StgInd*)R1.p,&stg_IND_OLDGEN_info);
389 #endif /* TICKY_TICKY */
/* Follow the indirection and enter the target closure. */
391 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
393 JMP_(GET_ENTRY(R1.cl));
397 /* -----------------------------------------------------------------------------
398 Entry code for a black hole.
400 Entering a black hole normally causes a cyclic data dependency, but
401 in the concurrent world, black holes are synchronization points,
402 and they are turned into blocking queues when there are threads
403 waiting for the evaluation of the closure to finish.
404 -------------------------------------------------------------------------- */
406 /* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
407 * overwritten with an indirection/evacuee/catch. Thus we claim it
408 * has 1 non-pointer word of payload (in addition to the pointer word
409 * for the blocking queue in a BQ), which should be big enough for an
410 * old-generation indirection.
/* Entering an empty BLACKHOLE: block the current TSO on it, convert the
 * closure to a BLACKHOLE_BQ with this TSO as its blocking queue, record
 * the mutation, and block (via a specialised block return -- the final
 * jump is on a line not visible in this fragment). */
413 INFO_TABLE(stg_BLACKHOLE_info, stg_BLACKHOLE_entry,0,2,BLACKHOLE,,EF_,"BLACKHOLE","BLACKHOLE");
414 STGFUN(stg_BLACKHOLE_entry)
418 /* Before overwriting TSO_LINK */
419 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* NOTE(review): the following looks like an SMP/eager-blackholing path
 * (claim the hole with a CAS, or retry via the GC entry point) -- the
 * guarding #if and closing braces are not visible here; confirm against
 * the full source. */
424 bdescr *bd = Bdescr(R1.p);
425 if (bd->u.back != (bdescr *)BaseReg) {
426 if (bd->gen_no >= 1 || bd->step->no >= 1) {
427 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
429 EXTFUN_RTS(stg_gc_enter_1_hponly);
430 JMP_(stg_gc_enter_1_hponly);
437 // Actually this is not necessary because R1.p is about to be destroyed.
438 LDV_ENTER((StgClosure *)R1.p);
440 /* Put ourselves on the blocking queue for this black hole */
441 #if defined(GRAN) || defined(PAR)
442 // in fact, only difference is the type of the end-of-queue marker!
443 CurrentTSO->link = END_BQ_QUEUE;
444 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
446 CurrentTSO->link = END_TSO_QUEUE;
447 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
449 // jot down why and on what closure we are blocked
450 CurrentTSO->why_blocked = BlockedOnBlackHole;
451 CurrentTSO->block_info.closure = R1.cl;
453 /* Change the BLACKHOLE into a BLACKHOLE_BQ */
456 // The size remains the same, so we call LDV_recordDead() - no need to fill slop.
457 LDV_recordDead((StgClosure *)R1.p, BLACKHOLE_sizeW());
460 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
462 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
464 LDV_recordCreate((StgClosure *)R1.p);
467 // closure is mutable since something has just been added to its BQ
468 recordMutable((StgMutClosure *)R1.cl);
470 // PAR: dumping of event now done in blockThread -- HWL
472 // stg_gen_block is too heavyweight, use a specialised one
/* Entering a BLACKHOLE_BQ (black hole that already has waiters): prepend
 * the current TSO to the existing blocking queue and block. */
477 INFO_TABLE(stg_BLACKHOLE_BQ_info, stg_BLACKHOLE_BQ_entry,1,1,BLACKHOLE_BQ,,EF_,"BLACKHOLE","BLACKHOLE");
478 STGFUN(stg_BLACKHOLE_BQ_entry)
482 /* Before overwriting TSO_LINK */
483 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* NOTE(review): apparent SMP claim-with-CAS path as in stg_BLACKHOLE_entry;
 * guarding #if not visible here. */
488 bdescr *bd = Bdescr(R1.p);
489 if (bd->u.back != (bdescr *)BaseReg) {
490 if (bd->gen_no >= 1 || bd->step->no >= 1) {
491 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
493 EXTFUN_RTS(stg_gc_enter_1_hponly);
494 JMP_(stg_gc_enter_1_hponly);
501 LDV_ENTER((StgClosure *)R1.p);
503 /* Put ourselves on the blocking queue for this black hole */
504 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
505 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
506 /* jot down why and on what closure we are blocked */
507 CurrentTSO->why_blocked = BlockedOnBlackHole;
508 CurrentTSO->block_info.closure = R1.cl;
/* Restore the BLACKHOLE_BQ info pointer (undoing any WHITEHOLE claim above). */
510 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
513 /* PAR: dumping of event now done in blockThread -- HWL */
515 /* stg_gen_block is too heavyweight, use a specialised one */
521 Revertible black holes are needed in the parallel world, to handle
522 negative acknowledgements of messages containing updatable closures.
523 The idea is that when the original message is transmitted, the closure
524 is turned into a revertible black hole...an object which acts like a
525 black hole when local threads try to enter it, but which can be reverted
526 back to the original closure if necessary.
528 It's actually a lot like a blocking queue (BQ) entry, because revertible
529 black holes are initially set up with an empty blocking queue.
/* Revertible black holes (GranSim/parallel builds only): act like a
 * BLACKHOLE_BQ when entered locally, but can be reverted to the original
 * closure on a negative acknowledgement.  The RBH_Save_N constructors
 * stash the N pointer words displaced when a closure is RBH-ified. */
532 #if defined(PAR) || defined(GRAN)
534 INFO_TABLE(stg_RBH_info, stg_RBH_entry,1,1,RBH,,EF_,"RBH","RBH");
535 STGFUN(stg_RBH_entry)
539 /* mainly statistics gathering for GranSim simulation */
540 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
543 /* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
544 /* Put ourselves on the blocking queue for this black hole */
545 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
546 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
547 /* jot down why and on what closure we are blocked */
548 CurrentTSO->why_blocked = BlockedOnBlackHole;
549 CurrentTSO->block_info.closure = R1.cl;
551 /* PAR: dumping of event now done in blockThread -- HWL */
553 /* stg_gen_block is too heavyweight, use a specialised one */
558 INFO_TABLE(stg_RBH_Save_0_info, stg_RBH_Save_0_entry,0,2,CONSTR,,EF_,"RBH_Save_0","RBH_Save_0");
559 NON_ENTERABLE_ENTRY_CODE(RBH_Save_0);
561 INFO_TABLE(stg_RBH_Save_1_info, stg_RBH_Save_1_entry,1,1,CONSTR,,EF_,"RBH_Save_1","RBH_Save_1");
562 NON_ENTERABLE_ENTRY_CODE(RBH_Save_1);
564 INFO_TABLE(stg_RBH_Save_2_info, stg_RBH_Save_2_entry,2,0,CONSTR,,EF_,"RBH_Save_2","RBH_Save_2");
565 NON_ENTERABLE_ENTRY_CODE(RBH_Save_2);
566 #endif /* defined(PAR) || defined(GRAN) */
/* CAF black hole: same blocking behaviour as stg_BLACKHOLE_entry, with a
 * distinct closure type so CAFs can be identified; on entry it too turns
 * into a BLACKHOLE_BQ. */
568 /* identical to BLACKHOLEs except for the infotag */
569 INFO_TABLE(stg_CAF_BLACKHOLE_info, stg_CAF_BLACKHOLE_entry,0,2,CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
570 STGFUN(stg_CAF_BLACKHOLE_entry)
574 /* mainly statistics gathering for GranSim simulation */
575 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
/* NOTE(review): apparent SMP claim-with-CAS path as in stg_BLACKHOLE_entry;
 * guarding #if not visible here. */
580 bdescr *bd = Bdescr(R1.p);
581 if (bd->u.back != (bdescr *)BaseReg) {
582 if (bd->gen_no >= 1 || bd->step->no >= 1) {
583 CMPXCHG(R1.cl->header.info, &stg_CAF_BLACKHOLE_info, &stg_WHITEHOLE_info);
585 EXTFUN_RTS(stg_gc_enter_1_hponly);
586 JMP_(stg_gc_enter_1_hponly);
593 LDV_ENTER((StgClosure *)R1.p);
595 // Put ourselves on the blocking queue for this black hole
596 #if defined(GRAN) || defined(PAR)
597 // in fact, only difference is the type of the end-of-queue marker!
598 CurrentTSO->link = END_BQ_QUEUE;
599 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
601 CurrentTSO->link = END_TSO_QUEUE;
602 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
604 // jot down why and on what closure we are blocked
605 CurrentTSO->why_blocked = BlockedOnBlackHole;
606 CurrentTSO->block_info.closure = R1.cl;
608 // Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC
609 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
611 // closure is mutable since something has just been added to its BQ
612 recordMutable((StgMutClosure *)R1.cl);
614 // PAR: dumping of event now done in blockThread -- HWL
616 // stg_gen_block is too heavyweight, use a specialised one
/* "Slop-eliminated" black hole: entering one indicates a cyclic data
 * dependency, so report the loop and shut the program down. */
622 INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,EF_,"SE_BLACKHOLE","SE_BLACKHOLE");
623 STGFUN(stg_SE_BLACKHOLE_entry)
626 STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1.p);
627 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* SE (slop-eliminated) CAF black hole; entering it is a fatal loop, see
 * stg_SE_CAF_BLACKHOLE_entry below.
 * Fix: the entry-code argument must name the function actually defined
 * below, stg_SE_CAF_BLACKHOLE_entry; the original said
 * SE_CAF_BLACKHOLE_entry (no stg_ prefix), an undefined symbol. */
631 INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, stg_SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
/* Entering an SE CAF black hole is a detected <<loop>>: report and exit. */
632 STGFUN(stg_SE_CAF_BLACKHOLE_entry)
635 STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1.p);
636 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* WHITEHOLE: transient marker used while a closure is being claimed
 * (see the CMPXCHG uses above).  Entering one just re-enters the closure,
 * effectively spinning until the info pointer is restored. */
642 INFO_TABLE(stg_WHITEHOLE_info, stg_WHITEHOLE_entry,0,2,CONSTR_NOCAF_STATIC,,EF_,"WHITEHOLE","WHITEHOLE");
643 STGFUN(stg_WHITEHOLE_entry)
646 JMP_(GET_ENTRY(R1.cl));
651 /* -----------------------------------------------------------------------------
652 Some static info tables for things that don't get entered, and
653 therefore don't need entry code (i.e. boxed but unpointed objects)
654 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
655 -------------------------------------------------------------------------- */
/* Thread State Objects are never entered as closures. */
657 INFO_TABLE(stg_TSO_info, stg_TSO_entry, 0,0,TSO,,EF_,"TSO","TSO");
658 NON_ENTERABLE_ENTRY_CODE(TSO);
660 /* -----------------------------------------------------------------------------
661 Evacuees are left behind by the garbage collector. Any attempt to enter
663 -------------------------------------------------------------------------- */
/* Forwarding pointers left by the GC; entering one is a bug. */
665 INFO_TABLE(stg_EVACUATED_info,stg_EVACUATED_entry,1,0,EVACUATED,,EF_,"EVACUATED","EVACUATED");
666 NON_ENTERABLE_ENTRY_CODE(EVACUATED);
668 /* -----------------------------------------------------------------------------
671 Live weak pointers have a special closure type. Dead ones are just
672 nullary constructors (although they live on the heap - we overwrite
673 live weak pointers with dead ones).
674 -------------------------------------------------------------------------- */
/* Weak pointers: live WEAK objects and dead DEAD_WEAK constructors; both
 * are non-enterable.  DEAD_WEAK deliberately keeps the same payload size
 * as WEAK so overwriting in place preserves the weak-pointer list link. */
676 INFO_TABLE(stg_WEAK_info,stg_WEAK_entry,0,4,WEAK,,EF_,"WEAK","WEAK");
677 NON_ENTERABLE_ENTRY_CODE(WEAK);
679 // It's important when turning an existing WEAK into a DEAD_WEAK
680 // (which is what finalizeWeak# does) that we don't lose the link
681 // field and break the linked list of weak pointers. Hence, we give
682 // DEAD_WEAK 4 non-pointer fields, the same as WEAK.
684 INFO_TABLE_CONSTR(stg_DEAD_WEAK_info,stg_DEAD_WEAK_entry,0,4,0,CONSTR,,EF_,"DEAD_WEAK","DEAD_WEAK");
685 NON_ENTERABLE_ENTRY_CODE(DEAD_WEAK);
687 /* -----------------------------------------------------------------------------
690 This is a static nullary constructor (like []) that we use to mark an empty
691 finalizer in a weak pointer object.
692 -------------------------------------------------------------------------- */
/* Static nullary constructor marking "no finalizer" in a weak pointer. */
694 INFO_TABLE_CONSTR(stg_NO_FINALIZER_info,stg_NO_FINALIZER_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"NO_FINALIZER","NO_FINALIZER");
695 NON_ENTERABLE_ENTRY_CODE(NO_FINALIZER);
697 SET_STATIC_HDR(stg_NO_FINALIZER_closure,stg_NO_FINALIZER_info,0/*CC*/,,EI_)
700 /* -----------------------------------------------------------------------------
701 Foreign Objects are unlifted and therefore never entered.
702 -------------------------------------------------------------------------- */
/* Foreign objects are unlifted: never entered. */
704 INFO_TABLE(stg_FOREIGN_info,stg_FOREIGN_entry,0,1,FOREIGN,,EF_,"FOREIGN","FOREIGN");
705 NON_ENTERABLE_ENTRY_CODE(FOREIGN);
707 /* -----------------------------------------------------------------------------
708 Stable Names are unlifted too.
709 -------------------------------------------------------------------------- */
/* Stable names are unlifted: never entered. */
711 INFO_TABLE(stg_STABLE_NAME_info,stg_STABLE_NAME_entry,0,1,STABLE_NAME,,EF_,"STABLE_NAME","STABLE_NAME");
712 NON_ENTERABLE_ENTRY_CODE(STABLE_NAME);
714 /* -----------------------------------------------------------------------------
717 There are two kinds of these: full and empty. We need an info table
718 and entry code for each type.
719 -------------------------------------------------------------------------- */
/* MVars come in full and empty flavours, distinguished by info table;
 * neither is ever entered. */
721 INFO_TABLE(stg_FULL_MVAR_info,stg_FULL_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
722 NON_ENTERABLE_ENTRY_CODE(FULL_MVAR);
724 INFO_TABLE(stg_EMPTY_MVAR_info,stg_EMPTY_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
725 NON_ENTERABLE_ENTRY_CODE(EMPTY_MVAR);
727 /* -----------------------------------------------------------------------------
730 This is a static nullary constructor (like []) that we use to mark the
731 end of a linked TSO queue.
732 -------------------------------------------------------------------------- */
/* Static nullary constructor marking the end of a linked TSO queue. */
734 INFO_TABLE_CONSTR(stg_END_TSO_QUEUE_info,stg_END_TSO_QUEUE_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"END_TSO_QUEUE","END_TSO_QUEUE");
735 NON_ENTERABLE_ENTRY_CODE(END_TSO_QUEUE);
737 SET_STATIC_HDR(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE_info,0/*CC*/,,EI_)
740 /* -----------------------------------------------------------------------------
743 Mutable lists (used by the garbage collector) consist of a chain of
744 StgMutClosures connected through their mut_link fields, ending in
745 an END_MUT_LIST closure.
746 -------------------------------------------------------------------------- */
/* Mutable-list plumbing for the generational GC: END_MUT_LIST terminates
 * the chain of StgMutClosures; MUT_CONS cells link entries together. */
748 INFO_TABLE_CONSTR(stg_END_MUT_LIST_info,stg_END_MUT_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"END_MUT_LIST","END_MUT_LIST");
749 NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST);
751 SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,EI_)
754 INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_CONS, , EF_, "MUT_CONS", "MUT_CONS");
755 NON_ENTERABLE_ENTRY_CODE(MUT_CONS);
757 /* -----------------------------------------------------------------------------
759 -------------------------------------------------------------------------- */
/* Linked list of blocked-exception entries: terminator plus cons cell,
 * both non-enterable. */
761 INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST_info,stg_END_EXCEPTION_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"END_EXCEPTION_LIST","END_EXCEPTION_LIST");
762 NON_ENTERABLE_ENTRY_CODE(END_EXCEPTION_LIST);
764 SET_STATIC_HDR(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST_info,0/*CC*/,,EI_)
767 INFO_TABLE(stg_EXCEPTION_CONS_info, stg_EXCEPTION_CONS_entry, 1, 1, CONSTR, , EF_, "EXCEPTION_CONS", "EXCEPTION_CONS");
768 NON_ENTERABLE_ENTRY_CODE(EXCEPTION_CONS);
770 /* -----------------------------------------------------------------------------
773 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
774 pointers (StgArrPtrs). They all have a similar layout:
776 ___________________________
777 | Info | No. of | data....
779 ---------------------------
781 These are *unpointed* objects: i.e. they cannot be entered.
783 -------------------------------------------------------------------------- */
/* Info tables for the array closure types (ARR_WORDS and the mutable/
 * frozen pointer arrays).  Arrays are unpointed and never entered. */
785 #define ArrayInfo(type) \
786 INFO_TABLE(stg_##type##_info, stg_##type##_entry, 0, 0, type, , EF_,"" # type "","" # type "");
788 ArrayInfo(ARR_WORDS);
789 NON_ENTERABLE_ENTRY_CODE(ARR_WORDS);
790 ArrayInfo(MUT_ARR_PTRS);
791 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS);
792 ArrayInfo(MUT_ARR_PTRS_FROZEN);
793 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS_FROZEN);
797 /* -----------------------------------------------------------------------------
799 -------------------------------------------------------------------------- */
/* Mutable variables (MutVar#/IORef): never entered. */
801 INFO_TABLE(stg_MUT_VAR_info, stg_MUT_VAR_entry, 1, 1, MUT_VAR, , EF_, "MUT_VAR", "MUT_VAR");
802 NON_ENTERABLE_ENTRY_CODE(MUT_VAR);
804 /* -----------------------------------------------------------------------------
805 Standard Error Entry.
807 This is used for filling in vector-table entries that can never happen,
809 -------------------------------------------------------------------------- */
810 /* No longer used; we use NULL, because a) it never happens, right? and b)
811 Windows doesn't like DLL entry points being used as static initialisers
812 STGFUN(stg_error_entry) \
815 DUMP_ERRMSG("fatal: stg_error_entry"); \
816 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
821 /* -----------------------------------------------------------------------------
824 Entering this closure will just return to the address on the top of the
825 stack. Useful for getting a thread in a canonical form where we can
826 just enter the top stack word to start the thread. (see deleteThread)
827 * -------------------------------------------------------------------------- */
/* Dummy closure: entering it simply returns to the address on top of the
 * stack, used to put a thread in a canonical enterable form (deleteThread).
 * NOTE(review): the line loading ret_addr from Sp[0] is not visible in
 * this fragment -- confirm against the full source. */
829 INFO_TABLE(stg_dummy_ret_info, stg_dummy_ret_entry, 0, 0, CONSTR_NOCAF_STATIC, , EF_, "DUMMY_RET", "DUMMY_RET");
830 STGFUN(stg_dummy_ret_entry)
836 JMP_(ENTRY_CODE(ret_addr));
839 SET_STATIC_HDR(stg_dummy_ret_closure,stg_dummy_ret_info,CCS_DONT_CARE,,EI_)
842 /* -----------------------------------------------------------------------------
843 Strict IO application - performing an IO action and entering its result.
845 rts_evalIO() lets you perform Haskell IO actions from outside of Haskell-land,
846 returning back to you their result. Want this result to be evaluated to WHNF
847 by that time, so that we can easily get at the int/char/whatever using the
848 various get{Ty} functions provided by the RTS API.
850 forceIO takes care of this, performing the IO action and entering the
851 results that comes back.
853 * -------------------------------------------------------------------------- */
/* Return frame pushed by forceIO: after the IO action completes, pop the
 * frame and enter the result so rts_evalIO gets it in WHNF.
 * NOTE(review): this and the near-identical definition below are
 * presumably alternative branches of a sampled-out #ifdef (profiling vs
 * not) -- confirm against the full source. */
856 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
857 STGFUN(stg_forceIO_ret_entry)
861 Sp -= sizeofW(StgSeqFrame);
863 JMP_(GET_ENTRY(R1.cl));
/* Alternative forceIO return frame (see note on the variant above): takes
 * the returned closure from the stack before entering it. */
866 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
867 STGFUN(stg_forceIO_ret_entry)
871 rval = (StgClosure *)Sp[0];
873 Sp -= sizeofW(StgSeqFrame);
876 JMP_(GET_ENTRY(R1.cl));
/* forceIO: run an IO action and enter its result, so values handed back
 * through rts_evalIO are in WHNF.  Replaces the action's stack slot with
 * the forceIO return frame, pushes the RealWorld# token, and enters the
 * action. */
880 INFO_TABLE(stg_forceIO_info,stg_forceIO_entry,1,0,FUN_STATIC,,EF_,"FORCE_IO","FORCE_IO");
881 FN_(stg_forceIO_entry)
884 /* Sp[0] contains the IO action we want to perform */
886 /* Replace it with the return continuation that enters the result. */
887 Sp[0] = (W_)&stg_forceIO_ret_info;
889 /* Push the RealWorld# tag and enter */
890 Sp[0] =(W_)REALWORLD_TAG;
891 JMP_(GET_ENTRY(R1.cl));
894 SET_STATIC_HDR(stg_forceIO_closure,stg_forceIO_info,CCS_DONT_CARE,,EI_)
898 /* -----------------------------------------------------------------------------
899 CHARLIKE and INTLIKE closures.
901 These are static representations of Chars and small Ints, so that
902 we can remove dynamic Chars and Ints during garbage collection and
903 replace them with references to the static objects.
904 -------------------------------------------------------------------------- */
/* Choose the info pointers used by the static CHARLIKE/INTLIKE closures:
 * under the interpreter or Win32 DLL support they are private placeholders
 * filled in at load time; otherwise they alias the real C#/I# info tables
 * from GHC.Base (z-encoded names). */
906 #if defined(INTERPRETER) || defined(ENABLE_WIN32_DLL_SUPPORT)
908 * When sticking the RTS in a DLL, we delay populating the
909 * Charlike and Intlike tables until load-time, which is only
910 * when we've got the real addresses to the C# and I# closures.
913 static INFO_TBL_CONST StgInfoTable czh_static_info;
914 static INFO_TBL_CONST StgInfoTable izh_static_info;
915 #define Char_hash_static_info czh_static_info
916 #define Int_hash_static_info izh_static_info
918 #define Char_hash_static_info GHCziBase_Czh_static_info
919 #define Int_hash_static_info GHCziBase_Izh_static_info
/* Build one static C#/I# closure for the CHARLIKE/INTLIKE tables below. */
922 #define CHARLIKE_HDR(n) \
924 STATIC_HDR(Char_hash_static_info, /* C# */ \
929 #define INTLIKE_HDR(n) \
931 STATIC_HDR(Int_hash_static_info, /* I# */ \
936 /* put these in the *data* section, since the garbage collector relies
937 * on the fact that static closures live in the data section.
940 /* end the name with _closure, to convince the mangler this is a closure */
942 StgIntCharlikeClosure stg_CHARLIKE_closure[] = {
1201 StgIntCharlikeClosure stg_INTLIKE_closure[] = {
1202 INTLIKE_HDR(-16), /* MIN_INTLIKE == -16 */
1234 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */