1 /* -----------------------------------------------------------------------------
2 * $Id: StgMiscClosures.hc,v 1.80 2002/09/17 12:34:31 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
6 * Entry code for various built-in closure types.
8 * ---------------------------------------------------------------------------*/
14 #include "StgMiscClosures.h"
16 #include "StoragePriv.h"
17 #include "Profiling.h"
21 #if defined(GRAN) || defined(PAR)
22 # include "GranSimRts.h" /* for DumpRawGranEvent */
23 # include "StgRun.h" /* for StgReturn and register saving */
30 /* ToDo: make the printing of panics more win32-friendly, i.e.,
31 * pop up some lovely message boxes (as well).
/* Emit a fatal-error message to stderr; STGCALL2 wraps the C call so that
 * STG registers are saved/restored around it. */
33 #define DUMP_ERRMSG(msg) STGCALL2(fprintf,stderr,msg)
36 Template for the entry code of non-enterable closures.
/* Generates entry code for a closure type that must never be entered:
 * entering one indicates an RTS bug, so the generated code calls barf()
 * with the offending type name and aborts.
 * NOTE(review): the FB_/FE_ bracketing lines of this macro appear to be
 * elided in this extract -- confirm against the full source. */
39 #define NON_ENTERABLE_ENTRY_CODE(type) \
40 STGFUN(stg_##type##_entry) \
43 STGCALL1(barf, #type " object entered!\n"); \
48 /* -----------------------------------------------------------------------------
49 Support for the bytecode interpreter.
50 -------------------------------------------------------------------------- */
52 /* 9 bits of return code for constructors created by the interpreter. */
/* Direct-return entry for a constructor built by the bytecode interpreter:
 * set the return tag from the constructor's info table and jump to the
 * return address on top of the stack. */
53 FN_(stg_interp_constr_entry)
55 /* R1 points at the constructor */
57 /* STGCALL2(fprintf,stderr,"stg_interp_constr_entry (direct return)!\n"); */
58 /* Pointless, since SET_TAG doesn't do anything */
59 SET_TAG( GET_TAG(GET_INFO(R1.cl)));
60 JMP_(ENTRY_CODE((P_)(*Sp)));
/* Vectored-return stubs for interpreter-built constructors with tags 1..8:
 * constrN jumps through slot N-1 of the vectored-return table addressed by
 * the word on top of the stack. */
64 FN_(stg_interp_constr1_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),0)); FE_ }
65 FN_(stg_interp_constr2_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),1)); FE_ }
66 FN_(stg_interp_constr3_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),2)); FE_ }
67 FN_(stg_interp_constr4_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),3)); FE_ }
68 FN_(stg_interp_constr5_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),4)); FE_ }
69 FN_(stg_interp_constr6_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),5)); FE_ }
70 FN_(stg_interp_constr7_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),6)); FE_ }
71 FN_(stg_interp_constr8_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),7)); FE_ }
73 /* Some info tables to be used when compiled code returns a value to
74 the interpreter, i.e. the interpreter pushes one of these onto the
75 stack before entering a value. What the code does is to
76 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
77 the interpreter's convention (returned value is on top of stack),
78 and then cause the scheduler to enter the interpreter.
80 On entry, the stack (growing down) looks like this:
82 ptr to BCO holding return continuation
83 ptr to one of these info tables.
85 The info table code, both direct and vectored, must:
86 * push R1/F1/D1 on the stack, and its tag if necessary
87 * push the BCO (so it's now on the stack twice)
88 * Yield, ie, go to the scheduler.
90 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
91 directly to the bytecode interpreter. That pops the top element
92 (the BCO, containing the return continuation), and interprets it.
93 Net result: return continuation gets interpreted, with the
97 ptr to the info table just jumped thru
100 which is just what we want -- the "standard" return layout for the
103 Don't ask me how unboxed tuple returns are supposed to work. We
104 haven't got a good story about that yet.
107 /* When the returned value is in R1 and it is a pointer, so doesn't
/* Compiled code returns a *pointer* in R1 to the interpreter: grab the BCO
 * (the return continuation) from the stack, push R1, re-push the BCO, then
 * yield so the scheduler hands control to the interpreter.
 * NOTE(review): the Sp adjustments between the stores to Sp[0] appear to
 * be elided in this extract -- the two stores are not to the same slot in
 * the full source. */
109 #define STG_CtoI_RET_R1p_Template(label) \
114 bco = ((StgPtr*)Sp)[1]; \
116 ((StgPtr*)Sp)[0] = R1.p; \
118 ((StgPtr*)Sp)[0] = bco; \
119 JMP_(stg_yield_to_interpreter); \
123 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_entry);
124 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_0_entry);
125 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_1_entry);
126 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_2_entry);
127 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_3_entry);
128 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_4_entry);
129 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_5_entry);
130 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_6_entry);
131 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_7_entry);
/* One info table serving both the direct return and all eight vectored
 * returns; all vectors point at the same impedance-matching code. */
133 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1p,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
137 /* When the returned value is in R1 and it isn't a pointer. */
/* As R1p, but the returned value in R1 is a *non-pointer*: push the raw
 * word plus its tag (1 = one word of unboxed data) before re-pushing the
 * BCO and yielding to the interpreter.
 * NOTE(review): Sp adjustments between the stores appear elided here. */
138 #define STG_CtoI_RET_R1n_Template(label) \
143 bco = ((StgPtr*)Sp)[1]; \
145 ((StgPtr*)Sp)[0] = (StgPtr)R1.i; \
147 ((StgPtr*)Sp)[0] = (StgPtr)1; /* tag */ \
149 ((StgPtr*)Sp)[0] = bco; \
150 JMP_(stg_yield_to_interpreter); \
154 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_entry);
155 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_0_entry);
156 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_1_entry);
157 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_2_entry);
158 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_3_entry);
159 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_4_entry);
160 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_5_entry);
161 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_6_entry);
162 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_7_entry);
164 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1n,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
168 /* When the returned value is in F1 ... */
/* Compiled code returns a float in F1: make room on the stack, store the
 * float, push its size as the tag, re-push the BCO and yield to the
 * interpreter. */
169 #define STG_CtoI_RET_F1_Template(label) \
174 bco = ((StgPtr*)Sp)[1]; \
175 Sp -= sizeofW(StgFloat); \
176 ASSIGN_FLT((W_*)Sp, F1); \
178 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgFloat); \
180 ((StgPtr*)Sp)[0] = bco; \
181 JMP_(stg_yield_to_interpreter); \
185 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_entry);
186 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_0_entry);
187 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_1_entry);
188 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_2_entry);
189 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_3_entry);
190 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_4_entry);
191 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_5_entry);
192 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_6_entry);
193 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_7_entry);
195 VEC_POLY_INFO_TABLE(stg_ctoi_ret_F1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
198 /* When the returned value is in D1 ... */
/* Compiled code returns a double in D1: identical scheme to the F1 case,
 * with sizeofW(StgDouble) words of stack for the payload. */
199 #define STG_CtoI_RET_D1_Template(label) \
204 bco = ((StgPtr*)Sp)[1]; \
205 Sp -= sizeofW(StgDouble); \
206 ASSIGN_DBL((W_*)Sp, D1); \
208 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgDouble); \
210 ((StgPtr*)Sp)[0] = bco; \
211 JMP_(stg_yield_to_interpreter); \
215 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_entry);
216 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_0_entry);
217 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_1_entry);
218 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_2_entry);
219 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_3_entry);
220 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_4_entry);
221 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_5_entry);
222 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_6_entry);
223 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_7_entry);
225 VEC_POLY_INFO_TABLE(stg_ctoi_ret_D1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
228 /* When the returned value is a VoidRep ... */
/* Void return (no value): push only the VoidRep tag (0), re-push the BCO
 * and yield to the interpreter.
 * NOTE(review): Sp adjustments between the stores appear elided here. */
229 #define STG_CtoI_RET_V_Template(label) \
234 bco = ((StgPtr*)Sp)[1]; \
236 ((StgPtr*)Sp)[0] = 0; /* VoidRep tag */ \
238 ((StgPtr*)Sp)[0] = bco; \
239 JMP_(stg_yield_to_interpreter); \
243 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_entry);
244 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_0_entry);
245 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_1_entry);
246 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_2_entry);
247 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_3_entry);
248 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_4_entry);
249 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_5_entry);
250 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_6_entry);
251 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_7_entry);
253 VEC_POLY_INFO_TABLE(stg_ctoi_ret_V,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
256 /* The other way round: when the interpreter returns a value to
257 compiled code. The stack looks like this:
259 return info table (pushed by compiled code)
260 return value (pushed by interpreter)
262 If the value is ptr-rep'd, the interpreter simply returns to the
263 scheduler, instructing it to ThreadEnterGHC.
265 Otherwise (unboxed return value), we replace the top stack word,
266 which must be the tag, with stg_gc_unbx_r1_info (or f1_info or d1_info),
267 and return to the scheduler, instructing it to ThreadRunGHC.
269 No supporting code needed!
273 /* Entering a BCO. Heave it on the stack and defer to the
/* Entering a BCO: defer to the scheduler, which will route the thread to
 * the bytecode interpreter.
 * NOTE(review): the stack push of the BCO itself appears elided between
 * the STGFUN header and the JMP_ in this extract. */
275 INFO_TABLE(stg_BCO_info,stg_BCO_entry,4,0,BCO,,EF_,"BCO","BCO");
276 STGFUN(stg_BCO_entry) {
280 JMP_(stg_yield_to_interpreter);
285 /* -----------------------------------------------------------------------------
286 Entry code for an indirection.
287 -------------------------------------------------------------------------- */
/* Dynamic indirection: follow the indirectee pointer and enter it. */
289 INFO_TABLE(stg_IND_info,stg_IND_entry,1,0,IND,,EF_,"IND","IND");
290 STGFUN(stg_IND_entry)
293 TICK_ENT_DYN_IND(Node); /* tick */
294 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
296 JMP_(GET_ENTRY(R1.cl));
/* Static indirection (e.g. an updated CAF): same as stg_IND_entry but
 * counted under the static-indirection ticky counter. */
300 INFO_TABLE(stg_IND_STATIC_info,stg_IND_STATIC_entry,1,0,IND_STATIC,,EF_,"IND_STATIC","IND_STATIC");
301 STGFUN(stg_IND_STATIC_entry)
304 TICK_ENT_STATIC_IND(Node); /* tick */
305 R1.p = (P_) ((StgIndStatic*)R1.p)->indirectee;
307 JMP_(GET_ENTRY(R1.cl));
/* "Permanent" indirection, used by profiling/ticky builds: behaves like an
 * IND (follow indirectee and enter), but additionally attributes costs
 * (LDV, cost centres) and, under TICKY_TICKY-only builds, demotes itself
 * to a plain IND after the first entry so that ent_perm_inds counts
 * thunks re-entered rather than total entries. */
311 INFO_TABLE(stg_IND_PERM_info,stg_IND_PERM_entry,1,1,IND_PERM,,EF_,"IND_PERM","IND_PERM");
312 STGFUN(stg_IND_PERM_entry)
315 /* Don't add INDs to granularity cost */
316 /* Dont: TICK_ENT_STATIC_IND(Node); for ticky-ticky; this ind is here only to help profiling */
318 #if defined(TICKY_TICKY) && !defined(PROFILING)
319 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
320 TICK_ENT_PERM_IND(R1.p); /* tick */
323 LDV_ENTER((StgInd *)R1.p);
325 /* Enter PAP cost centre -- lexical scoping only */
326 ENTER_CCS_PAP_CL(R1.cl);
328 /* For ticky-ticky, change the perm_ind to a normal ind on first
329 * entry, so the number of ent_perm_inds is the number of *thunks*
330 * entered again, not the number of subsequent entries.
332 * Since this screws up cost centres, we die if profiling and
333 * ticky_ticky are on at the same time. KSW 1999-01.
338 # error Profiling and ticky-ticky do not mix at present!
339 # endif /* PROFILING */
340 SET_INFO((StgInd*)R1.p,&stg_IND_info);
341 #endif /* TICKY_TICKY */
343 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
345 /* Dont: TICK_ENT_VIA_NODE(); for ticky-ticky; as above */
347 #if defined(TICKY_TICKY) && !defined(PROFILING)
351 JMP_(GET_ENTRY(R1.cl));
/* Old-generation indirection (carries a mut_link for the mutable list):
 * entry behaviour is identical to a plain IND. */
355 INFO_TABLE(stg_IND_OLDGEN_info,stg_IND_OLDGEN_entry,1,1,IND_OLDGEN,,EF_,"IND_OLDGEN","IND_OLDGEN");
356 STGFUN(stg_IND_OLDGEN_entry)
359 TICK_ENT_STATIC_IND(Node); /* tick */
360 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
362 JMP_(GET_ENTRY(R1.cl));
/* Old-generation permanent indirection: the IND_PERM treatment (profiling
 * cost attribution; ticky demotion to IND_OLDGEN on first entry) applied
 * to an old-generation indirection. */
366 INFO_TABLE(stg_IND_OLDGEN_PERM_info,stg_IND_OLDGEN_PERM_entry,1,1,IND_OLDGEN_PERM,,EF_,"IND_OLDGEN_PERM","IND_OLDGEN_PERM");
367 STGFUN(stg_IND_OLDGEN_PERM_entry)
370 /* Dont: TICK_ENT_STATIC_IND(Node); for ticky-ticky; this ind is here only to help profiling */
372 #if defined(TICKY_TICKY) && !defined(PROFILING)
373 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
374 TICK_ENT_PERM_IND(R1.p); /* tick */
377 LDV_ENTER((StgInd *)R1.p);
379 /* Enter PAP cost centre -- lexical scoping only */
380 ENTER_CCS_PAP_CL(R1.cl);
382 /* see comment in IND_PERM */
385 # error Profiling and ticky-ticky do not mix at present!
386 # endif /* PROFILING */
387 SET_INFO((StgInd*)R1.p,&stg_IND_OLDGEN_info);
388 #endif /* TICKY_TICKY */
390 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
392 JMP_(GET_ENTRY(R1.cl));
396 /* -----------------------------------------------------------------------------
397 Entry code for a black hole.
399 Entering a black hole normally causes a cyclic data dependency, but
400 in the concurrent world, black holes are synchronization points,
401 and they are turned into blocking queues when there are threads
402 waiting for the evaluation of the closure to finish.
403 -------------------------------------------------------------------------- */
405 /* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
406 * overwritten with an indirection/evacuee/catch. Thus we claim it
407 * has 1 non-pointer word of payload (in addition to the pointer word
408 * for the blocking queue in a BQ), which should be big enough for an
409 * old-generation indirection.
/* Entering an empty black hole: the current thread blocks until the
 * closure's evaluation finishes.  The entering TSO becomes the first
 * element of a blocking queue and the BLACKHOLE is mutated in place into
 * a BLACKHOLE_BQ, then the thread blocks via a specialised block routine.
 * NOTE(review): many #if/#else/#endif guard lines (GRAN/PAR vs SMP vs
 * sequential, PROFILING) are elided in this extract, so the branches
 * below are alternatives, not sequential code -- confirm against the
 * full source. */
412 INFO_TABLE(stg_BLACKHOLE_info, stg_BLACKHOLE_entry,0,2,BLACKHOLE,,EF_,"BLACKHOLE","BLACKHOLE");
413 STGFUN(stg_BLACKHOLE_entry)
417 /* Before overwriting TSO_LINK */
418 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
423 bdescr *bd = Bdescr(R1.p);
424 if (bd->u.back != (bdescr *)BaseReg) {
425 if (bd->gen_no >= 1 || bd->step->no >= 1) {
426 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
428 EXTFUN_RTS(stg_gc_enter_1_hponly);
429 JMP_(stg_gc_enter_1_hponly);
436 // Actually this is not necessary because R1.p is about to be destroyed.
437 LDV_ENTER((StgClosure *)R1.p);
439 /* Put ourselves on the blocking queue for this black hole */
440 #if defined(GRAN) || defined(PAR)
441 // in fact, only difference is the type of the end-of-queue marker!
442 CurrentTSO->link = END_BQ_QUEUE;
443 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
445 CurrentTSO->link = END_TSO_QUEUE;
446 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
448 // jot down why and on what closure we are blocked
449 CurrentTSO->why_blocked = BlockedOnBlackHole;
450 CurrentTSO->block_info.closure = R1.cl;
452 /* Change the BLACKHOLE into a BLACKHOLE_BQ */
455 // The size remains the same, so we call LDV_recordDead() - no need to fill slop.
456 LDV_recordDead((StgClosure *)R1.p, BLACKHOLE_sizeW());
459 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
461 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
463 LDV_recordCreate((StgClosure *)R1.p);
466 // closure is mutable since something has just been added to its BQ
467 recordMutable((StgMutClosure *)R1.cl);
469 // PAR: dumping of event now done in blockThread -- HWL
471 // stg_gen_block is too heavyweight, use a specialised one
/* Entering a black hole that already has a blocking queue: prepend the
 * current TSO to the existing queue and block.  No info-table mutation is
 * needed beyond re-asserting the BLACKHOLE_BQ header.
 * NOTE(review): the #if guard lines around the GRAN/SMP branches are
 * elided in this extract. */
476 INFO_TABLE(stg_BLACKHOLE_BQ_info, stg_BLACKHOLE_BQ_entry,1,1,BLACKHOLE_BQ,,EF_,"BLACKHOLE","BLACKHOLE");
477 STGFUN(stg_BLACKHOLE_BQ_entry)
481 /* Before overwriting TSO_LINK */
482 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
487 bdescr *bd = Bdescr(R1.p);
488 if (bd->u.back != (bdescr *)BaseReg) {
489 if (bd->gen_no >= 1 || bd->step->no >= 1) {
490 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
492 EXTFUN_RTS(stg_gc_enter_1_hponly);
493 JMP_(stg_gc_enter_1_hponly);
500 LDV_ENTER((StgClosure *)R1.p);
502 /* Put ourselves on the blocking queue for this black hole */
503 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
504 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
505 /* jot down why and on what closure we are blocked */
506 CurrentTSO->why_blocked = BlockedOnBlackHole;
507 CurrentTSO->block_info.closure = R1.cl;
509 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
512 /* PAR: dumping of event now done in blockThread -- HWL */
514 /* stg_gen_block is too heavyweight, use a specialised one */
520 Revertible black holes are needed in the parallel world, to handle
521 negative acknowledgements of messages containing updatable closures.
522 The idea is that when the original message is transmitted, the closure
523 is turned into a revertible black hole...an object which acts like a
524 black hole when local threads try to enter it, but which can be reverted
525 back to the original closure if necessary.
527 It's actually a lot like a blocking queue (BQ) entry, because revertible
528 black holes are initially set up with an empty blocking queue.
531 #if defined(PAR) || defined(GRAN)
/* Revertible black hole (GRAN/PAR only): acts like BLACKHOLE_BQ when a
 * local thread enters it -- enqueue the TSO on the blocking queue and
 * block -- but the closure can later be reverted to its original form on
 * a negative acknowledgement. */
533 INFO_TABLE(stg_RBH_info, stg_RBH_entry,1,1,RBH,,EF_,"RBH","RBH");
534 STGFUN(stg_RBH_entry)
538 /* mainly statistics gathering for GranSim simulation */
539 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
542 /* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
543 /* Put ourselves on the blocking queue for this black hole */
544 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
545 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
546 /* jot down why and on what closure we are blocked */
547 CurrentTSO->why_blocked = BlockedOnBlackHole;
548 CurrentTSO->block_info.closure = R1.cl;
550 /* PAR: dumping of event now done in blockThread -- HWL */
552 /* stg_gen_block is too heavyweight, use a specialised one */
/* RBH_Save_{0,1,2} hold the payload words displaced when a closure is
 * turned into a revertible black hole; the suffix is the number of
 * pointer words saved.  They are data carriers only -- never entered. */
557 INFO_TABLE(stg_RBH_Save_0_info, stg_RBH_Save_0_entry,0,2,CONSTR,,EF_,"RBH_Save_0","RBH_Save_0");
558 NON_ENTERABLE_ENTRY_CODE(RBH_Save_0);
560 INFO_TABLE(stg_RBH_Save_1_info, stg_RBH_Save_1_entry,1,1,CONSTR,,EF_,"RBH_Save_1","RBH_Save_1");
561 NON_ENTERABLE_ENTRY_CODE(RBH_Save_1);
563 INFO_TABLE(stg_RBH_Save_2_info, stg_RBH_Save_2_entry,2,0,CONSTR,,EF_,"RBH_Save_2","RBH_Save_2");
564 NON_ENTERABLE_ENTRY_CODE(RBH_Save_2);
565 #endif /* defined(PAR) || defined(GRAN) */
567 /* identical to BLACKHOLEs except for the infotag */
/* Black hole for a CAF under evaluation: same blocking behaviour as
 * stg_BLACKHOLE_entry (enqueue the TSO, convert to BLACKHOLE_BQ, block),
 * distinguished only by its info tag.
 * NOTE(review): #if guard lines between the alternative branches are
 * elided in this extract. */
568 INFO_TABLE(stg_CAF_BLACKHOLE_info, stg_CAF_BLACKHOLE_entry,0,2,CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
569 STGFUN(stg_CAF_BLACKHOLE_entry)
573 /* mainly statistics gathering for GranSim simulation */
574 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
579 bdescr *bd = Bdescr(R1.p);
580 if (bd->u.back != (bdescr *)BaseReg) {
581 if (bd->gen_no >= 1 || bd->step->no >= 1) {
582 CMPXCHG(R1.cl->header.info, &stg_CAF_BLACKHOLE_info, &stg_WHITEHOLE_info);
584 EXTFUN_RTS(stg_gc_enter_1_hponly);
585 JMP_(stg_gc_enter_1_hponly);
592 LDV_ENTER((StgClosure *)R1.p);
594 // Put ourselves on the blocking queue for this black hole
595 #if defined(GRAN) || defined(PAR)
596 // in fact, only difference is the type of the end-of-queue marker!
597 CurrentTSO->link = END_BQ_QUEUE;
598 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
600 CurrentTSO->link = END_TSO_QUEUE;
601 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
603 // jot down why and on what closure we are blocked
604 CurrentTSO->why_blocked = BlockedOnBlackHole;
605 CurrentTSO->block_info.closure = R1.cl;
607 // Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC
608 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
610 // closure is mutable since something has just been added to its BQ
611 recordMutable((StgMutClosure *)R1.cl);
613 // PAR: dumping of event now done in blockThread -- HWL
615 // stg_gen_block is too heavyweight, use a specialised one
/* "Self-entered" black hole (eager-blackholing debug aid): entering one
 * means the program demanded its own value -- report and shut down. */
621 INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,EF_,"SE_BLACKHOLE","SE_BLACKHOLE");
622 STGFUN(stg_SE_BLACKHOLE_entry)
625 STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1.p);
626 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* CAF variant of the self-entered black hole: report the loop and exit.
 * Fix: the INFO_TABLE previously named its entry `SE_CAF_BLACKHOLE_entry`
 * (missing the `stg_` prefix), while the STGFUN below defines
 * `stg_SE_CAF_BLACKHOLE_entry` -- every other INFO_TABLE/STGFUN pair in
 * this file uses matching `stg_`-prefixed names, and the mismatch leaves
 * the info table referencing an undefined symbol. */
630 INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, stg_SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
631 STGFUN(stg_SE_CAF_BLACKHOLE_entry)
634 STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1.p);
635 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* WHITEHOLE: transient marker installed (via CMPXCHG) while another
 * capability claims a black hole.  Entering one simply retries by
 * re-entering the closure, spinning until the real info pointer is
 * restored. */
641 INFO_TABLE(stg_WHITEHOLE_info, stg_WHITEHOLE_entry,0,2,CONSTR_NOCAF_STATIC,,EF_,"WHITEHOLE","WHITEHOLE");
642 STGFUN(stg_WHITEHOLE_entry)
645 JMP_(GET_ENTRY(R1.cl));
650 /* -----------------------------------------------------------------------------
651 Some static info tables for things that don't get entered, and
652 therefore don't need entry code (i.e. boxed but unpointed objects)
653 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
654 -------------------------------------------------------------------------- */
/* Thread State Objects are never entered as closures. */
656 INFO_TABLE(stg_TSO_info, stg_TSO_entry, 0,0,TSO,,EF_,"TSO","TSO");
657 NON_ENTERABLE_ENTRY_CODE(TSO);
659 /* -----------------------------------------------------------------------------
660 Evacuees are left behind by the garbage collector. Any attempt to enter
662 -------------------------------------------------------------------------- */
/* Forwarding stubs left by the copying collector; entering one is a bug. */
664 INFO_TABLE(stg_EVACUATED_info,stg_EVACUATED_entry,1,0,EVACUATED,,EF_,"EVACUATED","EVACUATED");
665 NON_ENTERABLE_ENTRY_CODE(EVACUATED);
667 /* -----------------------------------------------------------------------------
670 Live weak pointers have a special closure type. Dead ones are just
671 nullary constructors (although they live on the heap - we overwrite
672 live weak pointers with dead ones).
673 -------------------------------------------------------------------------- */
/* Weak pointers: live WEAK objects carry 4 non-pointer words; they are
 * never entered. */
675 INFO_TABLE(stg_WEAK_info,stg_WEAK_entry,0,4,WEAK,,EF_,"WEAK","WEAK");
676 NON_ENTERABLE_ENTRY_CODE(WEAK);
678 // It's important when turning an existing WEAK into a DEAD_WEAK
679 // (which is what finalizeWeak# does) that we don't lose the link
680 // field and break the linked list of weak pointers. Hence, we give
681 // DEAD_WEAK 4 non-pointer fields, the same as WEAK.
683 INFO_TABLE_CONSTR(stg_DEAD_WEAK_info,stg_DEAD_WEAK_entry,0,4,0,CONSTR,,EF_,"DEAD_WEAK","DEAD_WEAK");
684 NON_ENTERABLE_ENTRY_CODE(DEAD_WEAK);
686 /* -----------------------------------------------------------------------------
689 This is a static nullary constructor (like []) that we use to mark an empty
690 finalizer in a weak pointer object.
691 -------------------------------------------------------------------------- */
/* Static nullary constructor marking "no finalizer" in a weak pointer;
 * a single shared static closure, never entered. */
693 INFO_TABLE_CONSTR(stg_NO_FINALIZER_info,stg_NO_FINALIZER_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"NO_FINALIZER","NO_FINALIZER");
694 NON_ENTERABLE_ENTRY_CODE(NO_FINALIZER);
696 SET_STATIC_HDR(stg_NO_FINALIZER_closure,stg_NO_FINALIZER_info,0/*CC*/,,EI_)
699 /* -----------------------------------------------------------------------------
700 Foreign Objects are unlifted and therefore never entered.
701 -------------------------------------------------------------------------- */
/* ForeignObj and StableName closures are unlifted: never entered. */
703 INFO_TABLE(stg_FOREIGN_info,stg_FOREIGN_entry,0,1,FOREIGN,,EF_,"FOREIGN","FOREIGN");
704 NON_ENTERABLE_ENTRY_CODE(FOREIGN);
706 /* -----------------------------------------------------------------------------
707 Stable Names are unlifted too.
708 -------------------------------------------------------------------------- */
710 INFO_TABLE(stg_STABLE_NAME_info,stg_STABLE_NAME_entry,0,1,STABLE_NAME,,EF_,"STABLE_NAME","STABLE_NAME");
711 NON_ENTERABLE_ENTRY_CODE(STABLE_NAME);
713 /* -----------------------------------------------------------------------------
716 There are two kinds of these: full and empty. We need an info table
717 and entry code for each type.
718 -------------------------------------------------------------------------- */
/* MVars: two info tables (full / empty) sharing the MVAR closure type;
 * the distinction is which info pointer is installed.  Never entered. */
720 INFO_TABLE(stg_FULL_MVAR_info,stg_FULL_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
721 NON_ENTERABLE_ENTRY_CODE(FULL_MVAR);
723 INFO_TABLE(stg_EMPTY_MVAR_info,stg_EMPTY_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
724 NON_ENTERABLE_ENTRY_CODE(EMPTY_MVAR);
726 /* -----------------------------------------------------------------------------
729 This is a static nullary constructor (like []) that we use to mark the
730 end of a linked TSO queue.
731 -------------------------------------------------------------------------- */
/* Static nullary constructor terminating linked TSO queues. */
733 INFO_TABLE_CONSTR(stg_END_TSO_QUEUE_info,stg_END_TSO_QUEUE_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"END_TSO_QUEUE","END_TSO_QUEUE");
734 NON_ENTERABLE_ENTRY_CODE(END_TSO_QUEUE);
736 SET_STATIC_HDR(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE_info,0/*CC*/,,EI_)
739 /* -----------------------------------------------------------------------------
742 Mutable lists (used by the garbage collector) consist of a chain of
743 StgMutClosures connected through their mut_link fields, ending in
744 an END_MUT_LIST closure.
745 -------------------------------------------------------------------------- */
/* End-of-list marker and cons cell for the GC's mutable lists (chains of
 * StgMutClosures linked through mut_link).  Data only, never entered. */
747 INFO_TABLE_CONSTR(stg_END_MUT_LIST_info,stg_END_MUT_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"END_MUT_LIST","END_MUT_LIST");
748 NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST);
750 SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,EI_)
753 INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_CONS, , EF_, "MUT_CONS", "MUT_CONS");
754 NON_ENTERABLE_ENTRY_CODE(MUT_CONS);
756 /* -----------------------------------------------------------------------------
758 -------------------------------------------------------------------------- */
/* End marker and cons cell for per-thread blocked-exception lists.
 * Data only, never entered. */
760 INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST_info,stg_END_EXCEPTION_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,"END_EXCEPTION_LIST","END_EXCEPTION_LIST");
761 NON_ENTERABLE_ENTRY_CODE(END_EXCEPTION_LIST);
763 SET_STATIC_HDR(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST_info,0/*CC*/,,EI_)
766 INFO_TABLE(stg_EXCEPTION_CONS_info, stg_EXCEPTION_CONS_entry, 1, 1, CONSTR, , EF_, "EXCEPTION_CONS", "EXCEPTION_CONS");
767 NON_ENTERABLE_ENTRY_CODE(EXCEPTION_CONS);
769 /* -----------------------------------------------------------------------------
772 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
773 pointers (StgArrPtrs). They all have a similar layout:
775 ___________________________
776 | Info | No. of | data....
778 ---------------------------
780 These are *unpointed* objects: i.e. they cannot be entered.
782 -------------------------------------------------------------------------- */
/* Helper generating the info table for an array closure type; arrays are
 * unpointed and therefore never entered, hence the paired
 * NON_ENTERABLE_ENTRY_CODE for each instantiation. */
784 #define ArrayInfo(type) \
785 INFO_TABLE(stg_##type##_info, stg_##type##_entry, 0, 0, type, , EF_,"" # type "","" # type "");
787 ArrayInfo(ARR_WORDS);
788 NON_ENTERABLE_ENTRY_CODE(ARR_WORDS);
789 ArrayInfo(MUT_ARR_PTRS);
790 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS);
791 ArrayInfo(MUT_ARR_PTRS_FROZEN);
792 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS_FROZEN);
796 /* -----------------------------------------------------------------------------
798 -------------------------------------------------------------------------- */
/* Mutable variables (MutVar# / IORef): boxed but never entered. */
800 INFO_TABLE(stg_MUT_VAR_info, stg_MUT_VAR_entry, 1, 1, MUT_VAR, , EF_, "MUT_VAR", "MUT_VAR");
801 NON_ENTERABLE_ENTRY_CODE(MUT_VAR);
803 /* -----------------------------------------------------------------------------
804 Standard Error Entry.
806 This is used for filling in vector-table entries that can never happen,
808 -------------------------------------------------------------------------- */
809 /* No longer used; we use NULL, because a) it never happens, right? and b)
810 Windows doesn't like DLL entry points being used as static initialisers
811 STGFUN(stg_error_entry) \
814 DUMP_ERRMSG("fatal: stg_error_entry"); \
815 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
820 /* -----------------------------------------------------------------------------
823 Entering this closure will just return to the address on the top of the
824 stack. Useful for getting a thread in a canonical form where we can
825 just enter the top stack word to start the thread. (see deleteThread)
826 * -------------------------------------------------------------------------- */
/* Dummy closure whose entry code just returns to the address on top of
 * the stack; used to put a thread into a canonical "enter top of stack"
 * form (see deleteThread).
 * NOTE(review): the line loading `ret_addr` from the stack appears elided
 * between the STGFUN header and the JMP_ in this extract. */
828 INFO_TABLE(stg_dummy_ret_info, stg_dummy_ret_entry, 0, 0, CONSTR_NOCAF_STATIC, , EF_, "DUMMY_RET", "DUMMY_RET");
829 STGFUN(stg_dummy_ret_entry)
835 JMP_(ENTRY_CODE(ret_addr));
838 SET_STATIC_HDR(stg_dummy_ret_closure,stg_dummy_ret_info,CCS_DONT_CARE,,EI_)
841 /* -----------------------------------------------------------------------------
842 Strict IO application - performing an IO action and entering its result.
844 rts_evalIO() lets you perform Haskell IO actions from outside of Haskell-land,
845 returning back to you their result. Want this result to be evaluated to WHNF
846 by that time, so that we can easily get at the int/char/whatever using the
847 various get{Ty} functions provided by the RTS API.
849 forceIO takes care of this, performing the IO action and entering the
850 results that comes back.
852 * -------------------------------------------------------------------------- */
/* Return continuation pushed by stg_forceIO: pops the frame and enters
 * the IO action's result so it reaches WHNF before the RTS API reads it.
 * NOTE(review): stg_forceIO_ret is defined twice below -- presumably the
 * two versions are alternatives under an elided #ifdef for the
 * register-return vs stack-return calling convention (the second loads
 * the returned value from Sp[0] into `rval`); confirm against the full
 * source before editing. */
855 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
856 STGFUN(stg_forceIO_ret_entry)
860 Sp -= sizeofW(StgSeqFrame);
862 JMP_(GET_ENTRY(R1.cl));
865 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
866 STGFUN(stg_forceIO_ret_entry)
870 rval = (StgClosure *)Sp[0];
872 Sp -= sizeofW(StgSeqFrame);
875 JMP_(GET_ENTRY(R1.cl));
/* forceIO: perform the IO action found on the stack and *enter* its
 * result, guaranteeing WHNF for rts_evalIO callers.  Replaces the action
 * on the stack with the stg_forceIO_ret continuation, pushes the
 * RealWorld# token, and enters the action.
 * NOTE(review): the Sp adjustment between the two Sp[0] stores appears
 * elided in this extract. */
879 INFO_TABLE(stg_forceIO_info,stg_forceIO_entry,1,0,FUN_STATIC,,EF_,"FORCE_IO","FORCE_IO");
880 FN_(stg_forceIO_entry)
883 /* Sp[0] contains the IO action we want to perform */
885 /* Replace it with the return continuation that enters the result. */
886 Sp[0] = (W_)&stg_forceIO_ret_info;
888 /* Push the RealWorld# tag and enter */
889 Sp[0] =(W_)REALWORLD_TAG;
890 JMP_(GET_ENTRY(R1.cl));
893 SET_STATIC_HDR(stg_forceIO_closure,stg_forceIO_info,CCS_DONT_CARE,,EI_)
897 /* -----------------------------------------------------------------------------
898 CHARLIKE and INTLIKE closures.
900 These are static representations of Chars and small Ints, so that
901 we can remove dynamic Chars and Ints during garbage collection and
902 replace them with references to the static objects.
903 -------------------------------------------------------------------------- */
905 #if defined(INTERPRETER) || defined(ENABLE_WIN32_DLL_SUPPORT)
907 * When sticking the RTS in a DLL, we delay populating the
908 * Charlike and Intlike tables until load-time, which is only
909 * when we've got the real addresses to the C# and I# closures.
/* Info-table references for the static C#/I# closures below.  In DLL /
 * interpreter builds the real C# and I# info addresses are only known at
 * load time, so placeholder tables are used and patched later.
 * NOTE(review): the two pairs of #defines below are contradictory as
 * shown -- presumably separated by an elided #else of the
 * INTERPRETER/DLL conditional; confirm against the full source. */
912 static INFO_TBL_CONST StgInfoTable czh_static_info;
913 static INFO_TBL_CONST StgInfoTable izh_static_info;
914 #define Char_hash_static_info czh_static_info
915 #define Int_hash_static_info izh_static_info
/* Non-DLL case: point straight at the base package's C#/I# info tables. */
917 #define Char_hash_static_info GHCziBase_Czh_static_info
918 #define Int_hash_static_info GHCziBase_Izh_static_info
/* Build one static C#/I# closure with payload n for the CHARLIKE /
 * INTLIKE tables below. */
921 #define CHARLIKE_HDR(n) \
923 STATIC_HDR(Char_hash_static_info, /* C# */ \
928 #define INTLIKE_HDR(n) \
930 STATIC_HDR(Int_hash_static_info, /* I# */ \
935 /* put these in the *data* section, since the garbage collector relies
936 * on the fact that static closures live in the data section.
939 /* end the name with _closure, to convince the mangler this is a closure */
941 StgIntCharlikeClosure stg_CHARLIKE_closure[] = {
1200 StgIntCharlikeClosure stg_INTLIKE_closure[] = {
1201 INTLIKE_HDR(-16), /* MIN_INTLIKE == -16 */
1233 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */