1 /* -----------------------------------------------------------------------------
2 * $Id: StgMiscClosures.hc,v 1.65 2001/02/15 14:30:07 sewardj Exp $
4 * (c) The GHC Team, 1998-2000
6 * Entry code for various built-in closure types.
8 * ---------------------------------------------------------------------------*/
13 #include "StgMiscClosures.h"
14 #include "HeapStackCheck.h" /* for stg_gen_yield */
16 #include "StoragePriv.h"
17 #include "Profiling.h"
21 #if defined(GRAN) || defined(PAR)
22 # include "GranSimRts.h" /* for DumpRawGranEvent */
23 # include "StgRun.h" /* for StgReturn and register saving */
/* ToDo: make the printing of panics more win32-friendly, i.e.,
 * pop up some lovely message boxes (as well).
 */
/* Print a panic message to stderr via an RTS-safe foreign call. */
#define DUMP_ERRMSG(msg) STGCALL2(fprintf,stderr,msg)
/* -----------------------------------------------------------------------------
   Template for the entry code of non-enterable closures.
   Entering such an object is always a bug: report it and abort.
   NOTE(review): the FB_/FE_ brackets and braces of this macro body
   appear to be elided in this excerpt -- confirm against full source.
   -------------------------------------------------------------------------- */
#define NON_ENTERABLE_ENTRY_CODE(type)				\
STGFUN(stg_##type##_entry)					\
  DUMP_ERRMSG(#type " object entered!\n");			\
  STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE);		\
50 /* -----------------------------------------------------------------------------
51 Support for the bytecode interpreter.
52 -------------------------------------------------------------------------- */
/* 9 bits of return code for constructors created by the interpreter. */
/* Direct-return entry: propagate the constructor's tag (via SET_TAG)
   and return through the address on top of the stack. */
FN_(stg_interp_constr_entry)
  /* R1 points at the constructor */
  /* STGCALL2(fprintf,stderr,"stg_interp_constr_entry (direct return)!\n"); */
  /* Pointless, since SET_TAG doesn't do anything */
  SET_TAG( GET_TAG(GET_INFO(R1.cl)));
  JMP_(ENTRY_CODE((P_)(*Sp)));
/* Vectored returns for interpreter-built constructors: a constructor
   with tag N jumps through slot N-1 of the return vector of the
   return address on top of the stack. */
FN_(stg_interp_constr1_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),0)); FE_ }
FN_(stg_interp_constr2_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),1)); FE_ }
FN_(stg_interp_constr3_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),2)); FE_ }
FN_(stg_interp_constr4_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),3)); FE_ }
FN_(stg_interp_constr5_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),4)); FE_ }
FN_(stg_interp_constr6_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),5)); FE_ }
FN_(stg_interp_constr7_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),6)); FE_ }
FN_(stg_interp_constr8_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),7)); FE_ }
75 /* Some info tables to be used when compiled code returns a value to
76 the interpreter, i.e. the interpreter pushes one of these onto the
77 stack before entering a value. What the code does is to
78 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
79 the interpreter's convention (returned value is on top of stack),
80 and then cause the scheduler to enter the interpreter.
82 On entry, the stack (growing down) looks like this:
84 ptr to BCO holding return continuation
85 ptr to one of these info tables.
87 The info table code, both direct and vectored, must:
88 * push R1/F1/D1 on the stack, and its tag if necessary
89 * push the BCO (so it's now on the stack twice)
90 * Yield, ie, go to the scheduler.
92 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
93 directly to the bytecode interpreter. That pops the top element
94 (the BCO, containing the return continuation), and interprets it.
95 Net result: return continuation gets interpreted, with the
99 ptr to the info table just jumped thru
102 which is just what we want -- the "standard" return layout for the
105 Don't ask me how unboxed tuple returns are supposed to work. We
106 haven't got a good story about that yet.
/* When the returned value is in R1 and it is a pointer, so doesn't
   need a tag word pushed with it. */
/* NOTE(review): the function header/footer and the Sp decrements
   between the stores below appear to be elided in this excerpt --
   as shown, Sp[0] is assigned twice; confirm against full source. */
#define STG_CtoI_RET_R1p_Template(label) \
   bco = ((StgPtr*)Sp)[1]; \
   ((StgPtr*)Sp)[0] = R1.p; \
   ((StgPtr*)Sp)[0] = bco; \
   JMP_(stg_yield_to_interpreter); \

/* One entry point for the direct return plus one per vector slot. */
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_0_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_1_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_2_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_3_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_4_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_5_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_6_entry);
STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_7_entry);

VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1p,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* When the returned value is in R1 and it isn't a pointer: a tag word
   (1) is pushed along with the value. */
/* NOTE(review): intermediate Sp decrements between the stores below
   appear to be elided in this excerpt. */
#define STG_CtoI_RET_R1n_Template(label) \
   bco = ((StgPtr*)Sp)[1]; \
   ((StgPtr*)Sp)[0] = (StgPtr)R1.i; \
   ((StgPtr*)Sp)[0] = (StgPtr)1; /* tag */ \
   ((StgPtr*)Sp)[0] = bco; \
   JMP_(stg_yield_to_interpreter); \

/* One entry point for the direct return plus one per vector slot. */
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_0_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_1_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_2_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_3_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_4_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_5_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_6_entry);
STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_7_entry);

VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1n,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* When the returned value is in F1: the float is stored on the stack
   together with its size-in-words as the tag. */
/* NOTE(review): some Sp decrements around the stores below appear to
   be elided in this excerpt. */
#define STG_CtoI_RET_F1_Template(label) \
   bco = ((StgPtr*)Sp)[1]; \
   Sp -= sizeofW(StgFloat); \
   ASSIGN_FLT((W_*)Sp, F1); \
   ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgFloat); \
   ((StgPtr*)Sp)[0] = bco; \
   JMP_(stg_yield_to_interpreter); \

/* One entry point for the direct return plus one per vector slot. */
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_0_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_1_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_2_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_3_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_4_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_5_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_6_entry);
STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_7_entry);

VEC_POLY_INFO_TABLE(stg_ctoi_ret_F1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* When the returned value is in D1: the double is stored on the stack
   together with its size-in-words as the tag. */
/* NOTE(review): some Sp decrements around the stores below appear to
   be elided in this excerpt. */
#define STG_CtoI_RET_D1_Template(label) \
   bco = ((StgPtr*)Sp)[1]; \
   Sp -= sizeofW(StgDouble); \
   ASSIGN_DBL((W_*)Sp, D1); \
   ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgDouble); \
   ((StgPtr*)Sp)[0] = bco; \
   JMP_(stg_yield_to_interpreter); \

/* One entry point for the direct return plus one per vector slot. */
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_0_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_1_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_2_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_3_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_4_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_5_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_6_entry);
STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_7_entry);

VEC_POLY_INFO_TABLE(stg_ctoi_ret_D1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
/* When the returned value a VoidRep: only a 0 tag is pushed, no data. */
/* NOTE(review): intermediate Sp decrements between the stores below
   appear to be elided in this excerpt. */
#define STG_CtoI_RET_V_Template(label) \
   bco = ((StgPtr*)Sp)[1]; \
   ((StgPtr*)Sp)[0] = 0; /* VoidRep tag */ \
   ((StgPtr*)Sp)[0] = bco; \
   JMP_(stg_yield_to_interpreter); \

/* One entry point for the direct return plus one per vector slot. */
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_0_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_1_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_2_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_3_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_4_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_5_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_6_entry);
STG_CtoI_RET_V_Template(stg_ctoi_ret_V_7_entry);

VEC_POLY_INFO_TABLE(stg_ctoi_ret_V,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
258 /* The other way round: when the interpreter returns a value to
259 compiled code. The stack looks like this:
261 return info table (pushed by compiled code)
262 return value (pushed by interpreter)
264 If the value is ptr-rep'd, the interpreter simply returns to the
265 scheduler, instructing it to ThreadEnterGHC.
267 Otherwise (unboxed return value), we replace the top stack word,
268 which must be the tag, with stg_gc_unbx_r1_info (or f1_info or d1_info),
269 and return to the scheduler, instructing it to ThreadRunGHC.
271 No supporting code needed!
/* Entering a BCO. Heave it on the stack and defer to the
   scheduler, which hands it to the bytecode interpreter. */
INFO_TABLE(stg_BCO_info,stg_BCO_entry,4,0,BCO,,EF_,"BCO","BCO");
STGFUN(stg_BCO_entry) {
  /* NOTE(review): the push of the BCO onto the stack appears to be
     elided in this excerpt; as shown we only yield. */
  JMP_(stg_yield_to_interpreter);
287 /* -----------------------------------------------------------------------------
288 Entry code for an indirection.
289 -------------------------------------------------------------------------- */
INFO_TABLE(stg_IND_info,stg_IND_entry,1,0,IND,,EF_,0,0);
/* Enter an indirection: follow the indirectee and enter that instead. */
STGFUN(stg_IND_entry)
    TICK_ENT_IND(Node); /* tick */
    R1.p = (P_) ((StgInd*)R1.p)->indirectee;
    JMP_(ENTRY_CODE(*R1.p));
INFO_TABLE(stg_IND_STATIC_info,stg_IND_STATIC_entry,1,0,IND_STATIC,,EF_,0,0);
/* Static indirection: same behaviour as stg_IND_entry. */
STGFUN(stg_IND_STATIC_entry)
    TICK_ENT_IND(Node); /* tick */
    R1.p = (P_) ((StgIndStatic*)R1.p)->indirectee;
    JMP_(ENTRY_CODE(*R1.p));
/* Permanent indirection: left in place (not shorted out) so that
   entries through it can be counted for profiling/ticky. */
INFO_TABLE(stg_IND_PERM_info,stg_IND_PERM_entry,1,1,IND_PERM,,EF_,"IND_PERM","IND_PERM");
STGFUN(stg_IND_PERM_entry)
    /* Don't add INDs to granularity cost */
    /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
#if defined(TICKY_TICKY) && !defined(PROFILING)
    /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
    TICK_ENT_PERM_IND(R1.p); /* tick */
    /* Enter PAP cost centre -- lexical scoping only */
    ENTER_CCS_PAP_CL(R1.cl);
    /* For ticky-ticky, change the perm_ind to a normal ind on first
     * entry, so the number of ent_perm_inds is the number of *thunks*
     * entered again, not the number of subsequent entries.
     *
     * Since this screws up cost centres, we die if profiling and
     * ticky_ticky are on at the same time. KSW 1999-01.
     */
# error Profiling and ticky-ticky do not mix at present!
# endif /* PROFILING */
    SET_INFO((StgInd*)R1.p,&stg_IND_info);
#endif /* TICKY_TICKY */
    R1.p = (P_) ((StgInd*)R1.p)->indirectee;
    /* Dont: TICK_ENT_VIA_NODE(); for ticky-ticky; as above */
#if defined(TICKY_TICKY) && !defined(PROFILING)
    /* NOTE(review): a matching #endif and some conditional code appear
       to be elided in this excerpt; the preprocessor nesting as shown
       is unbalanced -- confirm against the full source. */
    JMP_(ENTRY_CODE(*R1.p));
INFO_TABLE(stg_IND_OLDGEN_info,stg_IND_OLDGEN_entry,1,1,IND_OLDGEN,,EF_,0,0);
/* Old-generation indirection: same behaviour as stg_IND_entry. */
STGFUN(stg_IND_OLDGEN_entry)
    TICK_ENT_IND(Node); /* tick */
    R1.p = (P_) ((StgInd*)R1.p)->indirectee;
    JMP_(ENTRY_CODE(*R1.p));
/* Old-generation permanent indirection; see stg_IND_PERM_entry. */
INFO_TABLE(stg_IND_OLDGEN_PERM_info,stg_IND_OLDGEN_PERM_entry,1,1,IND_OLDGEN_PERM,,EF_,0,0);
STGFUN(stg_IND_OLDGEN_PERM_entry)
    /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
#if defined(TICKY_TICKY) && !defined(PROFILING)
    /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
    TICK_ENT_PERM_IND(R1.p); /* tick */
    /* Enter PAP cost centre -- lexical scoping only */
    ENTER_CCS_PAP_CL(R1.cl);
    /* see comment in IND_PERM */
# error Profiling and ticky-ticky do not mix at present!
# endif /* PROFILING */
    SET_INFO((StgInd*)R1.p,&stg_IND_OLDGEN_info);
#endif /* TICKY_TICKY */
    /* NOTE(review): some #ifdef lines appear elided in this excerpt;
       nesting as shown is unbalanced -- confirm against full source. */
    R1.p = (P_) ((StgInd*)R1.p)->indirectee;
    JMP_(ENTRY_CODE(*R1.p));
396 /* -----------------------------------------------------------------------------
397 Entry code for a black hole.
399 Entering a black hole normally causes a cyclic data dependency, but
400 in the concurrent world, black holes are synchronization points,
401 and they are turned into blocking queues when there are threads
402 waiting for the evaluation of the closure to finish.
403 -------------------------------------------------------------------------- */
405 /* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
406 * overwritten with an indirection/evacuee/catch. Thus we claim it
407 * has 1 non-pointer word of payload (in addition to the pointer word
408 * for the blocking queue in a BQ), which should be big enough for an
409 * old-generation indirection.
/* Entering a BLACKHOLE: block the current TSO on the closure and turn
   the BLACKHOLE into a BLACKHOLE_BQ holding the blocking queue. */
/* NOTE(review): the #if/#else/#endif structure separating the
   GRAN/PAR, SMP and sequential sections, plus several braces, appear
   to be elided in this excerpt -- confirm against the full source. */
INFO_TABLE(stg_BLACKHOLE_info, stg_BLACKHOLE_entry,0,2,BLACKHOLE,,EF_,"BLACKHOLE","BLACKHOLE");
STGFUN(stg_BLACKHOLE_entry)
    /* Before overwriting TSO_LINK */
    STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
    bdescr *bd = Bdescr(R1.p);
    if (bd->back != (bdescr *)BaseReg) {
      if (bd->gen->no >= 1 || bd->step->no >= 1) {
        /* swap the info pointer to WHITEHOLE iff it is still a BLACKHOLE */
        CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
        EXTFUN_RTS(stg_gc_enter_1_hponly);
        JMP_(stg_gc_enter_1_hponly);
    /* Put ourselves on the blocking queue for this black hole */
#if defined(GRAN) || defined(PAR)
    /* in fact, only difference is the type of the end-of-queue marker! */
    CurrentTSO->link = END_BQ_QUEUE;
    ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
    CurrentTSO->link = END_TSO_QUEUE;
    ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
    /* jot down why and on what closure we are blocked */
    CurrentTSO->why_blocked = BlockedOnBlackHole;
    CurrentTSO->block_info.closure = R1.cl;
    /* closure is mutable since something has just been added to its BQ */
    recordMutable((StgMutClosure *)R1.cl);
    /* Change the BLACKHOLE into a BLACKHOLE_BQ */
    ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
    /* PAR: dumping of event now done in blockThread -- HWL */
    /* stg_gen_block is too heavyweight, use a specialised one */
/* Entering a BLACKHOLE_BQ (black hole with a blocking queue already
   attached): add the current TSO to the front of the queue and block. */
/* NOTE(review): (a) the closure-name strings below say "BLACKHOLE"
   though the closure type is BLACKHOLE_BQ; (b) the CMPXCHG compares
   against stg_BLACKHOLE_info rather than stg_BLACKHOLE_BQ_info;
   (c) #if/#endif structure appears elided in this excerpt.  Confirm
   all three against the full source. */
INFO_TABLE(stg_BLACKHOLE_BQ_info, stg_BLACKHOLE_BQ_entry,1,1,BLACKHOLE_BQ,,EF_,"BLACKHOLE","BLACKHOLE");
STGFUN(stg_BLACKHOLE_BQ_entry)
    /* Before overwriting TSO_LINK */
    STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
    bdescr *bd = Bdescr(R1.p);
    if (bd->back != (bdescr *)BaseReg) {
      if (bd->gen->no >= 1 || bd->step->no >= 1) {
        CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
        EXTFUN_RTS(stg_gc_enter_1_hponly);
        JMP_(stg_gc_enter_1_hponly);
    /* Put ourselves on the blocking queue for this black hole */
    CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
    ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
    /* jot down why and on what closure we are blocked */
    CurrentTSO->why_blocked = BlockedOnBlackHole;
    CurrentTSO->block_info.closure = R1.cl;
    ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
    /* PAR: dumping of event now done in blockThread -- HWL */
    /* stg_gen_block is too heavyweight, use a specialised one */
/* -----------------------------------------------------------------------------
   Revertible black holes.

   Revertible black holes are needed in the parallel world, to handle
   negative acknowledgements of messages containing updatable closures.
   The idea is that when the original message is transmitted, the closure
   is turned into a revertible black hole... an object which acts like a
   black hole when local threads try to enter it, but which can be reverted
   back to the original closure if necessary.

   It's actually a lot like a blocking queue (BQ) entry, because revertible
   black holes are initially set up with an empty blocking queue.
   -------------------------------------------------------------------------- */
#if defined(PAR) || defined(GRAN)

/* Entering a revertible black hole: identical queue-joining behaviour
   to BLACKHOLE_BQ (see comment "exactly the same ..." below). */
INFO_TABLE(stg_RBH_info, stg_RBH_entry,1,1,RBH,,EF_,0,0);
STGFUN(stg_RBH_entry)
    /* mainly statistics gathering for GranSim simulation */
    STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
    /* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
    /* Put ourselves on the blocking queue for this black hole */
    CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
    ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
    /* jot down why and on what closure we are blocked */
    CurrentTSO->why_blocked = BlockedOnBlackHole;
    CurrentTSO->block_info.closure = R1.cl;
    /* PAR: dumping of event now done in blockThread -- HWL */
    /* stg_gen_block is too heavyweight, use a specialised one */

/* RBH_Save_* constructors -- presumably hold an RBH's saved payload
   words (2/1/0 non-pointers resp.); never entered.  TODO confirm
   against the parallel RTS documentation. */
INFO_TABLE(stg_RBH_Save_0_info, stg_RBH_Save_0_entry,0,2,CONSTR,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(RBH_Save_0);

INFO_TABLE(stg_RBH_Save_1_info, stg_RBH_Save_1_entry,1,1,CONSTR,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(RBH_Save_1);

INFO_TABLE(stg_RBH_Save_2_info, stg_RBH_Save_2_entry,2,0,CONSTR,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(RBH_Save_2);
#endif /* defined(PAR) || defined(GRAN) */
/* identical to BLACKHOLEs except for the infotag */
/* NOTE(review): as in stg_BLACKHOLE_entry, the #if/#else/#endif
   structure and several braces appear to be elided in this excerpt. */
INFO_TABLE(stg_CAF_BLACKHOLE_info, stg_CAF_BLACKHOLE_entry,0,2,CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
STGFUN(stg_CAF_BLACKHOLE_entry)
    /* mainly statistics gathering for GranSim simulation */
    STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
    bdescr *bd = Bdescr(R1.p);
    if (bd->back != (bdescr *)BaseReg) {
      if (bd->gen->no >= 1 || bd->step->no >= 1) {
        /* swap the info pointer to WHITEHOLE iff still a CAF_BLACKHOLE */
        CMPXCHG(R1.cl->header.info, &stg_CAF_BLACKHOLE_info, &stg_WHITEHOLE_info);
        EXTFUN_RTS(stg_gc_enter_1_hponly);
        JMP_(stg_gc_enter_1_hponly);
    /* Put ourselves on the blocking queue for this black hole */
#if defined(GRAN) || defined(PAR)
    /* in fact, only difference is the type of the end-of-queue marker! */
    CurrentTSO->link = END_BQ_QUEUE;
    ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
    CurrentTSO->link = END_TSO_QUEUE;
    ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
    /* jot down why and on what closure we are blocked */
    CurrentTSO->why_blocked = BlockedOnBlackHole;
    CurrentTSO->block_info.closure = R1.cl;
    /* closure is mutable since something has just been added to its BQ */
    recordMutable((StgMutClosure *)R1.cl);
    /* Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC */
    ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
    /* PAR: dumping of event now done in blockThread -- HWL */
    /* stg_gen_block is too heavyweight, use a specialised one */
/* Entering an SE_BLACKHOLE is always an error: report which closure
   was entered, then shut the program down. */
INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,EF_,0,0);
STGFUN(stg_SE_BLACKHOLE_entry)
  STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1.p);
  STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
611 INFO_TABLE(SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,EF_,0,0);
612 STGFUN(stg_SE_CAF_BLACKHOLE_entry)
615 STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1.p);
616 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* WHITEHOLE: a closure's info pointer is temporarily swapped to
   stg_WHITEHOLE_info by the CMPXCHG calls above; entering one simply
   retries the enter through the closure's current info pointer. */
INFO_TABLE(stg_WHITEHOLE_info, stg_WHITEHOLE_entry,0,2,CONSTR_NOCAF_STATIC,,EF_,0,0);
STGFUN(stg_WHITEHOLE_entry)
    JMP_(GET_ENTRY(R1.cl));
631 /* -----------------------------------------------------------------------------
632 Some static info tables for things that don't get entered, and
633 therefore don't need entry code (i.e. boxed but unpointed objects)
634 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
635 -------------------------------------------------------------------------- */
/* Thread State Objects are never entered. */
INFO_TABLE(stg_TSO_info, stg_TSO_entry, 0,0,TSO,,EF_,"TSO","TSO");
NON_ENTERABLE_ENTRY_CODE(TSO);
640 /* -----------------------------------------------------------------------------
641 Evacuees are left behind by the garbage collector. Any attempt to enter
643 -------------------------------------------------------------------------- */
/* Evacuees (GC forwarding markers) are never entered. */
INFO_TABLE(stg_EVACUATED_info,stg_EVACUATED_entry,1,0,EVACUATED,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(EVACUATED);
648 /* -----------------------------------------------------------------------------
651 Live weak pointers have a special closure type. Dead ones are just
652 nullary constructors (although they live on the heap - we overwrite
653 live weak pointers with dead ones).
654 -------------------------------------------------------------------------- */
/* Live weak pointers (4 non-pointer words) are never entered. */
INFO_TABLE(stg_WEAK_info,stg_WEAK_entry,0,4,WEAK,,EF_,"WEAK","WEAK");
NON_ENTERABLE_ENTRY_CODE(WEAK);

/* Dead weak pointer: a nullary-style constructor, never entered. */
INFO_TABLE_CONSTR(stg_DEAD_WEAK_info,stg_DEAD_WEAK_entry,0,1,0,CONSTR,,EF_,"DEAD_WEAK","DEAD_WEAK");
NON_ENTERABLE_ENTRY_CODE(DEAD_WEAK);
662 /* -----------------------------------------------------------------------------
665 This is a static nullary constructor (like []) that we use to mark an empty
666 finalizer in a weak pointer object.
667 -------------------------------------------------------------------------- */
/* Static nullary constructor marking an empty finalizer in a weak
   pointer object; never entered. */
INFO_TABLE_CONSTR(stg_NO_FINALIZER_info,stg_NO_FINALIZER_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(NO_FINALIZER);

SET_STATIC_HDR(stg_NO_FINALIZER_closure,stg_NO_FINALIZER_info,0/*CC*/,,EI_)
675 /* -----------------------------------------------------------------------------
676 Foreign Objects are unlifted and therefore never entered.
677 -------------------------------------------------------------------------- */
/* Foreign Objects are unlifted and therefore never entered. */
INFO_TABLE(stg_FOREIGN_info,stg_FOREIGN_entry,0,1,FOREIGN,,EF_,"FOREIGN","FOREIGN");
NON_ENTERABLE_ENTRY_CODE(FOREIGN);
682 /* -----------------------------------------------------------------------------
683 Stable Names are unlifted too.
684 -------------------------------------------------------------------------- */
/* Stable Names are unlifted and therefore never entered. */
INFO_TABLE(stg_STABLE_NAME_info,stg_STABLE_NAME_entry,0,1,STABLE_NAME,,EF_,"STABLE_NAME","STABLE_NAME");
NON_ENTERABLE_ENTRY_CODE(STABLE_NAME);
689 /* -----------------------------------------------------------------------------
692 There are two kinds of these: full and empty. We need an info table
693 and entry code for each type.
694 -------------------------------------------------------------------------- */
/* MVars come in two flavours -- full and empty -- sharing one layout
   (4 pointer words); neither is ever entered. */
INFO_TABLE(stg_FULL_MVAR_info,stg_FULL_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
NON_ENTERABLE_ENTRY_CODE(FULL_MVAR);

INFO_TABLE(stg_EMPTY_MVAR_info,stg_EMPTY_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
NON_ENTERABLE_ENTRY_CODE(EMPTY_MVAR);
702 /* -----------------------------------------------------------------------------
705 This is a static nullary constructor (like []) that we use to mark the
706 end of a linked TSO queue.
707 -------------------------------------------------------------------------- */
/* Static nullary constructor marking the end of a linked TSO queue;
   never entered. */
INFO_TABLE_CONSTR(stg_END_TSO_QUEUE_info,stg_END_TSO_QUEUE_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(END_TSO_QUEUE);

SET_STATIC_HDR(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE_info,0/*CC*/,,EI_)
715 /* -----------------------------------------------------------------------------
718 Mutable lists (used by the garbage collector) consist of a chain of
719 StgMutClosures connected through their mut_link fields, ending in
720 an END_MUT_LIST closure.
721 -------------------------------------------------------------------------- */
/* End-of-mutable-list marker and mutable-list cons cell, used by the
   garbage collector; neither is ever entered. */
INFO_TABLE_CONSTR(stg_END_MUT_LIST_info,stg_END_MUT_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST);

SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,EI_)

INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_VAR, , EF_, 0, 0);
NON_ENTERABLE_ENTRY_CODE(MUT_CONS);
732 /* -----------------------------------------------------------------------------
734 -------------------------------------------------------------------------- */
/* End marker and cons cell for the per-thread blocked-exceptions list;
   neither is ever entered. */
INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST_info,stg_END_EXCEPTION_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
NON_ENTERABLE_ENTRY_CODE(END_EXCEPTION_LIST);

SET_STATIC_HDR(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST_info,0/*CC*/,,EI_)

INFO_TABLE(stg_EXCEPTION_CONS_info, stg_EXCEPTION_CONS_entry, 1, 1, CONSTR, , EF_, 0, 0);
NON_ENTERABLE_ENTRY_CODE(EXCEPTION_CONS);
745 /* -----------------------------------------------------------------------------
748 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
749 pointers (StgArrPtrs). They all have a similar layout:
751 ___________________________
752 | Info | No. of | data....
754 ---------------------------
756 These are *unpointed* objects: i.e. they cannot be entered.
758 -------------------------------------------------------------------------- */
/* Info-table template for array closures (byte arrays and pointer
   arrays); all are unpointed and never entered. */
#define ArrayInfo(type) \
INFO_TABLE(stg_##type##_info, stg_##type##_entry, 0, 0, type, , EF_,"" # type "","" # type "");

ArrayInfo(ARR_WORDS);
NON_ENTERABLE_ENTRY_CODE(ARR_WORDS);
ArrayInfo(MUT_ARR_PTRS);
NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS);
ArrayInfo(MUT_ARR_PTRS_FROZEN);
NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS_FROZEN);
772 /* -----------------------------------------------------------------------------
774 -------------------------------------------------------------------------- */
/* Mutable variables (IORef/STRef cells) are never entered. */
INFO_TABLE(stg_MUT_VAR_info, stg_MUT_VAR_entry, 1, 1, MUT_VAR, , EF_, "MUT_VAR", "MUT_VAR");
NON_ENTERABLE_ENTRY_CODE(MUT_VAR);
779 /* -----------------------------------------------------------------------------
780 Standard Error Entry.
782 This is used for filling in vector-table entries that can never happen,
784 -------------------------------------------------------------------------- */
785 /* No longer used; we use NULL, because a) it never happens, right? and b)
786 Windows doesn't like DLL entry points being used as static initialisers
787 STGFUN(stg_error_entry) \
790 DUMP_ERRMSG("fatal: stg_error_entry"); \
791 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
796 /* -----------------------------------------------------------------------------
799 Entering this closure will just return to the address on the top of the
800 stack. Useful for getting a thread in a canonical form where we can
801 just enter the top stack word to start the thread. (see deleteThread)
802 * -------------------------------------------------------------------------- */
INFO_TABLE(stg_dummy_ret_info, stg_dummy_ret_entry, 0, 0, CONSTR_NOCAF_STATIC, , EF_, 0, 0);
/* Entering this closure just returns to the address on top of the
   stack (see the "canonical form" comment above / deleteThread). */
STGFUN(stg_dummy_ret_entry)
  /* NOTE(review): the load of ret_addr from the stack appears to be
     elided in this excerpt -- confirm against the full source. */
  JMP_(ENTRY_CODE(ret_addr));

SET_STATIC_HDR(stg_dummy_ret_closure,stg_dummy_ret_info,CCS_DONT_CARE,,EI_)
817 /* -----------------------------------------------------------------------------
818 Strict IO application - performing an IO action and entering its result.
820 rts_evalIO() lets you perform Haskell IO actions from outside of Haskell-land,
821 returning back to you their result. Want this result to be evaluated to WHNF
822 by that time, so that we can easily get at the int/char/whatever using the
823 various get{Ty} functions provided by the RTS API.
825 forceIO takes care of this, performing the IO action and entering the
826 results that comes back.
828 * -------------------------------------------------------------------------- */
/* Return continuation pushed by stg_forceIO: steps over the seq frame
   and enters the returned result so it is evaluated to WHNF. */
INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
STGFUN(stg_forceIO_ret_entry)
  Sp -= sizeofW(StgSeqFrame);
  JMP_(GET_ENTRY(R1.cl));

/* NOTE(review): a second, apparently duplicate, definition of the same
   info table and entry follows.  The #if/#else that presumably selects
   between the two variants (this one reads the result from the stack
   rather than from R1) appears to be elided in this excerpt -- confirm
   against the full source. */
INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
STGFUN(stg_forceIO_ret_entry)
  rval = (StgClosure *)Sp[0];
  Sp -= sizeofW(StgSeqFrame);
  JMP_(GET_ENTRY(R1.cl));
/* stg_forceIO: runs the IO action on the stack, arranging (via
   stg_forceIO_ret) for its result to be entered, so that rts_evalIO
   hands back a value already in WHNF. */
INFO_TABLE(stg_forceIO_info,stg_forceIO_entry,1,0,FUN_STATIC,,EF_,0,0);
FN_(stg_forceIO_entry)
  /* Sp[0] contains the IO action we want to perform */
  /* Replace it with the return continuation that enters the result. */
  Sp[0] = (W_)&stg_forceIO_ret_info;
  /* Push the RealWorld# tag and enter */
  /* NOTE(review): the Sp decrement that makes the next store a push
     appears to be elided in this excerpt; as shown the second store
     would overwrite the first -- confirm against the full source. */
  Sp[0] =(W_)REALWORLD_TAG;
  JMP_(GET_ENTRY(R1.cl));

SET_STATIC_HDR(stg_forceIO_closure,stg_forceIO_info,CCS_DONT_CARE,,EI_)
873 /* -----------------------------------------------------------------------------
874 CHARLIKE and INTLIKE closures.
876 These are static representations of Chars and small Ints, so that
877 we can remove dynamic Chars and Ints during garbage collection and
878 replace them with references to the static objects.
879 -------------------------------------------------------------------------- */
#if defined(INTERPRETER) || defined(ENABLE_WIN32_DLL_SUPPORT)
/*
 * When sticking the RTS in a DLL, we delay populating the
 * Charlike and Intlike tables until load-time, which is only
 * when we've got the real addresses to the C# and I# closures.
 */
static INFO_TBL_CONST StgInfoTable czh_static_info;
static INFO_TBL_CONST StgInfoTable izh_static_info;
#define Char_hash_static_info czh_static_info
#define Int_hash_static_info izh_static_info
/* NOTE(review): the #else separating the non-DLL definitions below
   (and the later #endif) appears to be elided in this excerpt. */
#define Char_hash_static_info PrelBase_Czh_static_info
#define Int_hash_static_info PrelBase_Izh_static_info

/* Static header for a C# closure holding character n. */
#define CHARLIKE_HDR(n) \
	STATIC_HDR(Char_hash_static_info, /* C# */ \

/* Static header for an I# closure holding integer n. */
#define INTLIKE_HDR(n) \
	STATIC_HDR(Int_hash_static_info, /* I# */ \
911 /* put these in the *data* section, since the garbage collector relies
912 * on the fact that static closures live in the data section.
915 /* end the name with _closure, to convince the mangler this is a closure */
917 StgIntCharlikeClosure stg_CHARLIKE_closure[] = {
1176 StgIntCharlikeClosure stg_INTLIKE_closure[] = {
1177 INTLIKE_HDR(-16), /* MIN_INTLIKE == -16 */
1209 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */