1 /* -----------------------------------------------------------------------------
2 * $Id: StgMiscClosures.hc,v 1.66 2001/03/23 16:36:21 simonmar Exp $
4 * (c) The GHC Team, 1998-2000
6 * Entry code for various built-in closure types.
8 * ---------------------------------------------------------------------------*/
14 #include "StgMiscClosures.h"
15 #include "HeapStackCheck.h" /* for stg_gen_yield */
17 #include "StoragePriv.h"
18 #include "Profiling.h"
22 #if defined(GRAN) || defined(PAR)
23 # include "GranSimRts.h" /* for DumpRawGranEvent */
24 # include "StgRun.h" /* for StgReturn and register saving */
31 /* ToDo: make the printing of panics more win32-friendly, i.e.,
32  * pop up some lovely message boxes (as well).
 */
34 #define DUMP_ERRMSG(msg) STGCALL2(fprintf,stderr,msg)
37 /* Template for the entry code of non-enterable closures. */
/* Entry code for closure types that must never be entered directly:
 * print a panic message and shut the RTS down.
 * NOTE(review): the FB_/FE_ bracket lines of the original appear to be
 * elided in this listing -- the macro body is incomplete as shown. */
40 #define NON_ENTERABLE_ENTRY_CODE(type) \
41 STGFUN(stg_##type##_entry) \
44 DUMP_ERRMSG(#type " object entered!\n"); \
45 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
51 /* -----------------------------------------------------------------------------
52 Support for the bytecode interpreter.
53 -------------------------------------------------------------------------- */
55 /* 9 bits of return code for constructors created by the interpreter. */
/* Direct-return entry for constructors built by the bytecode interpreter:
 * set the tag from the constructor's info table, then return through the
 * address on top of the stack.
 * NOTE(review): FB_/FE_ bracket lines appear elided in this listing. */
56 FN_(stg_interp_constr_entry)
58 /* R1 points at the constructor */
60 /* STGCALL2(fprintf,stderr,"stg_interp_constr_entry (direct return)!\n"); */
61 /* Pointless, since SET_TAG doesn't do anything */
62 SET_TAG( GET_TAG(GET_INFO(R1.cl)));
63 JMP_(ENTRY_CODE((P_)(*Sp)));
/* Vectored returns for interpreter-built constructors with tags 1..8:
 * constructor N jumps through slot N-1 of the return vector on top of
 * the stack. */
67 FN_(stg_interp_constr1_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),0)); FE_ }
68 FN_(stg_interp_constr2_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),1)); FE_ }
69 FN_(stg_interp_constr3_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),2)); FE_ }
70 FN_(stg_interp_constr4_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),3)); FE_ }
71 FN_(stg_interp_constr5_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),4)); FE_ }
72 FN_(stg_interp_constr6_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),5)); FE_ }
73 FN_(stg_interp_constr7_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),6)); FE_ }
74 FN_(stg_interp_constr8_entry) { FB_ JMP_(RET_VEC((P_)(*Sp),7)); FE_ }
76 /* Some info tables to be used when compiled code returns a value to
77 the interpreter, i.e. the interpreter pushes one of these onto the
78 stack before entering a value. What the code does is to
79 impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
80 the interpreter's convention (returned value is on top of stack),
81 and then cause the scheduler to enter the interpreter.
83 On entry, the stack (growing down) looks like this:
85 ptr to BCO holding return continuation
86 ptr to one of these info tables.
88 The info table code, both direct and vectored, must:
89 * push R1/F1/D1 on the stack, and its tag if necessary
90 * push the BCO (so it's now on the stack twice)
91 * Yield, ie, go to the scheduler.
93 Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
94 directly to the bytecode interpreter. That pops the top element
95 (the BCO, containing the return continuation), and interprets it.
96 Net result: return continuation gets interpreted, with the
100 ptr to the info table just jumped thru
103 which is just what we want -- the "standard" return layout for the
106 Don't ask me how unboxed tuple returns are supposed to work. We
107    haven't got a good story about that yet.
 */
110 /* When the returned value is in R1 and it is a pointer, so doesn't
    need a tag ... */
/* Compiled-to-interpreter return: the returned value is a pointer in R1.
 * Push R1.p, re-push the BCO holding the return continuation, then yield
 * so the scheduler hands control to the bytecode interpreter.
 * NOTE(review): the FB_ prologue and the Sp decrements between the
 * stores appear elided in this listing; as shown the two stores would
 * hit the same slot -- do not treat this macro body as complete. */
112 #define STG_CtoI_RET_R1p_Template(label) \
117 bco = ((StgPtr*)Sp)[1]; \
119 ((StgPtr*)Sp)[0] = R1.p; \
121 ((StgPtr*)Sp)[0] = bco; \
122 JMP_(stg_yield_to_interpreter); \
/* One entry per return-vector slot, plus the direct-return entry. */
126 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_entry);
127 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_0_entry);
128 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_1_entry);
129 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_2_entry);
130 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_3_entry);
131 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_4_entry);
132 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_5_entry);
133 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_6_entry);
134 STG_CtoI_RET_R1p_Template(stg_ctoi_ret_R1p_7_entry);
/* Polymorphic (direct + vectored) RET_BCO info table for the above. */
136 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1p,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
140 /* When the returned value is in R1 and it isn't a pointer. */
/* As for R1p, but the value in R1 is a non-pointer: push R1.i plus a
 * tag word (1) before re-pushing the BCO and yielding.
 * NOTE(review): FB_ prologue and Sp decrements appear elided here too. */
141 #define STG_CtoI_RET_R1n_Template(label) \
146 bco = ((StgPtr*)Sp)[1]; \
148 ((StgPtr*)Sp)[0] = (StgPtr)R1.i; \
150 ((StgPtr*)Sp)[0] = (StgPtr)1; /* tag */ \
152 ((StgPtr*)Sp)[0] = bco; \
153 JMP_(stg_yield_to_interpreter); \
/* One entry per return-vector slot, plus the direct-return entry. */
157 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_entry);
158 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_0_entry);
159 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_1_entry);
160 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_2_entry);
161 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_3_entry);
162 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_4_entry);
163 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_5_entry);
164 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_6_entry);
165 STG_CtoI_RET_R1n_Template(stg_ctoi_ret_R1n_7_entry);
/* Polymorphic (direct + vectored) RET_BCO info table for the above. */
167 VEC_POLY_INFO_TABLE(stg_ctoi_ret_R1n,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
171 /* When the returned value is in F1 ... */
/* As above, but the value comes back in F1 (a float): make room on the
 * stack, store the float, push its size as the tag, re-push the BCO,
 * and yield to the interpreter.
 * NOTE(review): FB_ prologue and some Sp decrements appear elided. */
172 #define STG_CtoI_RET_F1_Template(label) \
177 bco = ((StgPtr*)Sp)[1]; \
178 Sp -= sizeofW(StgFloat); \
179 ASSIGN_FLT((W_*)Sp, F1); \
181 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgFloat); \
183 ((StgPtr*)Sp)[0] = bco; \
184 JMP_(stg_yield_to_interpreter); \
/* One entry per return-vector slot, plus the direct-return entry. */
188 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_entry);
189 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_0_entry);
190 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_1_entry);
191 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_2_entry);
192 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_3_entry);
193 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_4_entry);
194 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_5_entry);
195 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_6_entry);
196 STG_CtoI_RET_F1_Template(stg_ctoi_ret_F1_7_entry);
/* Polymorphic (direct + vectored) RET_BCO info table for the above. */
198 VEC_POLY_INFO_TABLE(stg_ctoi_ret_F1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
201 /* When the returned value is in D1 ... */
/* As above, but the value comes back in D1 (a double).
 * NOTE(review): FB_ prologue and some Sp decrements appear elided. */
202 #define STG_CtoI_RET_D1_Template(label) \
207 bco = ((StgPtr*)Sp)[1]; \
208 Sp -= sizeofW(StgDouble); \
209 ASSIGN_DBL((W_*)Sp, D1); \
211 ((StgPtr*)Sp)[0] = (StgPtr)sizeofW(StgDouble); \
213 ((StgPtr*)Sp)[0] = bco; \
214 JMP_(stg_yield_to_interpreter); \
/* One entry per return-vector slot, plus the direct-return entry. */
218 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_entry);
219 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_0_entry);
220 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_1_entry);
221 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_2_entry);
222 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_3_entry);
223 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_4_entry);
224 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_5_entry);
225 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_6_entry);
226 STG_CtoI_RET_D1_Template(stg_ctoi_ret_D1_7_entry);
/* Polymorphic (direct + vectored) RET_BCO info table for the above. */
228 VEC_POLY_INFO_TABLE(stg_ctoi_ret_D1,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
231 /* When the returned value is a VoidRep ... */
/* As above, but there is no value: just push the VoidRep tag (0),
 * re-push the BCO, and yield to the interpreter.
 * NOTE(review): FB_ prologue and Sp decrements appear elided. */
232 #define STG_CtoI_RET_V_Template(label) \
237 bco = ((StgPtr*)Sp)[1]; \
239 ((StgPtr*)Sp)[0] = 0; /* VoidRep tag */ \
241 ((StgPtr*)Sp)[0] = bco; \
242 JMP_(stg_yield_to_interpreter); \
/* One entry per return-vector slot, plus the direct-return entry. */
246 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_entry);
247 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_0_entry);
248 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_1_entry);
249 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_2_entry);
250 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_3_entry);
251 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_4_entry);
252 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_5_entry);
253 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_6_entry);
254 STG_CtoI_RET_V_Template(stg_ctoi_ret_V_7_entry);
/* Polymorphic (direct + vectored) RET_BCO info table for the above. */
256 VEC_POLY_INFO_TABLE(stg_ctoi_ret_V,0, NULL/*srt*/, 0/*srt_off*/, 0/*srt_len*/, RET_BCO,, EF_);
259 /* The other way round: when the interpreter returns a value to
260 compiled code. The stack looks like this:
262 return info table (pushed by compiled code)
263 return value (pushed by interpreter)
265 If the value is ptr-rep'd, the interpreter simply returns to the
266 scheduler, instructing it to ThreadEnterGHC.
268 Otherwise (unboxed return value), we replace the top stack word,
269 which must be the tag, with stg_gc_unbx_r1_info (or f1_info or d1_info),
270 and return to the scheduler, instructing it to ThreadRunGHC.
272    No supporting code needed!
 */
276 /* Entering a BCO. Heave it on the stack and defer to the
    scheduler / interpreter. */
/* A BCO (byte-code object) closure: entering it simply yields to the
 * interpreter, which will find the BCO on the stack.
 * NOTE(review): the lines that push the BCO onto the stack appear to be
 * elided in this listing. */
278 INFO_TABLE(stg_BCO_info,stg_BCO_entry,4,0,BCO,,EF_,"BCO","BCO");
279 STGFUN(stg_BCO_entry) {
283 JMP_(stg_yield_to_interpreter);
288 /* -----------------------------------------------------------------------------
289 Entry code for an indirection.
290 -------------------------------------------------------------------------- */
/* Plain indirection: follow the indirectee and enter it. */
292 INFO_TABLE(stg_IND_info,stg_IND_entry,1,0,IND,,EF_,0,0);
293 STGFUN(stg_IND_entry)
296 TICK_ENT_IND(Node); /* tick */
298 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
300 JMP_(ENTRY_CODE(*R1.p));
/* Static indirection (a CAF that has been updated): same behaviour as
 * stg_IND_entry but with the StgIndStatic layout. */
304 INFO_TABLE(stg_IND_STATIC_info,stg_IND_STATIC_entry,1,0,IND_STATIC,,EF_,0,0);
305 STGFUN(stg_IND_STATIC_entry)
308 TICK_ENT_IND(Node); /* tick */
309 R1.p = (P_) ((StgIndStatic*)R1.p)->indirectee;
311 JMP_(ENTRY_CODE(*R1.p));
/* Permanent indirection, kept alive for profiling/ticky statistics
 * rather than being short-circuited by the GC.  Under TICKY_TICKY
 * (without PROFILING) the perm-ind is demoted to a normal IND on first
 * entry so each thunk is counted once.
 * NOTE(review): several guard lines (#if PROFILING, #else, closing
 * braces) appear elided in this listing; the #if/#endif structure
 * shown is incomplete. */
315 INFO_TABLE(stg_IND_PERM_info,stg_IND_PERM_entry,1,1,IND_PERM,,EF_,"IND_PERM","IND_PERM");
316 STGFUN(stg_IND_PERM_entry)
319 /* Don't add INDs to granularity cost */
320 /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
322 #if defined(TICKY_TICKY) && !defined(PROFILING)
323 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
324 TICK_ENT_PERM_IND(R1.p); /* tick */
327 /* Enter PAP cost centre -- lexical scoping only */
328 ENTER_CCS_PAP_CL(R1.cl);
330 /* For ticky-ticky, change the perm_ind to a normal ind on first
331 * entry, so the number of ent_perm_inds is the number of *thunks*
332 * entered again, not the number of subsequent entries.
334 * Since this screws up cost centres, we die if profiling and
335 * ticky_ticky are on at the same time. KSW 1999-01.
340 # error Profiling and ticky-ticky do not mix at present!
341 # endif /* PROFILING */
342 SET_INFO((StgInd*)R1.p,&stg_IND_info);
343 #endif /* TICKY_TICKY */
345 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
347 /* Dont: TICK_ENT_VIA_NODE(); for ticky-ticky; as above */
349 #if defined(TICKY_TICKY) && !defined(PROFILING)
353 JMP_(ENTRY_CODE(*R1.p));
/* Old-generation indirection (carries a mut_link word): behaves like a
 * plain IND on entry. */
357 INFO_TABLE(stg_IND_OLDGEN_info,stg_IND_OLDGEN_entry,1,1,IND_OLDGEN,,EF_,0,0);
358 STGFUN(stg_IND_OLDGEN_entry)
361 TICK_ENT_IND(Node); /* tick */
363 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
365 JMP_(ENTRY_CODE(*R1.p));
/* Old-generation permanent indirection: same demote-on-first-entry
 * trick as stg_IND_PERM_entry, but demoting to stg_IND_OLDGEN_info.
 * NOTE(review): guard lines (#if PROFILING etc.) appear elided here as
 * in IND_PERM; the #if/#endif structure shown is incomplete. */
369 INFO_TABLE(stg_IND_OLDGEN_PERM_info,stg_IND_OLDGEN_PERM_entry,1,1,IND_OLDGEN_PERM,,EF_,0,0);
370 STGFUN(stg_IND_OLDGEN_PERM_entry)
373 /* Dont: TICK_ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
375 #if defined(TICKY_TICKY) && !defined(PROFILING)
376 /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than being extra */
377 TICK_ENT_PERM_IND(R1.p); /* tick */
380 /* Enter PAP cost centre -- lexical scoping only */
381 ENTER_CCS_PAP_CL(R1.cl);
383 /* see comment in IND_PERM */
386 # error Profiling and ticky-ticky do not mix at present!
387 # endif /* PROFILING */
388 SET_INFO((StgInd*)R1.p,&stg_IND_OLDGEN_info);
389 #endif /* TICKY_TICKY */
391 R1.p = (P_) ((StgInd*)R1.p)->indirectee;
393 JMP_(ENTRY_CODE(*R1.p));
397 /* -----------------------------------------------------------------------------
398 Entry code for a black hole.
400 Entering a black hole normally causes a cyclic data dependency, but
401 in the concurrent world, black holes are synchronization points,
402 and they are turned into blocking queues when there are threads
403 waiting for the evaluation of the closure to finish.
404 -------------------------------------------------------------------------- */
406 /* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
407 * overwritten with an indirection/evacuee/catch. Thus we claim it
408 * has 1 non-pointer word of payload (in addition to the pointer word
409 * for the blocking queue in a BQ), which should be big enough for an
410 * old-generation indirection.
/* Entering a BLACKHOLE blocks the current thread: put the TSO on the
 * hole's (empty) blocking queue, record why we blocked, upgrade the
 * closure to a BLACKHOLE_BQ, and return to the scheduler.
 * NOTE(review): #if/#else/#endif guards around the SMP CMPXCHG section
 * and between the GRAN/PAR and sequential queue setups appear elided
 * in this listing -- the visible branches are alternatives, not
 * sequential code. */
413 INFO_TABLE(stg_BLACKHOLE_info, stg_BLACKHOLE_entry,0,2,BLACKHOLE,,EF_,"BLACKHOLE","BLACKHOLE");
414 STGFUN(stg_BLACKHOLE_entry)
418 /* Before overwriting TSO_LINK */
419 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
424 bdescr *bd = Bdescr(R1.p);
425 if (bd->back != (bdescr *)BaseReg) {
426 if (bd->gen->no >= 1 || bd->step->no >= 1) {
427 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
429 EXTFUN_RTS(stg_gc_enter_1_hponly);
430 JMP_(stg_gc_enter_1_hponly);
437 /* Put ourselves on the blocking queue for this black hole */
438 #if defined(GRAN) || defined(PAR)
439 /* in fact, only difference is the type of the end-of-queue marker! */
440 CurrentTSO->link = END_BQ_QUEUE;
441 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
443 CurrentTSO->link = END_TSO_QUEUE;
444 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
446 /* jot down why and on what closure we are blocked */
447 CurrentTSO->why_blocked = BlockedOnBlackHole;
448 CurrentTSO->block_info.closure = R1.cl;
449 /* closure is mutable since something has just been added to its BQ */
450 recordMutable((StgMutClosure *)R1.cl);
451 /* Change the BLACKHOLE into a BLACKHOLE_BQ */
452 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
454 /* PAR: dumping of event now done in blockThread -- HWL */
456 /* stg_gen_block is too heavyweight, use a specialised one */
/* Entering a BLACKHOLE that already has a blocking queue: prepend the
 * current TSO to the existing queue and block, as above.
 * NOTE(review): the SMP CMPXCHG section's guards appear elided here,
 * and the CMPXCHG expected-info argument is &stg_BLACKHOLE_info rather
 * than &stg_BLACKHOLE_BQ_info -- TODO confirm against the full source. */
462 INFO_TABLE(stg_BLACKHOLE_BQ_info, stg_BLACKHOLE_BQ_entry,1,1,BLACKHOLE_BQ,,EF_,"BLACKHOLE","BLACKHOLE");
463 STGFUN(stg_BLACKHOLE_BQ_entry)
467 /* Before overwriting TSO_LINK */
468 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
473 bdescr *bd = Bdescr(R1.p);
474 if (bd->back != (bdescr *)BaseReg) {
475 if (bd->gen->no >= 1 || bd->step->no >= 1) {
476 CMPXCHG(R1.cl->header.info, &stg_BLACKHOLE_info, &stg_WHITEHOLE_info);
478 EXTFUN_RTS(stg_gc_enter_1_hponly);
479 JMP_(stg_gc_enter_1_hponly);
487 /* Put ourselves on the blocking queue for this black hole */
488 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
489 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
490 /* jot down why and on what closure we are blocked */
491 CurrentTSO->why_blocked = BlockedOnBlackHole;
492 CurrentTSO->block_info.closure = R1.cl;
494 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
497 /* PAR: dumping of event now done in blockThread -- HWL */
499 /* stg_gen_block is too heavyweight, use a specialised one */
/* ---------------------------------------------------------------------------
505    Revertible black holes are needed in the parallel world, to handle
506    negative acknowledgements of messages containing updatable closures.
507    The idea is that when the original message is transmitted, the closure
508    is turned into a revertible black hole...an object which acts like a
509    black hole when local threads try to enter it, but which can be reverted
510    back to the original closure if necessary.
512    It's actually a lot like a blocking queue (BQ) entry, because revertible
513    black holes are initially set up with an empty blocking queue.
 * ------------------------------------------------------------------------- */
516 #if defined(PAR) || defined(GRAN)
/* Revertible black hole: blocks the entering thread exactly like a
 * BLACKHOLE_BQ, but the closure can later be reverted to its original
 * form (see header comment above). */
518 INFO_TABLE(stg_RBH_info, stg_RBH_entry,1,1,RBH,,EF_,0,0);
519 STGFUN(stg_RBH_entry)
523 /* mainly statistics gathering for GranSim simulation */
524 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
527 /* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
528 /* Put ourselves on the blocking queue for this black hole */
529 CurrentTSO->link = ((StgBlockingQueue *)R1.p)->blocking_queue;
530 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
531 /* jot down why and on what closure we are blocked */
532 CurrentTSO->why_blocked = BlockedOnBlackHole;
533 CurrentTSO->block_info.closure = R1.cl;
535 /* PAR: dumping of event now done in blockThread -- HWL */
537 /* stg_gen_block is too heavyweight, use a specialised one */
/* Closures used to save the payload words displaced when a closure is
 * overwritten by an RBH; one variant per pointers/non-pointers split. */
542 INFO_TABLE(stg_RBH_Save_0_info, stg_RBH_Save_0_entry,0,2,CONSTR,,EF_,0,0);
543 NON_ENTERABLE_ENTRY_CODE(RBH_Save_0);
545 INFO_TABLE(stg_RBH_Save_1_info, stg_RBH_Save_1_entry,1,1,CONSTR,,EF_,0,0);
546 NON_ENTERABLE_ENTRY_CODE(RBH_Save_1);
548 INFO_TABLE(stg_RBH_Save_2_info, stg_RBH_Save_2_entry,2,0,CONSTR,,EF_,0,0);
549 NON_ENTERABLE_ENTRY_CODE(RBH_Save_2);
550 #endif /* defined(PAR) || defined(GRAN) */
552 /* identical to BLACKHOLEs except for the infotag */
/* NOTE(review): as in stg_BLACKHOLE_entry, the #if/#else/#endif guards
 * around the SMP section and the GRAN/PAR vs sequential queue setup
 * appear elided in this listing. */
553 INFO_TABLE(stg_CAF_BLACKHOLE_info, stg_CAF_BLACKHOLE_entry,0,2,CAF_BLACKHOLE,,EF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
554 STGFUN(stg_CAF_BLACKHOLE_entry)
558 /* mainly statistics gathering for GranSim simulation */
559 STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1.p /*Node*/);
564 bdescr *bd = Bdescr(R1.p);
565 if (bd->back != (bdescr *)BaseReg) {
566 if (bd->gen->no >= 1 || bd->step->no >= 1) {
567 CMPXCHG(R1.cl->header.info, &stg_CAF_BLACKHOLE_info, &stg_WHITEHOLE_info);
569 EXTFUN_RTS(stg_gc_enter_1_hponly);
570 JMP_(stg_gc_enter_1_hponly);
578 /* Put ourselves on the blocking queue for this black hole */
579 #if defined(GRAN) || defined(PAR)
580 /* in fact, only difference is the type of the end-of-queue marker! */
581 CurrentTSO->link = END_BQ_QUEUE;
582 ((StgBlockingQueue *)R1.p)->blocking_queue = (StgBlockingQueueElement *)CurrentTSO;
584 CurrentTSO->link = END_TSO_QUEUE;
585 ((StgBlockingQueue *)R1.p)->blocking_queue = CurrentTSO;
587 /* jot down why and on what closure we are blocked */
588 CurrentTSO->why_blocked = BlockedOnBlackHole;
589 CurrentTSO->block_info.closure = R1.cl;
590 /* closure is mutable since something has just been added to its BQ */
591 recordMutable((StgMutClosure *)R1.cl);
592 /* Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC */
593 ((StgBlockingQueue *)R1.p)->header.info = &stg_BLACKHOLE_BQ_info;
595 /* PAR: dumping of event now done in blockThread -- HWL */
597 /* stg_gen_block is too heavyweight, use a specialised one */
/* "Single-entry" black hole: entering one means a thunk marked
 * single-entry was entered twice -- a compiler bug, so report and die. */
603 INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,EF_,0,0);
604 STGFUN(stg_SE_BLACKHOLE_entry)
607 STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1.p);
608 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* Single-entry CAF black hole: same fatal-error behaviour as
 * stg_SE_BLACKHOLE above.
 * FIX: the INFO_TABLE named the symbols SE_CAF_BLACKHOLE_info /
 * SE_CAF_BLACKHOLE_entry without the stg_ prefix, while the STGFUN
 * below defines stg_SE_CAF_BLACKHOLE_entry -- the info table referenced
 * an undefined entry symbol.  Prefix both with stg_ for consistency
 * with every other closure in this file. */
612 INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, stg_SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,EF_,0,0);
613 STGFUN(stg_SE_CAF_BLACKHOLE_entry)
616 STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1.p);
617 STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
/* WHITEHOLE: transient SMP lock state for a closure; entering one just
 * retries by re-entering the closure. */
623 INFO_TABLE(stg_WHITEHOLE_info, stg_WHITEHOLE_entry,0,2,CONSTR_NOCAF_STATIC,,EF_,0,0);
624 STGFUN(stg_WHITEHOLE_entry)
627 JMP_(GET_ENTRY(R1.cl));
632 /* -----------------------------------------------------------------------------
633 Some static info tables for things that don't get entered, and
634 therefore don't need entry code (i.e. boxed but unpointed objects)
635 NON_ENTERABLE_ENTRY_CODE now defined at the beginning of the file
636 -------------------------------------------------------------------------- */
/* Thread State Objects are never entered as closures. */
638 INFO_TABLE(stg_TSO_info, stg_TSO_entry, 0,0,TSO,,EF_,"TSO","TSO");
639 NON_ENTERABLE_ENTRY_CODE(TSO);
641 /* -----------------------------------------------------------------------------
642 Evacuees are left behind by the garbage collector. Any attempt to enter
644 -------------------------------------------------------------------------- */
/* Forwarding pointers left by the copying GC; never legitimately entered. */
646 INFO_TABLE(stg_EVACUATED_info,stg_EVACUATED_entry,1,0,EVACUATED,,EF_,0,0);
647 NON_ENTERABLE_ENTRY_CODE(EVACUATED);
649 /* -----------------------------------------------------------------------------
652 Live weak pointers have a special closure type. Dead ones are just
653 nullary constructors (although they live on the heap - we overwrite
654 live weak pointers with dead ones).
655 -------------------------------------------------------------------------- */
/* Weak pointers: live ones are WEAK closures; dead ones are overwritten
 * in place with the nullary DEAD_WEAK constructor.  Neither is enterable. */
657 INFO_TABLE(stg_WEAK_info,stg_WEAK_entry,0,4,WEAK,,EF_,"WEAK","WEAK");
658 NON_ENTERABLE_ENTRY_CODE(WEAK);
660 INFO_TABLE_CONSTR(stg_DEAD_WEAK_info,stg_DEAD_WEAK_entry,0,1,0,CONSTR,,EF_,"DEAD_WEAK","DEAD_WEAK");
661 NON_ENTERABLE_ENTRY_CODE(DEAD_WEAK);
663 /* -----------------------------------------------------------------------------
666 This is a static nullary constructor (like []) that we use to mark an empty
667 finalizer in a weak pointer object.
668 -------------------------------------------------------------------------- */
/* Static nullary constructor marking "no finalizer" in a weak pointer. */
670 INFO_TABLE_CONSTR(stg_NO_FINALIZER_info,stg_NO_FINALIZER_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
671 NON_ENTERABLE_ENTRY_CODE(NO_FINALIZER);
673 SET_STATIC_HDR(stg_NO_FINALIZER_closure,stg_NO_FINALIZER_info,0/*CC*/,,EI_)
676 /* -----------------------------------------------------------------------------
677 Foreign Objects are unlifted and therefore never entered.
678 -------------------------------------------------------------------------- */
/* Foreign objects are unlifted, hence never entered. */
680 INFO_TABLE(stg_FOREIGN_info,stg_FOREIGN_entry,0,1,FOREIGN,,EF_,"FOREIGN","FOREIGN");
681 NON_ENTERABLE_ENTRY_CODE(FOREIGN);
683 /* -----------------------------------------------------------------------------
684 Stable Names are unlifted too.
685 -------------------------------------------------------------------------- */
/* Stable names are unlifted, hence never entered. */
687 INFO_TABLE(stg_STABLE_NAME_info,stg_STABLE_NAME_entry,0,1,STABLE_NAME,,EF_,"STABLE_NAME","STABLE_NAME");
688 NON_ENTERABLE_ENTRY_CODE(STABLE_NAME);
690 /* -----------------------------------------------------------------------------
693 There are two kinds of these: full and empty. We need an info table
694 and entry code for each type.
695 -------------------------------------------------------------------------- */
/* MVars come in full and empty flavours; both are unenterable. */
697 INFO_TABLE(stg_FULL_MVAR_info,stg_FULL_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
698 NON_ENTERABLE_ENTRY_CODE(FULL_MVAR);
700 INFO_TABLE(stg_EMPTY_MVAR_info,stg_EMPTY_MVAR_entry,4,0,MVAR,,EF_,"MVAR","MVAR");
701 NON_ENTERABLE_ENTRY_CODE(EMPTY_MVAR);
703 /* -----------------------------------------------------------------------------
706 This is a static nullary constructor (like []) that we use to mark the
707 end of a linked TSO queue.
708 -------------------------------------------------------------------------- */
/* Static nullary constructor marking the end of a linked TSO queue. */
710 INFO_TABLE_CONSTR(stg_END_TSO_QUEUE_info,stg_END_TSO_QUEUE_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
711 NON_ENTERABLE_ENTRY_CODE(END_TSO_QUEUE);
713 SET_STATIC_HDR(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE_info,0/*CC*/,,EI_)
716 /* -----------------------------------------------------------------------------
719 Mutable lists (used by the garbage collector) consist of a chain of
720 StgMutClosures connected through their mut_link fields, ending in
721 an END_MUT_LIST closure.
722 -------------------------------------------------------------------------- */
/* End-of-list marker for the GC's mutable list, and the MUT_CONS cell
 * used to link old-to-new references into that list. */
724 INFO_TABLE_CONSTR(stg_END_MUT_LIST_info,stg_END_MUT_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
725 NON_ENTERABLE_ENTRY_CODE(END_MUT_LIST);
727 SET_STATIC_HDR(stg_END_MUT_LIST_closure,stg_END_MUT_LIST_info,0/*CC*/,,EI_)
730 INFO_TABLE(stg_MUT_CONS_info, stg_MUT_CONS_entry, 1, 1, MUT_VAR, , EF_, 0, 0);
731 NON_ENTERABLE_ENTRY_CODE(MUT_CONS);
733 /* -----------------------------------------------------------------------------
735 -------------------------------------------------------------------------- */
/* End marker and cons cell for a thread's blocked-exceptions list. */
737 INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST_info,stg_END_EXCEPTION_LIST_entry,0,0,0,CONSTR_NOCAF_STATIC,,EF_,0,0);
738 NON_ENTERABLE_ENTRY_CODE(END_EXCEPTION_LIST);
740 SET_STATIC_HDR(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST_info,0/*CC*/,,EI_)
743 INFO_TABLE(stg_EXCEPTION_CONS_info, stg_EXCEPTION_CONS_entry, 1, 1, CONSTR, , EF_, 0, 0);
744 NON_ENTERABLE_ENTRY_CODE(EXCEPTION_CONS);
746 /* -----------------------------------------------------------------------------
749 These come in two basic flavours: arrays of data (StgArrWords) and arrays of
750 pointers (StgArrPtrs). They all have a similar layout:
752 ___________________________
753 | Info | No. of | data....
755 ---------------------------
757 These are *unpointed* objects: i.e. they cannot be entered.
759 -------------------------------------------------------------------------- */
/* Info tables for the primitive array closure types; arrays are
 * unpointed and therefore unenterable. */
761 #define ArrayInfo(type) \
762 INFO_TABLE(stg_##type##_info, stg_##type##_entry, 0, 0, type, , EF_,"" # type "","" # type "");
764 ArrayInfo(ARR_WORDS);
765 NON_ENTERABLE_ENTRY_CODE(ARR_WORDS);
766 ArrayInfo(MUT_ARR_PTRS);
767 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS);
768 ArrayInfo(MUT_ARR_PTRS_FROZEN);
769 NON_ENTERABLE_ENTRY_CODE(MUT_ARR_PTRS_FROZEN);
773 /* -----------------------------------------------------------------------------
775 -------------------------------------------------------------------------- */
/* Mutable variables (MutVar#) are unenterable. */
777 INFO_TABLE(stg_MUT_VAR_info, stg_MUT_VAR_entry, 1, 1, MUT_VAR, , EF_, "MUT_VAR", "MUT_VAR");
778 NON_ENTERABLE_ENTRY_CODE(MUT_VAR);
780 /* -----------------------------------------------------------------------------
781 Standard Error Entry.
783 This is used for filling in vector-table entries that can never happen,
785 -------------------------------------------------------------------------- */
786 /* No longer used; we use NULL, because a) it never happens, right? and b)
787    Windows doesn't like DLL entry points being used as static initialisers
788 STGFUN(stg_error_entry) \
791 DUMP_ERRMSG("fatal: stg_error_entry"); \
792 STGCALL1(shutdownHaskellAndExit, EXIT_FAILURE); \
 */
797 /* -----------------------------------------------------------------------------
800 Entering this closure will just return to the address on the top of the
801 stack. Useful for getting a thread in a canonical form where we can
802 just enter the top stack word to start the thread. (see deleteThread)
803 * -------------------------------------------------------------------------- */
/* A closure whose entry just returns to the address on top of the stack;
 * used to put a thread into a canonical "enter top stack word" form
 * (see deleteThread).
 * NOTE(review): the line loading ret_addr from the stack appears to be
 * elided in this listing. */
805 INFO_TABLE(stg_dummy_ret_info, stg_dummy_ret_entry, 0, 0, CONSTR_NOCAF_STATIC, , EF_, 0, 0);
806 STGFUN(stg_dummy_ret_entry)
812 JMP_(ENTRY_CODE(ret_addr));
815 SET_STATIC_HDR(stg_dummy_ret_closure,stg_dummy_ret_info,CCS_DONT_CARE,,EI_)
818 /* -----------------------------------------------------------------------------
819 Strict IO application - performing an IO action and entering its result.
821 rts_evalIO() lets you perform Haskell IO actions from outside of Haskell-land,
822 returning back to you their result. Want this result to be evaluated to WHNF
823 by that time, so that we can easily get at the int/char/whatever using the
824 various get{Ty} functions provided by the RTS API.
826 forceIO takes care of this, performing the IO action and entering the
827 results that comes back.
829 * -------------------------------------------------------------------------- */
/* Return frame pushed by stg_forceIO: when the IO action returns its
 * result, pop the frame and enter the result so rts_evalIO callers get
 * it in WHNF.
 * NOTE(review): two identical-named definitions appear here; they are
 * presumably the two arms of a REG_R1 #ifdef/#else pair whose guard
 * lines are elided in this listing (the second arm fetches the result
 * from the stack instead of R1) -- confirm against the full source. */
832 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
833 STGFUN(stg_forceIO_ret_entry)
837 Sp -= sizeofW(StgSeqFrame);
839 JMP_(GET_ENTRY(R1.cl));
842 INFO_TABLE_SRT_BITMAP(stg_forceIO_ret_info,stg_forceIO_ret_entry,0,0,0,0,RET_SMALL,,EF_,0,0);
843 STGFUN(stg_forceIO_ret_entry)
847 rval = (StgClosure *)Sp[0];
849 Sp -= sizeofW(StgSeqFrame);
852 JMP_(GET_ENTRY(R1.cl));
/* forceIO: perform the IO action on the stack, arranging (via the
 * stg_forceIO_ret frame) for its result to be entered, so rts_evalIO
 * hands back a WHNF value.
 * NOTE(review): the Sp decrements between the two stores to Sp[0]
 * appear elided in this listing. */
856 INFO_TABLE(stg_forceIO_info,stg_forceIO_entry,1,0,FUN_STATIC,,EF_,0,0);
857 FN_(stg_forceIO_entry)
860 /* Sp[0] contains the IO action we want to perform */
862 /* Replace it with the return continuation that enters the result. */
863 Sp[0] = (W_)&stg_forceIO_ret_info;
865 /* Push the RealWorld# tag and enter */
866 Sp[0] =(W_)REALWORLD_TAG;
867 JMP_(GET_ENTRY(R1.cl));
870 SET_STATIC_HDR(stg_forceIO_closure,stg_forceIO_info,CCS_DONT_CARE,,EI_)
874 /* -----------------------------------------------------------------------------
875 CHARLIKE and INTLIKE closures.
877 These are static representations of Chars and small Ints, so that
878 we can remove dynamic Chars and Ints during garbage collection and
879 replace them with references to the static objects.
880 -------------------------------------------------------------------------- */
882 #if defined(INTERPRETER) || defined(ENABLE_WIN32_DLL_SUPPORT)
/*
884  * When sticking the RTS in a DLL, we delay populating the
885  * Charlike and Intlike tables until load-time, which is only
886  * when we've got the real addresses to the C# and I# closures.
 */
/* Placeholder info tables used when the RTS lives in a DLL: filled in
 * at load time with the real C#/I# info-table addresses.
 * NOTE(review): the #else separating the DLL and non-DLL #define pairs,
 * and the continuation lines of the HDR macros, appear elided in this
 * listing. */
889 static INFO_TBL_CONST StgInfoTable czh_static_info;
890 static INFO_TBL_CONST StgInfoTable izh_static_info;
891 #define Char_hash_static_info czh_static_info
892 #define Int_hash_static_info izh_static_info
894 #define Char_hash_static_info PrelBase_Czh_static_info
895 #define Int_hash_static_info PrelBase_Izh_static_info
/* Build one static CHARLIKE / INTLIKE closure for payload value n. */
898 #define CHARLIKE_HDR(n) \
900 STATIC_HDR(Char_hash_static_info, /* C# */ \
905 #define INTLIKE_HDR(n) \
907 STATIC_HDR(Int_hash_static_info, /* I# */ \
912 /* put these in the *data* section, since the garbage collector relies
913  * on the fact that static closures live in the data section.
 */
916 /* end the name with _closure, to convince the mangler this is a closure */
918 StgIntCharlikeClosure stg_CHARLIKE_closure[] = {
1177 StgIntCharlikeClosure stg_INTLIKE_closure[] = {
1178 INTLIKE_HDR(-16), /* MIN_INTLIKE == -16 */
1210 INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */