1 /* -----------------------------------------------------------------------------
2 * $Id: StgMacros.h,v 1.57 2003/11/12 17:27:04 sof Exp $
4 * (c) The GHC Team, 1998-1999
6 * Macros used for writing STG-ish C code.
8 * ---------------------------------------------------------------------------*/
13 /* -----------------------------------------------------------------------------
14 The following macros create function headers.
16 Each basic block is represented by a C function with no arguments.
17 We therefore always begin with either
25 The macros can be used either to define the function itself, or to provide
26 prototypes (by following with a ';').
28 Note: the various I*_ shorthands in the second block below are used to
29 declare forward references to local symbols. These shorthands *have* to
30 use the 'extern' type specifier and not 'static'. The reason for this is
31 that 'static' declares a reference as being a static/local variable,
32 and *not* as a forward reference to a static variable.
34 This might seem obvious, but it had me stumped as to why my info tables
35 were suddenly all filled with 0s.
39 --------------------------------------------------------------------------- */
/* Function-header macros: every STG basic block compiles to a C function
   of no arguments returning a code pointer.  Naming convention:
     leading E  = 'extern' declaration,  leading I = 'static' definition,
     embedded D = carries the DLLIMPORT attribute for Windows DLL builds. */
41 #define STGFUN(f) StgFunPtr f(void)
42 #define EXTFUN(f) extern StgFunPtr f(void)
43 #define EXTFUN_RTS(f) extern DLL_IMPORT_RTS StgFunPtr f(void)
44 #define FN_(f) F_ f(void)
45 #define IF_(f) static F_ f(void)
46 #define EF_(f) extern F_ f(void)
47 #define EDF_(f) extern DLLIMPORT F_ f(void)
/* Declarations for RTS-exported info tables (ordinary and thunk). */
49 #define EXTINFO_RTS extern DLL_IMPORT_RTS const StgInfoTable
50 #define ETI_RTS extern DLL_IMPORT_RTS const StgThunkInfoTable
52 // Info tables as generated by the compiler are simply arrays of words.
53 typedef StgWord StgWordArray[];
/* Data-declaration shorthands: info tables (EI_/II_), return-address info
   tables (ERI_/IRI_), closures (EC_/EDC_/IC_) and closure-pointer arrays
   (ECP_/EDCP_/ICP_).  NOTE(review): EDCP_ lacks the 'const' that ECP_ and
   ICP_ have -- looks deliberate for DLL-imported data, but worth confirming. */
56 #define EDD_ extern DLLIMPORT
57 #define ED_RO_ extern const
59 #define ID_RO_ static const
60 #define EI_ extern StgWordArray
61 #define ERI_ extern const StgRetInfoTable
62 #define II_ static StgWordArray
63 #define IRI_ static const StgRetInfoTable
64 #define EC_ extern StgClosure
65 #define EDC_ extern DLLIMPORT StgClosure
66 #define IC_ static StgClosure
67 #define ECP_(x) extern const StgClosure *(x)[]
68 #define EDCP_(x) extern DLLIMPORT StgClosure *(x)[]
69 #define ICP_(x) static const StgClosure *(x)[]
71 /* -----------------------------------------------------------------------------
74 It isn't safe to "enter" every closure. Functions in particular
75 have no entry code as such; their entry point contains the code to
77 -------------------------------------------------------------------------- */
82 switch (get_itbl(R1.cl)->type) { \
86 case IND_OLDGEN_PERM: \
88 R1.cl = ((StgInd *)R1.cl)->indirectee; \
99 JMP_(ENTRY_CODE(Sp[0])); \
101 JMP_(GET_ENTRY(R1.cl)); \
105 /* -----------------------------------------------------------------------------
108 When failing a check, we save a return address on the stack and
109 jump to a pre-compiled code fragment that saves the live registers
110 and returns to the scheduler.
112 The return address in most cases will be the beginning of the basic
113 block in which the check resides, since we need to perform the check
114 again on re-entry because someone else might have stolen the resource
116 ------------------------------------------------------------------------- */
118 #define STK_CHK_FUN(headroom,assts) \
119 if (Sp - headroom < SpLim) { \
124 #define HP_CHK_FUN(headroom,assts) \
125 DO_GRAN_ALLOCATE(headroom) \
126 if ((Hp += headroom) > HpLim) { \
127 HpAlloc = (headroom); \
132 // When doing both a heap and a stack check, don't move the heap
133 // pointer unless the stack check succeeds. Otherwise we might end up
134 // with slop at the end of the current block, which can confuse the
136 #define HP_STK_CHK_FUN(stk_headroom,hp_headroom,assts) \
137 DO_GRAN_ALLOCATE(hp_headroom) \
138 if (Sp - stk_headroom < SpLim || (Hp += hp_headroom) > HpLim) { \
139 HpAlloc = (hp_headroom); \
144 /* -----------------------------------------------------------------------------
145 A heap check in a case alternative is much simpler: everything is
146 on the stack and covered by a liveness mask already, and there is
147 even a return address with an SRT info table there as well.
149 Just push R1 and return to the scheduler saying 'EnterGHC'
151 {STK,HP,HP_STK}_CHK_NP are the various checking macros for
152 bog-standard case alternatives, thunks, and non-top-level
153 functions. In all these cases, node points to a closure that we
154 can just enter to restart the heap check (the NP stands for 'node points').
156 In the NP case GranSim absolutely has to check whether the current node
157 resides on the current processor. Otherwise a FETCH event has to be
158 scheduled. All that is done in GranSimFetch. -- HWL
160 HpLim points to the LAST WORD of valid allocation space.
161 -------------------------------------------------------------------------- */
163 #define STK_CHK_NP(headroom,tag_assts) \
164 if ((Sp - (headroom)) < SpLim) { \
166 JMP_(stg_gc_enter_1); \
169 #define HP_CHK_NP(headroom,tag_assts) \
170 DO_GRAN_ALLOCATE(headroom) \
171 if ((Hp += (headroom)) > HpLim) { \
172 HpAlloc = (headroom); \
174 JMP_(stg_gc_enter_1); \
177 // See comment on HP_STK_CHK_FUN above.
178 #define HP_STK_CHK_NP(stk_headroom, hp_headroom, tag_assts) \
179 DO_GRAN_ALLOCATE(hp_headroom) \
180 if ((Sp - (stk_headroom)) < SpLim || (Hp += (hp_headroom)) > HpLim) { \
181 HpAlloc = (hp_headroom); \
183 JMP_(stg_gc_enter_1); \
187 /* Heap checks for branches of a primitive case / unboxed tuple return */
189 #define GEN_HP_CHK_ALT(headroom,lbl,tag_assts) \
190 DO_GRAN_ALLOCATE(headroom) \
191 if ((Hp += (headroom)) > HpLim) { \
192 HpAlloc = (headroom); \
/* Specialised heap checks for primitive-case / unboxed-tuple branches.
   Each wrapper names, via its stg_gc_* failure entry point, exactly which
   register is live across the check: none, R1 (unpointed), R1 (unboxed),
   F1, D1 or L1.  All delegate to GEN_HP_CHK_ALT above. */
197 #define HP_CHK_NOREGS(headroom,tag_assts) \
198 GEN_HP_CHK_ALT(headroom,stg_gc_noregs,tag_assts);
199 #define HP_CHK_UNPT_R1(headroom,tag_assts) \
200 GEN_HP_CHK_ALT(headroom,stg_gc_unpt_r1,tag_assts);
201 #define HP_CHK_UNBX_R1(headroom,tag_assts) \
202 GEN_HP_CHK_ALT(headroom,stg_gc_unbx_r1,tag_assts);
203 #define HP_CHK_F1(headroom,tag_assts) \
204 GEN_HP_CHK_ALT(headroom,stg_gc_f1,tag_assts);
205 #define HP_CHK_D1(headroom,tag_assts) \
206 GEN_HP_CHK_ALT(headroom,stg_gc_d1,tag_assts);
207 #define HP_CHK_L1(headroom,tag_assts) \
208 GEN_HP_CHK_ALT(headroom,stg_gc_l1,tag_assts);
210 /* -----------------------------------------------------------------------------
213 These are slow, but have the advantage of being usable in a variety
216 The one restriction is that any relevant SRTs must already be pointed
217 to from the stack. The return address doesn't need to have an info
218 table attached: hence it can be any old code pointer.
220 The liveness mask is a logical 'XOR' of NO_PTRS and zero or more
221 Rn_PTR constants defined below. All registers will be saved, but
222 the garbage collector needs to know which ones contain pointers.
224 Good places to use a generic heap check:
226 - case alternatives (the return address with an SRT is already
229 - primitives (no SRT required).
231 The stack frame layout for a RET_DYN is like this:
233 some pointers |-- GET_PTRS(liveness) words
234 some nonpointers |-- GET_NONPTRS(liveness) words
237 D1-2 |-- RET_DYN_NONPTR_REGS_SIZE words
240 R1-8 |-- RET_DYN_BITMAP_SIZE words
243 liveness mask |-- StgRetDyn structure
246 we assume that the size of a double is always 2 pointers (wasting a
247 word when it is only one pointer, but avoiding lots of #ifdefs).
249 NOTE: if you change the layout of RET_DYN stack frames, then you
250 might also need to adjust the value of RESERVED_STACK_WORDS in
252 -------------------------------------------------------------------------- */
254 // VERY MAGIC CONSTANTS!
255 // must agree with code in HeapStackCheck.c, stg_gen_chk, and
256 // RESERVED_STACK_WORDS in Constants.h.
/* Layout constants for RET_DYN stack frames.  As the comment above says,
   these are magic: they must agree with HeapStackCheck.c (stg_gen_chk)
   and with RESERVED_STACK_WORDS in Constants.h. */
258 #define RET_DYN_BITMAP_SIZE 8
259 #define RET_DYN_NONPTR_REGS_SIZE 10
/* 8-bit mask meaning "all of R1-R8 hold non-pointers". */
260 #define ALL_NON_PTRS 0xff
262 // Sanity check that RESERVED_STACK_WORDS is reasonable. We can't
263 // just derive RESERVED_STACK_WORDS because it's used in Haskell code
265 #if RESERVED_STACK_WORDS != (3 + RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE)
266 #error RESERVED_STACK_WORDS may be wrong!
/* Build/inspect the packed liveness word handed to the generic GC entry
   points.  Layout (from the accessors below): bits 24-31 = pointer count,
   bits 16-23 = nonpointer count, bits 0-15 = register liveness mask.
   LIVENESS_MASK inverts a set of Rn_PTR bits against ALL_NON_PTRS, so the
   GC sees which of R1-R8 contain pointers. */
269 #define LIVENESS_MASK(ptr_regs) (ALL_NON_PTRS ^ (ptr_regs))
271 // We can have up to 255 pointers and 255 nonpointers in the stack
273 #define N_NONPTRS(n) ((n)<<16)
274 #define N_PTRS(n) ((n)<<24)
276 #define GET_NONPTRS(l) ((l)>>16 & 0xff)
277 #define GET_PTRS(l) ((l)>>24 & 0xff)
278 #define GET_LIVENESS(l) ((l) & 0xffff)
290 #define HP_CHK_UNBX_TUPLE(headroom,liveness,code) \
291 if ((Hp += (headroom)) > HpLim ) { \
292 HpAlloc = (headroom); \
294 R9.w = (W_)LIVENESS_MASK(liveness); \
298 #define HP_CHK_GEN(headroom,liveness,reentry) \
299 if ((Hp += (headroom)) > HpLim ) { \
300 HpAlloc = (headroom); \
301 R9.w = (W_)LIVENESS_MASK(liveness); \
302 R10.w = (W_)reentry; \
/* Ticky-ticky variant of HP_CHK_GEN: performs the generic heap check and
   then bumps the allocation counter for the headroom just claimed.
   NOCTR: the allocation is not attributed to a particular counter here. */
306 #define HP_CHK_GEN_TICKY(headroom,liveness,reentry) \
307 HP_CHK_GEN(headroom,liveness,reentry); \
308 TICK_ALLOC_HEAP_NOCTR(headroom)
310 #define STK_CHK_GEN(headroom,liveness,reentry) \
311 if ((Sp - (headroom)) < SpLim) { \
312 R9.w = (W_)LIVENESS_MASK(liveness); \
313 R10.w = (W_)reentry; \
317 #define MAYBE_GC(liveness,reentry) \
318 if (doYouWantToGC()) { \
319 R9.w = (W_)LIVENESS_MASK(liveness); \
320 R10.w = (W_)reentry; \
321 JMP_(stg_gc_gen_hp); \
324 /* -----------------------------------------------------------------------------
325 Voluntary Yields/Blocks
327 We only have a generic version of this at the moment - if it turns
328 out to be slowing us down we can make specialised ones.
329 -------------------------------------------------------------------------- */
/* RTS entry points used by the generic YIELD/BLOCK macros below. */
331 EXTFUN_RTS(stg_gen_yield);
332 EXTFUN_RTS(stg_gen_block);
334 #define YIELD(liveness,reentry) \
336 R9.w = (W_)LIVENESS_MASK(liveness); \
337 R10.w = (W_)reentry; \
338 JMP_(stg_gen_yield); \
341 #define BLOCK(liveness,reentry) \
343 R9.w = (W_)LIVENESS_MASK(liveness); \
344 R10.w = (W_)reentry; \
345 JMP_(stg_gen_block); \
348 #define BLOCK_NP(ptrs) \
350 EXTFUN_RTS(stg_block_##ptrs); \
351 JMP_(stg_block_##ptrs); \
356 Similar to BLOCK_NP but separates the saving of the thread state from the
357 actual jump via an StgReturn
360 #define SAVE_THREAD_STATE(ptrs) \
366 #define THREAD_RETURN(ptrs) \
368 CurrentTSO->what_next = ThreadEnterGHC; \
369 R1.i = ThreadBlocked; \
373 /* -----------------------------------------------------------------------------
374 CCall_GC needs to push a dummy stack frame containing the contents
375 of volatile registers and variables.
377 We use a RET_DYN frame the same as for a dynamic heap check.
378 ------------------------------------------------------------------------- */
380 /* -----------------------------------------------------------------------------
383 RETVEC(p,t) where 'p' is a pointer to the info table for a
384 vectored return address, returns the address of the return code for
387 Return vectors are placed in *reverse order* immediately before the info
388 table for the return address. Hence the formula for computing the
389 actual return address is (addr - sizeof(RetInfoTable) - tag - 1).
390 The extra subtraction of one word is because tags start at zero.
391 -------------------------------------------------------------------------- */
393 #ifdef TABLES_NEXT_TO_CODE
394 #define RET_VEC(p,t) (*((P_)(p) - sizeofW(StgRetInfoTable) - t - 1))
396 #define RET_VEC(p,t) (((StgRetInfoTable *)p)->vector[t])
399 /* -----------------------------------------------------------------------------
401 -------------------------------------------------------------------------- */
404 /* set the tag register (if we have one) */
/* Expands to nothing: no tag register is configured in this build. */
405 #define SET_TAG(t) /* nothing */
407 #ifdef EAGER_BLACKHOLING
409 # define UPD_BH_UPDATABLE(info) \
410 TICK_UPD_BH_UPDATABLE(); \
412 bdescr *bd = Bdescr(R1.p); \
413 if (bd->u.back != (bdescr *)BaseReg) { \
414 if (bd->gen_no >= 1 || bd->step->no >= 1) { \
417 EXTFUN_RTS(stg_gc_enter_1_hponly); \
418 JMP_(stg_gc_enter_1_hponly); \
422 SET_INFO(R1.cl,&stg_BLACKHOLE_info)
423 # define UPD_BH_SINGLE_ENTRY(info) \
424 TICK_UPD_BH_SINGLE_ENTRY(); \
426 bdescr *bd = Bdescr(R1.p); \
427 if (bd->u.back != (bdescr *)BaseReg) { \
428 if (bd->gen_no >= 1 || bd->step->no >= 1) { \
431 EXTFUN_RTS(stg_gc_enter_1_hponly); \
432 JMP_(stg_gc_enter_1_hponly); \
436 SET_INFO(R1.cl,&stg_BLACKHOLE_info)
439 # define UPD_BH_UPDATABLE(info) \
440 TICK_UPD_BH_UPDATABLE(); \
441 SET_INFO(R1.cl,&stg_BLACKHOLE_info)
442 # define UPD_BH_SINGLE_ENTRY(info) \
443 TICK_UPD_BH_SINGLE_ENTRY(); \
444 SET_INFO(R1.cl,&stg_SE_BLACKHOLE_info)
446 // An object is replaced by a blackhole, so we fill the slop with zeros.
448 // This looks like it can't work - we're overwriting the contents of
449 // the THUNK with slop! Perhaps this never worked??? --SDM
450 // The problem is that with eager-black-holing we currently perform
451 // the black-holing operation at the *beginning* of the basic block,
452 // when we still need the contents of the thunk.
453 // Perhaps the thing to do is to overwrite it at the *end* of the
454 // basic block, when we've already sucked out the thunk's contents? -- SLPJ
456 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
/* Profiling version of eager black-holing for updatable thunks:
   bump the ticky counter, record the thunk's death and fill its slop
   (required for LDV profiling), overwrite the header with the blackhole
   info table, then record the blackhole's creation for LDV.
   No trailing ';' on the last line -- the call site supplies it. */
458 # define UPD_BH_UPDATABLE(info) \
459 TICK_UPD_BH_UPDATABLE(); \
460 LDV_recordDead_FILL_SLOP_DYNAMIC(R1.cl); \
461 SET_INFO(R1.cl,&stg_BLACKHOLE_info); \
462 LDV_recordCreate(R1.cl)
/* Profiling version of eager black-holing for single-entry thunks:
   mirror of UPD_BH_UPDATABLE above, but installs the single-entry
   blackhole (stg_SE_BLACKHOLE_info).  No trailing ';' on the last
   line -- the call site supplies it.
   Fix: a ';' was missing after SET_INFO(...), so the expansion glued
   SET_INFO and LDV_recordCreate into one malformed statement (compare
   the ';' present in UPD_BH_UPDATABLE). */
463 # define UPD_BH_SINGLE_ENTRY(info) \
464 TICK_UPD_BH_SINGLE_ENTRY(); \
465 LDV_recordDead_FILL_SLOP_DYNAMIC(R1.cl); \
466 SET_INFO(R1.cl,&stg_SE_BLACKHOLE_info); \
467 LDV_recordCreate(R1.cl)
468 # endif /* PROFILING */
/* Without EAGER_BLACKHOLING, thunks are not overwritten on entry; the
   black-holing macros become no-ops.  NOTE(review): presumably the RTS
   then black-holes lazily elsewhere -- confirm against the scheduler. */
470 #else /* !EAGER_BLACKHOLING */
471 # define UPD_BH_UPDATABLE(thunk) /* nothing */
472 # define UPD_BH_SINGLE_ENTRY(thunk) /* nothing */
473 #endif /* EAGER_BLACKHOLING */
475 /* -----------------------------------------------------------------------------
476 Moving Floats and Doubles
478 ASSIGN_FLT is for assigning a float to memory (usually the
479 stack/heap). The memory address is guaranteed to be
480 StgWord aligned (currently == sizeof(void *)).
482 PK_FLT is for pulling a float out of memory. The memory is
483 guaranteed to be StgWord aligned.
484 -------------------------------------------------------------------------- */
/* Store/load an StgFloat to/from StgWord-aligned memory (see comment
   above: the address is guaranteed word-aligned by the caller). */
486 INLINE_HEADER void ASSIGN_FLT (W_ [], StgFloat);
487 INLINE_HEADER StgFloat PK_FLT (W_ []);
/* When float alignment fits word alignment we can simply type-pun via a
   pointer cast.  NOTE(review): this predates strict-aliasing-clean
   memcpy idioms; kept as-is to match the rest of the file. */
489 #if ALIGNMENT_FLOAT <= ALIGNMENT_LONG
491 INLINE_HEADER void ASSIGN_FLT(W_ p_dest[], StgFloat src) { *(StgFloat *)p_dest = src; }
492 INLINE_HEADER StgFloat PK_FLT (W_ p_src[]) { return *(StgFloat *)p_src; }
494 #else /* ALIGNMENT_FLOAT > ALIGNMENT_UNSIGNED_INT */
496 INLINE_HEADER void ASSIGN_FLT(W_ p_dest[], StgFloat src)
503 INLINE_HEADER StgFloat PK_FLT(W_ p_src[])
510 #endif /* ALIGNMENT_FLOAT > ALIGNMENT_LONG */
/* Store/load an StgDouble to/from StgWord-aligned memory.  When double
   alignment fits word alignment, a direct pointer-cast access suffices
   (same caveat as the float versions above). */
512 #if ALIGNMENT_DOUBLE <= ALIGNMENT_LONG
514 INLINE_HEADER void ASSIGN_DBL (W_ [], StgDouble);
515 INLINE_HEADER StgDouble PK_DBL (W_ []);
517 INLINE_HEADER void ASSIGN_DBL(W_ p_dest[], StgDouble src) { *(StgDouble *)p_dest = src; }
518 INLINE_HEADER StgDouble PK_DBL (W_ p_src[]) { return *(StgDouble *)p_src; }
520 #else /* ALIGNMENT_DOUBLE > ALIGNMENT_LONG */
522 /* Sparc uses two floating point registers to hold a double. We can
523 * write ASSIGN_DBL and PK_DBL by directly accessing the registers
524 * independently - unfortunately this code isn't writable in C, we
525 * have to use inline assembler.
527 #if sparc_TARGET_ARCH
529 #define ASSIGN_DBL(dst0,src) \
530 { StgPtr dst = (StgPtr)(dst0); \
531 __asm__("st %2,%0\n\tst %R2,%1" : "=m" (((P_)(dst))[0]), \
532 "=m" (((P_)(dst))[1]) : "f" (src)); \
535 #define PK_DBL(src0) \
536 ( { StgPtr src = (StgPtr)(src0); \
538 __asm__("ld %1,%0\n\tld %2,%R0" : "=f" (d) : \
539 "m" (((P_)(src))[0]), "m" (((P_)(src))[1])); d; \
542 #else /* ! sparc_TARGET_ARCH */
/* Prototypes for the word-at-a-time fallback used when doubles are more
   strictly aligned than words and we are not on sparc (which uses the
   inline-asm versions above). */
544 INLINE_HEADER void ASSIGN_DBL (W_ [], StgDouble);
545 INLINE_HEADER StgDouble PK_DBL (W_ []);
557 INLINE_HEADER void ASSIGN_DBL(W_ p_dest[], StgDouble src)
561 p_dest[0] = y.du.dhi;
562 p_dest[1] = y.du.dlo;
565 /* GCC also works with this version, but it generates
566 the same code as the previous one, and is not ANSI
568 #define ASSIGN_DBL( p_dest, src ) \
569 *p_dest = ((double_thing) src).du.dhi; \
570 *(p_dest+1) = ((double_thing) src).du.dlo \
573 INLINE_HEADER StgDouble PK_DBL(W_ p_src[])
581 #endif /* ! sparc_TARGET_ARCH */
583 #endif /* ALIGNMENT_DOUBLE > ALIGNMENT_UNSIGNED_INT */
585 #ifdef SUPPORT_LONG_LONGS
590 } unpacked_double_word;
594 unpacked_double_word iu;
599 unpacked_double_word wu;
602 INLINE_HEADER void ASSIGN_Word64(W_ p_dest[], StgWord64 src)
606 p_dest[0] = y.wu.dhi;
607 p_dest[1] = y.wu.dlo;
610 INLINE_HEADER StgWord64 PK_Word64(W_ p_src[])
618 INLINE_HEADER void ASSIGN_Int64(W_ p_dest[], StgInt64 src)
622 p_dest[0] = y.iu.dhi;
623 p_dest[1] = y.iu.dlo;
626 INLINE_HEADER StgInt64 PK_Int64(W_ p_src[])
634 #elif SIZEOF_VOID_P == 8
636 INLINE_HEADER void ASSIGN_Word64(W_ p_dest[], StgWord64 src)
641 INLINE_HEADER StgWord64 PK_Word64(W_ p_src[])
646 INLINE_HEADER void ASSIGN_Int64(W_ p_dest[], StgInt64 src)
651 INLINE_HEADER StgInt64 PK_Int64(W_ p_src[])
658 /* -----------------------------------------------------------------------------
660 -------------------------------------------------------------------------- */
/* Info table for catch frames; the definition lives in the RTS. */
662 extern DLL_IMPORT_RTS const StgPolyInfoTable stg_catch_frame_info;
664 /* -----------------------------------------------------------------------------
666 -------------------------------------------------------------------------- */
668 #if defined(USE_SPLIT_MARKERS)
669 #if defined(LEADING_UNDERSCORE)
670 #define __STG_SPLIT_MARKER __asm__("\n___stg_split_marker:");
672 #define __STG_SPLIT_MARKER __asm__("\n__stg_split_marker:");
675 #define __STG_SPLIT_MARKER /* nothing */
678 /* -----------------------------------------------------------------------------
679 Closure and Info Macros with casting.
681 We don't want to mess around with casts in the generated C code, so
682 we use these casting versions of the closure macros.
684 This version of SET_HDR also includes CCS_ALLOC for profiling - the
685 reason we don't use two separate macros is that the cost centre
686 field is sometimes a non-simple expression and we want to share its
687 value between SET_HDR and CCS_ALLOC.
688 -------------------------------------------------------------------------- */
690 #define SET_HDR_(c,info,ccs,size) \
692 CostCentreStack *tmp = (ccs); \
693 SET_HDR((StgClosure *)(c),(StgInfoTable *)(info),tmp); \
694 CCS_ALLOC(tmp,size); \
697 /* -----------------------------------------------------------------------------
698 Saving context for exit from the STG world, and loading up context
699 on entry to STG code.
701 We save all the STG registers (that is, the ones that are mapped to
702 machine registers) in their places in the TSO.
704 The stack registers go into the current stack object, and the
705 current nursery is updated from the heap pointer.
707 These functions assume that BaseReg is loaded appropriately (if
709 -------------------------------------------------------------------------- */
714 SaveThreadState(void)
718 /* Don't need to save REG_Base, it won't have changed. */
724 #ifdef REG_CurrentTSO
725 SAVE_CurrentTSO = tso;
727 #ifdef REG_CurrentNursery
728 SAVE_CurrentNursery = CurrentNursery;
730 #if defined(PROFILING)
731 CurrentTSO->prof.CCCS = CCCS;
736 LoadThreadState (void)
740 #ifdef REG_CurrentTSO
741 CurrentTSO = SAVE_CurrentTSO;
746 SpLim = (P_)&(tso->stack) + RESERVED_STACK_WORDS;
747 OpenNursery(Hp,HpLim);
749 #ifdef REG_CurrentNursery
750 CurrentNursery = SAVE_CurrentNursery;
752 # if defined(PROFILING)
753 CCCS = CurrentTSO->prof.CCCS;
759 /* -----------------------------------------------------------------------------
760 Module initialisation
762 The module initialisation code looks like this, roughly:
765 JMP_(__stginit_Foo_1_p)
768 FN(__stginit_Foo_1_p) {
772 We have one version of the init code with a module version and the
773 'way' attached to it. The version number helps to catch cases
774 where modules are not compiled in dependency order before being
775 linked: if a module has been compiled since any modules which depend on
776 it, then the latter modules will refer to a different version in their
777 init blocks and a link error will ensue.
779 The 'way' suffix helps to catch cases where modules compiled in different
780 ways are linked together (eg. profiled and non-profiled).
782 We provide a plain, unadorned, version of the module init code
783 which just jumps to the version with the label and way attached. The
784 reason for this is that when using foreign exports, the caller of
785 startupHaskell() must supply the name of the init function for the "top"
786 module in the program, and we don't want to require that this name
787 has the version and way info appended to it.
788 -------------------------------------------------------------------------- */
/* Push a module-init function pointer onto the init stack by storing at
   Sp and post-incrementing (the init stack grows upward through Sp). */
790 #define PUSH_INIT_STACK(reg_function) \
791 *(Sp++) = (W_)reg_function
793 #define POP_INIT_STACK() \
796 #define MOD_INIT_WRAPPER(label,real_init) \
799 #define START_MOD_INIT(plain_lbl, real_lbl) \
800 static int _module_registered = 0; \
809 if (! _module_registered) { \
810 _module_registered = 1; \
812 /* extern decls go here, followed by init code */
/* Pin a foreign-exported binder for the program's lifetime by taking a
   stable pointer to it (prevents the GC from collecting the closure). */
814 #define REGISTER_FOREIGN_EXPORT(reg_fe_binder) \
815 STGCALL1(getStablePtr,reg_fe_binder)
/* Queue an imported module's init function so END_MOD_INIT pops and
   runs it, giving dependency-ordered module initialisation. */
817 #define REGISTER_IMPORT(reg_mod_name) \
818 PUSH_INIT_STACK(reg_mod_name)
820 #define END_MOD_INIT() \
822 JMP_(POP_INIT_STACK()); \
825 /* -----------------------------------------------------------------------------
826 Support for _ccall_GC_ and _casm_GC.
827 -------------------------------------------------------------------------- */
830 * Suspending/resuming threads for doing external C-calls (_ccall_GC).
831 * These functions are defined in rts/Schedule.c.
/* Suspend/resume the current Haskell thread around an external C call
   (_ccall_GC); both are defined in rts/Schedule.c (see comment above).
   suspendThread returns a token that resumeThread exchanges for the
   (possibly relocated) register table. */
833 StgInt suspendThread ( StgRegTable *, rtsBool);
834 StgRegTable * resumeThread ( StgInt, rtsBool );
836 #define SUSPEND_THREAD(token,threaded) \
838 token = suspendThread(BaseReg,threaded);
841 #define RESUME_THREAD(token,threaded) \
842 BaseReg = resumeThread(token,threaded); \
845 #define RESUME_THREAD(token,threaded) \
846 (void)resumeThread(token,threaded); \
850 #endif /* STGMACROS_H */