1 /* -----------------------------------------------------------------------------
4 * Copyright (c) The GHC Team, 1994-2002.
5 * ---------------------------------------------------------------------------*/
7 #include "PosixSource.h"
10 #include "rts/Bytecodes.h"
13 #include "sm/Storage.h"
14 #include "sm/Sanity.h"
21 #include "Disassembler.h"
22 #include "Interpreter.h"
23 #include "ThreadPaused.h"
25 #include <string.h> /* for memcpy */
30 // When building the RTS in the non-dyn way on Windows, we don't
31 // want __declspec(dllimport) on the front of function prototypes
33 #if defined(mingw32_HOST_OS) && !defined(__PIC__)
34 # define LIBFFI_NOT_DLL
39 /* --------------------------------------------------------------------------
40 * The bytecode interpreter
41 * ------------------------------------------------------------------------*/
43 /* Gather stats about entry, opcode, opcode-pair frequencies. For
44 tuning the interpreter. */
46 /* #define INTERP_STATS */
49 /* Sp points to the lowest live word on the stack. */
51 #define BCO_NEXT instrs[bciPtr++]
52 #define BCO_NEXT_32 (bciPtr += 2, (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
53 #define BCO_NEXT_64 (bciPtr += 4, (((StgWord) instrs[bciPtr-4]) << 48) + (((StgWord) instrs[bciPtr-3]) << 32) + (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
54 #if WORD_SIZE_IN_BITS == 32
55 #define BCO_NEXT_WORD BCO_NEXT_32
56 #elif WORD_SIZE_IN_BITS == 64
57 #define BCO_NEXT_WORD BCO_NEXT_64
59 #error Cannot cope with WORD_SIZE_IN_BITS being neither 32 nor 64
61 #define BCO_GET_LARGE_ARG ((bci & bci_FLAG_LARGE_ARGS) ? BCO_NEXT_WORD : BCO_NEXT)
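/* Illustrative sketch (assumes a 64-bit build): a "large" argument occupies
 * four consecutive 16-bit words of the instruction stream, most-significant
 * chunk first.  The function below is a plain rendering of BCO_GET_LARGE_ARG
 * under that assumption; it is deliberately kept out of the build and only
 * documents the macros above. */
#if 0
static StgWord example_decode_large_arg (StgWord16 *instrs, int *pc, StgWord16 bci)
{
    StgWord arg;
    if (bci & bci_FLAG_LARGE_ARGS) {
        // e.g. { 0x0001, 0x0000, 0x0002, 0x0003 } decodes to
        // (1<<48) + (0<<32) + (2<<16) + 3
        arg = ((StgWord)instrs[*pc]   << 48)
            + ((StgWord)instrs[*pc+1] << 32)
            + ((StgWord)instrs[*pc+2] << 16)
            +  (StgWord)instrs[*pc+3];
        *pc += 4;
    } else {
        arg = instrs[(*pc)++];   // small args fit in a single 16-bit word
    }
    return arg;
}
#endif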
63 #define BCO_PTR(n) (W_)ptrs[n]
64 #define BCO_LIT(n) literals[n]
66 #define LOAD_STACK_POINTERS \
67 Sp = cap->r.rCurrentTSO->sp; \
68 /* We don't change this ... */ \
69 SpLim = cap->r.rCurrentTSO->stack + RESERVED_STACK_WORDS;
71 #define SAVE_STACK_POINTERS \
73 cap->r.rCurrentTSO->sp = Sp
75 #define RETURN_TO_SCHEDULER(todo,retcode) \
76 SAVE_STACK_POINTERS; \
77 cap->r.rCurrentTSO->what_next = (todo); \
78 threadPaused(cap,cap->r.rCurrentTSO); \
79 cap->r.rRet = (retcode); \
82 #define RETURN_TO_SCHEDULER_NO_PAUSE(todo,retcode) \
83 SAVE_STACK_POINTERS; \
84 cap->r.rCurrentTSO->what_next = (todo); \
85 cap->r.rRet = (retcode); \
90 allocate_NONUPD (Capability *cap, int n_words)
92 return allocate(cap, stg_max(sizeofW(StgHeader)+MIN_PAYLOAD_SIZE, n_words));
95 int rts_stop_next_breakpoint = 0;
96 int rts_stop_on_exception = 0;
100 /* Hacky stats, for tuning the interpreter ... */
101 int it_unknown_entries[N_CLOSURE_TYPES];
102 int it_total_unknown_entries;
103 int it_total_entries;
114 int it_oofreq[27][27];
118 #define INTERP_TICK(n) (n)++
120 void interp_startup ( void )
123 it_retto_BCO = it_retto_UPDATE = it_retto_other = 0;
124 it_total_entries = it_total_unknown_entries = 0;
125 for (i = 0; i < N_CLOSURE_TYPES; i++)
126 it_unknown_entries[i] = 0;
127 it_slides = it_insns = it_BCO_entries = 0;
128 for (i = 0; i < 27; i++) it_ofreq[i] = 0;
129 for (i = 0; i < 27; i++)
130 for (j = 0; j < 27; j++)
135 void interp_shutdown ( void )
137 int i, j, k, o_max, i_max, j_max;
138 debugBelch("%d constrs entered -> (%d BCO, %d UPD, %d ??? )\n",
139 it_retto_BCO + it_retto_UPDATE + it_retto_other,
140 it_retto_BCO, it_retto_UPDATE, it_retto_other );
141 debugBelch("%d total entries, %d unknown entries \n",
142 it_total_entries, it_total_unknown_entries);
143 for (i = 0; i < N_CLOSURE_TYPES; i++) {
144 if (it_unknown_entries[i] == 0) continue;
145 debugBelch(" type %2d: unknown entries (%4.1f%%) == %d\n",
146 i, 100.0 * ((double)it_unknown_entries[i]) /
147 ((double)it_total_unknown_entries),
148 it_unknown_entries[i]);
150 debugBelch("%d insns, %d slides, %d BCO_entries\n",
151 it_insns, it_slides, it_BCO_entries);
152 for (i = 0; i < 27; i++)
153 debugBelch("opcode %2d got %d\n", i, it_ofreq[i] );
155 for (k = 1; k < 20; k++) {
158 for (i = 0; i < 27; i++) {
159 for (j = 0; j < 27; j++) {
160 if (it_oofreq[i][j] > o_max) {
161 o_max = it_oofreq[i][j];
162 i_max = i; j_max = j;
167 debugBelch("%d: count (%4.1f%%) %6d is %d then %d\n",
168 k, ((double)o_max) * 100.0 / ((double)it_insns), o_max,
170 it_oofreq[i_max][j_max] = 0;
175 #else // !INTERP_STATS
177 #define INTERP_TICK(n) /* nothing */
181 static StgWord app_ptrs_itbl[] = {
184 (W_)&stg_ap_ppp_info,
185 (W_)&stg_ap_pppp_info,
186 (W_)&stg_ap_ppppp_info,
187 (W_)&stg_ap_pppppp_info,
190 HsStablePtr rts_breakpoint_io_action; // points to the IO action which is executed on a breakpoint
191 // it is set in main/GHC.hs:runStmt
194 interpretBCO (Capability* cap)
196 // Use of register here is primarily to make it clear to compilers
197 // that these entities are non-aliasable.
198 register StgPtr Sp; // local state -- stack pointer
199 register StgPtr SpLim; // local state -- stack lim pointer
200 register StgClosure *tagged_obj = 0, *obj;
205 cap->r.rHpLim = (P_)1; // HpLim is the context-switch flag; when it
206 // goes to zero we must return to the scheduler.
208 // ------------------------------------------------------------------------
211 // We have a closure to evaluate. Stack looks like:
215 // Sp | -------------------> closure
218 if (Sp[0] == (W_)&stg_enter_info) {
223 // ------------------------------------------------------------------------
226 // We have a BCO application to perform. Stack looks like:
237 else if (Sp[0] == (W_)&stg_apply_interp_info) {
238 obj = UNTAG_CLOSURE((StgClosure *)Sp[1]);
243 // ------------------------------------------------------------------------
246 // We have an unboxed value to return. See comment before
247 // do_return_unboxed, below.
250 goto do_return_unboxed;
253 // Evaluate the object on top of the stack.
255 tagged_obj = (StgClosure*)Sp[0]; Sp++;
258 obj = UNTAG_CLOSURE(tagged_obj);
259 INTERP_TICK(it_total_evals);
261 IF_DEBUG(interpreter,
263 "\n---------------------------------------------------------------\n");
264 debugBelch("Evaluating: "); printObj(obj);
265 debugBelch("Sp = %p\n", Sp);
268 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
272 // IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));
273 IF_DEBUG(sanity,checkStackFrame(Sp));
275 switch ( get_itbl(obj)->type ) {
280 case IND_OLDGEN_PERM:
283 tagged_obj = ((StgInd*)obj)->indirectee;
294 case CONSTR_NOCAF_STATIC:
308 ASSERT(((StgBCO *)obj)->arity > 0);
312 case AP: /* Copied from stg_AP_entry. */
321 if (Sp - (words+sizeofW(StgUpdateFrame)) < SpLim) {
323 Sp[1] = (W_)tagged_obj;
324 Sp[0] = (W_)&stg_enter_info;
325 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
328 /* Ok; we're safe. Party on. Push an update frame. */
329 Sp -= sizeofW(StgUpdateFrame);
331 StgUpdateFrame *__frame;
332 __frame = (StgUpdateFrame *)Sp;
333 SET_INFO(__frame, (StgInfoTable *)&stg_upd_frame_info);
334 __frame->updatee = (StgClosure *)(ap);
337 /* Reload the stack */
339 for (i=0; i < words; i++) {
340 Sp[i] = (W_)ap->payload[i];
343 obj = UNTAG_CLOSURE((StgClosure*)ap->fun);
344 ASSERT(get_itbl(obj)->type == BCO);
353 j = get_itbl(obj)->type;
354 ASSERT(j >= 0 && j < N_CLOSURE_TYPES);
355 it_unknown_entries[j]++;
356 it_total_unknown_entries++;
360 // Can't handle this object; yield to scheduler
361 IF_DEBUG(interpreter,
362 debugBelch("evaluating unknown closure -- yielding to sched\n");
366 Sp[1] = (W_)tagged_obj;
367 Sp[0] = (W_)&stg_enter_info;
368 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
372 // ------------------------------------------------------------------------
373 // We now have an evaluated object (tagged_obj). The next thing to
374 // do is return it to the stack frame on top of the stack.
376 obj = UNTAG_CLOSURE(tagged_obj);
377 ASSERT(closure_HNF(obj));
379 IF_DEBUG(interpreter,
381 "\n---------------------------------------------------------------\n");
382 debugBelch("Returning: "); printObj(obj);
383 debugBelch("Sp = %p\n", Sp);
385 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
389 IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));
391 switch (get_itbl((StgClosure *)Sp)->type) {
394 const StgInfoTable *info;
396 // NOTE: not using get_itbl().
397 info = ((StgClosure *)Sp)->header.info;
398 if (info == (StgInfoTable *)&stg_ap_v_info) {
399 n = 1; m = 0; goto do_apply;
401 if (info == (StgInfoTable *)&stg_ap_f_info) {
402 n = 1; m = 1; goto do_apply;
404 if (info == (StgInfoTable *)&stg_ap_d_info) {
405 n = 1; m = sizeofW(StgDouble); goto do_apply;
407 if (info == (StgInfoTable *)&stg_ap_l_info) {
408 n = 1; m = sizeofW(StgInt64); goto do_apply;
410 if (info == (StgInfoTable *)&stg_ap_n_info) {
411 n = 1; m = 1; goto do_apply;
413 if (info == (StgInfoTable *)&stg_ap_p_info) {
414 n = 1; m = 1; goto do_apply;
416 if (info == (StgInfoTable *)&stg_ap_pp_info) {
417 n = 2; m = 2; goto do_apply;
419 if (info == (StgInfoTable *)&stg_ap_ppp_info) {
420 n = 3; m = 3; goto do_apply;
422 if (info == (StgInfoTable *)&stg_ap_pppp_info) {
423 n = 4; m = 4; goto do_apply;
425 if (info == (StgInfoTable *)&stg_ap_ppppp_info) {
426 n = 5; m = 5; goto do_apply;
428 if (info == (StgInfoTable *)&stg_ap_pppppp_info) {
429 n = 6; m = 6; goto do_apply;
431 goto do_return_unrecognised;
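// Illustrative example: if the frame here is stg_ap_pp_info, the evaluated
// object is a function that must now be applied to the two pointer
// arguments underneath the frame, so we branch to do_apply with n = 2
// arguments occupying m = 2 words.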
435 // Returning to an update frame: do the update, pop the update
436 // frame, and continue with the next stack frame.
438 // NB. we must update with the *tagged* pointer. Some tags
439 // are not optional, and if we omit the tag bits when updating
440 // then bad things can happen (albeit very rarely). See #1925.
441 // What happened was an indirection was created with an
442 // untagged pointer, and this untagged pointer was propagated
443 // to a PAP by the GC, violating the invariant that PAPs
444 // always contain a tagged pointer to the function.
445 INTERP_TICK(it_retto_UPDATE);
446 UPD_IND(cap, ((StgUpdateFrame *)Sp)->updatee, tagged_obj);
447 Sp += sizeofW(StgUpdateFrame);
451 // Returning to an interpreted continuation: put the object on
452 // the stack, and start executing the BCO.
453 INTERP_TICK(it_retto_BCO);
456 // NB. return the untagged object; the bytecode expects it to
457 // be untagged. XXX this doesn't seem right.
458 obj = (StgClosure*)Sp[2];
459 ASSERT(get_itbl(obj)->type == BCO);
463 do_return_unrecognised:
465 // Can't handle this return address; yield to scheduler
466 INTERP_TICK(it_retto_other);
467 IF_DEBUG(interpreter,
468 debugBelch("returning to unknown frame -- yielding to sched\n");
469 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
472 Sp[1] = (W_)tagged_obj;
473 Sp[0] = (W_)&stg_enter_info;
474 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
478 // -------------------------------------------------------------------------
479 // Returning an unboxed value. The stack looks like this:
496 // where XXXX_info is one of the stg_gc_unbx_r1_info family.
498 // We're only interested in the case when the real return address
499 // is a BCO; otherwise we'll return to the scheduler.
505 ASSERT( Sp[0] == (W_)&stg_gc_unbx_r1_info
506 || Sp[0] == (W_)&stg_gc_unpt_r1_info
507 || Sp[0] == (W_)&stg_gc_f1_info
508 || Sp[0] == (W_)&stg_gc_d1_info
509 || Sp[0] == (W_)&stg_gc_l1_info
510 || Sp[0] == (W_)&stg_gc_void_info // VoidRep
513 // get the offset of the stg_ctoi_ret_XXX itbl
514 offset = stack_frame_sizeW((StgClosure *)Sp);
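// Illustrative example: for an Int# return the frame at Sp is just
// [ stg_gc_unbx_r1_info, <the Int#> ], so stack_frame_sizeW gives 2 and
// Sp+offset is whatever frame lies underneath; that is what the switch
// below dispatches on.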
516 switch (get_itbl((StgClosure *)Sp+offset)->type) {
519 // Returning to an interpreted continuation: put the object on
520 // the stack, and start executing the BCO.
521 INTERP_TICK(it_retto_BCO);
522 obj = (StgClosure*)Sp[offset+1];
523 ASSERT(get_itbl(obj)->type == BCO);
524 goto run_BCO_return_unboxed;
528 // Can't handle this return address; yield to scheduler
529 INTERP_TICK(it_retto_other);
530 IF_DEBUG(interpreter,
531 debugBelch("returning to unknown frame -- yielding to sched\n");
532 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
534 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
541 // -------------------------------------------------------------------------
545 ASSERT(obj == UNTAG_CLOSURE(tagged_obj));
546 // we have a function to apply (obj), and n arguments taking up m
547 // words on the stack. The info table (stg_ap_pp_info or whatever)
548 // is on top of the arguments on the stack.
550 switch (get_itbl(obj)->type) {
558 // we only cope with PAPs whose function is a BCO
559 if (get_itbl(UNTAG_CLOSURE(pap->fun))->type != BCO) {
560 goto defer_apply_to_sched;
563 // Stack check: we're about to unpack the PAP onto the
564 // stack. The (+1) is for the (arity < n) case, where we
565 // also need space for an extra info pointer.
566 if (Sp - (pap->n_args + 1) < SpLim) {
568 Sp[1] = (W_)tagged_obj;
569 Sp[0] = (W_)&stg_enter_info;
570 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
577 // n must be greater than 1, and the only kinds of
578 // application we support with more than one argument
579 // are all pointers...
581 // Shuffle the args for this function down, and put
582 // the appropriate info table in the gap.
583 for (i = 0; i < arity; i++) {
584 Sp[(int)i-1] = Sp[i];
585 // ^^^^^ careful, i-1 might be negative, but i is unsigned
587 Sp[arity-1] = app_ptrs_itbl[n-arity-1];
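// Worked example (illustrative; assumes app_ptrs_itbl[k] is the stg_ap info
// table for k+1 pointer args, consistent with the visible entries): with an
// stg_ap_ppp frame (n = 3, m = 3) and a PAP whose function still needs
// arity = 1 argument, the first arg is shuffled down one word and
// app_ptrs_itbl[3-1-1] = stg_ap_pp_info fills the gap, so the remaining two
// pointer args are consumed by a later do_apply when this call returns.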
589 // unpack the PAP's arguments onto the stack
591 for (i = 0; i < pap->n_args; i++) {
592 Sp[i] = (W_)pap->payload[i];
594 obj = UNTAG_CLOSURE(pap->fun);
597 else if (arity == n) {
599 for (i = 0; i < pap->n_args; i++) {
600 Sp[i] = (W_)pap->payload[i];
602 obj = UNTAG_CLOSURE(pap->fun);
605 else /* arity > n */ {
606 // build a new PAP and return it.
608 new_pap = (StgPAP *)allocate(cap, PAP_sizeW(pap->n_args + m));
609 SET_HDR(new_pap,&stg_PAP_info,CCCS);
610 new_pap->arity = pap->arity - n;
611 new_pap->n_args = pap->n_args + m;
612 new_pap->fun = pap->fun;
613 for (i = 0; i < pap->n_args; i++) {
614 new_pap->payload[i] = pap->payload[i];
616 for (i = 0; i < m; i++) {
617 new_pap->payload[pap->n_args + i] = (StgClosure *)Sp[i];
619 tagged_obj = (StgClosure *)new_pap;
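// Illustrative example: a PAP still needing arity = 3 arguments and already
// holding n_args = 1, when given n = 2 more args (m = 2 words), becomes a
// new PAP of arity 1 holding 3 arguments, which is returned as a value
// rather than entered.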
629 arity = ((StgBCO *)obj)->arity;
632 // n must be greater than 1, and the only kinds of
633 // application we support with more than one argument
634 // are all pointers...
636 // Shuffle the args for this function down, and put
637 // the appropriate info table in the gap.
638 for (i = 0; i < arity; i++) {
639 Sp[(int)i-1] = Sp[i];
640 // ^^^^^ careful, i-1 might be negative, but i is unsigned
642 Sp[arity-1] = app_ptrs_itbl[n-arity-1];
646 else if (arity == n) {
649 else /* arity > n */ {
650 // build a PAP and return it.
653 pap = (StgPAP *)allocate(cap, PAP_sizeW(m));
654 SET_HDR(pap, &stg_PAP_info,CCCS);
655 pap->arity = arity - n;
658 for (i = 0; i < m; i++) {
659 pap->payload[i] = (StgClosure *)Sp[i];
661 tagged_obj = (StgClosure *)pap;
667 // No point in us applying machine-code functions
669 defer_apply_to_sched:
671 Sp[1] = (W_)tagged_obj;
672 Sp[0] = (W_)&stg_enter_info;
673 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
676 // ------------------------------------------------------------------------
677 // Ok, we now have a bco (obj), and its arguments are all on the
678 // stack. We can start executing the byte codes.
680 // The stack is in one of two states. First, if this BCO is a
690 // Second, if this BCO is a continuation:
705 // where retval is the value being returned to this continuation.
706 // In the event of a stack check, heap check, or context switch,
707 // we need to leave the stack in a sane state so the garbage
708 // collector can find all the pointers.
710 // (1) BCO is a function: the BCO's bitmap describes the
711 // pointerhood of the arguments.
713 // (2) BCO is a continuation: BCO's bitmap describes the
714 // pointerhood of the free variables.
716 // Sadly we have three different kinds of stack/heap/cswitch check
722 if (doYouWantToGC(cap)) {
723 Sp--; Sp[0] = (W_)&stg_enter_info;
724 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
726 // Stack checks aren't necessary at return points; the stack use
727 // is aggregated into the enclosing function entry point.
731 run_BCO_return_unboxed:
733 if (doYouWantToGC(cap)) {
734 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
736 // Stack checks aren't necessary at return points; the stack use
737 // is aggregated into the enclosing function entry point.
745 Sp[0] = (W_)&stg_apply_interp_info;
746 checkStackChunk(Sp,SpLim);
751 if (doYouWantToGC(cap)) {
754 Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
755 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
759 if (Sp - INTERP_STACK_CHECK_THRESH < SpLim) {
762 Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
763 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
768 // Now, actually interpret the BCO... (no returning to the
769 // scheduler again until the stack is in an orderly state).
771 INTERP_TICK(it_BCO_entries);
773 register int bciPtr = 0; /* instruction pointer */
774 register StgWord16 bci;
775 register StgBCO* bco = (StgBCO*)obj;
776 register StgWord16* instrs = (StgWord16*)(bco->instrs->payload);
777 register StgWord* literals = (StgWord*)(&bco->literals->payload[0]);
778 register StgPtr* ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
780 bcoSize = BCO_NEXT_WORD;
781 IF_DEBUG(interpreter,debugBelch("bcoSize = %d\n", bcoSize));
784 it_lastopc = 0; /* no opcode */
788 ASSERT(bciPtr < bcoSize);
789 IF_DEBUG(interpreter,
790 //if (do_print_stack) {
791 //debugBelch("\n-- BEGIN stack\n");
792 //printStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
793 //debugBelch("-- END stack\n\n");
795 debugBelch("Sp = %p pc = %d ", Sp, bciPtr);
796 disInstr(bco,bciPtr);
799 for (i = 8; i >= 0; i--) {
800 debugBelch("%d %p\n", i, (StgPtr)(*(Sp+i)));
804 //if (do_print_stack) checkStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
808 INTERP_TICK(it_insns);
811 ASSERT( (int)instrs[bciPtr] >= 0 && (int)instrs[bciPtr] < 27 );
812 it_ofreq[ (int)instrs[bciPtr] ] ++;
813 it_oofreq[ it_lastopc ][ (int)instrs[bciPtr] ] ++;
814 it_lastopc = (int)instrs[bciPtr];
818 /* We use the high 8 bits for flags, only the highest of which is
819 * currently allocated */
820 ASSERT((bci & 0xFF00) == (bci & 0x8000));
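// Worked example (illustrative; assumes bci_FLAG_LARGE_ARGS is the 0x8000
// bit implied by the assertion above): an instruction word equal to
// (0x8000 | bci_STKCHECK) dispatches on bci_STKCHECK below, and because the
// flag is set its operand is fetched via BCO_NEXT_WORD (through
// BCO_GET_LARGE_ARG) rather than as a single 16-bit BCO_NEXT.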
822 switch (bci & 0xFF) {
824 /* check for a breakpoint on the beginning of a let binding */
827 int arg1_brk_array, arg2_array_index, arg3_freeVars;
828 StgArrWords *breakPoints;
829 int returning_from_break; // are we resuming execution from a breakpoint?
830 // if yes, then don't break this time around
831 StgClosure *ioAction; // the io action to run at a breakpoint
833 StgAP_STACK *new_aps; // a closure to save the top stack frame on the heap
837 arg1_brk_array = BCO_NEXT; // 1st arg of break instruction
838 arg2_array_index = BCO_NEXT; // 2nd arg of break instruction
839 arg3_freeVars = BCO_NEXT; // 3rd arg of break instruction
841 // check if we are returning from a breakpoint - this info
842 // is stored in the flags field of the current TSO
843 returning_from_break = cap->r.rCurrentTSO->flags & TSO_STOPPED_ON_BREAKPOINT;
845 // if we are returning from a break then skip this section
846 // and continue executing
847 if (!returning_from_break)
849 breakPoints = (StgArrWords *) BCO_PTR(arg1_brk_array);
851 // stop the current thread if either the
852 // "rts_stop_next_breakpoint" flag is true OR if the
853 // breakpoint flag for this particular expression is true
855 if (rts_stop_next_breakpoint == rtsTrue ||
856 breakPoints->payload[arg2_array_index] == rtsTrue)
858 // make sure we don't automatically stop at the next breakpoint
860 rts_stop_next_breakpoint = rtsFalse;
862 // allocate memory for a new AP_STACK, enough to
863 // store the top stack frame plus an
864 // stg_apply_interp_info pointer and a pointer to the BCO
866 size_words = BCO_BITMAP_SIZE(obj) + 2;
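// Illustrative arithmetic: if BCO_BITMAP_SIZE(obj) is 5, size_words is 7;
// the two extra slots are payload[0] and payload[1] below, holding the
// stg_apply_interp_info pointer and the BCO itself.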
867 new_aps = (StgAP_STACK *) allocate(cap, AP_STACK_sizeW(size_words));
868 SET_HDR(new_aps,&stg_AP_STACK_info,CCS_SYSTEM);
869 new_aps->size = size_words;
870 new_aps->fun = &stg_dummy_ret_closure;
872 // fill in the payload of the AP_STACK
873 new_aps->payload[0] = (StgClosure *)&stg_apply_interp_info;
874 new_aps->payload[1] = (StgClosure *)obj;
876 // copy the contents of the top stack frame into the AP_STACK
877 for (i = 2; i < size_words; i++)
879 new_aps->payload[i] = (StgClosure *)Sp[i-2];
882 // prepare the stack so that we can call the
883 // rts_breakpoint_io_action and ensure that the stack is
884 // in a reasonable state for the GC and so that
885 // execution of this BCO can continue when we resume
886 ioAction = (StgClosure *) deRefStablePtr (rts_breakpoint_io_action);
889 Sp[7] = (W_)&stg_apply_interp_info;
890 Sp[6] = (W_)&stg_noforceIO_info; // see [unreg] below
891 Sp[5] = (W_)new_aps; // the AP_STACK
892 Sp[4] = (W_)BCO_PTR(arg3_freeVars); // the info about local vars of the breakpoint
893 Sp[3] = (W_)False_closure; // True <=> a breakpoint
894 Sp[2] = (W_)&stg_ap_pppv_info;
895 Sp[1] = (W_)ioAction; // apply the IO action to its two arguments above
896 Sp[0] = (W_)&stg_enter_info; // get ready to run the IO action
897 // Note [unreg]: in unregisterised mode, the return
898 // convention for IO is different. The
899 // stg_noforceIO_info stack frame is necessary to
900 // account for this difference.
902 // set the flag in the TSO to say that we are now
903 // stopping at a breakpoint so that when we resume
904 // we don't stop on the same breakpoint that we
905 // already stopped at just now
906 cap->r.rCurrentTSO->flags |= TSO_STOPPED_ON_BREAKPOINT;
908 // stop this thread and return to the scheduler -
909 // eventually we will come back and the IO action on
910 // the top of the stack will be executed
911 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
914 // record that this thread is not stopped at a breakpoint anymore
915 cap->r.rCurrentTSO->flags &= ~TSO_STOPPED_ON_BREAKPOINT;
917 // continue normal execution of the byte code instructions
922 // Explicit stack check at the beginning of a function
923 // *only* (stack checks in case alternatives are
924 // propagated to the enclosing function).
925 StgWord stk_words_reqd = BCO_GET_LARGE_ARG + 1;
926 if (Sp - stk_words_reqd < SpLim) {
929 Sp[0] = (W_)&stg_apply_interp_info;
930 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
965 Sp[-1] = BCO_PTR(o1);
970 case bci_PUSH_ALTS: {
971 int o_bco = BCO_NEXT;
972 Sp[-2] = (W_)&stg_ctoi_R1p_info;
973 Sp[-1] = BCO_PTR(o_bco);
978 case bci_PUSH_ALTS_P: {
979 int o_bco = BCO_NEXT;
980 Sp[-2] = (W_)&stg_ctoi_R1unpt_info;
981 Sp[-1] = BCO_PTR(o_bco);
986 case bci_PUSH_ALTS_N: {
987 int o_bco = BCO_NEXT;
988 Sp[-2] = (W_)&stg_ctoi_R1n_info;
989 Sp[-1] = BCO_PTR(o_bco);
994 case bci_PUSH_ALTS_F: {
995 int o_bco = BCO_NEXT;
996 Sp[-2] = (W_)&stg_ctoi_F1_info;
997 Sp[-1] = BCO_PTR(o_bco);
1002 case bci_PUSH_ALTS_D: {
1003 int o_bco = BCO_NEXT;
1004 Sp[-2] = (W_)&stg_ctoi_D1_info;
1005 Sp[-1] = BCO_PTR(o_bco);
1010 case bci_PUSH_ALTS_L: {
1011 int o_bco = BCO_NEXT;
1012 Sp[-2] = (W_)&stg_ctoi_L1_info;
1013 Sp[-1] = BCO_PTR(o_bco);
1018 case bci_PUSH_ALTS_V: {
1019 int o_bco = BCO_NEXT;
1020 Sp[-2] = (W_)&stg_ctoi_V_info;
1021 Sp[-1] = BCO_PTR(o_bco);
1026 case bci_PUSH_APPLY_N:
1027 Sp--; Sp[0] = (W_)&stg_ap_n_info;
1029 case bci_PUSH_APPLY_V:
1030 Sp--; Sp[0] = (W_)&stg_ap_v_info;
1032 case bci_PUSH_APPLY_F:
1033 Sp--; Sp[0] = (W_)&stg_ap_f_info;
1035 case bci_PUSH_APPLY_D:
1036 Sp--; Sp[0] = (W_)&stg_ap_d_info;
1038 case bci_PUSH_APPLY_L:
1039 Sp--; Sp[0] = (W_)&stg_ap_l_info;
1041 case bci_PUSH_APPLY_P:
1042 Sp--; Sp[0] = (W_)&stg_ap_p_info;
1044 case bci_PUSH_APPLY_PP:
1045 Sp--; Sp[0] = (W_)&stg_ap_pp_info;
1047 case bci_PUSH_APPLY_PPP:
1048 Sp--; Sp[0] = (W_)&stg_ap_ppp_info;
1050 case bci_PUSH_APPLY_PPPP:
1051 Sp--; Sp[0] = (W_)&stg_ap_pppp_info;
1053 case bci_PUSH_APPLY_PPPPP:
1054 Sp--; Sp[0] = (W_)&stg_ap_ppppp_info;
1056 case bci_PUSH_APPLY_PPPPPP:
1057 Sp--; Sp[0] = (W_)&stg_ap_pppppp_info;
1060 case bci_PUSH_UBX: {
1062 int o_lits = BCO_NEXT;
1063 int n_words = BCO_NEXT;
1065 for (i = 0; i < n_words; i++) {
1066 Sp[i] = (W_)BCO_LIT(o_lits+i);
1074 /* a_1, .. a_n, b_1, .. b_by, s => a_1, .. a_n, s */
1079 INTERP_TICK(it_slides);
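// Illustrative before/after picture, using the names n and by from the
// comment above:
//    before:  Sp -> a_1 ... a_n  b_1 ... b_by  s ...
//    after:         Sp -> a_1 ... a_n  s ...
// i.e. the top n words slide down over the by words beneath them.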
1083 case bci_ALLOC_AP: {
1085 int n_payload = BCO_NEXT;
1086 ap = (StgAP*)allocate(cap, AP_sizeW(n_payload));
1088 ap->n_args = n_payload;
1089 SET_HDR(ap, &stg_AP_info, CCS_SYSTEM/*ToDo*/)
1094 case bci_ALLOC_AP_NOUPD: {
1096 int n_payload = BCO_NEXT;
1097 ap = (StgAP*)allocate(cap, AP_sizeW(n_payload));
1099 ap->n_args = n_payload;
1100 SET_HDR(ap, &stg_AP_NOUPD_info, CCS_SYSTEM/*ToDo*/)
1105 case bci_ALLOC_PAP: {
1107 int arity = BCO_NEXT;
1108 int n_payload = BCO_NEXT;
1109 pap = (StgPAP*)allocate(cap, PAP_sizeW(n_payload));
1111 pap->n_args = n_payload;
1113 SET_HDR(pap, &stg_PAP_info, CCS_SYSTEM/*ToDo*/)
1120 int stkoff = BCO_NEXT;
1121 int n_payload = BCO_NEXT;
1122 StgAP* ap = (StgAP*)Sp[stkoff];
1123 ASSERT((int)ap->n_args == n_payload);
1124 ap->fun = (StgClosure*)Sp[0];
1126 // The function should be a BCO, and its bitmap should
1127 // cover the payload of the AP correctly.
1128 ASSERT(get_itbl(ap->fun)->type == BCO
1129 && BCO_BITMAP_SIZE(ap->fun) == ap->n_args);
1131 for (i = 0; i < n_payload; i++)
1132 ap->payload[i] = (StgClosure*)Sp[i+1];
1134 IF_DEBUG(interpreter,
1135 debugBelch("\tBuilt ");
1136 printObj((StgClosure*)ap);
1143 int stkoff = BCO_NEXT;
1144 int n_payload = BCO_NEXT;
1145 StgPAP* pap = (StgPAP*)Sp[stkoff];
1146 ASSERT((int)pap->n_args == n_payload);
1147 pap->fun = (StgClosure*)Sp[0];
1149 // The function should be a BCO
1150 ASSERT(get_itbl(pap->fun)->type == BCO);
1152 for (i = 0; i < n_payload; i++)
1153 pap->payload[i] = (StgClosure*)Sp[i+1];
1155 IF_DEBUG(interpreter,
1156 debugBelch("\tBuilt ");
1157 printObj((StgClosure*)pap);
1163 /* Unpack N ptr words from t.o.s constructor */
1165 int n_words = BCO_NEXT;
1166 StgClosure* con = (StgClosure*)Sp[0];
1168 for (i = 0; i < n_words; i++) {
1169 Sp[i] = (W_)con->payload[i];
1176 int o_itbl = BCO_NEXT;
1177 int n_words = BCO_NEXT;
1178 StgInfoTable* itbl = INFO_PTR_TO_STRUCT(BCO_LIT(o_itbl));
1179 int request = CONSTR_sizeW( itbl->layout.payload.ptrs,
1180 itbl->layout.payload.nptrs );
1181 StgClosure* con = (StgClosure*)allocate_NONUPD(cap,request);
1182 ASSERT( itbl->layout.payload.ptrs + itbl->layout.payload.nptrs > 0);
1183 SET_HDR(con, (StgInfoTable*)BCO_LIT(o_itbl), CCS_SYSTEM/*ToDo*/);
1184 for (i = 0; i < n_words; i++) {
1185 con->payload[i] = (StgClosure*)Sp[i];
1190 IF_DEBUG(interpreter,
1191 debugBelch("\tBuilt ");
1192 printObj((StgClosure*)con);
1197 case bci_TESTLT_P: {
1198 unsigned int discr = BCO_NEXT;
1199 int failto = BCO_GET_LARGE_ARG;
1200 StgClosure* con = (StgClosure*)Sp[0];
1201 if (GET_TAG(con) >= discr) {
1207 case bci_TESTEQ_P: {
1208 unsigned int discr = BCO_NEXT;
1209 int failto = BCO_GET_LARGE_ARG;
1210 StgClosure* con = (StgClosure*)Sp[0];
1211 if (GET_TAG(con) != discr) {
1217 case bci_TESTLT_I: {
1218 // There should be an Int at Sp[1], and an info table at Sp[0].
1219 int discr = BCO_NEXT;
1220 int failto = BCO_GET_LARGE_ARG;
1221 I_ stackInt = (I_)Sp[1];
1222 if (stackInt >= (I_)BCO_LIT(discr))
1227 case bci_TESTEQ_I: {
1228 // There should be an Int at Sp[1], and an info table at Sp[0].
1229 int discr = BCO_NEXT;
1230 int failto = BCO_GET_LARGE_ARG;
1231 I_ stackInt = (I_)Sp[1];
1232 if (stackInt != (I_)BCO_LIT(discr)) {
1238 case bci_TESTLT_W: {
1239 // There should be a Word at Sp[1], and an info table at Sp[0].
1240 int discr = BCO_NEXT;
1241 int failto = BCO_GET_LARGE_ARG;
1242 W_ stackWord = (W_)Sp[1];
1243 if (stackWord >= (W_)BCO_LIT(discr))
1248 case bci_TESTEQ_W: {
1249 // There should be a Word at Sp[1], and an info table at Sp[0].
1250 int discr = BCO_NEXT;
1251 int failto = BCO_GET_LARGE_ARG;
1252 W_ stackWord = (W_)Sp[1];
1253 if (stackWord != (W_)BCO_LIT(discr)) {
1259 case bci_TESTLT_D: {
1260 // There should be a Double at Sp[1], and an info table at Sp[0].
1261 int discr = BCO_NEXT;
1262 int failto = BCO_GET_LARGE_ARG;
1263 StgDouble stackDbl, discrDbl;
1264 stackDbl = PK_DBL( & Sp[1] );
1265 discrDbl = PK_DBL( & BCO_LIT(discr) );
1266 if (stackDbl >= discrDbl) {
1272 case bci_TESTEQ_D: {
1273 // There should be a Double at Sp[1], and an info table at Sp[0].
1274 int discr = BCO_NEXT;
1275 int failto = BCO_GET_LARGE_ARG;
1276 StgDouble stackDbl, discrDbl;
1277 stackDbl = PK_DBL( & Sp[1] );
1278 discrDbl = PK_DBL( & BCO_LIT(discr) );
1279 if (stackDbl != discrDbl) {
1285 case bci_TESTLT_F: {
1286 // There should be a Float at Sp[1], and an info table at Sp[0].
1287 int discr = BCO_NEXT;
1288 int failto = BCO_GET_LARGE_ARG;
1289 StgFloat stackFlt, discrFlt;
1290 stackFlt = PK_FLT( & Sp[1] );
1291 discrFlt = PK_FLT( & BCO_LIT(discr) );
1292 if (stackFlt >= discrFlt) {
1298 case bci_TESTEQ_F: {
1299 // There should be a Float at Sp[1], and an info table at Sp[0].
1300 int discr = BCO_NEXT;
1301 int failto = BCO_GET_LARGE_ARG;
1302 StgFloat stackFlt, discrFlt;
1303 stackFlt = PK_FLT( & Sp[1] );
1304 discrFlt = PK_FLT( & BCO_LIT(discr) );
1305 if (stackFlt != discrFlt) {
1311 // Control-flow ish things
1313 // Context-switch check. We put it here to ensure that
1314 // the interpreter has done at least *some* work before
1315 // context switching: sometimes the scheduler can invoke
1316 // the interpreter with context_switch == 1, particularly
1317 // if the -C0 flag has been given on the cmd line.
1318 if (cap->r.rHpLim == NULL) {
1319 Sp--; Sp[0] = (W_)&stg_enter_info;
1320 RETURN_TO_SCHEDULER(ThreadInterpret, ThreadYielding);
1325 tagged_obj = (StgClosure *)Sp[0];
1331 Sp[0] = (W_)&stg_gc_unpt_r1_info;
1332 goto do_return_unboxed;
1335 Sp[0] = (W_)&stg_gc_unbx_r1_info;
1336 goto do_return_unboxed;
1339 Sp[0] = (W_)&stg_gc_f1_info;
1340 goto do_return_unboxed;
1343 Sp[0] = (W_)&stg_gc_d1_info;
1344 goto do_return_unboxed;
1347 Sp[0] = (W_)&stg_gc_l1_info;
1348 goto do_return_unboxed;
1351 Sp[0] = (W_)&stg_gc_void_info;
1352 goto do_return_unboxed;
1355 int stkoff = BCO_NEXT;
1356 signed short n = (signed short)(BCO_NEXT);
1357 Sp[stkoff] += (W_)n;
1363 int stk_offset = BCO_NEXT;
1364 int o_itbl = BCO_NEXT;
1365 void(*marshall_fn)(void*) = (void (*)(void*))BCO_LIT(o_itbl);
1367 RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE
1368 + sizeofW(StgRetDyn);
1370 /* the stack looks like this:
1372 | | <- Sp + stk_offset
1376 | | <- Sp + ret_size + 1
1378 | C fun | <- Sp + ret_size
1383 ret is a placeholder for the return value, and may be
1386 We need to copy the args out of the TSO, because when
1387 we call suspendThread() we no longer own the TSO stack,
1388 and it may move at any time - indeed suspendThread()
1389 itself may do stack squeezing and move our args.
1390 So we make a copy of the argument block.
1393 #define ROUND_UP_WDS(p) ((((StgWord)(p)) + sizeof(W_)-1)/sizeof(W_))
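/* Illustrative values: on a 64-bit build sizeof(W_) == 8, so
   ROUND_UP_WDS(4) == 1 and ROUND_UP_WDS(12) == 2; byte sizes reported by
   libffi are rounded up to whole stack words. */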
1395 ffi_cif *cif = (ffi_cif *)marshall_fn;
1396 nat nargs = cif->nargs;
1400 W_ ret[2]; // max needed
1401 W_ *arguments[stk_offset]; // max needed
1402 void *argptrs[nargs];
1405 if (cif->rtype->type == FFI_TYPE_VOID) {
1406 // necessary because cif->rtype->size == 1 for void,
1407 // but the bytecode generator has not pushed a
1408 // placeholder in this case.
1411 ret_size = ROUND_UP_WDS(cif->rtype->size);
1414 memcpy(arguments, Sp+ret_size+1,
1415 sizeof(W_) * (stk_offset-1-ret_size));
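// Worked example (illustrative, 64-bit): for a C function that returns a
// double and takes one pointer argument, ret_size is 1, the function
// pointer sits at Sp[1] and the single argument word at Sp[2], so
// stk_offset is 3 and the memcpy above copies 3-1-1 = 1 word of arguments.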
1417 // libffi expects the args as an array of pointers to
1418 // values, so we have to construct this array before making the call.
1420 p = (StgPtr)arguments;
1421 for (i = 0; i < nargs; i++) {
1422 argptrs[i] = (void *)p;
1423 // get the size from the cif
1424 p += ROUND_UP_WDS(cif->arg_types[i]->size);
1427 // this is the function we're going to call
1428 fn = (void(*)(void))Sp[ret_size];
1430 // Restore the Haskell thread's current value of errno
1431 errno = cap->r.rCurrentTSO->saved_errno;
1433 // There are a bunch of non-ptr words on the stack (the
1434 // ccall args, the ccall fun address and space for the
1435 // result), which we need to cover with an info table
1436 // since we might GC during this call.
1438 // We know how many (non-ptr) words there are before the
1439 // next valid stack frame: it is the stk_offset arg to the
1440 // CCALL instruction. So we build a RET_DYN stack frame
1441 // on the stack to describe this chunk of stack.
1444 ((StgRetDyn *)Sp)->liveness = R1_PTR | N_NONPTRS(stk_offset);
1445 ((StgRetDyn *)Sp)->info = (StgInfoTable *)&stg_gc_gen_info;
1447 // save obj (pointer to the current BCO), since this
1448 // might move during the call. We use the R1 slot in the
1449 // RET_DYN frame for this, hence R1_PTR above.
1450 ((StgRetDyn *)Sp)->payload[0] = (StgClosure *)obj;
1452 SAVE_STACK_POINTERS;
1453 tok = suspendThread(&cap->r);
1455 // We already made a copy of the arguments above.
1456 ffi_call(cif, fn, ret, argptrs);
1458 // And restart the thread again, popping the RET_DYN frame.
1459 cap = (Capability *)((void *)((unsigned char*)resumeThread(tok) - STG_FIELD_OFFSET(Capability,r)));
1460 LOAD_STACK_POINTERS;
1462 // Re-load the pointer to the BCO from the RET_DYN frame,
1463 // it might have moved during the call. Also reload the
1464 // pointers to the components of the BCO.
1465 obj = ((StgRetDyn *)Sp)->payload[0];
1467 instrs = (StgWord16*)(bco->instrs->payload);
1468 literals = (StgWord*)(&bco->literals->payload[0]);
1469 ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
1473 // Save the Haskell thread's current value of errno
1474 cap->r.rCurrentTSO->saved_errno = errno;
1476 // Copy the return value back to the TSO stack. It is at
1477 // most 2 words large, and currently resides in ret[].
1478 memcpy(Sp, ret, sizeof(W_) * stg_min(stk_offset,ret_size));
1484 /* BCO_NEXT modifies bciPtr, so be conservative. */
1485 int nextpc = BCO_GET_LARGE_ARG;
1491 barf("interpretBCO: hit a CASEFAIL");
1495 barf("interpretBCO: unknown or unimplemented opcode %d",
1498 } /* switch on opcode */
1502 barf("interpretBCO: fell off end of the interpreter");