1 /* -----------------------------------------------------------------------------
4 * Copyright (c) The GHC Team, 1994-2002.
5 * ---------------------------------------------------------------------------*/
7 #include "PosixSource.h"
10 #include "rts/Bytecodes.h"
13 #include "sm/Storage.h"
21 #include "Disassembler.h"
22 #include "Interpreter.h"
23 #include "ThreadPaused.h"
25 #include <string.h> /* for memcpy */
30 // When building the RTS in the non-dyn way on Windows, we don't
31 // want __declspec(dllimport) on the front of function prototypes
33 #if defined(mingw32_HOST_OS) && !defined(__PIC__)
34 # define LIBFFI_NOT_DLL
39 /* --------------------------------------------------------------------------
40 * The bytecode interpreter
41 * ------------------------------------------------------------------------*/
43 /* Gather stats about entry, opcode, opcode-pair frequencies. For
44 tuning the interpreter. */
46 /* #define INTERP_STATS */
49 /* Sp points to the lowest live word on the stack. */
/* Fetch the next 16-bit slot of the instruction stream and advance bciPtr.
   (instrs is declared as a StgWord16* in the interpreter main loop.) */
51 #define BCO_NEXT instrs[bciPtr++]
/* Fetch a 32-bit operand assembled from two consecutive 16-bit slots,
   most-significant chunk first. */
52 #define BCO_NEXT_32 (bciPtr += 2, (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
/* Fetch a 64-bit operand assembled from four consecutive 16-bit slots,
   most-significant chunk first. */
53 #define BCO_NEXT_64 (bciPtr += 4, (((StgWord) instrs[bciPtr-4]) << 48) + (((StgWord) instrs[bciPtr-3]) << 32) + (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
54 #if WORD_SIZE_IN_BITS == 32
55 #define BCO_NEXT_WORD BCO_NEXT_32
56 #elif WORD_SIZE_IN_BITS == 64
57 #define BCO_NEXT_WORD BCO_NEXT_64
59 #error Cannot cope with WORD_SIZE_IN_BITS being nether 32 nor 64
/* Fetch an instruction argument: a full word when the current opcode's
   bci_FLAG_LARGE_ARGS flag is set, otherwise a single 16-bit slot. */
61 #define BCO_GET_LARGE_ARG ((bci & bci_FLAG_LARGE_ARGS) ? BCO_NEXT_WORD : BCO_NEXT)
/* Index into the BCO's pointer table (ptrs = bco->ptrs->payload). */
63 #define BCO_PTR(n) (W_)ptrs[n]
/* Index into the BCO's literal table (literals = bco->literals->payload). */
64 #define BCO_LIT(n) literals[n]
/* Reload the local Sp/SpLim cache from the current TSO; used e.g. after
   resumeThread(), when the stack may have been moved or squeezed. */
66 #define LOAD_STACK_POINTERS \
67 Sp = cap->r.rCurrentTSO->sp; \
68 /* We don't change this ... */ \
69 SpLim = cap->r.rCurrentTSO->stack + RESERVED_STACK_WORDS;
71 #define SAVE_STACK_POINTERS \
73 cap->r.rCurrentTSO->sp = Sp
75 #define RETURN_TO_SCHEDULER(todo,retcode) \
76 SAVE_STACK_POINTERS; \
77 cap->r.rCurrentTSO->what_next = (todo); \
78 threadPaused(cap,cap->r.rCurrentTSO); \
79 cap->r.rRet = (retcode); \
82 #define RETURN_TO_SCHEDULER_NO_PAUSE(todo,retcode) \
83 SAVE_STACK_POINTERS; \
84 cap->r.rCurrentTSO->what_next = (todo); \
85 cap->r.rRet = (retcode); \
90 allocate_NONUPD (Capability *cap, int n_words)
92 return allocateLocal(cap, stg_max(sizeofW(StgHeader)+MIN_PAYLOAD_SIZE, n_words));
// When true, stop the thread at the next bci_BRK_FUN instruction
// (checked, and reset to rtsFalse, in the breakpoint handler below).
95 int rts_stop_next_breakpoint = 0;
// NOTE(review): not referenced in this chunk — presumably consulted by the
// exception-raising machinery when the debugger is active; confirm at callers.
96 int rts_stop_on_exception = 0;
100 /* Hacky stats, for tuning the interpreter ... */
101 int it_unknown_entries[N_CLOSURE_TYPES];
102 int it_total_unknown_entries;
103 int it_total_entries;
114 int it_oofreq[27][27];
118 #define INTERP_TICK(n) (n)++
120 void interp_startup ( void )
123 it_retto_BCO = it_retto_UPDATE = it_retto_other = 0;
124 it_total_entries = it_total_unknown_entries = 0;
125 for (i = 0; i < N_CLOSURE_TYPES; i++)
126 it_unknown_entries[i] = 0;
127 it_slides = it_insns = it_BCO_entries = 0;
128 for (i = 0; i < 27; i++) it_ofreq[i] = 0;
129 for (i = 0; i < 27; i++)
130 for (j = 0; j < 27; j++)
135 void interp_shutdown ( void )
137 int i, j, k, o_max, i_max, j_max;
138 debugBelch("%d constrs entered -> (%d BCO, %d UPD, %d ??? )\n",
139 it_retto_BCO + it_retto_UPDATE + it_retto_other,
140 it_retto_BCO, it_retto_UPDATE, it_retto_other );
141 debugBelch("%d total entries, %d unknown entries \n",
142 it_total_entries, it_total_unknown_entries);
143 for (i = 0; i < N_CLOSURE_TYPES; i++) {
144 if (it_unknown_entries[i] == 0) continue;
145 debugBelch(" type %2d: unknown entries (%4.1f%%) == %d\n",
146 i, 100.0 * ((double)it_unknown_entries[i]) /
147 ((double)it_total_unknown_entries),
148 it_unknown_entries[i]);
150 debugBelch("%d insns, %d slides, %d BCO_entries\n",
151 it_insns, it_slides, it_BCO_entries);
152 for (i = 0; i < 27; i++)
153 debugBelch("opcode %2d got %d\n", i, it_ofreq[i] );
155 for (k = 1; k < 20; k++) {
158 for (i = 0; i < 27; i++) {
159 for (j = 0; j < 27; j++) {
160 if (it_oofreq[i][j] > o_max) {
161 o_max = it_oofreq[i][j];
162 i_max = i; j_max = j;
167 debugBelch("%d: count (%4.1f%%) %6d is %d then %d\n",
168 k, ((double)o_max) * 100.0 / ((double)it_insns), o_max,
170 it_oofreq[i_max][j_max] = 0;
175 #else // !INTERP_STATS
177 #define INTERP_TICK(n) /* nothing */
181 static StgWord app_ptrs_itbl[] = {
184 (W_)&stg_ap_ppp_info,
185 (W_)&stg_ap_pppp_info,
186 (W_)&stg_ap_ppppp_info,
187 (W_)&stg_ap_pppppp_info,
190 HsStablePtr rts_breakpoint_io_action; // points to the IO action which is executed on a breakpoint
191 // it is set in main/GHC.hs:runStmt
194 interpretBCO (Capability* cap)
196 // Use of register here is primarily to make it clear to compilers
197 // that these entities are non-aliasable.
198 register StgPtr Sp; // local state -- stack pointer
199 register StgPtr SpLim; // local state -- stack lim pointer
200 register StgClosure *tagged_obj = 0, *obj;
205 cap->r.rHpLim = (P_)1; // HpLim is the context-switch flag; when it
206 // goes to zero we must return to the scheduler.
208 // ------------------------------------------------------------------------
211 // We have a closure to evaluate. Stack looks like:
215 // Sp | -------------------> closure
218 if (Sp[0] == (W_)&stg_enter_info) {
223 // ------------------------------------------------------------------------
226 // We have a BCO application to perform. Stack looks like:
237 else if (Sp[0] == (W_)&stg_apply_interp_info) {
238 obj = UNTAG_CLOSURE((StgClosure *)Sp[1]);
243 // ------------------------------------------------------------------------
246 // We have an unboxed value to return. See comment before
247 // do_return_unboxed, below.
250 goto do_return_unboxed;
253 // Evaluate the object on top of the stack.
255 tagged_obj = (StgClosure*)Sp[0]; Sp++;
258 obj = UNTAG_CLOSURE(tagged_obj);
259 INTERP_TICK(it_total_evals);
261 IF_DEBUG(interpreter,
263 "\n---------------------------------------------------------------\n");
264 debugBelch("Evaluating: "); printObj(obj);
265 debugBelch("Sp = %p\n", Sp);
268 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
272 IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));
274 switch ( get_itbl(obj)->type ) {
279 case IND_OLDGEN_PERM:
282 tagged_obj = ((StgInd*)obj)->indirectee;
293 case CONSTR_NOCAF_STATIC:
307 ASSERT(((StgBCO *)obj)->arity > 0);
311 case AP: /* Copied from stg_AP_entry. */
320 if (Sp - (words+sizeofW(StgUpdateFrame)) < SpLim) {
322 Sp[1] = (W_)tagged_obj;
323 Sp[0] = (W_)&stg_enter_info;
324 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
327 /* Ok; we're safe. Party on. Push an update frame. */
328 Sp -= sizeofW(StgUpdateFrame);
330 StgUpdateFrame *__frame;
331 __frame = (StgUpdateFrame *)Sp;
332 SET_INFO(__frame, (StgInfoTable *)&stg_upd_frame_info);
333 __frame->updatee = (StgClosure *)(ap);
336 /* Reload the stack */
338 for (i=0; i < words; i++) {
339 Sp[i] = (W_)ap->payload[i];
342 obj = UNTAG_CLOSURE((StgClosure*)ap->fun);
343 ASSERT(get_itbl(obj)->type == BCO);
352 j = get_itbl(obj)->type;
353 ASSERT(j >= 0 && j < N_CLOSURE_TYPES);
354 it_unknown_entries[j]++;
355 it_total_unknown_entries++;
359 // Can't handle this object; yield to scheduler
360 IF_DEBUG(interpreter,
361 debugBelch("evaluating unknown closure -- yielding to sched\n");
365 Sp[1] = (W_)tagged_obj;
366 Sp[0] = (W_)&stg_enter_info;
367 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
371 // ------------------------------------------------------------------------
372 // We now have an evaluated object (tagged_obj). The next thing to
373 // do is return it to the stack frame on top of the stack.
375 obj = UNTAG_CLOSURE(tagged_obj);
376 ASSERT(closure_HNF(obj));
378 IF_DEBUG(interpreter,
380 "\n---------------------------------------------------------------\n");
381 debugBelch("Returning: "); printObj(obj);
382 debugBelch("Sp = %p\n", Sp);
384 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
388 IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));
390 switch (get_itbl((StgClosure *)Sp)->type) {
393 const StgInfoTable *info;
395 // NOTE: not using get_itbl().
396 info = ((StgClosure *)Sp)->header.info;
397 if (info == (StgInfoTable *)&stg_ap_v_info) {
398 n = 1; m = 0; goto do_apply;
400 if (info == (StgInfoTable *)&stg_ap_f_info) {
401 n = 1; m = 1; goto do_apply;
403 if (info == (StgInfoTable *)&stg_ap_d_info) {
404 n = 1; m = sizeofW(StgDouble); goto do_apply;
406 if (info == (StgInfoTable *)&stg_ap_l_info) {
407 n = 1; m = sizeofW(StgInt64); goto do_apply;
409 if (info == (StgInfoTable *)&stg_ap_n_info) {
410 n = 1; m = 1; goto do_apply;
412 if (info == (StgInfoTable *)&stg_ap_p_info) {
413 n = 1; m = 1; goto do_apply;
415 if (info == (StgInfoTable *)&stg_ap_pp_info) {
416 n = 2; m = 2; goto do_apply;
418 if (info == (StgInfoTable *)&stg_ap_ppp_info) {
419 n = 3; m = 3; goto do_apply;
421 if (info == (StgInfoTable *)&stg_ap_pppp_info) {
422 n = 4; m = 4; goto do_apply;
424 if (info == (StgInfoTable *)&stg_ap_ppppp_info) {
425 n = 5; m = 5; goto do_apply;
427 if (info == (StgInfoTable *)&stg_ap_pppppp_info) {
428 n = 6; m = 6; goto do_apply;
430 goto do_return_unrecognised;
434 // Returning to an update frame: do the update, pop the update
435 // frame, and continue with the next stack frame.
437 // NB. we must update with the *tagged* pointer. Some tags
438 // are not optional, and if we omit the tag bits when updating
439 // then bad things can happen (albeit very rarely). See #1925.
440 // What happened was an indirection was created with an
441 // untagged pointer, and this untagged pointer was propagated
442 // to a PAP by the GC, violating the invariant that PAPs
443 // always contain a tagged pointer to the function.
444 INTERP_TICK(it_retto_UPDATE);
445 UPD_IND(((StgUpdateFrame *)Sp)->updatee, tagged_obj);
446 Sp += sizeofW(StgUpdateFrame);
450 // Returning to an interpreted continuation: put the object on
451 // the stack, and start executing the BCO.
452 INTERP_TICK(it_retto_BCO);
455 // NB. return the untagged object; the bytecode expects it to
456 // be untagged. XXX this doesn't seem right.
457 obj = (StgClosure*)Sp[2];
458 ASSERT(get_itbl(obj)->type == BCO);
462 do_return_unrecognised:
464 // Can't handle this return address; yield to scheduler
465 INTERP_TICK(it_retto_other);
466 IF_DEBUG(interpreter,
467 debugBelch("returning to unknown frame -- yielding to sched\n");
468 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
471 Sp[1] = (W_)tagged_obj;
472 Sp[0] = (W_)&stg_enter_info;
473 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
477 // -------------------------------------------------------------------------
478 // Returning an unboxed value. The stack looks like this:
495 // where XXXX_info is one of the stg_gc_unbx_r1_info family.
497 // We're only interested in the case when the real return address
498 // is a BCO; otherwise we'll return to the scheduler.
504 ASSERT( Sp[0] == (W_)&stg_gc_unbx_r1_info
505 || Sp[0] == (W_)&stg_gc_unpt_r1_info
506 || Sp[0] == (W_)&stg_gc_f1_info
507 || Sp[0] == (W_)&stg_gc_d1_info
508 || Sp[0] == (W_)&stg_gc_l1_info
509 || Sp[0] == (W_)&stg_gc_void_info // VoidRep
512 // get the offset of the stg_ctoi_ret_XXX itbl
513 offset = stack_frame_sizeW((StgClosure *)Sp);
515 switch (get_itbl((StgClosure *)Sp+offset)->type) {
518 // Returning to an interpreted continuation: put the object on
519 // the stack, and start executing the BCO.
520 INTERP_TICK(it_retto_BCO);
521 obj = (StgClosure*)Sp[offset+1];
522 ASSERT(get_itbl(obj)->type == BCO);
523 goto run_BCO_return_unboxed;
527 // Can't handle this return address; yield to scheduler
528 INTERP_TICK(it_retto_other);
529 IF_DEBUG(interpreter,
530 debugBelch("returning to unknown frame -- yielding to sched\n");
531 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
533 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
540 // -------------------------------------------------------------------------
544 ASSERT(obj == UNTAG_CLOSURE(tagged_obj));
545 // we have a function to apply (obj), and n arguments taking up m
546 // words on the stack. The info table (stg_ap_pp_info or whatever)
547 // is on top of the arguments on the stack.
549 switch (get_itbl(obj)->type) {
557 // we only cope with PAPs whose function is a BCO
558 if (get_itbl(UNTAG_CLOSURE(pap->fun))->type != BCO) {
559 goto defer_apply_to_sched;
562 // Stack check: we're about to unpack the PAP onto the
563 // stack. The (+1) is for the (arity < n) case, where we
564 // also need space for an extra info pointer.
565 if (Sp - (pap->n_args + 1) < SpLim) {
567 Sp[1] = (W_)tagged_obj;
568 Sp[0] = (W_)&stg_enter_info;
569 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
576 // n must be greater than 1, and the only kinds of
577 // application we support with more than one argument
578 // are all pointers...
580 // Shuffle the args for this function down, and put
581 // the appropriate info table in the gap.
582 for (i = 0; i < arity; i++) {
583 Sp[(int)i-1] = Sp[i];
584 // ^^^^^ careful, i-1 might be negative, but i is unsigned
586 Sp[arity-1] = app_ptrs_itbl[n-arity-1];
588 // unpack the PAP's arguments onto the stack
590 for (i = 0; i < pap->n_args; i++) {
591 Sp[i] = (W_)pap->payload[i];
593 obj = UNTAG_CLOSURE(pap->fun);
596 else if (arity == n) {
598 for (i = 0; i < pap->n_args; i++) {
599 Sp[i] = (W_)pap->payload[i];
601 obj = UNTAG_CLOSURE(pap->fun);
604 else /* arity > n */ {
605 // build a new PAP and return it.
607 new_pap = (StgPAP *)allocateLocal(cap, PAP_sizeW(pap->n_args + m));
608 SET_HDR(new_pap,&stg_PAP_info,CCCS);
609 new_pap->arity = pap->arity - n;
610 new_pap->n_args = pap->n_args + m;
611 new_pap->fun = pap->fun;
612 for (i = 0; i < pap->n_args; i++) {
613 new_pap->payload[i] = pap->payload[i];
615 for (i = 0; i < m; i++) {
616 new_pap->payload[pap->n_args + i] = (StgClosure *)Sp[i];
618 tagged_obj = (StgClosure *)new_pap;
628 arity = ((StgBCO *)obj)->arity;
631 // n must be greater than 1, and the only kinds of
632 // application we support with more than one argument
633 // are all pointers...
635 // Shuffle the args for this function down, and put
636 // the appropriate info table in the gap.
637 for (i = 0; i < arity; i++) {
638 Sp[(int)i-1] = Sp[i];
639 // ^^^^^ careful, i-1 might be negative, but i is unsigned
641 Sp[arity-1] = app_ptrs_itbl[n-arity-1];
645 else if (arity == n) {
648 else /* arity > n */ {
649 // build a PAP and return it.
652 pap = (StgPAP *)allocateLocal(cap, PAP_sizeW(m));
653 SET_HDR(pap, &stg_PAP_info,CCCS);
654 pap->arity = arity - n;
657 for (i = 0; i < m; i++) {
658 pap->payload[i] = (StgClosure *)Sp[i];
660 tagged_obj = (StgClosure *)pap;
666 // No point in us applying machine-code functions
668 defer_apply_to_sched:
670 Sp[1] = (W_)tagged_obj;
671 Sp[0] = (W_)&stg_enter_info;
672 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
675 // ------------------------------------------------------------------------
676 // Ok, we now have a bco (obj), and its arguments are all on the
677 // stack. We can start executing the byte codes.
679 // The stack is in one of two states. First, if this BCO is a
689 // Second, if this BCO is a continuation:
704 // where retval is the value being returned to this continuation.
705 // In the event of a stack check, heap check, or context switch,
706 // we need to leave the stack in a sane state so the garbage
707 // collector can find all the pointers.
709 // (1) BCO is a function: the BCO's bitmap describes the
710 // pointerhood of the arguments.
712 // (2) BCO is a continuation: BCO's bitmap describes the
713 // pointerhood of the free variables.
715 // Sadly we have three different kinds of stack/heap/cswitch check
721 if (doYouWantToGC()) {
722 Sp--; Sp[0] = (W_)&stg_enter_info;
723 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
725 // Stack checks aren't necessary at return points, the stack use
726 // is aggregated into the enclosing function entry point.
730 run_BCO_return_unboxed:
732 if (doYouWantToGC()) {
733 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
735 // Stack checks aren't necessary at return points, the stack use
736 // is aggregated into the enclosing function entry point.
744 Sp[0] = (W_)&stg_apply_interp_info;
745 checkStackChunk(Sp,SpLim);
750 if (doYouWantToGC()) {
753 Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
754 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
758 if (Sp - INTERP_STACK_CHECK_THRESH < SpLim) {
761 Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
762 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
767 // Now, actually interpret the BCO... (no returning to the
768 // scheduler again until the stack is in an orderly state).
770 INTERP_TICK(it_BCO_entries);
772 register int bciPtr = 0; /* instruction pointer */
773 register StgWord16 bci;
774 register StgBCO* bco = (StgBCO*)obj;
775 register StgWord16* instrs = (StgWord16*)(bco->instrs->payload);
776 register StgWord* literals = (StgWord*)(&bco->literals->payload[0]);
777 register StgPtr* ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
779 bcoSize = BCO_NEXT_WORD;
780 IF_DEBUG(interpreter,debugBelch("bcoSize = %d\n", bcoSize));
783 it_lastopc = 0; /* no opcode */
787 ASSERT(bciPtr < bcoSize);
788 IF_DEBUG(interpreter,
789 //if (do_print_stack) {
790 //debugBelch("\n-- BEGIN stack\n");
791 //printStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
792 //debugBelch("-- END stack\n\n");
794 debugBelch("Sp = %p pc = %d ", Sp, bciPtr);
795 disInstr(bco,bciPtr);
798 for (i = 8; i >= 0; i--) {
799 debugBelch("%d %p\n", i, (StgPtr)(*(Sp+i)));
803 //if (do_print_stack) checkStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
807 INTERP_TICK(it_insns);
810 ASSERT( (int)instrs[bciPtr] >= 0 && (int)instrs[bciPtr] < 27 );
811 it_ofreq[ (int)instrs[bciPtr] ] ++;
812 it_oofreq[ it_lastopc ][ (int)instrs[bciPtr] ] ++;
813 it_lastopc = (int)instrs[bciPtr];
817 /* We use the high 8 bits for flags, only the highest of which is
818 * currently allocated */
819 ASSERT((bci & 0xFF00) == (bci & 0x8000));
821 switch (bci & 0xFF) {
823 /* check for a breakpoint on the beginning of a let binding */
826 int arg1_brk_array, arg2_array_index, arg3_freeVars;
827 StgArrWords *breakPoints;
828 int returning_from_break; // are we resuming execution from a breakpoint?
829 // if yes, then don't break this time around
830 StgClosure *ioAction; // the io action to run at a breakpoint
832 StgAP_STACK *new_aps; // a closure to save the top stack frame on the heap
836 arg1_brk_array = BCO_NEXT; // 1st arg of break instruction
837 arg2_array_index = BCO_NEXT; // 2nd arg of break instruction
838 arg3_freeVars = BCO_NEXT; // 3rd arg of break instruction
840 // check if we are returning from a breakpoint - this info
841 // is stored in the flags field of the current TSO
842 returning_from_break = cap->r.rCurrentTSO->flags & TSO_STOPPED_ON_BREAKPOINT;
844 // if we are returning from a break then skip this section
845 // and continue executing
846 if (!returning_from_break)
848 breakPoints = (StgArrWords *) BCO_PTR(arg1_brk_array);
850 // stop the current thread if either the
851 // "rts_stop_next_breakpoint" flag is true OR if the
852 // breakpoint flag for this particular expression is
854 if (rts_stop_next_breakpoint == rtsTrue ||
855 breakPoints->payload[arg2_array_index] == rtsTrue)
857 // make sure we don't automatically stop at the
859 rts_stop_next_breakpoint = rtsFalse;
861 // allocate memory for a new AP_STACK, enough to
862 // store the top stack frame plus an
863 // stg_apply_interp_info pointer and a pointer to
865 size_words = BCO_BITMAP_SIZE(obj) + 2;
866 new_aps = (StgAP_STACK *) allocateLocal(cap, AP_STACK_sizeW(size_words));
867 SET_HDR(new_aps,&stg_AP_STACK_info,CCS_SYSTEM);
868 new_aps->size = size_words;
869 new_aps->fun = &stg_dummy_ret_closure;
871 // fill in the payload of the AP_STACK
872 new_aps->payload[0] = (StgClosure *)&stg_apply_interp_info;
873 new_aps->payload[1] = (StgClosure *)obj;
875 // copy the contents of the top stack frame into the AP_STACK
876 for (i = 2; i < size_words; i++)
878 new_aps->payload[i] = (StgClosure *)Sp[i-2];
881 // prepare the stack so that we can call the
882 // rts_breakpoint_io_action and ensure that the stack is
883 // in a reasonable state for the GC and so that
884 // execution of this BCO can continue when we resume
885 ioAction = (StgClosure *) deRefStablePtr (rts_breakpoint_io_action);
888 Sp[7] = (W_)&stg_apply_interp_info;
889 Sp[6] = (W_)&stg_noforceIO_info; // see [unreg] below
890 Sp[5] = (W_)new_aps; // the AP_STACK
891 Sp[4] = (W_)BCO_PTR(arg3_freeVars); // the info about local vars of the breakpoint
892 Sp[3] = (W_)False_closure; // True <=> a breakpoint
893 Sp[2] = (W_)&stg_ap_pppv_info;
894 Sp[1] = (W_)ioAction; // apply the IO action to its two arguments above
895 Sp[0] = (W_)&stg_enter_info; // get ready to run the IO action
896 // Note [unreg]: in unregisterised mode, the return
897 // convention for IO is different. The
898 // stg_noforceIO_info stack frame is necessary to
899 // account for this difference.
901 // set the flag in the TSO to say that we are now
902 // stopping at a breakpoint so that when we resume
903 // we don't stop on the same breakpoint that we
904 // already stopped at just now
905 cap->r.rCurrentTSO->flags |= TSO_STOPPED_ON_BREAKPOINT;
907 // stop this thread and return to the scheduler -
908 // eventually we will come back and the IO action on
909 // the top of the stack will be executed
910 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
913 // record that this thread is not stopped at a breakpoint anymore
914 cap->r.rCurrentTSO->flags &= ~TSO_STOPPED_ON_BREAKPOINT;
916 // continue normal execution of the byte code instructions
921 // Explicit stack check at the beginning of a function
922 // *only* (stack checks in case alternatives are
923 // propagated to the enclosing function).
924 StgWord stk_words_reqd = BCO_GET_LARGE_ARG + 1;
925 if (Sp - stk_words_reqd < SpLim) {
928 Sp[0] = (W_)&stg_apply_interp_info;
929 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
964 Sp[-1] = BCO_PTR(o1);
969 case bci_PUSH_ALTS: {
970 int o_bco = BCO_NEXT;
971 Sp[-2] = (W_)&stg_ctoi_R1p_info;
972 Sp[-1] = BCO_PTR(o_bco);
977 case bci_PUSH_ALTS_P: {
978 int o_bco = BCO_NEXT;
979 Sp[-2] = (W_)&stg_ctoi_R1unpt_info;
980 Sp[-1] = BCO_PTR(o_bco);
985 case bci_PUSH_ALTS_N: {
986 int o_bco = BCO_NEXT;
987 Sp[-2] = (W_)&stg_ctoi_R1n_info;
988 Sp[-1] = BCO_PTR(o_bco);
993 case bci_PUSH_ALTS_F: {
994 int o_bco = BCO_NEXT;
995 Sp[-2] = (W_)&stg_ctoi_F1_info;
996 Sp[-1] = BCO_PTR(o_bco);
1001 case bci_PUSH_ALTS_D: {
1002 int o_bco = BCO_NEXT;
1003 Sp[-2] = (W_)&stg_ctoi_D1_info;
1004 Sp[-1] = BCO_PTR(o_bco);
1009 case bci_PUSH_ALTS_L: {
1010 int o_bco = BCO_NEXT;
1011 Sp[-2] = (W_)&stg_ctoi_L1_info;
1012 Sp[-1] = BCO_PTR(o_bco);
1017 case bci_PUSH_ALTS_V: {
1018 int o_bco = BCO_NEXT;
1019 Sp[-2] = (W_)&stg_ctoi_V_info;
1020 Sp[-1] = BCO_PTR(o_bco);
1025 case bci_PUSH_APPLY_N:
1026 Sp--; Sp[0] = (W_)&stg_ap_n_info;
1028 case bci_PUSH_APPLY_V:
1029 Sp--; Sp[0] = (W_)&stg_ap_v_info;
1031 case bci_PUSH_APPLY_F:
1032 Sp--; Sp[0] = (W_)&stg_ap_f_info;
1034 case bci_PUSH_APPLY_D:
1035 Sp--; Sp[0] = (W_)&stg_ap_d_info;
1037 case bci_PUSH_APPLY_L:
1038 Sp--; Sp[0] = (W_)&stg_ap_l_info;
1040 case bci_PUSH_APPLY_P:
1041 Sp--; Sp[0] = (W_)&stg_ap_p_info;
1043 case bci_PUSH_APPLY_PP:
1044 Sp--; Sp[0] = (W_)&stg_ap_pp_info;
1046 case bci_PUSH_APPLY_PPP:
1047 Sp--; Sp[0] = (W_)&stg_ap_ppp_info;
1049 case bci_PUSH_APPLY_PPPP:
1050 Sp--; Sp[0] = (W_)&stg_ap_pppp_info;
1052 case bci_PUSH_APPLY_PPPPP:
1053 Sp--; Sp[0] = (W_)&stg_ap_ppppp_info;
1055 case bci_PUSH_APPLY_PPPPPP:
1056 Sp--; Sp[0] = (W_)&stg_ap_pppppp_info;
1059 case bci_PUSH_UBX: {
1061 int o_lits = BCO_NEXT;
1062 int n_words = BCO_NEXT;
1064 for (i = 0; i < n_words; i++) {
1065 Sp[i] = (W_)BCO_LIT(o_lits+i);
1073 /* a_1, .. a_n, b_1, .. b_by, s => a_1, .. a_n, s */
1078 INTERP_TICK(it_slides);
1082 case bci_ALLOC_AP: {
1084 int n_payload = BCO_NEXT;
1085 ap = (StgAP*)allocateLocal(cap, AP_sizeW(n_payload));
1087 ap->n_args = n_payload;
1088 SET_HDR(ap, &stg_AP_info, CCS_SYSTEM/*ToDo*/)
1093 case bci_ALLOC_AP_NOUPD: {
1095 int n_payload = BCO_NEXT;
1096 ap = (StgAP*)allocateLocal(cap, AP_sizeW(n_payload));
1098 ap->n_args = n_payload;
1099 SET_HDR(ap, &stg_AP_NOUPD_info, CCS_SYSTEM/*ToDo*/)
1104 case bci_ALLOC_PAP: {
1106 int arity = BCO_NEXT;
1107 int n_payload = BCO_NEXT;
1108 pap = (StgPAP*)allocateLocal(cap, PAP_sizeW(n_payload));
1110 pap->n_args = n_payload;
1112 SET_HDR(pap, &stg_PAP_info, CCS_SYSTEM/*ToDo*/)
1119 int stkoff = BCO_NEXT;
1120 int n_payload = BCO_NEXT;
1121 StgAP* ap = (StgAP*)Sp[stkoff];
1122 ASSERT((int)ap->n_args == n_payload);
1123 ap->fun = (StgClosure*)Sp[0];
1125 // The function should be a BCO, and its bitmap should
1126 // cover the payload of the AP correctly.
1127 ASSERT(get_itbl(ap->fun)->type == BCO
1128 && BCO_BITMAP_SIZE(ap->fun) == ap->n_args);
1130 for (i = 0; i < n_payload; i++)
1131 ap->payload[i] = (StgClosure*)Sp[i+1];
1133 IF_DEBUG(interpreter,
1134 debugBelch("\tBuilt ");
1135 printObj((StgClosure*)ap);
1142 int stkoff = BCO_NEXT;
1143 int n_payload = BCO_NEXT;
1144 StgPAP* pap = (StgPAP*)Sp[stkoff];
1145 ASSERT((int)pap->n_args == n_payload);
1146 pap->fun = (StgClosure*)Sp[0];
1148 // The function should be a BCO
1149 ASSERT(get_itbl(pap->fun)->type == BCO);
1151 for (i = 0; i < n_payload; i++)
1152 pap->payload[i] = (StgClosure*)Sp[i+1];
1154 IF_DEBUG(interpreter,
1155 debugBelch("\tBuilt ");
1156 printObj((StgClosure*)pap);
1162 /* Unpack N ptr words from t.o.s constructor */
1164 int n_words = BCO_NEXT;
1165 StgClosure* con = (StgClosure*)Sp[0];
1167 for (i = 0; i < n_words; i++) {
1168 Sp[i] = (W_)con->payload[i];
1175 int o_itbl = BCO_NEXT;
1176 int n_words = BCO_NEXT;
1177 StgInfoTable* itbl = INFO_PTR_TO_STRUCT(BCO_LIT(o_itbl));
1178 int request = CONSTR_sizeW( itbl->layout.payload.ptrs,
1179 itbl->layout.payload.nptrs );
1180 StgClosure* con = (StgClosure*)allocate_NONUPD(cap,request);
1181 ASSERT( itbl->layout.payload.ptrs + itbl->layout.payload.nptrs > 0);
1182 SET_HDR(con, (StgInfoTable*)BCO_LIT(o_itbl), CCS_SYSTEM/*ToDo*/);
1183 for (i = 0; i < n_words; i++) {
1184 con->payload[i] = (StgClosure*)Sp[i];
1189 IF_DEBUG(interpreter,
1190 debugBelch("\tBuilt ");
1191 printObj((StgClosure*)con);
1196 case bci_TESTLT_P: {
1197 unsigned int discr = BCO_NEXT;
1198 int failto = BCO_GET_LARGE_ARG;
1199 StgClosure* con = (StgClosure*)Sp[0];
1200 if (GET_TAG(con) >= discr) {
1206 case bci_TESTEQ_P: {
1207 unsigned int discr = BCO_NEXT;
1208 int failto = BCO_GET_LARGE_ARG;
1209 StgClosure* con = (StgClosure*)Sp[0];
1210 if (GET_TAG(con) != discr) {
1216 case bci_TESTLT_I: {
1217 // There should be an Int at Sp[1], and an info table at Sp[0].
1218 int discr = BCO_NEXT;
1219 int failto = BCO_GET_LARGE_ARG;
1220 I_ stackInt = (I_)Sp[1];
1221 if (stackInt >= (I_)BCO_LIT(discr))
1226 case bci_TESTEQ_I: {
1227 // There should be an Int at Sp[1], and an info table at Sp[0].
1228 int discr = BCO_NEXT;
1229 int failto = BCO_GET_LARGE_ARG;
1230 I_ stackInt = (I_)Sp[1];
1231 if (stackInt != (I_)BCO_LIT(discr)) {
1237 case bci_TESTLT_W: {
1238 // There should be a Word at Sp[1], and an info table at Sp[0].
1239 int discr = BCO_NEXT;
1240 int failto = BCO_GET_LARGE_ARG;
1241 W_ stackWord = (W_)Sp[1];
1242 if (stackWord >= (W_)BCO_LIT(discr))
1247 case bci_TESTEQ_W: {
1248 // There should be a Word at Sp[1], and an info table at Sp[0].
1249 int discr = BCO_NEXT;
1250 int failto = BCO_GET_LARGE_ARG;
1251 W_ stackWord = (W_)Sp[1];
1252 if (stackWord != (W_)BCO_LIT(discr)) {
1258 case bci_TESTLT_D: {
1259 // There should be a Double at Sp[1], and an info table at Sp[0].
1260 int discr = BCO_NEXT;
1261 int failto = BCO_GET_LARGE_ARG;
1262 StgDouble stackDbl, discrDbl;
1263 stackDbl = PK_DBL( & Sp[1] );
1264 discrDbl = PK_DBL( & BCO_LIT(discr) );
1265 if (stackDbl >= discrDbl) {
1271 case bci_TESTEQ_D: {
1272 // There should be a Double at Sp[1], and an info table at Sp[0].
1273 int discr = BCO_NEXT;
1274 int failto = BCO_GET_LARGE_ARG;
1275 StgDouble stackDbl, discrDbl;
1276 stackDbl = PK_DBL( & Sp[1] );
1277 discrDbl = PK_DBL( & BCO_LIT(discr) );
1278 if (stackDbl != discrDbl) {
1284 case bci_TESTLT_F: {
1285 // There should be a Float at Sp[1], and an info table at Sp[0].
1286 int discr = BCO_NEXT;
1287 int failto = BCO_GET_LARGE_ARG;
1288 StgFloat stackFlt, discrFlt;
1289 stackFlt = PK_FLT( & Sp[1] );
1290 discrFlt = PK_FLT( & BCO_LIT(discr) );
1291 if (stackFlt >= discrFlt) {
1297 case bci_TESTEQ_F: {
1298 // There should be a Float at Sp[1], and an info table at Sp[0].
1299 int discr = BCO_NEXT;
1300 int failto = BCO_GET_LARGE_ARG;
1301 StgFloat stackFlt, discrFlt;
1302 stackFlt = PK_FLT( & Sp[1] );
1303 discrFlt = PK_FLT( & BCO_LIT(discr) );
1304 if (stackFlt != discrFlt) {
1310 // Control-flow ish things
1312 // Context-switch check. We put it here to ensure that
1313 // the interpreter has done at least *some* work before
1314 // context switching: sometimes the scheduler can invoke
1315 // the interpreter with context_switch == 1, particularly
1316 // if the -C0 flag has been given on the cmd line.
1317 if (cap->r.rHpLim == NULL) {
1318 Sp--; Sp[0] = (W_)&stg_enter_info;
1319 RETURN_TO_SCHEDULER(ThreadInterpret, ThreadYielding);
1324 tagged_obj = (StgClosure *)Sp[0];
1330 Sp[0] = (W_)&stg_gc_unpt_r1_info;
1331 goto do_return_unboxed;
1334 Sp[0] = (W_)&stg_gc_unbx_r1_info;
1335 goto do_return_unboxed;
1338 Sp[0] = (W_)&stg_gc_f1_info;
1339 goto do_return_unboxed;
1342 Sp[0] = (W_)&stg_gc_d1_info;
1343 goto do_return_unboxed;
1346 Sp[0] = (W_)&stg_gc_l1_info;
1347 goto do_return_unboxed;
1350 Sp[0] = (W_)&stg_gc_void_info;
1351 goto do_return_unboxed;
1354 int stkoff = BCO_NEXT;
1355 signed short n = (signed short)(BCO_NEXT);
1356 Sp[stkoff] += (W_)n;
1362 int stk_offset = BCO_NEXT;
1363 int o_itbl = BCO_NEXT;
1364 void(*marshall_fn)(void*) = (void (*)(void*))BCO_LIT(o_itbl);
1366 RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE
1367 + sizeofW(StgRetDyn);
1369 /* the stack looks like this:
1371 | | <- Sp + stk_offset
1375 | | <- Sp + ret_size + 1
1377 | C fun | <- Sp + ret_size
1382 ret is a placeholder for the return address, and may be
1385 We need to copy the args out of the TSO, because when
1386 we call suspendThread() we no longer own the TSO stack,
1387 and it may move at any time - indeed suspendThread()
1388 itself may do stack squeezing and move our args.
1389 So we make a copy of the argument block.
1392 #define ROUND_UP_WDS(p) ((((StgWord)(p)) + sizeof(W_)-1)/sizeof(W_))
1394 ffi_cif *cif = (ffi_cif *)marshall_fn;
1395 nat nargs = cif->nargs;
1399 W_ ret[2]; // max needed
1400 W_ *arguments[stk_offset]; // max needed
1401 void *argptrs[nargs];
1404 if (cif->rtype->type == FFI_TYPE_VOID) {
1405 // necessary because cif->rtype->size == 1 for void,
1406 // but the bytecode generator has not pushed a
1407 // placeholder in this case.
1410 ret_size = ROUND_UP_WDS(cif->rtype->size);
1413 memcpy(arguments, Sp+ret_size+1,
1414 sizeof(W_) * (stk_offset-1-ret_size));
1416 // libffi expects the args as an array of pointers to
1417 // values, so we have to construct this array before making
1419 p = (StgPtr)arguments;
1420 for (i = 0; i < nargs; i++) {
1421 argptrs[i] = (void *)p;
1422 // get the size from the cif
1423 p += ROUND_UP_WDS(cif->arg_types[i]->size);
1426 // this is the function we're going to call
1427 fn = (void(*)(void))Sp[ret_size];
1429 // Restore the Haskell thread's current value of errno
1430 errno = cap->r.rCurrentTSO->saved_errno;
1432 // There are a bunch of non-ptr words on the stack (the
1433 // ccall args, the ccall fun address and space for the
1434 // result), which we need to cover with an info table
1435 // since we might GC during this call.
1437 // We know how many (non-ptr) words there are before the
1438 // next valid stack frame: it is the stk_offset arg to the
1439 // CCALL instruction. So we build a RET_DYN stack frame
1440 // on the stack frame to describe this chunk of stack.
1443 ((StgRetDyn *)Sp)->liveness = R1_PTR | N_NONPTRS(stk_offset);
1444 ((StgRetDyn *)Sp)->info = (StgInfoTable *)&stg_gc_gen_info;
1446 // save obj (pointer to the current BCO), since this
1447 // might move during the call. We use the R1 slot in the
1448 // RET_DYN frame for this, hence R1_PTR above.
1449 ((StgRetDyn *)Sp)->payload[0] = (StgClosure *)obj;
1451 SAVE_STACK_POINTERS;
1452 tok = suspendThread(&cap->r);
1454 // We already made a copy of the arguments above.
1455 ffi_call(cif, fn, ret, argptrs);
1457 // And restart the thread again, popping the RET_DYN frame.
1458 cap = (Capability *)((void *)((unsigned char*)resumeThread(tok) - STG_FIELD_OFFSET(Capability,r)));
1459 LOAD_STACK_POINTERS;
1461 // Re-load the pointer to the BCO from the RET_DYN frame,
1462 // it might have moved during the call. Also reload the
1463 // pointers to the components of the BCO.
1464 obj = ((StgRetDyn *)Sp)->payload[0];
1466 instrs = (StgWord16*)(bco->instrs->payload);
1467 literals = (StgWord*)(&bco->literals->payload[0]);
1468 ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
1472 // Save the Haskell thread's current value of errno
1473 cap->r.rCurrentTSO->saved_errno = errno;
1475 // Copy the return value back to the TSO stack. It is at
1476 // most 2 words large, and resides at arguments[0].
1477 memcpy(Sp, ret, sizeof(W_) * stg_min(stk_offset,ret_size));
1483 /* BCO_NEXT modifies bciPtr, so be conservative. */
1484 int nextpc = BCO_GET_LARGE_ARG;
1490 barf("interpretBCO: hit a CASEFAIL");
1494 barf("interpretBCO: unknown or unimplemented opcode %d",
1497 } /* switch on opcode */
1501 barf("interpretBCO: fell off end of the interpreter");