1 /* -----------------------------------------------------------------------------
4 * Copyright (c) The GHC Team, 1994-2002.
5 * ---------------------------------------------------------------------------*/
7 #include "PosixSource.h"
15 #include "LdvProfile.h"
21 #include "Bytecodes.h"
23 #include "Disassembler.h"
24 #include "Interpreter.h"
26 #include <string.h> /* for memcpy */
33 /* --------------------------------------------------------------------------
34 * The bytecode interpreter
35 * ------------------------------------------------------------------------*/
37 /* Gather stats about entry, opcode, opcode-pair frequencies. For
38 tuning the interpreter. */
40 /* #define INTERP_STATS */
43 /* Sp points to the lowest live word on the stack. */
45 #define BCO_NEXT instrs[bciPtr++]
46 #define BCO_NEXT_32 (bciPtr += 2, (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
47 #define BCO_NEXT_64 (bciPtr += 4, (((StgWord) instrs[bciPtr-4]) << 48) + (((StgWord) instrs[bciPtr-3]) << 32) + (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
48 #if WORD_SIZE_IN_BITS == 32
49 #define BCO_NEXT_WORD BCO_NEXT_32
50 #elif WORD_SIZE_IN_BITS == 64
51 #define BCO_NEXT_WORD BCO_NEXT_64
53 #error Cannot cope with WORD_SIZE_IN_BITS being nether 32 nor 64
55 #define BCO_GET_LARGE_ARG ((bci & bci_FLAG_LARGE_ARGS) ? BCO_NEXT_WORD : BCO_NEXT)
57 #define BCO_PTR(n) (W_)ptrs[n]
58 #define BCO_LIT(n) literals[n]
/* Refresh the interpreter's cached Sp/SpLim from the current TSO.
   SpLim is recomputed from the stack base plus the reserved words. */
#define LOAD_STACK_POINTERS \
    Sp = cap->r.rCurrentTSO->sp; \
    /* We don't change this ... */ \
    SpLim = cap->r.rCurrentTSO->stack + RESERVED_STACK_WORDS;

/* Flush the cached stack pointer back into the TSO.  Note: no
   trailing ';' -- callers supply it. */
#define SAVE_STACK_POINTERS \
    cap->r.rCurrentTSO->sp = Sp

/* Leave the interpreter: save the stack pointer, record what the
   thread should do next and the return code for the scheduler, then
   pause the thread (threadPaused).  The _NO_PAUSE variant below is
   identical except it skips threadPaused().  Both macros continue
   with a '\' onto lines not visible here. */
#define RETURN_TO_SCHEDULER(todo,retcode) \
    SAVE_STACK_POINTERS; \
    cap->r.rCurrentTSO->what_next = (todo); \
    threadPaused(cap,cap->r.rCurrentTSO); \
    cap->r.rRet = (retcode); \
#define RETURN_TO_SCHEDULER_NO_PAUSE(todo,retcode) \
    SAVE_STACK_POINTERS; \
    cap->r.rCurrentTSO->what_next = (todo); \
    cap->r.rRet = (retcode); \
/* Allocate heap space for a non-updatable closure of n_words, rounding
   the request up to the minimum closure size (header + MIN_PAYLOAD_SIZE).
   NOTE(review): presumably the padding is so the closure can later be
   overwritten in place -- confirm against the allocator's invariants. */
allocate_NONUPD (int n_words)
    return allocate(stg_max(sizeofW(StgHeader)+MIN_PAYLOAD_SIZE, n_words));
/* Debugger control flags.  rts_stop_next_breakpoint: when true, the
   next bci_BRK_FUN instruction stops the thread regardless of the
   per-breakpoint flag (it is reset there; see the BRK_FUN case).
   rts_stop_on_exception: not referenced in the lines visible here --
   presumably consulted when an exception is raised; confirm against
   the rest of the file. */
int rts_stop_next_breakpoint = 0;
int rts_stop_on_exception = 0;
/* Hacky stats, for tuning the interpreter ... */

/* Count, per closure type, of objects the interpreter was asked to
   enter but could not handle, plus the overall total. */
int it_unknown_entries[N_CLOSURE_TYPES];
int it_total_unknown_entries;

/* Frequency of each (previous opcode, current opcode) pair; 27 slots
   per axis, matching the opcode-range assertions in the dispatch
   loop. */
int it_oofreq[27][27];

/* Bump a statistics counter (statistics builds only; defined away
   below when INTERP_STATS is off). */
#define INTERP_TICK(n) (n)++
/* Reset every interpreter statistics counter.  Statistics builds
   (INTERP_STATS) only. */
void interp_startup ( void )
    it_retto_BCO = it_retto_UPDATE = it_retto_other = 0;
    it_total_entries = it_total_unknown_entries = 0;
    for (i = 0; i < N_CLOSURE_TYPES; i++)
        it_unknown_entries[i] = 0;
    it_slides = it_insns = it_BCO_entries = 0;
    /* clear single-opcode and opcode-pair frequency tables */
    for (i = 0; i < 27; i++) it_ofreq[i] = 0;
    for (i = 0; i < 27; i++)
        for (j = 0; j < 27; j++)
/* Dump the gathered interpreter statistics via debugBelch.
   Statistics builds (INTERP_STATS) only. */
void interp_shutdown ( void )
    int i, j, k, o_max, i_max, j_max;
    debugBelch("%d constrs entered -> (%d BCO, %d UPD, %d ??? )\n",
               it_retto_BCO + it_retto_UPDATE + it_retto_other,
               it_retto_BCO, it_retto_UPDATE, it_retto_other );
    debugBelch("%d total entries, %d unknown entries \n",
               it_total_entries, it_total_unknown_entries);
    /* per-closure-type breakdown of the unhandled entries */
    for (i = 0; i < N_CLOSURE_TYPES; i++) {
        if (it_unknown_entries[i] == 0) continue;
        debugBelch(" type %2d: unknown entries (%4.1f%%) == %d\n",
                   i, 100.0 * ((double)it_unknown_entries[i]) /
                      ((double)it_total_unknown_entries),
                   it_unknown_entries[i]);
    debugBelch("%d insns, %d slides, %d BCO_entries\n",
               it_insns, it_slides, it_BCO_entries);
    for (i = 0; i < 27; i++)
        debugBelch("opcode %2d got %d\n", i, it_ofreq[i] );
    /* Print the most frequent opcode pairs: each pass scans the whole
       table for the current maximum, prints it, then zeroes that cell
       so the next pass finds the runner-up. */
    for (k = 1; k < 20; k++) {
        for (i = 0; i < 27; i++) {
            for (j = 0; j < 27; j++) {
                if (it_oofreq[i][j] > o_max) {
                    o_max = it_oofreq[i][j];
                    i_max = i; j_max = j;
        debugBelch("%d: count (%4.1f%%) %6d is %d then %d\n",
                   k, ((double)o_max) * 100.0 / ((double)it_insns), o_max,
        it_oofreq[i_max][j_max] = 0;
#else // !INTERP_STATS

/* Statistics disabled: ticking a counter is a no-op. */
#define INTERP_TICK(n) /* nothing */

/* Info tables for the stg_ap_* all-pointer application frames; the
   do_apply code indexes this table (app_ptrs_itbl[n-arity-1]) to push
   a frame for the excess pointer arguments.  Entries for the smaller
   arities and the closing brace lie outside the lines visible here. */
static StgWord app_ptrs_itbl[] = {
    (W_)&stg_ap_ppp_info,
    (W_)&stg_ap_pppp_info,
    (W_)&stg_ap_ppppp_info,
    (W_)&stg_ap_pppppp_info,

HsStablePtr rts_breakpoint_io_action; // points to the IO action which is executed on a breakpoint
                                      // it is set in main/GHC.hs:runStmt
187 interpretBCO (Capability* cap)
189 // Use of register here is primarily to make it clear to compilers
190 // that these entities are non-aliasable.
191 register StgPtr Sp; // local state -- stack pointer
192 register StgPtr SpLim; // local state -- stack lim pointer
193 register StgClosure *tagged_obj = 0, *obj;
198 // ------------------------------------------------------------------------
201 // We have a closure to evaluate. Stack looks like:
205 // Sp | -------------------> closure
208 if (Sp[0] == (W_)&stg_enter_info) {
213 // ------------------------------------------------------------------------
216 // We have a BCO application to perform. Stack looks like:
227 else if (Sp[0] == (W_)&stg_apply_interp_info) {
228 obj = UNTAG_CLOSURE((StgClosure *)Sp[1]);
233 // ------------------------------------------------------------------------
236 // We have an unboxed value to return. See comment before
237 // do_return_unboxed, below.
240 goto do_return_unboxed;
243 // Evaluate the object on top of the stack.
245 tagged_obj = (StgClosure*)Sp[0]; Sp++;
248 obj = UNTAG_CLOSURE(tagged_obj);
249 INTERP_TICK(it_total_evals);
251 IF_DEBUG(interpreter,
253 "\n---------------------------------------------------------------\n");
254 debugBelch("Evaluating: "); printObj(obj);
255 debugBelch("Sp = %p\n", Sp);
258 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
262 IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));
264 switch ( get_itbl(obj)->type ) {
269 case IND_OLDGEN_PERM:
272 tagged_obj = ((StgInd*)obj)->indirectee;
283 case CONSTR_NOCAF_STATIC:
297 ASSERT(((StgBCO *)obj)->arity > 0);
301 case AP: /* Copied from stg_AP_entry. */
310 if (Sp - (words+sizeofW(StgUpdateFrame)) < SpLim) {
312 Sp[1] = (W_)tagged_obj;
313 Sp[0] = (W_)&stg_enter_info;
314 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
317 /* Ok; we're safe. Party on. Push an update frame. */
318 Sp -= sizeofW(StgUpdateFrame);
320 StgUpdateFrame *__frame;
321 __frame = (StgUpdateFrame *)Sp;
322 SET_INFO(__frame, (StgInfoTable *)&stg_upd_frame_info);
323 __frame->updatee = (StgClosure *)(ap);
326 /* Reload the stack */
328 for (i=0; i < words; i++) {
329 Sp[i] = (W_)ap->payload[i];
332 obj = UNTAG_CLOSURE((StgClosure*)ap->fun);
333 ASSERT(get_itbl(obj)->type == BCO);
342 j = get_itbl(obj)->type;
343 ASSERT(j >= 0 && j < N_CLOSURE_TYPES);
344 it_unknown_entries[j]++;
345 it_total_unknown_entries++;
349 // Can't handle this object; yield to scheduler
350 IF_DEBUG(interpreter,
351 debugBelch("evaluating unknown closure -- yielding to sched\n");
355 Sp[1] = (W_)tagged_obj;
356 Sp[0] = (W_)&stg_enter_info;
357 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
361 // ------------------------------------------------------------------------
362 // We now have an evaluated object (tagged_obj). The next thing to
363 // do is return it to the stack frame on top of the stack.
365 obj = UNTAG_CLOSURE(tagged_obj);
366 ASSERT(closure_HNF(obj));
368 IF_DEBUG(interpreter,
370 "\n---------------------------------------------------------------\n");
371 debugBelch("Returning: "); printObj(obj);
372 debugBelch("Sp = %p\n", Sp);
374 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
378 IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));
380 switch (get_itbl((StgClosure *)Sp)->type) {
383 const StgInfoTable *info;
385 // NOTE: not using get_itbl().
386 info = ((StgClosure *)Sp)->header.info;
387 if (info == (StgInfoTable *)&stg_ap_v_info) {
388 n = 1; m = 0; goto do_apply;
390 if (info == (StgInfoTable *)&stg_ap_f_info) {
391 n = 1; m = 1; goto do_apply;
393 if (info == (StgInfoTable *)&stg_ap_d_info) {
394 n = 1; m = sizeofW(StgDouble); goto do_apply;
396 if (info == (StgInfoTable *)&stg_ap_l_info) {
397 n = 1; m = sizeofW(StgInt64); goto do_apply;
399 if (info == (StgInfoTable *)&stg_ap_n_info) {
400 n = 1; m = 1; goto do_apply;
402 if (info == (StgInfoTable *)&stg_ap_p_info) {
403 n = 1; m = 1; goto do_apply;
405 if (info == (StgInfoTable *)&stg_ap_pp_info) {
406 n = 2; m = 2; goto do_apply;
408 if (info == (StgInfoTable *)&stg_ap_ppp_info) {
409 n = 3; m = 3; goto do_apply;
411 if (info == (StgInfoTable *)&stg_ap_pppp_info) {
412 n = 4; m = 4; goto do_apply;
414 if (info == (StgInfoTable *)&stg_ap_ppppp_info) {
415 n = 5; m = 5; goto do_apply;
417 if (info == (StgInfoTable *)&stg_ap_pppppp_info) {
418 n = 6; m = 6; goto do_apply;
420 goto do_return_unrecognised;
424 // Returning to an update frame: do the update, pop the update
425 // frame, and continue with the next stack frame.
427 // NB. we must update with the *tagged* pointer. Some tags
428 // are not optional, and if we omit the tag bits when updating
429 // then bad things can happen (albeit very rarely). See #1925.
430 // What happened was an indirection was created with an
431 // untagged pointer, and this untagged pointer was propagated
432 // to a PAP by the GC, violating the invariant that PAPs
433 // always contain a tagged pointer to the function.
434 INTERP_TICK(it_retto_UPDATE);
435 UPD_IND(((StgUpdateFrame *)Sp)->updatee, tagged_obj);
436 Sp += sizeofW(StgUpdateFrame);
440 // Returning to an interpreted continuation: put the object on
441 // the stack, and start executing the BCO.
442 INTERP_TICK(it_retto_BCO);
445 // NB. return the untagged object; the bytecode expects it to
446 // be untagged. XXX this doesn't seem right.
447 obj = (StgClosure*)Sp[2];
448 ASSERT(get_itbl(obj)->type == BCO);
452 do_return_unrecognised:
454 // Can't handle this return address; yield to scheduler
455 INTERP_TICK(it_retto_other);
456 IF_DEBUG(interpreter,
457 debugBelch("returning to unknown frame -- yielding to sched\n");
458 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
461 Sp[1] = (W_)tagged_obj;
462 Sp[0] = (W_)&stg_enter_info;
463 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
467 // -------------------------------------------------------------------------
468 // Returning an unboxed value. The stack looks like this:
485 // where XXXX_info is one of the stg_gc_unbx_r1_info family.
487 // We're only interested in the case when the real return address
488 // is a BCO; otherwise we'll return to the scheduler.
494 ASSERT( Sp[0] == (W_)&stg_gc_unbx_r1_info
495 || Sp[0] == (W_)&stg_gc_unpt_r1_info
496 || Sp[0] == (W_)&stg_gc_f1_info
497 || Sp[0] == (W_)&stg_gc_d1_info
498 || Sp[0] == (W_)&stg_gc_l1_info
499 || Sp[0] == (W_)&stg_gc_void_info // VoidRep
502 // get the offset of the stg_ctoi_ret_XXX itbl
503 offset = stack_frame_sizeW((StgClosure *)Sp);
505 switch (get_itbl((StgClosure *)Sp+offset)->type) {
508 // Returning to an interpreted continuation: put the object on
509 // the stack, and start executing the BCO.
510 INTERP_TICK(it_retto_BCO);
511 obj = (StgClosure*)Sp[offset+1];
512 ASSERT(get_itbl(obj)->type == BCO);
513 goto run_BCO_return_unboxed;
517 // Can't handle this return address; yield to scheduler
518 INTERP_TICK(it_retto_other);
519 IF_DEBUG(interpreter,
520 debugBelch("returning to unknown frame -- yielding to sched\n");
521 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
523 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
530 // -------------------------------------------------------------------------
534 ASSERT(obj == UNTAG_CLOSURE(tagged_obj));
535 // we have a function to apply (obj), and n arguments taking up m
536 // words on the stack. The info table (stg_ap_pp_info or whatever)
537 // is on top of the arguments on the stack.
539 switch (get_itbl(obj)->type) {
547 // we only cope with PAPs whose function is a BCO
548 if (get_itbl(UNTAG_CLOSURE(pap->fun))->type != BCO) {
549 goto defer_apply_to_sched;
556 // n must be greater than 1, and the only kinds of
557 // application we support with more than one argument
558 // are all pointers...
560 // Shuffle the args for this function down, and put
561 // the appropriate info table in the gap.
562 for (i = 0; i < arity; i++) {
563 Sp[(int)i-1] = Sp[i];
// ^^^^^ careful, i-1 might be negative, but i is unsigned
566 Sp[arity-1] = app_ptrs_itbl[n-arity-1];
568 // unpack the PAP's arguments onto the stack
570 for (i = 0; i < pap->n_args; i++) {
571 Sp[i] = (W_)pap->payload[i];
573 obj = UNTAG_CLOSURE(pap->fun);
576 else if (arity == n) {
578 for (i = 0; i < pap->n_args; i++) {
579 Sp[i] = (W_)pap->payload[i];
581 obj = UNTAG_CLOSURE(pap->fun);
584 else /* arity > n */ {
585 // build a new PAP and return it.
587 new_pap = (StgPAP *)allocate(PAP_sizeW(pap->n_args + m));
588 SET_HDR(new_pap,&stg_PAP_info,CCCS);
589 new_pap->arity = pap->arity - n;
590 new_pap->n_args = pap->n_args + m;
591 new_pap->fun = pap->fun;
592 for (i = 0; i < pap->n_args; i++) {
593 new_pap->payload[i] = pap->payload[i];
595 for (i = 0; i < m; i++) {
596 new_pap->payload[pap->n_args + i] = (StgClosure *)Sp[i];
598 tagged_obj = (StgClosure *)new_pap;
608 arity = ((StgBCO *)obj)->arity;
611 // n must be greater than 1, and the only kinds of
612 // application we support with more than one argument
613 // are all pointers...
615 // Shuffle the args for this function down, and put
616 // the appropriate info table in the gap.
617 for (i = 0; i < arity; i++) {
618 Sp[(int)i-1] = Sp[i];
// ^^^^^ careful, i-1 might be negative, but i is unsigned
621 Sp[arity-1] = app_ptrs_itbl[n-arity-1];
625 else if (arity == n) {
628 else /* arity > n */ {
629 // build a PAP and return it.
632 pap = (StgPAP *)allocate(PAP_sizeW(m));
633 SET_HDR(pap, &stg_PAP_info,CCCS);
634 pap->arity = arity - n;
637 for (i = 0; i < m; i++) {
638 pap->payload[i] = (StgClosure *)Sp[i];
640 tagged_obj = (StgClosure *)pap;
646 // No point in us applying machine-code functions
648 defer_apply_to_sched:
650 Sp[1] = (W_)tagged_obj;
651 Sp[0] = (W_)&stg_enter_info;
652 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
655 // ------------------------------------------------------------------------
656 // Ok, we now have a bco (obj), and its arguments are all on the
657 // stack. We can start executing the byte codes.
659 // The stack is in one of two states. First, if this BCO is a
669 // Second, if this BCO is a continuation:
684 // where retval is the value being returned to this continuation.
685 // In the event of a stack check, heap check, or context switch,
686 // we need to leave the stack in a sane state so the garbage
687 // collector can find all the pointers.
689 // (1) BCO is a function: the BCO's bitmap describes the
690 // pointerhood of the arguments.
692 // (2) BCO is a continuation: BCO's bitmap describes the
693 // pointerhood of the free variables.
695 // Sadly we have three different kinds of stack/heap/cswitch check
701 if (doYouWantToGC()) {
702 Sp--; Sp[0] = (W_)&stg_enter_info;
703 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
705 // Stack checks aren't necessary at return points, the stack use
706 // is aggregated into the enclosing function entry point.
710 run_BCO_return_unboxed:
712 if (doYouWantToGC()) {
713 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
715 // Stack checks aren't necessary at return points, the stack use
716 // is aggregated into the enclosing function entry point.
724 Sp[0] = (W_)&stg_apply_interp_info;
725 checkStackChunk(Sp,SpLim);
730 if (doYouWantToGC()) {
733 Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
734 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
738 if (Sp - INTERP_STACK_CHECK_THRESH < SpLim) {
741 Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
742 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
747 // Now, actually interpret the BCO... (no returning to the
748 // scheduler again until the stack is in an orderly state).
750 INTERP_TICK(it_BCO_entries);
752 register int bciPtr = 1; /* instruction pointer */
753 register StgWord16 bci;
754 register StgBCO* bco = (StgBCO*)obj;
755 register StgWord16* instrs = (StgWord16*)(bco->instrs->payload);
756 register StgWord* literals = (StgWord*)(&bco->literals->payload[0]);
757 register StgPtr* ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
760 it_lastopc = 0; /* no opcode */
764 ASSERT(bciPtr <= instrs[0]);
765 IF_DEBUG(interpreter,
766 //if (do_print_stack) {
767 //debugBelch("\n-- BEGIN stack\n");
768 //printStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
769 //debugBelch("-- END stack\n\n");
771 debugBelch("Sp = %p pc = %d ", Sp, bciPtr);
772 disInstr(bco,bciPtr);
775 for (i = 8; i >= 0; i--) {
776 debugBelch("%d %p\n", i, (StgPtr)(*(Sp+i)));
780 //if (do_print_stack) checkStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
784 INTERP_TICK(it_insns);
787 ASSERT( (int)instrs[bciPtr] >= 0 && (int)instrs[bciPtr] < 27 );
788 it_ofreq[ (int)instrs[bciPtr] ] ++;
789 it_oofreq[ it_lastopc ][ (int)instrs[bciPtr] ] ++;
790 it_lastopc = (int)instrs[bciPtr];
794 /* We use the high 8 bits for flags, only the highest of which is
795 * currently allocated */
796 ASSERT((bci & 0xFF00) == (bci & 0x8000));
798 switch (bci & 0xFF) {
800 /* check for a breakpoint on the beginning of a let binding */
803 int arg1_brk_array, arg2_array_index, arg3_freeVars;
804 StgArrWords *breakPoints;
805 int returning_from_break; // are we resuming execution from a breakpoint?
806 // if yes, then don't break this time around
807 StgClosure *ioAction; // the io action to run at a breakpoint
809 StgAP_STACK *new_aps; // a closure to save the top stack frame on the heap
813 arg1_brk_array = BCO_NEXT; // 1st arg of break instruction
814 arg2_array_index = BCO_NEXT; // 2nd arg of break instruction
815 arg3_freeVars = BCO_NEXT; // 3rd arg of break instruction
817 // check if we are returning from a breakpoint - this info
818 // is stored in the flags field of the current TSO
819 returning_from_break = cap->r.rCurrentTSO->flags & TSO_STOPPED_ON_BREAKPOINT;
821 // if we are returning from a break then skip this section
822 // and continue executing
823 if (!returning_from_break)
825 breakPoints = (StgArrWords *) BCO_PTR(arg1_brk_array);
827 // stop the current thread if either the
828 // "rts_stop_next_breakpoint" flag is true OR if the
829 // breakpoint flag for this particular expression is
831 if (rts_stop_next_breakpoint == rtsTrue ||
832 breakPoints->payload[arg2_array_index] == rtsTrue)
834 // make sure we don't automatically stop at the
836 rts_stop_next_breakpoint = rtsFalse;
838 // allocate memory for a new AP_STACK, enough to
839 // store the top stack frame plus an
840 // stg_apply_interp_info pointer and a pointer to
842 size_words = BCO_BITMAP_SIZE(obj) + 2;
843 new_aps = (StgAP_STACK *) allocate (AP_STACK_sizeW(size_words));
844 SET_HDR(new_aps,&stg_AP_STACK_info,CCS_SYSTEM);
845 new_aps->size = size_words;
846 new_aps->fun = &stg_dummy_ret_closure;
848 // fill in the payload of the AP_STACK
849 new_aps->payload[0] = (StgClosure *)&stg_apply_interp_info;
850 new_aps->payload[1] = (StgClosure *)obj;
852 // copy the contents of the top stack frame into the AP_STACK
853 for (i = 2; i < size_words; i++)
855 new_aps->payload[i] = (StgClosure *)Sp[i-2];
858 // prepare the stack so that we can call the
859 // rts_breakpoint_io_action and ensure that the stack is
860 // in a reasonable state for the GC and so that
861 // execution of this BCO can continue when we resume
862 ioAction = (StgClosure *) deRefStablePtr (rts_breakpoint_io_action);
865 Sp[7] = (W_)&stg_apply_interp_info;
866 Sp[6] = (W_)&stg_noforceIO_info; // see [unreg] below
867 Sp[5] = (W_)new_aps; // the AP_STACK
868 Sp[4] = (W_)BCO_PTR(arg3_freeVars); // the info about local vars of the breakpoint
869 Sp[3] = (W_)False_closure; // True <=> a breakpoint
870 Sp[2] = (W_)&stg_ap_pppv_info;
871 Sp[1] = (W_)ioAction; // apply the IO action to its two arguments above
872 Sp[0] = (W_)&stg_enter_info; // get ready to run the IO action
873 // Note [unreg]: in unregisterised mode, the return
874 // convention for IO is different. The
875 // stg_noForceIO_info stack frame is necessary to
876 // account for this difference.
878 // set the flag in the TSO to say that we are now
879 // stopping at a breakpoint so that when we resume
880 // we don't stop on the same breakpoint that we
881 // already stopped at just now
882 cap->r.rCurrentTSO->flags |= TSO_STOPPED_ON_BREAKPOINT;
884 // stop this thread and return to the scheduler -
885 // eventually we will come back and the IO action on
886 // the top of the stack will be executed
887 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
890 // record that this thread is not stopped at a breakpoint anymore
891 cap->r.rCurrentTSO->flags &= ~TSO_STOPPED_ON_BREAKPOINT;
893 // continue normal execution of the byte code instructions
898 // Explicit stack check at the beginning of a function
899 // *only* (stack checks in case alternatives are
900 // propagated to the enclosing function).
901 StgWord stk_words_reqd = BCO_GET_LARGE_ARG + 1;
902 if (Sp - stk_words_reqd < SpLim) {
905 Sp[0] = (W_)&stg_apply_interp_info;
906 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
941 Sp[-1] = BCO_PTR(o1);
946 case bci_PUSH_ALTS: {
947 int o_bco = BCO_NEXT;
948 Sp[-2] = (W_)&stg_ctoi_R1p_info;
949 Sp[-1] = BCO_PTR(o_bco);
954 case bci_PUSH_ALTS_P: {
955 int o_bco = BCO_NEXT;
956 Sp[-2] = (W_)&stg_ctoi_R1unpt_info;
957 Sp[-1] = BCO_PTR(o_bco);
962 case bci_PUSH_ALTS_N: {
963 int o_bco = BCO_NEXT;
964 Sp[-2] = (W_)&stg_ctoi_R1n_info;
965 Sp[-1] = BCO_PTR(o_bco);
970 case bci_PUSH_ALTS_F: {
971 int o_bco = BCO_NEXT;
972 Sp[-2] = (W_)&stg_ctoi_F1_info;
973 Sp[-1] = BCO_PTR(o_bco);
978 case bci_PUSH_ALTS_D: {
979 int o_bco = BCO_NEXT;
980 Sp[-2] = (W_)&stg_ctoi_D1_info;
981 Sp[-1] = BCO_PTR(o_bco);
986 case bci_PUSH_ALTS_L: {
987 int o_bco = BCO_NEXT;
988 Sp[-2] = (W_)&stg_ctoi_L1_info;
989 Sp[-1] = BCO_PTR(o_bco);
994 case bci_PUSH_ALTS_V: {
995 int o_bco = BCO_NEXT;
996 Sp[-2] = (W_)&stg_ctoi_V_info;
997 Sp[-1] = BCO_PTR(o_bco);
1002 case bci_PUSH_APPLY_N:
1003 Sp--; Sp[0] = (W_)&stg_ap_n_info;
1005 case bci_PUSH_APPLY_V:
1006 Sp--; Sp[0] = (W_)&stg_ap_v_info;
1008 case bci_PUSH_APPLY_F:
1009 Sp--; Sp[0] = (W_)&stg_ap_f_info;
1011 case bci_PUSH_APPLY_D:
1012 Sp--; Sp[0] = (W_)&stg_ap_d_info;
1014 case bci_PUSH_APPLY_L:
1015 Sp--; Sp[0] = (W_)&stg_ap_l_info;
1017 case bci_PUSH_APPLY_P:
1018 Sp--; Sp[0] = (W_)&stg_ap_p_info;
1020 case bci_PUSH_APPLY_PP:
1021 Sp--; Sp[0] = (W_)&stg_ap_pp_info;
1023 case bci_PUSH_APPLY_PPP:
1024 Sp--; Sp[0] = (W_)&stg_ap_ppp_info;
1026 case bci_PUSH_APPLY_PPPP:
1027 Sp--; Sp[0] = (W_)&stg_ap_pppp_info;
1029 case bci_PUSH_APPLY_PPPPP:
1030 Sp--; Sp[0] = (W_)&stg_ap_ppppp_info;
1032 case bci_PUSH_APPLY_PPPPPP:
1033 Sp--; Sp[0] = (W_)&stg_ap_pppppp_info;
1036 case bci_PUSH_UBX: {
1038 int o_lits = BCO_NEXT;
1039 int n_words = BCO_NEXT;
1041 for (i = 0; i < n_words; i++) {
1042 Sp[i] = (W_)BCO_LIT(o_lits+i);
1050 /* a_1, .. a_n, b_1, .. b_by, s => a_1, .. a_n, s */
1055 INTERP_TICK(it_slides);
1059 case bci_ALLOC_AP: {
1061 int n_payload = BCO_NEXT;
1062 ap = (StgAP*)allocate(AP_sizeW(n_payload));
1064 ap->n_args = n_payload;
1065 SET_HDR(ap, &stg_AP_info, CCS_SYSTEM/*ToDo*/)
1070 case bci_ALLOC_AP_NOUPD: {
1072 int n_payload = BCO_NEXT;
1073 ap = (StgAP*)allocate(AP_sizeW(n_payload));
1075 ap->n_args = n_payload;
1076 SET_HDR(ap, &stg_AP_NOUPD_info, CCS_SYSTEM/*ToDo*/)
1081 case bci_ALLOC_PAP: {
1083 int arity = BCO_NEXT;
1084 int n_payload = BCO_NEXT;
1085 pap = (StgPAP*)allocate(PAP_sizeW(n_payload));
1087 pap->n_args = n_payload;
1089 SET_HDR(pap, &stg_PAP_info, CCS_SYSTEM/*ToDo*/)
1096 int stkoff = BCO_NEXT;
1097 int n_payload = BCO_NEXT;
1098 StgAP* ap = (StgAP*)Sp[stkoff];
1099 ASSERT((int)ap->n_args == n_payload);
1100 ap->fun = (StgClosure*)Sp[0];
1102 // The function should be a BCO, and its bitmap should
1103 // cover the payload of the AP correctly.
1104 ASSERT(get_itbl(ap->fun)->type == BCO
1105 && BCO_BITMAP_SIZE(ap->fun) == ap->n_args);
1107 for (i = 0; i < n_payload; i++)
1108 ap->payload[i] = (StgClosure*)Sp[i+1];
1110 IF_DEBUG(interpreter,
1111 debugBelch("\tBuilt ");
1112 printObj((StgClosure*)ap);
1119 int stkoff = BCO_NEXT;
1120 int n_payload = BCO_NEXT;
1121 StgPAP* pap = (StgPAP*)Sp[stkoff];
1122 ASSERT((int)pap->n_args == n_payload);
1123 pap->fun = (StgClosure*)Sp[0];
1125 // The function should be a BCO
1126 ASSERT(get_itbl(pap->fun)->type == BCO);
1128 for (i = 0; i < n_payload; i++)
1129 pap->payload[i] = (StgClosure*)Sp[i+1];
1131 IF_DEBUG(interpreter,
1132 debugBelch("\tBuilt ");
1133 printObj((StgClosure*)pap);
1139 /* Unpack N ptr words from t.o.s constructor */
1141 int n_words = BCO_NEXT;
1142 StgClosure* con = (StgClosure*)Sp[0];
1144 for (i = 0; i < n_words; i++) {
1145 Sp[i] = (W_)con->payload[i];
1152 int o_itbl = BCO_NEXT;
1153 int n_words = BCO_NEXT;
1154 StgInfoTable* itbl = INFO_PTR_TO_STRUCT(BCO_LIT(o_itbl));
1155 int request = CONSTR_sizeW( itbl->layout.payload.ptrs,
1156 itbl->layout.payload.nptrs );
1157 StgClosure* con = (StgClosure*)allocate_NONUPD(request);
1158 ASSERT( itbl->layout.payload.ptrs + itbl->layout.payload.nptrs > 0);
1159 SET_HDR(con, (StgInfoTable*)BCO_LIT(o_itbl), CCS_SYSTEM/*ToDo*/);
1160 for (i = 0; i < n_words; i++) {
1161 con->payload[i] = (StgClosure*)Sp[i];
1166 IF_DEBUG(interpreter,
1167 debugBelch("\tBuilt ");
1168 printObj((StgClosure*)con);
1173 case bci_TESTLT_P: {
1174 unsigned int discr = BCO_NEXT;
1175 int failto = BCO_NEXT;
1176 StgClosure* con = (StgClosure*)Sp[0];
1177 if (GET_TAG(con) >= discr) {
1183 case bci_TESTEQ_P: {
1184 unsigned int discr = BCO_NEXT;
1185 int failto = BCO_NEXT;
1186 StgClosure* con = (StgClosure*)Sp[0];
1187 if (GET_TAG(con) != discr) {
1193 case bci_TESTLT_I: {
1194 // There should be an Int at Sp[1], and an info table at Sp[0].
1195 int discr = BCO_NEXT;
1196 int failto = BCO_NEXT;
1197 I_ stackInt = (I_)Sp[1];
1198 if (stackInt >= (I_)BCO_LIT(discr))
1203 case bci_TESTEQ_I: {
1204 // There should be an Int at Sp[1], and an info table at Sp[0].
1205 int discr = BCO_NEXT;
1206 int failto = BCO_NEXT;
1207 I_ stackInt = (I_)Sp[1];
1208 if (stackInt != (I_)BCO_LIT(discr)) {
1214 case bci_TESTLT_D: {
1215 // There should be a Double at Sp[1], and an info table at Sp[0].
1216 int discr = BCO_NEXT;
1217 int failto = BCO_NEXT;
1218 StgDouble stackDbl, discrDbl;
1219 stackDbl = PK_DBL( & Sp[1] );
1220 discrDbl = PK_DBL( & BCO_LIT(discr) );
1221 if (stackDbl >= discrDbl) {
1227 case bci_TESTEQ_D: {
1228 // There should be a Double at Sp[1], and an info table at Sp[0].
1229 int discr = BCO_NEXT;
1230 int failto = BCO_NEXT;
1231 StgDouble stackDbl, discrDbl;
1232 stackDbl = PK_DBL( & Sp[1] );
1233 discrDbl = PK_DBL( & BCO_LIT(discr) );
1234 if (stackDbl != discrDbl) {
1240 case bci_TESTLT_F: {
1241 // There should be a Float at Sp[1], and an info table at Sp[0].
1242 int discr = BCO_NEXT;
1243 int failto = BCO_NEXT;
1244 StgFloat stackFlt, discrFlt;
1245 stackFlt = PK_FLT( & Sp[1] );
1246 discrFlt = PK_FLT( & BCO_LIT(discr) );
1247 if (stackFlt >= discrFlt) {
1253 case bci_TESTEQ_F: {
1254 // There should be a Float at Sp[1], and an info table at Sp[0].
1255 int discr = BCO_NEXT;
1256 int failto = BCO_NEXT;
1257 StgFloat stackFlt, discrFlt;
1258 stackFlt = PK_FLT( & Sp[1] );
1259 discrFlt = PK_FLT( & BCO_LIT(discr) );
1260 if (stackFlt != discrFlt) {
1266 // Control-flow ish things
1268 // Context-switch check. We put it here to ensure that
1269 // the interpreter has done at least *some* work before
1270 // context switching: sometimes the scheduler can invoke
1271 // the interpreter with context_switch == 1, particularly
1272 // if the -C0 flag has been given on the cmd line.
1273 if (context_switch) {
1274 Sp--; Sp[0] = (W_)&stg_enter_info;
1275 RETURN_TO_SCHEDULER(ThreadInterpret, ThreadYielding);
1280 tagged_obj = (StgClosure *)Sp[0];
1286 Sp[0] = (W_)&stg_gc_unpt_r1_info;
1287 goto do_return_unboxed;
1290 Sp[0] = (W_)&stg_gc_unbx_r1_info;
1291 goto do_return_unboxed;
1294 Sp[0] = (W_)&stg_gc_f1_info;
1295 goto do_return_unboxed;
1298 Sp[0] = (W_)&stg_gc_d1_info;
1299 goto do_return_unboxed;
1302 Sp[0] = (W_)&stg_gc_l1_info;
1303 goto do_return_unboxed;
1306 Sp[0] = (W_)&stg_gc_void_info;
1307 goto do_return_unboxed;
1310 int stkoff = BCO_NEXT;
1311 signed short n = (signed short)(BCO_NEXT);
1312 Sp[stkoff] += (W_)n;
1318 int stk_offset = BCO_NEXT;
1319 int o_itbl = BCO_NEXT;
1320 void(*marshall_fn)(void*) = (void (*)(void*))BCO_LIT(o_itbl);
1322 RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE
1323 + sizeofW(StgRetDyn);
1325 /* the stack looks like this:
1327 | | <- Sp + stk_offset
1331 | | <- Sp + ret_size + 1
1333 | C fun | <- Sp + ret_size
1338 ret is a placeholder for the return address, and may be
1341 We need to copy the args out of the TSO, because when
1342 we call suspendThread() we no longer own the TSO stack,
1343 and it may move at any time - indeed suspendThread()
1344 itself may do stack squeezing and move our args.
1345 So we make a copy of the argument block.
1348 #define ROUND_UP_WDS(p) ((((StgWord)(p)) + sizeof(W_)-1)/sizeof(W_))
1350 ffi_cif *cif = (ffi_cif *)marshall_fn;
1351 nat nargs = cif->nargs;
1355 W_ ret[2]; // max needed
1356 W_ *arguments[stk_offset]; // max needed
1357 void *argptrs[nargs];
1360 if (cif->rtype->type == FFI_TYPE_VOID) {
1361 // necessary because cif->rtype->size == 1 for void,
1362 // but the bytecode generator has not pushed a
1363 // placeholder in this case.
1366 ret_size = ROUND_UP_WDS(cif->rtype->size);
1369 memcpy(arguments, Sp+ret_size+1,
1370 sizeof(W_) * (stk_offset-1-ret_size));
1372 // libffi expects the args as an array of pointers to
1373 // values, so we have to construct this array before making
1375 p = (StgPtr)arguments;
1376 for (i = 0; i < nargs; i++) {
1377 argptrs[i] = (void *)p;
1378 // get the size from the cif
1379 p += ROUND_UP_WDS(cif->arg_types[i]->size);
1382 // this is the function we're going to call
1383 fn = (void(*)(void))Sp[ret_size];
1385 // Restore the Haskell thread's current value of errno
1386 errno = cap->r.rCurrentTSO->saved_errno;
1388 // There are a bunch of non-ptr words on the stack (the
1389 // ccall args, the ccall fun address and space for the
1390 // result), which we need to cover with an info table
1391 // since we might GC during this call.
1393 // We know how many (non-ptr) words there are before the
1394 // next valid stack frame: it is the stk_offset arg to the
1395 // CCALL instruction. So we build a RET_DYN stack frame
1396 // on the stack frame to describe this chunk of stack.
1399 ((StgRetDyn *)Sp)->liveness = R1_PTR | N_NONPTRS(stk_offset);
1400 ((StgRetDyn *)Sp)->info = (StgInfoTable *)&stg_gc_gen_info;
1402 // save obj (pointer to the current BCO), since this
1403 // might move during the call. We use the R1 slot in the
1404 // RET_DYN frame for this, hence R1_PTR above.
1405 ((StgRetDyn *)Sp)->payload[0] = (StgClosure *)obj;
1407 SAVE_STACK_POINTERS;
1408 tok = suspendThread(&cap->r);
1410 // We already made a copy of the arguments above.
1411 ffi_call(cif, fn, ret, argptrs);
1413 // And restart the thread again, popping the RET_DYN frame.
1414 cap = (Capability *)((void *)((unsigned char*)resumeThread(tok) - sizeof(StgFunTable)));
1415 LOAD_STACK_POINTERS;
1417 // Re-load the pointer to the BCO from the RET_DYN frame,
1418 // it might have moved during the call. Also reload the
1419 // pointers to the components of the BCO.
1420 obj = ((StgRetDyn *)Sp)->payload[0];
1422 instrs = (StgWord16*)(bco->instrs->payload);
1423 literals = (StgWord*)(&bco->literals->payload[0]);
1424 ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
1428 // Save the Haskell thread's current value of errno
1429 cap->r.rCurrentTSO->saved_errno = errno;
1431 // Copy the return value back to the TSO stack. It is at
1432 // most 2 words large, and resides at arguments[0].
1433 memcpy(Sp, ret, sizeof(W_) * stg_min(stk_offset,ret_size));
1439 /* BCO_NEXT modifies bciPtr, so be conservative. */
1440 int nextpc = BCO_NEXT;
1446 barf("interpretBCO: hit a CASEFAIL");
1450 barf("interpretBCO: unknown or unimplemented opcode %d",
1453 } /* switch on opcode */
1457 barf("interpretBCO: fell off end of the interpreter");