1 /* -----------------------------------------------------------------------------
4 * Copyright (c) The GHC Team, 1994-2002.
5 * ---------------------------------------------------------------------------*/
7 #include "PosixSource.h"
15 #include "LdvProfile.h"
21 #include "Bytecodes.h"
23 #include "Disassembler.h"
24 #include "Interpreter.h"
26 #include <string.h> /* for memcpy */
35 /* --------------------------------------------------------------------------
36 * The bytecode interpreter
37 * ------------------------------------------------------------------------*/
39 /* Gather stats about entry, opcode, opcode-pair frequencies. For
40 tuning the interpreter. */
42 /* #define INTERP_STATS */
45 /* Sp points to the lowest live word on the stack. */
47 #define BCO_NEXT instrs[bciPtr++]
48 #define BCO_NEXT_32 (bciPtr += 2, (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
49 #define BCO_NEXT_64 (bciPtr += 4, (((StgWord) instrs[bciPtr-4]) << 48) + (((StgWord) instrs[bciPtr-3]) << 32) + (((StgWord) instrs[bciPtr-2]) << 16) + ((StgWord) instrs[bciPtr-1]))
50 #if WORD_SIZE_IN_BITS == 32
51 #define BCO_NEXT_WORD BCO_NEXT_32
52 #elif WORD_SIZE_IN_BITS == 64
53 #define BCO_NEXT_WORD BCO_NEXT_64
55 #error Cannot cope with WORD_SIZE_IN_BITS being neither 32 nor 64
57 #define BCO_GET_LARGE_ARG ((bci & bci_FLAG_LARGE_ARGS) ? BCO_NEXT_WORD : BCO_NEXT)
59 #define BCO_PTR(n) (W_)ptrs[n]
60 #define BCO_LIT(n) literals[n]
62 #define LOAD_STACK_POINTERS \
63 Sp = cap->r.rCurrentTSO->sp; \
64 /* We don't change this ... */ \
65 SpLim = cap->r.rCurrentTSO->stack + RESERVED_STACK_WORDS;
67 #define SAVE_STACK_POINTERS \
68 cap->r.rCurrentTSO->sp = Sp
70 #define RETURN_TO_SCHEDULER(todo,retcode) \
71 SAVE_STACK_POINTERS; \
72 cap->r.rCurrentTSO->what_next = (todo); \
73 threadPaused(cap,cap->r.rCurrentTSO); \
74 cap->r.rRet = (retcode); \
77 #define RETURN_TO_SCHEDULER_NO_PAUSE(todo,retcode) \
78 SAVE_STACK_POINTERS; \
79 cap->r.rCurrentTSO->what_next = (todo); \
80 cap->r.rRet = (retcode); \
85 allocate_NONUPD (int n_words)
87 return allocate(stg_max(sizeofW(StgHeader)+MIN_PAYLOAD_SIZE, n_words));
90 int rts_stop_next_breakpoint = 0;
91 int rts_stop_on_exception = 0;
95 /* Hacky stats, for tuning the interpreter ... */
96 int it_unknown_entries[N_CLOSURE_TYPES];
97 int it_total_unknown_entries;
109 int it_oofreq[27][27];
113 #define INTERP_TICK(n) (n)++
115 void interp_startup ( void )
118 it_retto_BCO = it_retto_UPDATE = it_retto_other = 0;
119 it_total_entries = it_total_unknown_entries = 0;
120 for (i = 0; i < N_CLOSURE_TYPES; i++)
121 it_unknown_entries[i] = 0;
122 it_slides = it_insns = it_BCO_entries = 0;
123 for (i = 0; i < 27; i++) it_ofreq[i] = 0;
124 for (i = 0; i < 27; i++)
125 for (j = 0; j < 27; j++)
130 void interp_shutdown ( void )
132 int i, j, k, o_max, i_max, j_max;
133 debugBelch("%d constrs entered -> (%d BCO, %d UPD, %d ??? )\n",
134 it_retto_BCO + it_retto_UPDATE + it_retto_other,
135 it_retto_BCO, it_retto_UPDATE, it_retto_other );
136 debugBelch("%d total entries, %d unknown entries \n",
137 it_total_entries, it_total_unknown_entries);
138 for (i = 0; i < N_CLOSURE_TYPES; i++) {
139 if (it_unknown_entries[i] == 0) continue;
140 debugBelch(" type %2d: unknown entries (%4.1f%%) == %d\n",
141 i, 100.0 * ((double)it_unknown_entries[i]) /
142 ((double)it_total_unknown_entries),
143 it_unknown_entries[i]);
145 debugBelch("%d insns, %d slides, %d BCO_entries\n",
146 it_insns, it_slides, it_BCO_entries);
147 for (i = 0; i < 27; i++)
148 debugBelch("opcode %2d got %d\n", i, it_ofreq[i] );
150 for (k = 1; k < 20; k++) {
153 for (i = 0; i < 27; i++) {
154 for (j = 0; j < 27; j++) {
155 if (it_oofreq[i][j] > o_max) {
156 o_max = it_oofreq[i][j];
157 i_max = i; j_max = j;
162 debugBelch("%d: count (%4.1f%%) %6d is %d then %d\n",
163 k, ((double)o_max) * 100.0 / ((double)it_insns), o_max,
165 it_oofreq[i_max][j_max] = 0;
170 #else // !INTERP_STATS
172 #define INTERP_TICK(n) /* nothing */
176 static StgWord app_ptrs_itbl[] = {
179 (W_)&stg_ap_ppp_info,
180 (W_)&stg_ap_pppp_info,
181 (W_)&stg_ap_ppppp_info,
182 (W_)&stg_ap_pppppp_info,
185 HsStablePtr rts_breakpoint_io_action; // points to the IO action which is executed on a breakpoint
186 // it is set in main/GHC.hs:runStmt
189 interpretBCO (Capability* cap)
191 // Use of register here is primarily to make it clear to compilers
192 // that these entities are non-aliasable.
193 register StgPtr Sp; // local state -- stack pointer
194 register StgPtr SpLim; // local state -- stack lim pointer
195 register StgClosure *tagged_obj = 0, *obj;
200 // ------------------------------------------------------------------------
203 // We have a closure to evaluate. Stack looks like:
207 // Sp | -------------------> closure
210 if (Sp[0] == (W_)&stg_enter_info) {
215 // ------------------------------------------------------------------------
218 // We have a BCO application to perform. Stack looks like:
229 else if (Sp[0] == (W_)&stg_apply_interp_info) {
230 obj = UNTAG_CLOSURE((StgClosure *)Sp[1]);
235 // ------------------------------------------------------------------------
238 // We have an unboxed value to return. See comment before
239 // do_return_unboxed, below.
242 goto do_return_unboxed;
245 // Evaluate the object on top of the stack.
247 tagged_obj = (StgClosure*)Sp[0]; Sp++;
250 obj = UNTAG_CLOSURE(tagged_obj);
251 INTERP_TICK(it_total_evals);
253 IF_DEBUG(interpreter,
255 "\n---------------------------------------------------------------\n");
256 debugBelch("Evaluating: "); printObj(obj);
257 debugBelch("Sp = %p\n", Sp);
260 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
264 IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));
266 switch ( get_itbl(obj)->type ) {
271 case IND_OLDGEN_PERM:
274 tagged_obj = ((StgInd*)obj)->indirectee;
285 case CONSTR_NOCAF_STATIC:
299 ASSERT(((StgBCO *)obj)->arity > 0);
303 case AP: /* Copied from stg_AP_entry. */
312 if (Sp - (words+sizeofW(StgUpdateFrame)) < SpLim) {
314 Sp[1] = (W_)tagged_obj;
315 Sp[0] = (W_)&stg_enter_info;
316 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
319 /* Ok; we're safe. Party on. Push an update frame. */
320 Sp -= sizeofW(StgUpdateFrame);
322 StgUpdateFrame *__frame;
323 __frame = (StgUpdateFrame *)Sp;
324 SET_INFO(__frame, (StgInfoTable *)&stg_upd_frame_info);
325 __frame->updatee = (StgClosure *)(ap);
328 /* Reload the stack */
330 for (i=0; i < words; i++) {
331 Sp[i] = (W_)ap->payload[i];
334 obj = UNTAG_CLOSURE((StgClosure*)ap->fun);
335 ASSERT(get_itbl(obj)->type == BCO);
344 j = get_itbl(obj)->type;
345 ASSERT(j >= 0 && j < N_CLOSURE_TYPES);
346 it_unknown_entries[j]++;
347 it_total_unknown_entries++;
351 // Can't handle this object; yield to scheduler
352 IF_DEBUG(interpreter,
353 debugBelch("evaluating unknown closure -- yielding to sched\n");
357 Sp[1] = (W_)tagged_obj;
358 Sp[0] = (W_)&stg_enter_info;
359 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
363 // ------------------------------------------------------------------------
364 // We now have an evaluated object (tagged_obj). The next thing to
365 // do is return it to the stack frame on top of the stack.
367 obj = UNTAG_CLOSURE(tagged_obj);
368 ASSERT(closure_HNF(obj));
370 IF_DEBUG(interpreter,
372 "\n---------------------------------------------------------------\n");
373 debugBelch("Returning: "); printObj(obj);
374 debugBelch("Sp = %p\n", Sp);
376 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
380 IF_DEBUG(sanity,checkStackChunk(Sp, cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size));
382 switch (get_itbl((StgClosure *)Sp)->type) {
385 const StgInfoTable *info;
387 // NOTE: not using get_itbl().
388 info = ((StgClosure *)Sp)->header.info;
389 if (info == (StgInfoTable *)&stg_ap_v_info) {
390 n = 1; m = 0; goto do_apply;
392 if (info == (StgInfoTable *)&stg_ap_f_info) {
393 n = 1; m = 1; goto do_apply;
395 if (info == (StgInfoTable *)&stg_ap_d_info) {
396 n = 1; m = sizeofW(StgDouble); goto do_apply;
398 if (info == (StgInfoTable *)&stg_ap_l_info) {
399 n = 1; m = sizeofW(StgInt64); goto do_apply;
401 if (info == (StgInfoTable *)&stg_ap_n_info) {
402 n = 1; m = 1; goto do_apply;
404 if (info == (StgInfoTable *)&stg_ap_p_info) {
405 n = 1; m = 1; goto do_apply;
407 if (info == (StgInfoTable *)&stg_ap_pp_info) {
408 n = 2; m = 2; goto do_apply;
410 if (info == (StgInfoTable *)&stg_ap_ppp_info) {
411 n = 3; m = 3; goto do_apply;
413 if (info == (StgInfoTable *)&stg_ap_pppp_info) {
414 n = 4; m = 4; goto do_apply;
416 if (info == (StgInfoTable *)&stg_ap_ppppp_info) {
417 n = 5; m = 5; goto do_apply;
419 if (info == (StgInfoTable *)&stg_ap_pppppp_info) {
420 n = 6; m = 6; goto do_apply;
422 goto do_return_unrecognised;
426 // Returning to an update frame: do the update, pop the update
427 // frame, and continue with the next stack frame.
429 // NB. we must update with the *tagged* pointer. Some tags
430 // are not optional, and if we omit the tag bits when updating
431 // then bad things can happen (albeit very rarely). See #1925.
432 // What happened was an indirection was created with an
433 // untagged pointer, and this untagged pointer was propagated
434 // to a PAP by the GC, violating the invariant that PAPs
435 // always contain a tagged pointer to the function.
436 INTERP_TICK(it_retto_UPDATE);
437 UPD_IND(((StgUpdateFrame *)Sp)->updatee, tagged_obj);
438 Sp += sizeofW(StgUpdateFrame);
442 // Returning to an interpreted continuation: put the object on
443 // the stack, and start executing the BCO.
444 INTERP_TICK(it_retto_BCO);
447 // NB. return the untagged object; the bytecode expects it to
448 // be untagged. XXX this doesn't seem right.
449 obj = (StgClosure*)Sp[2];
450 ASSERT(get_itbl(obj)->type == BCO);
454 do_return_unrecognised:
456 // Can't handle this return address; yield to scheduler
457 INTERP_TICK(it_retto_other);
458 IF_DEBUG(interpreter,
459 debugBelch("returning to unknown frame -- yielding to sched\n");
460 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
463 Sp[1] = (W_)tagged_obj;
464 Sp[0] = (W_)&stg_enter_info;
465 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
469 // -------------------------------------------------------------------------
470 // Returning an unboxed value. The stack looks like this:
487 // where XXXX_info is one of the stg_gc_unbx_r1_info family.
489 // We're only interested in the case when the real return address
490 // is a BCO; otherwise we'll return to the scheduler.
496 ASSERT( Sp[0] == (W_)&stg_gc_unbx_r1_info
497 || Sp[0] == (W_)&stg_gc_unpt_r1_info
498 || Sp[0] == (W_)&stg_gc_f1_info
499 || Sp[0] == (W_)&stg_gc_d1_info
500 || Sp[0] == (W_)&stg_gc_l1_info
501 || Sp[0] == (W_)&stg_gc_void_info // VoidRep
504 // get the offset of the stg_ctoi_ret_XXX itbl
505 offset = stack_frame_sizeW((StgClosure *)Sp);
507 switch (get_itbl((StgClosure *)Sp+offset)->type) {
510 // Returning to an interpreted continuation: put the object on
511 // the stack, and start executing the BCO.
512 INTERP_TICK(it_retto_BCO);
513 obj = (StgClosure*)Sp[offset+1];
514 ASSERT(get_itbl(obj)->type == BCO);
515 goto run_BCO_return_unboxed;
519 // Can't handle this return address; yield to scheduler
520 INTERP_TICK(it_retto_other);
521 IF_DEBUG(interpreter,
522 debugBelch("returning to unknown frame -- yielding to sched\n");
523 printStackChunk(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size);
525 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
532 // -------------------------------------------------------------------------
536 ASSERT(obj == UNTAG_CLOSURE(tagged_obj));
537 // we have a function to apply (obj), and n arguments taking up m
538 // words on the stack. The info table (stg_ap_pp_info or whatever)
539 // is on top of the arguments on the stack.
541 switch (get_itbl(obj)->type) {
549 // we only cope with PAPs whose function is a BCO
550 if (get_itbl(UNTAG_CLOSURE(pap->fun))->type != BCO) {
551 goto defer_apply_to_sched;
558 // n must be greater than 1, and the only kinds of
559 // application we support with more than one argument
560 // are all pointers...
562 // Shuffle the args for this function down, and put
563 // the appropriate info table in the gap.
564 for (i = 0; i < arity; i++) {
565 Sp[(int)i-1] = Sp[i];
566 // ^^^^^ careful, i-1 might be negative, but i is unsigned
568 Sp[arity-1] = app_ptrs_itbl[n-arity-1];
570 // unpack the PAP's arguments onto the stack
572 for (i = 0; i < pap->n_args; i++) {
573 Sp[i] = (W_)pap->payload[i];
575 obj = UNTAG_CLOSURE(pap->fun);
578 else if (arity == n) {
580 for (i = 0; i < pap->n_args; i++) {
581 Sp[i] = (W_)pap->payload[i];
583 obj = UNTAG_CLOSURE(pap->fun);
586 else /* arity > n */ {
587 // build a new PAP and return it.
589 new_pap = (StgPAP *)allocate(PAP_sizeW(pap->n_args + m));
590 SET_HDR(new_pap,&stg_PAP_info,CCCS);
591 new_pap->arity = pap->arity - n;
592 new_pap->n_args = pap->n_args + m;
593 new_pap->fun = pap->fun;
594 for (i = 0; i < pap->n_args; i++) {
595 new_pap->payload[i] = pap->payload[i];
597 for (i = 0; i < m; i++) {
598 new_pap->payload[pap->n_args + i] = (StgClosure *)Sp[i];
600 tagged_obj = (StgClosure *)new_pap;
610 arity = ((StgBCO *)obj)->arity;
613 // n must be greater than 1, and the only kinds of
614 // application we support with more than one argument
615 // are all pointers...
617 // Shuffle the args for this function down, and put
618 // the appropriate info table in the gap.
619 for (i = 0; i < arity; i++) {
620 Sp[(int)i-1] = Sp[i];
621 // ^^^^^ careful, i-1 might be negative, but i is unsigned
623 Sp[arity-1] = app_ptrs_itbl[n-arity-1];
627 else if (arity == n) {
630 else /* arity > n */ {
631 // build a PAP and return it.
634 pap = (StgPAP *)allocate(PAP_sizeW(m));
635 SET_HDR(pap, &stg_PAP_info,CCCS);
636 pap->arity = arity - n;
639 for (i = 0; i < m; i++) {
640 pap->payload[i] = (StgClosure *)Sp[i];
642 tagged_obj = (StgClosure *)pap;
648 // No point in us applying machine-code functions
650 defer_apply_to_sched:
652 Sp[1] = (W_)tagged_obj;
653 Sp[0] = (W_)&stg_enter_info;
654 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
657 // ------------------------------------------------------------------------
658 // Ok, we now have a bco (obj), and its arguments are all on the
659 // stack. We can start executing the byte codes.
661 // The stack is in one of two states. First, if this BCO is a
671 // Second, if this BCO is a continuation:
686 // where retval is the value being returned to this continuation.
687 // In the event of a stack check, heap check, or context switch,
688 // we need to leave the stack in a sane state so the garbage
689 // collector can find all the pointers.
691 // (1) BCO is a function: the BCO's bitmap describes the
692 // pointerhood of the arguments.
694 // (2) BCO is a continuation: BCO's bitmap describes the
695 // pointerhood of the free variables.
697 // Sadly we have three different kinds of stack/heap/cswitch check
703 if (doYouWantToGC()) {
704 Sp--; Sp[0] = (W_)&stg_enter_info;
705 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
707 // Stack checks aren't necessary at return points, the stack use
708 // is aggregated into the enclosing function entry point.
712 run_BCO_return_unboxed:
714 if (doYouWantToGC()) {
715 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
717 // Stack checks aren't necessary at return points, the stack use
718 // is aggregated into the enclosing function entry point.
726 Sp[0] = (W_)&stg_apply_interp_info;
727 checkStackChunk(Sp,SpLim);
732 if (doYouWantToGC()) {
735 Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
736 RETURN_TO_SCHEDULER(ThreadInterpret, HeapOverflow);
740 if (Sp - INTERP_STACK_CHECK_THRESH < SpLim) {
743 Sp[0] = (W_)&stg_apply_interp_info; // placeholder, really
744 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
749 // Now, actually interpret the BCO... (no returning to the
750 // scheduler again until the stack is in an orderly state).
752 INTERP_TICK(it_BCO_entries);
754 register int bciPtr = 1; /* instruction pointer */
755 register StgWord16 bci;
756 register StgBCO* bco = (StgBCO*)obj;
757 register StgWord16* instrs = (StgWord16*)(bco->instrs->payload);
758 register StgWord* literals = (StgWord*)(&bco->literals->payload[0]);
759 register StgPtr* ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
762 it_lastopc = 0; /* no opcode */
766 ASSERT(bciPtr <= instrs[0]);
767 IF_DEBUG(interpreter,
768 //if (do_print_stack) {
769 //debugBelch("\n-- BEGIN stack\n");
770 //printStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
771 //debugBelch("-- END stack\n\n");
773 debugBelch("Sp = %p pc = %d ", Sp, bciPtr);
774 disInstr(bco,bciPtr);
777 for (i = 8; i >= 0; i--) {
778 debugBelch("%d %p\n", i, (StgPtr)(*(Sp+i)));
782 //if (do_print_stack) checkStack(Sp,cap->r.rCurrentTSO->stack+cap->r.rCurrentTSO->stack_size,iSu);
786 INTERP_TICK(it_insns);
789 ASSERT( (int)instrs[bciPtr] >= 0 && (int)instrs[bciPtr] < 27 );
790 it_ofreq[ (int)instrs[bciPtr] ] ++;
791 it_oofreq[ it_lastopc ][ (int)instrs[bciPtr] ] ++;
792 it_lastopc = (int)instrs[bciPtr];
796 /* We use the high 8 bits for flags, only the highest of which is
797 * currently allocated */
798 ASSERT((bci & 0xFF00) == (bci & 0x8000));
800 switch (bci & 0xFF) {
802 /* check for a breakpoint on the beginning of a let binding */
805 int arg1_brk_array, arg2_array_index, arg3_freeVars;
806 StgArrWords *breakPoints;
807 int returning_from_break; // are we resuming execution from a breakpoint?
808 // if yes, then don't break this time around
809 StgClosure *ioAction; // the io action to run at a breakpoint
811 StgAP_STACK *new_aps; // a closure to save the top stack frame on the heap
815 arg1_brk_array = BCO_NEXT; // 1st arg of break instruction
816 arg2_array_index = BCO_NEXT; // 2nd arg of break instruction
817 arg3_freeVars = BCO_NEXT; // 3rd arg of break instruction
819 // check if we are returning from a breakpoint - this info
820 // is stored in the flags field of the current TSO
821 returning_from_break = cap->r.rCurrentTSO->flags & TSO_STOPPED_ON_BREAKPOINT;
823 // if we are returning from a break then skip this section
824 // and continue executing
825 if (!returning_from_break)
827 breakPoints = (StgArrWords *) BCO_PTR(arg1_brk_array);
829 // stop the current thread if either the
830 // "rts_stop_next_breakpoint" flag is true OR if the
831 // breakpoint flag for this particular expression is
833 if (rts_stop_next_breakpoint == rtsTrue ||
834 breakPoints->payload[arg2_array_index] == rtsTrue)
836 // make sure we don't automatically stop at the
838 rts_stop_next_breakpoint = rtsFalse;
840 // allocate memory for a new AP_STACK, enough to
841 // store the top stack frame plus an
842 // stg_apply_interp_info pointer and a pointer to
844 size_words = BCO_BITMAP_SIZE(obj) + 2;
845 new_aps = (StgAP_STACK *) allocate (AP_STACK_sizeW(size_words));
846 SET_HDR(new_aps,&stg_AP_STACK_info,CCS_SYSTEM);
847 new_aps->size = size_words;
848 new_aps->fun = &stg_dummy_ret_closure;
850 // fill in the payload of the AP_STACK
851 new_aps->payload[0] = (StgClosure *)&stg_apply_interp_info;
852 new_aps->payload[1] = (StgClosure *)obj;
854 // copy the contents of the top stack frame into the AP_STACK
855 for (i = 2; i < size_words; i++)
857 new_aps->payload[i] = (StgClosure *)Sp[i-2];
860 // prepare the stack so that we can call the
861 // rts_breakpoint_io_action and ensure that the stack is
862 // in a reasonable state for the GC and so that
863 // execution of this BCO can continue when we resume
864 ioAction = (StgClosure *) deRefStablePtr (rts_breakpoint_io_action);
867 Sp[7] = (W_)&stg_apply_interp_info;
868 Sp[6] = (W_)&stg_noforceIO_info; // see [unreg] below
869 Sp[5] = (W_)new_aps; // the AP_STACK
870 Sp[4] = (W_)BCO_PTR(arg3_freeVars); // the info about local vars of the breakpoint
871 Sp[3] = (W_)False_closure; // True <=> a breakpoint
872 Sp[2] = (W_)&stg_ap_pppv_info;
873 Sp[1] = (W_)ioAction; // apply the IO action to its two arguments above
874 Sp[0] = (W_)&stg_enter_info; // get ready to run the IO action
875 // Note [unreg]: in unregisterised mode, the return
876 // convention for IO is different. The
877 // stg_noforceIO_info stack frame is necessary to
878 // account for this difference.
880 // set the flag in the TSO to say that we are now
881 // stopping at a breakpoint so that when we resume
882 // we don't stop on the same breakpoint that we
883 // already stopped at just now
884 cap->r.rCurrentTSO->flags |= TSO_STOPPED_ON_BREAKPOINT;
886 // stop this thread and return to the scheduler -
887 // eventually we will come back and the IO action on
888 // the top of the stack will be executed
889 RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
892 // record that this thread is not stopped at a breakpoint anymore
893 cap->r.rCurrentTSO->flags &= ~TSO_STOPPED_ON_BREAKPOINT;
895 // continue normal execution of the byte code instructions
900 // Explicit stack check at the beginning of a function
901 // *only* (stack checks in case alternatives are
902 // propagated to the enclosing function).
903 StgWord stk_words_reqd = BCO_GET_LARGE_ARG + 1;
904 if (Sp - stk_words_reqd < SpLim) {
907 Sp[0] = (W_)&stg_apply_interp_info;
908 RETURN_TO_SCHEDULER(ThreadInterpret, StackOverflow);
943 Sp[-1] = BCO_PTR(o1);
948 case bci_PUSH_ALTS: {
949 int o_bco = BCO_NEXT;
950 Sp[-2] = (W_)&stg_ctoi_R1p_info;
951 Sp[-1] = BCO_PTR(o_bco);
956 case bci_PUSH_ALTS_P: {
957 int o_bco = BCO_NEXT;
958 Sp[-2] = (W_)&stg_ctoi_R1unpt_info;
959 Sp[-1] = BCO_PTR(o_bco);
964 case bci_PUSH_ALTS_N: {
965 int o_bco = BCO_NEXT;
966 Sp[-2] = (W_)&stg_ctoi_R1n_info;
967 Sp[-1] = BCO_PTR(o_bco);
972 case bci_PUSH_ALTS_F: {
973 int o_bco = BCO_NEXT;
974 Sp[-2] = (W_)&stg_ctoi_F1_info;
975 Sp[-1] = BCO_PTR(o_bco);
980 case bci_PUSH_ALTS_D: {
981 int o_bco = BCO_NEXT;
982 Sp[-2] = (W_)&stg_ctoi_D1_info;
983 Sp[-1] = BCO_PTR(o_bco);
988 case bci_PUSH_ALTS_L: {
989 int o_bco = BCO_NEXT;
990 Sp[-2] = (W_)&stg_ctoi_L1_info;
991 Sp[-1] = BCO_PTR(o_bco);
996 case bci_PUSH_ALTS_V: {
997 int o_bco = BCO_NEXT;
998 Sp[-2] = (W_)&stg_ctoi_V_info;
999 Sp[-1] = BCO_PTR(o_bco);
1004 case bci_PUSH_APPLY_N:
1005 Sp--; Sp[0] = (W_)&stg_ap_n_info;
1007 case bci_PUSH_APPLY_V:
1008 Sp--; Sp[0] = (W_)&stg_ap_v_info;
1010 case bci_PUSH_APPLY_F:
1011 Sp--; Sp[0] = (W_)&stg_ap_f_info;
1013 case bci_PUSH_APPLY_D:
1014 Sp--; Sp[0] = (W_)&stg_ap_d_info;
1016 case bci_PUSH_APPLY_L:
1017 Sp--; Sp[0] = (W_)&stg_ap_l_info;
1019 case bci_PUSH_APPLY_P:
1020 Sp--; Sp[0] = (W_)&stg_ap_p_info;
1022 case bci_PUSH_APPLY_PP:
1023 Sp--; Sp[0] = (W_)&stg_ap_pp_info;
1025 case bci_PUSH_APPLY_PPP:
1026 Sp--; Sp[0] = (W_)&stg_ap_ppp_info;
1028 case bci_PUSH_APPLY_PPPP:
1029 Sp--; Sp[0] = (W_)&stg_ap_pppp_info;
1031 case bci_PUSH_APPLY_PPPPP:
1032 Sp--; Sp[0] = (W_)&stg_ap_ppppp_info;
1034 case bci_PUSH_APPLY_PPPPPP:
1035 Sp--; Sp[0] = (W_)&stg_ap_pppppp_info;
1038 case bci_PUSH_UBX: {
1040 int o_lits = BCO_NEXT;
1041 int n_words = BCO_NEXT;
1043 for (i = 0; i < n_words; i++) {
1044 Sp[i] = (W_)BCO_LIT(o_lits+i);
1052 /* a_1, .. a_n, b_1, .. b_by, s => a_1, .. a_n, s */
1057 INTERP_TICK(it_slides);
1061 case bci_ALLOC_AP: {
1063 int n_payload = BCO_NEXT;
1064 ap = (StgAP*)allocate(AP_sizeW(n_payload));
1066 ap->n_args = n_payload;
1067 SET_HDR(ap, &stg_AP_info, CCS_SYSTEM/*ToDo*/)
1072 case bci_ALLOC_AP_NOUPD: {
1074 int n_payload = BCO_NEXT;
1075 ap = (StgAP*)allocate(AP_sizeW(n_payload));
1077 ap->n_args = n_payload;
1078 SET_HDR(ap, &stg_AP_NOUPD_info, CCS_SYSTEM/*ToDo*/)
1083 case bci_ALLOC_PAP: {
1085 int arity = BCO_NEXT;
1086 int n_payload = BCO_NEXT;
1087 pap = (StgPAP*)allocate(PAP_sizeW(n_payload));
1089 pap->n_args = n_payload;
1091 SET_HDR(pap, &stg_PAP_info, CCS_SYSTEM/*ToDo*/)
1098 int stkoff = BCO_NEXT;
1099 int n_payload = BCO_NEXT;
1100 StgAP* ap = (StgAP*)Sp[stkoff];
1101 ASSERT((int)ap->n_args == n_payload);
1102 ap->fun = (StgClosure*)Sp[0];
1104 // The function should be a BCO, and its bitmap should
1105 // cover the payload of the AP correctly.
1106 ASSERT(get_itbl(ap->fun)->type == BCO
1107 && BCO_BITMAP_SIZE(ap->fun) == ap->n_args);
1109 for (i = 0; i < n_payload; i++)
1110 ap->payload[i] = (StgClosure*)Sp[i+1];
1112 IF_DEBUG(interpreter,
1113 debugBelch("\tBuilt ");
1114 printObj((StgClosure*)ap);
1121 int stkoff = BCO_NEXT;
1122 int n_payload = BCO_NEXT;
1123 StgPAP* pap = (StgPAP*)Sp[stkoff];
1124 ASSERT((int)pap->n_args == n_payload);
1125 pap->fun = (StgClosure*)Sp[0];
1127 // The function should be a BCO
1128 ASSERT(get_itbl(pap->fun)->type == BCO);
1130 for (i = 0; i < n_payload; i++)
1131 pap->payload[i] = (StgClosure*)Sp[i+1];
1133 IF_DEBUG(interpreter,
1134 debugBelch("\tBuilt ");
1135 printObj((StgClosure*)pap);
1141 /* Unpack N ptr words from t.o.s constructor */
1143 int n_words = BCO_NEXT;
1144 StgClosure* con = (StgClosure*)Sp[0];
1146 for (i = 0; i < n_words; i++) {
1147 Sp[i] = (W_)con->payload[i];
1154 int o_itbl = BCO_NEXT;
1155 int n_words = BCO_NEXT;
1156 StgInfoTable* itbl = INFO_PTR_TO_STRUCT(BCO_LIT(o_itbl));
1157 int request = CONSTR_sizeW( itbl->layout.payload.ptrs,
1158 itbl->layout.payload.nptrs );
1159 StgClosure* con = (StgClosure*)allocate_NONUPD(request);
1160 ASSERT( itbl->layout.payload.ptrs + itbl->layout.payload.nptrs > 0);
1161 SET_HDR(con, (StgInfoTable*)BCO_LIT(o_itbl), CCS_SYSTEM/*ToDo*/);
1162 for (i = 0; i < n_words; i++) {
1163 con->payload[i] = (StgClosure*)Sp[i];
1168 IF_DEBUG(interpreter,
1169 debugBelch("\tBuilt ");
1170 printObj((StgClosure*)con);
1175 case bci_TESTLT_P: {
1176 unsigned int discr = BCO_NEXT;
1177 int failto = BCO_NEXT;
1178 StgClosure* con = (StgClosure*)Sp[0];
1179 if (GET_TAG(con) >= discr) {
1185 case bci_TESTEQ_P: {
1186 unsigned int discr = BCO_NEXT;
1187 int failto = BCO_NEXT;
1188 StgClosure* con = (StgClosure*)Sp[0];
1189 if (GET_TAG(con) != discr) {
1195 case bci_TESTLT_I: {
1196 // There should be an Int at Sp[1], and an info table at Sp[0].
1197 int discr = BCO_NEXT;
1198 int failto = BCO_NEXT;
1199 I_ stackInt = (I_)Sp[1];
1200 if (stackInt >= (I_)BCO_LIT(discr))
1205 case bci_TESTEQ_I: {
1206 // There should be an Int at Sp[1], and an info table at Sp[0].
1207 int discr = BCO_NEXT;
1208 int failto = BCO_NEXT;
1209 I_ stackInt = (I_)Sp[1];
1210 if (stackInt != (I_)BCO_LIT(discr)) {
1216 case bci_TESTLT_D: {
1217 // There should be a Double at Sp[1], and an info table at Sp[0].
1218 int discr = BCO_NEXT;
1219 int failto = BCO_NEXT;
1220 StgDouble stackDbl, discrDbl;
1221 stackDbl = PK_DBL( & Sp[1] );
1222 discrDbl = PK_DBL( & BCO_LIT(discr) );
1223 if (stackDbl >= discrDbl) {
1229 case bci_TESTEQ_D: {
1230 // There should be a Double at Sp[1], and an info table at Sp[0].
1231 int discr = BCO_NEXT;
1232 int failto = BCO_NEXT;
1233 StgDouble stackDbl, discrDbl;
1234 stackDbl = PK_DBL( & Sp[1] );
1235 discrDbl = PK_DBL( & BCO_LIT(discr) );
1236 if (stackDbl != discrDbl) {
1242 case bci_TESTLT_F: {
1243 // There should be a Float at Sp[1], and an info table at Sp[0].
1244 int discr = BCO_NEXT;
1245 int failto = BCO_NEXT;
1246 StgFloat stackFlt, discrFlt;
1247 stackFlt = PK_FLT( & Sp[1] );
1248 discrFlt = PK_FLT( & BCO_LIT(discr) );
1249 if (stackFlt >= discrFlt) {
1255 case bci_TESTEQ_F: {
1256 // There should be a Float at Sp[1], and an info table at Sp[0].
1257 int discr = BCO_NEXT;
1258 int failto = BCO_NEXT;
1259 StgFloat stackFlt, discrFlt;
1260 stackFlt = PK_FLT( & Sp[1] );
1261 discrFlt = PK_FLT( & BCO_LIT(discr) );
1262 if (stackFlt != discrFlt) {
1268 // Control-flow ish things
1270 // Context-switch check. We put it here to ensure that
1271 // the interpreter has done at least *some* work before
1272 // context switching: sometimes the scheduler can invoke
1273 // the interpreter with context_switch == 1, particularly
1274 // if the -C0 flag has been given on the cmd line.
1275 if (context_switch) {
1276 Sp--; Sp[0] = (W_)&stg_enter_info;
1277 RETURN_TO_SCHEDULER(ThreadInterpret, ThreadYielding);
1282 tagged_obj = (StgClosure *)Sp[0];
1288 Sp[0] = (W_)&stg_gc_unpt_r1_info;
1289 goto do_return_unboxed;
1292 Sp[0] = (W_)&stg_gc_unbx_r1_info;
1293 goto do_return_unboxed;
1296 Sp[0] = (W_)&stg_gc_f1_info;
1297 goto do_return_unboxed;
1300 Sp[0] = (W_)&stg_gc_d1_info;
1301 goto do_return_unboxed;
1304 Sp[0] = (W_)&stg_gc_l1_info;
1305 goto do_return_unboxed;
1308 Sp[0] = (W_)&stg_gc_void_info;
1309 goto do_return_unboxed;
1312 int stkoff = BCO_NEXT;
1313 signed short n = (signed short)(BCO_NEXT);
1314 Sp[stkoff] += (W_)n;
1320 int stk_offset = BCO_NEXT;
1321 int o_itbl = BCO_NEXT;
1322 void(*marshall_fn)(void*) = (void (*)(void*))BCO_LIT(o_itbl);
1324 RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE
1325 + sizeofW(StgRetDyn);
1327 /* the stack looks like this:
1329 | | <- Sp + stk_offset
1333 | | <- Sp + ret_size + 1
1335 | C fun | <- Sp + ret_size
1340 ret is a placeholder for the return address, and may be
1343 We need to copy the args out of the TSO, because when
1344 we call suspendThread() we no longer own the TSO stack,
1345 and it may move at any time - indeed suspendThread()
1346 itself may do stack squeezing and move our args.
1347 So we make a copy of the argument block.
1351 #define ROUND_UP_WDS(p) ((((StgWord)(p)) + sizeof(W_)-1)/sizeof(W_))
1353 ffi_cif *cif = (ffi_cif *)marshall_fn;
1354 nat nargs = cif->nargs;
1358 W_ ret[2]; // max needed
1359 W_ *arguments[stk_offset]; // max needed
1360 void *argptrs[nargs];
1363 if (cif->rtype->type == FFI_TYPE_VOID) {
1364 // necessary because cif->rtype->size == 1 for void,
1365 // but the bytecode generator has not pushed a
1366 // placeholder in this case.
1369 ret_size = ROUND_UP_WDS(cif->rtype->size);
1372 memcpy(arguments, Sp+ret_size+1,
1373 sizeof(W_) * (stk_offset-1-ret_size));
1375 // libffi expects the args as an array of pointers to
1376 // values, so we have to construct this array before making
1378 p = (StgPtr)arguments;
1379 for (i = 0; i < nargs; i++) {
1380 argptrs[i] = (void *)p;
1381 // get the size from the cif
1382 p += ROUND_UP_WDS(cif->arg_types[i]->size);
1385 // this is the function we're going to call
1386 fn = (void(*)(void))Sp[ret_size];
1388 W_ arguments[stk_offset];
1389 memcpy(arguments, Sp, sizeof(W_) * stk_offset);
1392 // Restore the Haskell thread's current value of errno
1393 errno = cap->r.rCurrentTSO->saved_errno;
1395 // There are a bunch of non-ptr words on the stack (the
1396 // ccall args, the ccall fun address and space for the
1397 // result), which we need to cover with an info table
1398 // since we might GC during this call.
1400 // We know how many (non-ptr) words there are before the
1401 // next valid stack frame: it is the stk_offset arg to the
1402 // CCALL instruction. So we build a RET_DYN stack frame
1403 // on the stack frame to describe this chunk of stack.
1406 ((StgRetDyn *)Sp)->liveness = R1_PTR | N_NONPTRS(stk_offset);
1407 ((StgRetDyn *)Sp)->info = (StgInfoTable *)&stg_gc_gen_info;
1409 // save obj (pointer to the current BCO), since this
1410 // might move during the call. We use the R1 slot in the
1411 // RET_DYN frame for this, hence R1_PTR above.
1412 ((StgRetDyn *)Sp)->payload[0] = (StgClosure *)obj;
1414 SAVE_STACK_POINTERS;
1415 tok = suspendThread(&cap->r);
1417 // We already made a copy of the arguments above.
1419 ffi_call(cif, fn, ret, argptrs);
1421 marshall_fn ( arguments );
1424 // And restart the thread again, popping the RET_DYN frame.
1425 cap = (Capability *)((void *)((unsigned char*)resumeThread(tok) - sizeof(StgFunTable)));
1426 LOAD_STACK_POINTERS;
1428 // Re-load the pointer to the BCO from the RET_DYN frame,
1429 // it might have moved during the call. Also reload the
1430 // pointers to the components of the BCO.
1431 obj = ((StgRetDyn *)Sp)->payload[0];
1433 instrs = (StgWord16*)(bco->instrs->payload);
1434 literals = (StgWord*)(&bco->literals->payload[0]);
1435 ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
1439 // Save the Haskell thread's current value of errno
1440 cap->r.rCurrentTSO->saved_errno = errno;
1442 // Copy the return value back to the TSO stack. It is at
1443 // most 2 words large, and resides at arguments[0].
1445 memcpy(Sp, ret, sizeof(W_) * stg_min(stk_offset,ret_size));
1447 memcpy(Sp, arguments, sizeof(W_) * stg_min(stk_offset,2));
1454 /* BCO_NEXT modifies bciPtr, so be conservative. */
1455 int nextpc = BCO_NEXT;
1461 barf("interpretBCO: hit a CASEFAIL");
1465 barf("interpretBCO: unknown or unimplemented opcode %d",
1468 } /* switch on opcode */
1472 barf("interpretBCO: fell off end of the interpreter");