- change the type of StgRun(): it now returns the Capability that the
thread currently holds. The thread's return status is now stored
in cap->r.rRet (a new slot in the reg table).
This was necessary because, on return from StgRun(), the current
TSO may be blocked and so no longer belongs to us. If it is a bound
thread, its Task may already have been woken up on another
Capability, so the scheduler can't use task->cap to find the
Capability it currently owns.
- when shutting down, allow a bound thread to remove its TSO from
the run queue when exiting (eliminates an error condition in
releaseCapability()).
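
To make the new calling convention concrete, here is a sketch of the
scheduler-side sequence after this patch (the names are taken from the
diff below; regTableToCapability() is assumed to map a register-table
pointer back to its enclosing Capability):

    /* sketch: run a thread and recover the Capability afterwards */
    StgRegTable *r;
    r = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
    cap = regTableToCapability(r);   /* may differ from the old cap */
    ret = cap->r.rRet;               /* the old StgThreadReturnCode */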
MP_INT rmp_tmp2;
MP_INT rmp_result1;
MP_INT rmp_result2;
+ StgWord rRet; // holds the return code of the thread
#if defined(SMP) || defined(PAR)
StgSparkPool rSparks; /* per-task spark pool */
#endif
- // If this flag is set, we are running Haskell code. Used to detect
- // uses of 'foreign import unsafe' that should be 'safe'.
} StgRegTable;
#if IN_STG_CODE
field_offset(StgRegTable, rCurrentTSO);
field_offset(StgRegTable, rCurrentNursery);
field_offset(StgRegTable, rHpAlloc);
+ struct_field(StgRegTable, rRet);
// Needed for SMP builds
field_offset(StgRegTable, rmp_tmp_w);
* at all, it won't yield. Hopefully this won't be a problem in practice.
*/
+#define PRE_RETURN(why,what_next) \
+ StgTSO_what_next(CurrentTSO) = what_next::I16; \
+ StgRegTable_rRet(BaseReg) = why; \
+ R1 = BaseReg;
+
/* Remember that the return address is *removed* when returning to a
* ThreadRunGHC thread.
*/
R1 = StackOverflow; \
} \
sched: \
- StgTSO_what_next(CurrentTSO) = ThreadRunGHC::I16; \
+ PRE_RETURN(R1,ThreadRunGHC); \
-#define PRE_RETURN(why,what_next) \
- StgTSO_what_next(CurrentTSO) = what_next::I16; \
- R1 = why;
-
#define HP_GENERIC \
PRE_RETURN(HeapOverflow, ThreadRunGHC) \
jump stg_returnToSched;
SAVE_STACK_POINTERS; \
cap->r.rCurrentTSO->what_next = (todo); \
threadPaused(cap->r.rCurrentTSO); \
- return (retcode);
+ cap->r.rRet = (retcode); \
+ return cap;
#define RETURN_TO_SCHEDULER_NO_PAUSE(todo,retcode) \
- SAVE_STACK_POINTERS; \
- cap->r.rCurrentTSO->what_next = (todo); \
- return (retcode);
+ SAVE_STACK_POINTERS; \
+ cap->r.rCurrentTSO->what_next = (todo); \
+ cap->r.rRet = (retcode); \
+ return cap;
(W_)&stg_ap_pppppp_info,
};
interpretBCO (Capability* cap)
{
// Use of register here is primarily to make it clear to compilers
#ifndef INTERPRETER_H
#define INTERPRETER_H
-extern StgThreadReturnCode interpretBCO (Capability* cap);
+extern Capability *interpretBCO (Capability* cap);
#endif /* INTERPRETER_H */
while (TERMINATION_CONDITION) {
- ASSERT(cap->running_task == task);
- ASSERT(task->cap == cap);
- ASSERT(myTask() == task);
-
#if defined(GRAN)
/* Choose the processor with the next event */
CurrentProc = event->proc;
yieldCapability(&cap, task);
}
#endif
+
+ ASSERT(cap->running_task == task);
+ ASSERT(task->cap == cap);
+ ASSERT(myTask() == task);
// Check whether we have re-entered the RTS from Haskell without
// going via suspendThread()/resumeThread (i.e. a 'safe' foreign
deleteRunQueue(cap);
if (shutting_down_scheduler) {
IF_DEBUG(scheduler, sched_belch("shutting down"));
- if (task->tso) { // we are bound
- task->stat = Interrupted;
- task->ret = NULL;
- }
- return cap;
} else {
IF_DEBUG(scheduler, sched_belch("interrupted"));
}
recent_activity = ACTIVITY_YES;
switch (prev_what_next) {
case ThreadKilled:
case ThreadComplete:
/* Thread already finished, return to scheduler. */
ret = ThreadFinished;
break;
- ret = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
+ {
+ StgRegTable *r;
+ r = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
+ cap = regTableToCapability(r);
+ ret = r->rRet;
+ break;
+ }
- ret = interpretBCO(cap);
+ cap = interpretBCO(cap);
+ ret = cap->r.rRet;
- barf("schedule: invalid what_next field");
+ barf("schedule: invalid what_next field");
- // in SMP mode, we might return with a different capability than
- // we started with, if the Haskell thread made a foreign call. So
- // let's find out what our current Capability is:
- cap = task->cap;
-
cap->in_haskell = rtsFalse;
+#ifdef SMP
+ // If ret is ThreadBlocked, and this Task is bound to the TSO that
+ // blocked, we are in limbo - the TSO is now owned by whatever it
+ // is blocked on, and may in fact already have been woken up,
+ // perhaps even on a different Capability. It may be the case
+ // that task->cap != cap. We had better yield this Capability
+ // immediately and return to normality.
+ if (ret == ThreadBlocked) continue;
+#endif
+
+ ASSERT(cap->running_task == task);
+ ASSERT(task->cap == cap);
+ ASSERT(myTask() == task);
+
// The TSO might have moved, eg. if it re-entered the RTS and a GC
// happened. So find the new location:
t = cap->r.rCurrentTSO;
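
For reference, regTableToCapability() as used above can be implemented
with plain pointer arithmetic, given that the Capability embeds its
StgRegTable as the field r (a minimal sketch under that assumption;
the real definition lives in the Capability header, not in this patch):

    #include <stddef.h>   /* offsetof */

    /* sketch: recover the Capability that embeds this register table */
    static inline Capability *
    regTableToCapability (StgRegTable *reg)
    {
        return (Capability *)((char *)reg - offsetof(Capability, r));
    }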
any architecture (using miniinterpreter)
-------------------------------------------------------------------------- */
-StgThreadReturnCode StgRun(StgFunPtr f, StgRegTable *basereg STG_UNUSED)
+StgRegTable * StgRun(StgFunPtr f, StgRegTable *basereg STG_UNUSED)
{
while (f) {
IF_DEBUG(interpreter,
);
f = (StgFunPtr) (f)();
}
- return (StgThreadReturnCode)R1.i;
+ return (StgRegTable *)R1.p;
}
StgFunPtr StgReturn(void)
#define STG_GLOBAL ".global "
#endif
StgRun(StgFunPtr f, StgRegTable *basereg) {
unsigned char space[ RESERVED_C_STACK_BYTES + 4*sizeof(void *) ];
-extern StgThreadReturnCode StgRun(StgFunPtr f, StgRegTable *basereg);
+extern StgRegTable * StgRun(StgFunPtr f, StgRegTable *basereg);
static void StgRunIsImplementedInAssembler(void)
{
StgRun(StgFunPtr f, StgRegTable *basereg) {
unsigned char space[RESERVED_C_STACK_BYTES];
__asm__ volatile ("ld %1,%0"
: "=r" (i7) : "m" (((void **)(space))[100]));
#endif
- return (StgThreadReturnCode)R1.i;
+ return (StgRegTable *)R1.i;
StgRun(StgFunPtr f, StgRegTable *basereg)
{
register long real_ra __asm__("$26"); volatile long save_ra;
register StgFunPtr real_pv __asm__("$27");
- StgThreadReturnCode ret;
save_ra = real_ra;
save_gp = real_gp;
StgRun(StgFunPtr f, StgRegTable *basereg)
{
StgChar space[RESERVED_C_STACK_BYTES+16*sizeof(long)+10*sizeof(double)];
- StgThreadReturnCode ret;
__asm__ volatile ("ldo %0(%%r30),%%r19\n"
"\tstw %%r3, 0(0,%%r19)\n"
-extern StgThreadReturnCode StgRun(StgFunPtr f, StgRegTable *basereg);
+extern StgRegTable * StgRun(StgFunPtr f, StgRegTable *basereg);
#ifdef darwin_HOST_OS
static void StgRunIsImplementedInAssembler(void)
#ifdef powerpc64_HOST_ARCH
#ifdef linux_HOST_OS
-extern StgThreadReturnCode StgRun(StgFunPtr f, StgRegTable *basereg);
+extern StgRegTable * StgRun(StgFunPtr f, StgRegTable *basereg);
static void StgRunIsImplementedInAssembler(void)
{
#ifndef STGRUN_H
#define STGRUN_H
-extern StgThreadReturnCode StgRun(StgFunPtr f, StgRegTable *basereg);
+extern StgRegTable * StgRun(StgFunPtr f, StgRegTable *basereg);
- /* R1 contains the return value of the thread */
- R1 = ThreadFinished;
+ /* The return code goes in BaseReg->rRet, and BaseReg is returned in R1 */
+ StgRegTable_rRet(BaseReg) = ThreadFinished;
+ R1 = BaseReg;
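
Taken together with the StgCRun.c changes above, the exit path is now
uniform: the thread stores its return code in BaseReg->rRet and puts
BaseReg in R1, StgReturn() drops back into StgRun()'s epilogue, and the
C side casts R1 back to an StgRegTable *, from which the scheduler
recovers the Capability. No architecture-specific StgRun() variant
needs to know about thread return codes any more.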