/* if this flag is set as well, give up execution */
rtsBool interrupted = rtsFalse;
+/* If this flag is set, we are running Haskell code. Used to detect
+ * uses of 'foreign import unsafe' that should be 'safe'.
+ */
+rtsBool in_haskell = rtsFalse;
+
/* Next thread ID to allocate.
* Locks required: thread_id_mutex
*/
// We might have a capability, passed in as initialCapability.
cap = initialCapability;
+	// Check whether we have re-entered the RTS from Haskell without
+	// going via suspendThread()/resumeThread() (i.e. an unsafe foreign
+	// call that should have been declared 'safe').
+ if (in_haskell) {
+ errorBelch("schedule: re-entered unsafely.\n"
+ " Perhaps a 'foreign import unsafe' should be 'safe'?");
+ stg_exit(1);
+ }
+
#if defined(RTS_SUPPORTS_THREADS)
//
// in the threaded case, the capability is either passed in via the
// ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
#endif
-#if defined(RTS_SUPPORTS_THREADS)
+#if defined(RTS_SUPPORTS_THREADS) || defined(mingw32_HOST_OS)
+ /* win32: might be back here due to awaitEvent() being abandoned
+ * as a result of a console event having been delivered.
+ */
if ( EMPTY_RUN_QUEUE() ) {
continue; // nothing to do
}
prev_what_next = t->what_next;
errno = t->saved_errno;
+ in_haskell = rtsTrue;
switch (prev_what_next) {
barf("schedule: invalid what_next field");
}
+ in_haskell = rtsFalse;
+
// The TSO might have moved, so find the new location:
t = cap->r.rCurrentTSO;
// partially-evaluated thunks on the heap.
raiseAsync_(t, NULL, rtsTrue);
+#ifdef REG_R1
ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
+#endif
}
}
}
* Singleton fork(). Do not copy any running threads.
* ------------------------------------------------------------------------- */
-#ifndef mingw32_TARGET_OS
+#ifndef mingw32_HOST_OS
#define FORKPROCESS_PRIMOP_SUPPORTED
#endif
RELEASE_LOCK(&sched_mutex);
errno = saved_errno;
+ in_haskell = rtsFalse;
return tok;
}
cap->r.rCurrentTSO = tso;
RELEASE_LOCK(&sched_mutex);
errno = saved_errno;
+ in_haskell = rtsTrue;
return &cap->r;
}
case BlockedOnRead:
case BlockedOnWrite:
-#if defined(mingw32_TARGET_OS)
+#if defined(mingw32_HOST_OS)
case BlockedOnDoProc:
#endif
{
case BlockedOnRead:
case BlockedOnWrite:
-#if defined(mingw32_TARGET_OS)
+#if defined(mingw32_HOST_OS)
case BlockedOnDoProc:
#endif
{
ASSERT(stop_at_atomically);
ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
stmCondemnTransaction(tso -> trec);
+#ifdef REG_R1
tso->sp = frame;
+#else
+ // R1 is not a register: the return convention for IO in
+ // this case puts the return value on the stack, so we
+ // need to set up the stack to return to the atomically
+ // frame properly...
+ tso->sp = frame - 2;
+ tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
+ tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;
+#endif
tso->what_next = ThreadRunGHC;
return;
}
}
}
-
+
/* -----------------------------------------------------------------------------
resurrectThreads is called after garbage collection on the list of
threads found to be garbage. Each of these threads will be woken
case BlockedOnWrite:
debugBelch("is blocked on write to fd %d", tso->block_info.fd);
break;
-#if defined(mingw32_TARGET_OS)
+#if defined(mingw32_HOST_OS)
case BlockedOnDoProc:
debugBelch("is blocked on proc (request: %d)", tso->block_info.async_result->reqID);
break;