#include "SMP.h"
#include "STM.h"
#include "Storage.h"
#include "Trace.h"

#include <stdio.h>
#include <stdlib.h>
// If SHAKE is defined then validation will sometimes spuriously fail.  This
// helps test unusual code paths if genuine contention is rare.

// STM debug tracing: routed through the RTS trace framework so it is
// controlled by the DEBUG_stm trace class rather than ad-hoc debugBelch
// calls (the framework already tags output with the current task).
#define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)
#ifdef SHAKE
static const int do_shake = TRUE;
}
// Wake a TSO that was parked on an STM wait queue, if it is still blocked.
//
// We will continue unparking threads while they remain on one of the wait
// queues: it's up to the thread itself to remove itself from the wait queues
// if it decides to do so when it is scheduled.
static void unpark_tso(Capability *cap, StgTSO *tso) {
    // Unblocking a TSO from BlockedOnSTM is done under the TSO lock,
    // to avoid multiple CPUs unblocking the same TSO, and also to
    // synchronise with throwTo().
    lockTSO(tso);
    if (tso -> why_blocked == BlockedOnSTM) {
        TRACE("unpark_tso on tso=%p\n", tso);
        unblockOne(cap,tso);
    } else {
        // The thread was already woken (or unblocked by another path);
        // nothing to do.
        TRACE("spurious unpark_tso on tso=%p\n", tso);
    }
    unlockTSO(tso);
}
static void unpark_waiters_on(Capability *cap, StgTVar *s) {