In GHC 6.12.x I found a rare deadlock caused by this
lock-order-reversal:
AQ cap->lock
startWorkerTask
newTask
AQ sched_mutex
scheduleCheckBlackHoles
AQ sched_mutex
unblockOne_
wakeupThreadOnCapabilty
AQ cap->lock
so sched_mutex and cap->lock are taken in a different order in two
places.
This doesn't happen in the HEAD because we don't have
scheduleCheckBlackHoles, but I thought it would be prudent to make
this less likely to happen in the future by using a different mutex in
newTask. We can clearly see that the all_tasks mutex cannot be
involved in a deadlock, because we never call anything else while
holding it.
#endif
// Task lists and global counters.
#endif
// Task lists and global counters.
-// Locks required: sched_mutex.
+// Locks required: all_tasks_mutex.
Task *all_tasks = NULL;
static nat taskCount;
static int tasksInitialized = 0;
Task *all_tasks = NULL;
static nat taskCount;
static int tasksInitialized = 0;
static Task * allocTask (void);
static Task * newTask (rtsBool);
static Task * allocTask (void);
static Task * newTask (rtsBool);
+#if defined(THREADED_RTS)
+static Mutex all_tasks_mutex;
+#endif
+
/* -----------------------------------------------------------------------------
* Remembering the current thread's Task
* -------------------------------------------------------------------------- */
/* -----------------------------------------------------------------------------
* Remembering the current thread's Task
* -------------------------------------------------------------------------- */
tasksInitialized = 1;
#if defined(THREADED_RTS) && !defined(MYTASK_USE_TLV)
newThreadLocalKey(¤tTaskKey);
tasksInitialized = 1;
#if defined(THREADED_RTS) && !defined(MYTASK_USE_TLV)
newThreadLocalKey(¤tTaskKey);
+ initMutex(&all_tasks_mutex);
Task *task, *next;
nat tasksRunning = 0;
Task *task, *next;
nat tasksRunning = 0;
- ASSERT_LOCK_HELD(&sched_mutex);
+ ACQUIRE_LOCK(&all_tasks_mutex);
for (task = all_tasks; task != NULL; task = next) {
next = task->all_link;
for (task = all_tasks; task != NULL; task = next) {
next = task->all_link;
tasksRunning);
all_tasks = NULL;
tasksRunning);
all_tasks = NULL;
+
+ RELEASE_LOCK(&all_tasks_mutex);
+
#if defined(THREADED_RTS) && !defined(MYTASK_USE_TLV)
#if defined(THREADED_RTS) && !defined(MYTASK_USE_TLV)
+ closeMutex(&all_tasks_mutex);
freeThreadLocalKey(¤tTaskKey);
#endif
freeThreadLocalKey(¤tTaskKey);
#endif
- ACQUIRE_LOCK(&sched_mutex);
+ ACQUIRE_LOCK(&all_tasks_mutex);
task->all_link = all_tasks;
all_tasks = task;
taskCount++;
task->all_link = all_tasks;
all_tasks = task;
taskCount++;
- RELEASE_LOCK(&sched_mutex);
+ RELEASE_LOCK(&all_tasks_mutex);
Task *task, *next;
// Wipe the task list, except the current Task.
Task *task, *next;
// Wipe the task list, except the current Task.
- ACQUIRE_LOCK(&sched_mutex);
+ ACQUIRE_LOCK(&all_tasks_mutex);
for (task = all_tasks; task != NULL; task=next) {
next = task->all_link;
if (task != keep) {
for (task = all_tasks; task != NULL; task=next) {
next = task->all_link;
if (task != keep) {
}
all_tasks = keep;
keep->all_link = NULL;
}
all_tasks = keep;
keep->all_link = NULL;
- RELEASE_LOCK(&sched_mutex);
+ RELEASE_LOCK(&all_tasks_mutex);