3 * Non-blocking / asynchronous I/O for Win32.
8 #if !defined(THREADED_RTS)
11 #include "IOManager.h"
12 #include "WorkQueue.h"
13 #include "ConsoleHandler.h"
 * Internal state maintained by the IO manager.
 */
typedef struct IOManagerState {
    /* Source of unique request IDs handed out by AddIORequest() & friends.
     * NOTE(review): incremented outside any lock — presumably only ever
     * bumped from the thread(s) that submit requests; confirm. */
    unsigned int requestID;
    /* fields for keeping track of active WorkItems */
    CritSection active_work_lock;   /* guards 'active_work_items' */
    WorkItem* active_work_items;    /* singly-linked list of in-flight requests */

/* ToDo: wrap up this state via a IOManager handle instead? */
static IOManagerState* ioMan;

/* Bookkeeping of in-flight work items (definitions at end of file);
 * RegisterWorkItem() is paired with DeregisterWorkItem(). */
static void RegisterWorkItem ( IOManagerState* iom, WorkItem* wi);
static void DeregisterWorkItem( IOManagerState* iom, WorkItem* wi);
 * The routine executed by each worker thread.
 */
IOWorkerProc(PVOID param)
{
    /* The shared manager state is handed to every worker at creation time. */
    IOManagerState* iom = (IOManagerState*)param;
    WorkQueue* pq = iom->workQueue;

    /* Each pass waits on two handles:
     *   [0] the manager's shutdown event,
     *   [1] the work queue's "work available" handle. */
    hWaits[0] = (HANDLE)iom->hExitEvent;
    hWaits[1] = GetWorkQueueHandle(pq);

    /* The error code is communicated back on completion of request; reset. */

    EnterCriticalSection(&iom->manLock);
    /* Signal that the worker is idle.
     *
     * 'workersIdle' is used when determining whether or not to
     * increase the worker thread pool when adding a new request.
     * (see addIORequest().)
     */
    LeaveCriticalSection(&iom->manLock);

    /* Block until there is work to do or we're asked to shut down.
     * A possible future refinement is to make long-term idle threads
     * wake up and decide to shut down should the number of idle threads
     * be above some threshold.
     */
    rc = WaitForMultipleObjects( 2, hWaits, FALSE, INFINITE );

    if (rc == WAIT_OBJECT_0) {
        // we received the exit event
        EnterCriticalSection(&iom->manLock);
        LeaveCriticalSection(&iom->manLock);

    EnterCriticalSection(&iom->manLock);
    /* Signal that the thread is 'non-idle' and about to consume
     * a work item off the queue. */
    LeaveCriticalSection(&iom->manLock);

    if ( rc == (WAIT_OBJECT_0 + 1) ) {
        /* work item available, fetch it. */
        if (FetchWork(pq,(void**)&work)) {
            /* Make the item visible to abandonWorkRequest(). */
            RegisterWorkItem(iom,work);
            if ( work->workKind & WORKER_READ ) {
                if ( work->workKind & WORKER_FOR_SOCKET ) {
                    /* Blocking socket read; the worker thread absorbs the
                     * block on behalf of the Haskell thread. */
                    len = recv(work->workData.ioData.fd,
                               work->workData.ioData.buf,
                               work->workData.ioData.len,
                    if (len == SOCKET_ERROR) {
                        errCode = WSAGetLastError();
                    /* Do the read(), with extra-special handling for Ctrl+C */
                    len = read(work->workData.ioData.fd,
                               work->workData.ioData.buf,
                               work->workData.ioData.len);
                    if ( len == 0 && work->workData.ioData.len != 0 ) {
                        /* Given the following scenario:
                         *  - a console handler has been registered that handles Ctrl+C
                         *  - we've not tweaked the 'console mode' settings to turn on
                         *    ENABLE_PROCESSED_INPUT.
                         *  - we're blocked waiting on input from standard input.
                         *  - the user hits Ctrl+C.
                         *
                         * The OS will invoke the console handler (in a separate OS thread),
                         * and the above read() (i.e., under the hood, a ReadFile() op) returns
                         * 0, with the error set to ERROR_OPERATION_ABORTED. We don't
                         * want to percolate this error condition back to the Haskell user.
                         * Do this by waiting for the completion of the Haskell console handler.
                         * If upon completion of the console handler routine, the Haskell thread
                         * that issued the request is found to have been thrown an exception,
                         * the worker abandons the request (since that's what the Haskell thread
                         * has done.) If the Haskell thread hasn't been interrupted, the worker
                         * retries the read request as if nothing happened.
                         */
                        if ( (GetLastError()) == ERROR_OPERATION_ABORTED ) {
                            /* For now, only abort when dealing with the standard input handle.
                             * i.e., for all others, an error is raised.
                             */
                            HANDLE h = (HANDLE)GetStdHandle(STD_INPUT_HANDLE);
                            if ( _get_osfhandle(work->workData.ioData.fd) == (long)h ) {
                                if (rts_waitConsoleHandlerCompletion()) {
                                    /* If the Scheduler has set work->abandonOp, the Haskell thread has
                                     * been thrown an exception (=> the worker must abandon this request.)
                                     * We test for this below before invoking the on-completion routine.
                                     */
                                    if (work->abandonOp) {
                            break; /* Treat it like an error */
            /* Non-socket read failure is reported via errno. */
            if (len == -1) { errCode = errno; }
            /* Result delivered to the completion proc: the caller's buffer. */
            complData = work->workData.ioData.buf;
            fd = work->workData.ioData.fd;
            } else if ( work->workKind & WORKER_WRITE ) {
                if ( work->workKind & WORKER_FOR_SOCKET ) {
                    /* Blocking socket write, mirroring the read case above. */
                    len = send(work->workData.ioData.fd,
                               work->workData.ioData.buf,
                               work->workData.ioData.len,
                    if (len == SOCKET_ERROR) {
                        errCode = WSAGetLastError();
                    len = write(work->workData.ioData.fd,
                                work->workData.ioData.buf,
                                work->workData.ioData.len);
                    if (len == -1) { errCode = errno; }
                complData = work->workData.ioData.buf;
                fd = work->workData.ioData.fd;
            } else if ( work->workKind & WORKER_DELAY ) {
                /* Approximate implementation of threadDelay;
                 * Note: Sleep() is in milliseconds, not micros.
                 * NOTE(review): the "+999)/1000" conversion implies the
                 * 'msecs' field actually carries microseconds despite its
                 * name — confirm against AddDelayRequest()'s callers. */
                Sleep((work->workData.delayData.msecs + 999) / 1000);
                len = work->workData.delayData.msecs;
            } else if ( work->workKind & WORKER_DO_PROC ) {
                /* perform operation/proc on behalf of Haskell thread. */
                if (work->workData.procData.proc) {
                    /* The procedure is assumed to encode result + success/failure
                     * in its return value. */
                    errCode=work->workData.procData.proc(work->workData.procData.param);
                complData = work->workData.procData.param;
            fprintf(stderr, "unknown work request type (%d) , ignoring.\n", work->workKind);

            /* Only report back if the Scheduler hasn't abandoned the request. */
            if (!work->abandonOp) {
                work->onCompletion(work->requestID,
            /* Free the WorkItem */
            DeregisterWorkItem(iom,work);
        fprintf(stderr, "unable to fetch work; fatal.\n"); fflush(stderr);
        EnterCriticalSection(&iom->manLock);
        LeaveCriticalSection(&iom->manLock);
    fprintf(stderr, "waiting failed (%lu); fatal.\n", rc); fflush(stderr);
    EnterCriticalSection(&iom->manLock);
    LeaveCriticalSection(&iom->manLock);
/* Spawn one additional worker thread running IOWorkerProc().
 * Uses _beginthreadex() (CRT wrapper over CreateThread()) so that C runtime
 * per-thread state is initialised; returns non-zero on success. */
NewIOWorkerThread(IOManagerState* iom)
{
    return ( 0 != _beginthreadex(NULL,
    /* No work queue => the manager cannot operate; bail out. */
    if ( !wq ) return FALSE;

    /* NOTE(review): the malloc() result is dereferenced below; the NULL
     * check (if any) lives in an elided portion of this function — confirm. */
    ioMan = (IOManagerState*)malloc(sizeof(IOManagerState));

    /* A manual-reset event */
    hExit = CreateEvent ( NULL, TRUE, FALSE, NULL );

    /* Wire up the freshly allocated manager state. */
    ioMan->hExitEvent = hExit;
    InitializeCriticalSection(&ioMan->manLock);
    ioMan->workQueue = wq;
    ioMan->numWorkers = 0;     /* pool grows on demand, see depositWorkItem() */
    ioMan->workersIdle = 0;
    ioMan->queueSize = 0;
    ioMan->requestID = 1;      /* 0 is never handed out as a request ID */
    InitializeCriticalSection(&ioMan->active_work_lock);
    ioMan->active_work_items = NULL;
 * Function: depositWorkItem()
 *
 * Local function which deposits a WorkItem onto a work queue,
 * deciding in the process whether or not the thread pool needs
 * to be augmented with another thread to handle the new request.
 */
depositWorkItem( unsigned int reqID,
    EnterCriticalSection(&ioMan->manLock);

    /* (debug trace of pool occupancy) */
    fprintf(stderr, "depositWorkItem: %d/%d\n", ioMan->workersIdle, ioMan->numWorkers);

    /* A new worker thread is created when there are fewer idle threads
     * than non-consumed queue requests. This ensures that requests will
     * be dealt with in a timely manner.
     *
     * [Long explanation of why the previous thread pool policy lead to
     *  trouble:]
     *
     * Previously, the thread pool was augmented iff no idle worker threads
     * were available. That strategy runs the risk of repeatedly adding to
     * the request queue without expanding the thread pool to handle this
     * sudden spike in queued requests.
     * [How? Assume workersIdle is 1, and addIORequest() is called. No new
     * thread is created and the request is simply queued. If addIORequest()
     * is called again _before the OS schedules a worker thread to pull the
     * request off the queue_, workersIdle is still 1 and another request is
     * simply added to the queue. Once the worker thread is run, only one
     * request is de-queued, leaving the 2nd request in the queue]
     *
     * Assuming none of the queued requests take an inordinate amount of to
     * complete, the request queue would eventually be drained. But if that's
     * not the case, the later requests will end up languishing in the queue
     * indefinitely. The non-timely handling of requests may cause CH applications
     * to misbehave / hang; bad.
     */
    if ( (ioMan->workersIdle < ioMan->queueSize) ) {
        /* see if giving up our quantum ferrets out some idle threads.
         */
        LeaveCriticalSection(&ioMan->manLock);
        EnterCriticalSection(&ioMan->manLock);
        /* Re-test under the lock: the yield above may have let an idle
         * worker drain the queue. */
        if ( (ioMan->workersIdle < ioMan->queueSize) ) {
            /* No, go ahead and create another. */
            if (!NewIOWorkerThread(ioMan)) {
    LeaveCriticalSection(&ioMan->manLock);

    if (SubmitWork(ioMan->workQueue,wItem)) {
        /* Note: the work item has potentially been consumed by a worker thread
         * (and freed) at this point, so we cannot use wItem's requestID.
         */
364 * Function: AddIORequest()
366 * Conduit to underlying WorkQueue's SubmitWork(); adds IO
367 * request to work queue, deciding whether or not to augment
368 * the thread pool in the process.
371 AddIORequest ( int fd,
376 CompletionProc onCompletion)
378 WorkItem* wItem = (WorkItem*)malloc(sizeof(WorkItem));
379 unsigned int reqID = ioMan->requestID++;
380 if (!ioMan || !wItem) return 0;
382 /* Fill in the blanks */
383 wItem->workKind = ( isSocket ? WORKER_FOR_SOCKET : 0 ) |
384 ( forWriting ? WORKER_WRITE : WORKER_READ );
385 wItem->workData.ioData.fd = fd;
386 wItem->workData.ioData.len = len;
387 wItem->workData.ioData.buf = buffer;
390 wItem->onCompletion = onCompletion;
391 wItem->requestID = reqID;
393 return depositWorkItem(reqID, wItem);
397 * Function: AddDelayRequest()
399 * Like AddIORequest(), but this time adding a delay request to
403 AddDelayRequest ( unsigned int msecs,
404 CompletionProc onCompletion)
406 WorkItem* wItem = (WorkItem*)malloc(sizeof(WorkItem));
407 unsigned int reqID = ioMan->requestID++;
408 if (!ioMan || !wItem) return FALSE;
410 /* Fill in the blanks */
411 wItem->workKind = WORKER_DELAY;
412 wItem->workData.delayData.msecs = msecs;
413 wItem->onCompletion = onCompletion;
414 wItem->requestID = reqID;
417 return depositWorkItem(reqID, wItem);
421 * Function: AddProcRequest()
423 * Add an asynchronous procedure request.
426 AddProcRequest ( void* proc,
428 CompletionProc onCompletion)
430 WorkItem* wItem = (WorkItem*)malloc(sizeof(WorkItem));
431 unsigned int reqID = ioMan->requestID++;
432 if (!ioMan || !wItem) return FALSE;
434 /* Fill in the blanks */
435 wItem->workKind = WORKER_DO_PROC;
436 wItem->workData.procData.proc = proc;
437 wItem->workData.procData.param = param;
438 wItem->onCompletion = onCompletion;
439 wItem->requestID = reqID;
440 wItem->abandonOp = 0;
443 return depositWorkItem(reqID, wItem);
446 void ShutdownIOManager ( void )
450 SetEvent(ioMan->hExitEvent);
452 /* Wait for all worker threads to die. */
454 EnterCriticalSection(&ioMan->manLock);
455 num = ioMan->numWorkers;
456 LeaveCriticalSection(&ioMan->manLock);
461 FreeWorkQueue(ioMan->workQueue);
462 CloseHandle(ioMan->hExitEvent);
/* Keep track of WorkItems currently being serviced. */

/* Push 'wi' onto the head of the manager's active-item list so that
 * abandonWorkRequest() can find in-flight requests by ID.
 * Paired with DeregisterWorkItem(); list is guarded by active_work_lock. */
RegisterWorkItem(IOManagerState* ioMan,
    EnterCriticalSection(&ioMan->active_work_lock);
    /* cons onto the list head */
    wi->link = ioMan->active_work_items;
    ioMan->active_work_items = wi;
    LeaveCriticalSection(&ioMan->active_work_lock);
/* Remove 'wi' (matched by requestID) from the active-item list; called by
 * a worker once a request completes or is abandoned.  Logs if the item is
 * not found, which indicates a bookkeeping bug. */
DeregisterWorkItem(IOManagerState* ioMan,
    WorkItem *ptr, *prev;

    EnterCriticalSection(&ioMan->active_work_lock);
    /* Classic singly-linked-list unlink, tracking the predecessor node. */
    for(prev=NULL,ptr=ioMan->active_work_items;ptr;prev=ptr,ptr=ptr->link) {
        if (wi->requestID == ptr->requestID) {
            /* (head-of-list case: advance the list head) */
            ioMan->active_work_items = ptr->link;
            /* (interior case: splice around 'ptr') */
            prev->link = ptr->link;
            LeaveCriticalSection(&ioMan->active_work_lock);
    /* Fell off the list without a match — should not happen for an item
     * that went through RegisterWorkItem(). */
    fprintf(stderr, "DeregisterWorkItem: unable to locate work item %d\n", wi->requestID);
    LeaveCriticalSection(&ioMan->active_work_lock);
 * Function: abandonWorkRequest()
 *
 * Signal that a work request isn't of interest. Called by the Scheduler
 * if a blocked Haskell thread has an exception thrown to it.
 *
 * Note: we're not aborting the system call that a worker might be blocked on
 * here, just disabling the propagation of its result once its finished. We
 * may have to go the whole hog here and switch to overlapped I/O so that we
 * can abort blocked system calls.
 */
abandonWorkRequest ( int reqID )
    EnterCriticalSection(&ioMan->active_work_lock);
    /* Linear scan of the active list for the matching request ID;
     * the list only holds requests currently being serviced, so it
     * is expected to be short. */
    for(ptr=ioMan->active_work_items;ptr;ptr=ptr->link) {
        if (ptr->requestID == (unsigned int)reqID ) {
            LeaveCriticalSection(&ioMan->active_work_lock);
    /* Note: if the request ID isn't present, the worker will have
     * finished sometime since awaitRequests() last drained the completed
     * request table; i.e., not an error.
     */
    LeaveCriticalSection(&ioMan->active_work_lock);