3 * Non-blocking / asynchronous I/O for Win32.
10 #include "ConsoleHandler.h"
18 * Internal state maintained by the IO manager.
/* Internal bookkeeping for the I/O manager.  NOTE(review): this listing is
 * elided -- several fields referenced elsewhere (manLock, workQueue,
 * numWorkers, workersIdle, queueSize, hExitEvent; see StartIOManager())
 * fall in the gaps between the lines shown, as does the closing brace. */
20 typedef struct IOManagerState {
/* Monotonically increasing ID handed out to each queued request by the
 * Add*Request() entry points (first live ID is 1; see StartIOManager()). */
27 unsigned int requestID;
28 /* fields for keeping track of active WorkItems */
/* Protects 'active_work_items'. */
29 CritSection active_work_lock;
/* Singly-linked list (via WorkItem.link) of requests currently being
 * serviced; maintained by RegisterWorkItem()/DeregisterWorkItem() and
 * searched by abandonWorkRequest(). */
30 WorkItem* active_work_items;
33 /* ToDo: wrap up this state via a IOManager handle instead? */
/* The single, global I/O manager instance; created by StartIOManager(). */
34 static IOManagerState* ioMan;
/* Forward declarations for the active-work bookkeeping helpers below. */
36 static void RegisterWorkItem ( IOManagerState* iom, WorkItem* wi);
37 static void DeregisterWorkItem( IOManagerState* iom, WorkItem* wi);
40 * The routine executed by each worker thread.
/* Worker thread entry point (started via _beginthreadex() in
 * NewIOWorkerThread()).  Loop: mark self idle, block on either the
 * shutdown event or the "work available" handle, service one request
 * (socket/file read or write, delay, or arbitrary proc), then report
 * the outcome through the request's onCompletion callback.
 * NOTE(review): this listing is elided; declarations (rc, len, errCode,
 * work, complData, fd), the enclosing loop construct, and several
 * closing braces fall in the gaps between the lines shown. */
45 IOWorkerProc(PVOID param)
49 IOManagerState* iom = (IOManagerState*)param;
50 WorkQueue* pq = iom->workQueue;
/* hWaits[0] = shutdown signal, hWaits[1] = work-queue wait handle. */
56 hWaits[0] = (HANDLE)iom->hExitEvent;
57 hWaits[1] = GetWorkQueueHandle(pq);
60 /* The error code is communicated back on completion of request; reset. */
63 EnterCriticalSection(&iom->manLock);
64 /* Signal that the worker is idle.
66 * 'workersIdle' is used when determining whether or not to
67 * increase the worker thread pool when adding a new request.
68 * (see addIORequest().)
71 LeaveCriticalSection(&iom->manLock);
74 * A possible future refinement is to make long-term idle threads
75 * wake up and decide to shut down should the number of idle threads
76 * be above some threshold.
/* Block until told to exit (index 0) or until work arrives (index 1). */
79 rc = WaitForMultipleObjects( 2, hWaits, FALSE, INFINITE );
81 if (rc == WAIT_OBJECT_0) {
82 // we received the exit event
83 EnterCriticalSection(&iom->manLock);
/* (elided) presumably adjusts worker bookkeeping under the lock before
 * the thread returns -- TODO confirm against the full source. */
85 LeaveCriticalSection(&iom->manLock);
89 EnterCriticalSection(&iom->manLock);
90 /* Signal that the thread is 'non-idle' and about to consume
95 LeaveCriticalSection(&iom->manLock);
97 if ( rc == (WAIT_OBJECT_0 + 1) ) {
98 /* work item available, fetch it. */
99 if (FetchWork(pq,(void**)&work)) {
/* Make the in-flight request visible to abandonWorkRequest(). */
101 RegisterWorkItem(iom,work);
102 if ( work->workKind & WORKER_READ ) {
103 if ( work->workKind & WORKER_FOR_SOCKET ) {
/* Blocking socket read; flags argument sits in an elided line. */
104 len = recv(work->workData.ioData.fd,
105 work->workData.ioData.buf,
106 work->workData.ioData.len,
108 if (len == SOCKET_ERROR) {
109 errCode = WSAGetLastError();
113 /* Do the read(), with extra-special handling for Ctrl+C */
114 len = read(work->workData.ioData.fd,
115 work->workData.ioData.buf,
116 work->workData.ioData.len);
/* len == 0 on a non-empty request => possible aborted ReadFile(). */
117 if ( len == 0 && work->workData.ioData.len != 0 ) {
118 /* Given the following scenario:
119 * - a console handler has been registered that handles Ctrl+C
121 * - we've not tweaked the 'console mode' settings to turn on
122 * ENABLE_PROCESSED_INPUT.
123 * - we're blocked waiting on input from standard input.
124 * - the user hits Ctrl+C.
126 * The OS will invoke the console handler (in a separate OS thread),
127 * and the above read() (i.e., under the hood, a ReadFile() op) returns
128 * 0, with the error set to ERROR_OPERATION_ABORTED. We don't
129 * want to percolate this error condition back to the Haskell user.
130 * Do this by waiting for the completion of the Haskell console handler.
131 * If upon completion of the console handler routine, the Haskell thread
132 * that issued the request is found to have been thrown an exception,
133 * the worker abandons the request (since that's what the Haskell thread
134 * has done.) If the Haskell thread hasn't been interrupted, the worker
135 * retries the read request as if nothing happened.
137 if ( (GetLastError()) == ERROR_OPERATION_ABORTED ) {
138 /* For now, only abort when dealing with the standard input handle.
139 * i.e., for all others, an error is raised.
141 HANDLE h = (HANDLE)GetStdHandle(STD_INPUT_HANDLE);
/* NOTE(review): casting a HANDLE to 'long' truncates on 64-bit
 * Windows (LLP64: long is 32 bits); this comparison should go via
 * intptr_t -- flagged here, not changed in this doc pass. */
142 if ( _get_osfhandle(work->workData.ioData.fd) == (long)h ) {
143 if (rts_waitConsoleHandlerCompletion()) {
144 /* If the Scheduler has set work->abandonOp, the Haskell thread has
145 * been thrown an exception (=> the worker must abandon this request.)
146 * We test for this below before invoking the on-completion routine.
148 if (work->abandonOp) {
155 break; /* Treat it like an error */
164 if (len == -1) { errCode = errno; }
/* Hand the buffer and fd back to the completion callback. */
166 complData = work->workData.ioData.buf;
167 fd = work->workData.ioData.fd;
168 } else if ( work->workKind & WORKER_WRITE ) {
169 if ( work->workKind & WORKER_FOR_SOCKET ) {
/* Blocking socket write; flags argument sits in an elided line. */
170 len = send(work->workData.ioData.fd,
171 work->workData.ioData.buf,
172 work->workData.ioData.len,
174 if (len == SOCKET_ERROR) {
175 errCode = WSAGetLastError();
178 len = write(work->workData.ioData.fd,
179 work->workData.ioData.buf,
180 work->workData.ioData.len);
181 if (len == -1) { errCode = errno; }
183 complData = work->workData.ioData.buf;
184 fd = work->workData.ioData.fd;
185 } else if ( work->workKind & WORKER_DELAY ) {
186 /* Approximate implementation of threadDelay;
188 * Note: Sleep() is in milliseconds, not micros.
/* NOTE(review): the field is named 'msecs' yet is divided by 1000
 * before the millisecond-based Sleep(); the field therefore appears
 * to actually hold microseconds -- confirm and consider renaming. */
190 Sleep(work->workData.delayData.msecs / 1000);
191 len = work->workData.delayData.msecs;
195 } else if ( work->workKind & WORKER_DO_PROC ) {
196 /* perform operation/proc on behalf of Haskell thread. */
197 if (work->workData.procData.proc) {
198 /* The procedure is assumed to encode result + success/failure
201 errCode=work->workData.procData.proc(work->workData.procData.param);
205 complData = work->workData.procData.param;
/* Unknown workKind bits: log and drop the request. */
207 fprintf(stderr, "unknown work request type (%d) , ignoring.\n", work->workKind);
/* Only invoke the completion callback if the Scheduler hasn't
 * abandoned the request (see abandonWorkRequest()). */
211 if (!work->abandonOp) {
212 work->onCompletion(work->requestID,
218 /* Free the WorkItem */
219 DeregisterWorkItem(iom,work);
222 fprintf(stderr, "unable to fetch work; fatal.\n"); fflush(stderr);
226 fprintf(stderr, "waiting failed (%lu); fatal.\n", rc); fflush(stderr);
/* Spin up one additional worker thread running IOWorkerProc() over this
 * manager's work queue.  Returns non-zero iff _beginthreadex() succeeded
 * (the remaining arguments of the call sit in elided lines). */
235 NewIOWorkerThread(IOManagerState* iom)
238 return ( 0 != _beginthreadex(NULL,
/* One-time initialisation of the global I/O manager: allocate the state
 * block, create the shutdown event and locks, and wire up the work queue.
 * NOTE(review): this listing is elided -- the function's opening lines
 * (including creation of 'wq') and the error paths for the allocations
 * shown presumably sit in the gaps; TODO confirm against the full file. */
253 if ( !wq ) return FALSE;
/* NOTE(review): assume the malloc() result is NULL-checked in an elided
 * line before the field assignments below -- verify. */
255 ioMan = (IOManagerState*)malloc(sizeof(IOManagerState));
262 /* A manual-reset event */
263 hExit = CreateEvent ( NULL, TRUE, FALSE, NULL );
270 ioMan->hExitEvent = hExit;
271 InitializeCriticalSection(&ioMan->manLock);
272 ioMan->workQueue = wq;
/* Pool starts empty; worker threads are added on demand by
 * depositWorkItem(). */
273 ioMan->numWorkers = 0;
274 ioMan->workersIdle = 0;
275 ioMan->queueSize = 0;
/* First request handed out gets ID 1. */
276 ioMan->requestID = 1;
277 InitializeCriticalSection(&ioMan->active_work_lock);
278 ioMan->active_work_items = NULL;
284 * Function: depositWorkItem()
286 * Local function which deposits a WorkItem onto a work queue,
287 * deciding in the process whether or not the thread pool needs
288 * to be augmented with another thread to handle the new request.
/* NOTE(review): elided lines include the return type, the WorkItem*
 * parameter, the debug-trace guard, the yield between the two
 * under-provisioning tests, and the code around SubmitWork(). */
293 depositWorkItem( unsigned int reqID,
296 EnterCriticalSection(&ioMan->manLock);
/* Debug trace of pool occupancy (its enabling guard, if any, is in an
 * elided line). */
299 fprintf(stderr, "depositWorkItem: %d/%d\n", ioMan->workersIdle, ioMan->numWorkers);
302 /* A new worker thread is created when there are fewer idle threads
303 * than non-consumed queue requests. This ensures that requests will
304 * be dealt with in a timely manner.
306 * [Long explanation of why the previous thread pool policy lead to
309 * Previously, the thread pool was augmented iff no idle worker threads
310 * were available. That strategy runs the risk of repeatedly adding to
311 * the request queue without expanding the thread pool to handle this
312 * sudden spike in queued requests.
313 * [How? Assume workersIdle is 1, and addIORequest() is called. No new
314 * thread is created and the request is simply queued. If addIORequest()
315 * is called again _before the OS schedules a worker thread to pull the
316 * request off the queue_, workersIdle is still 1 and another request is
317 * simply added to the queue. Once the worker thread is run, only one
318 * request is de-queued, leaving the 2nd request in the queue]
320 * Assuming none of the queued requests take an inordinate amount of to
321 * complete, the request queue would eventually be drained. But if that's
322 * not the case, the later requests will end up languishing in the queue
323 * indefinitely. The non-timely handling of requests may cause CH applications
324 * to misbehave / hang; bad.
/* Under-provisioned: fewer idle workers than queued requests. */
328 if ( (ioMan->workersIdle < ioMan->queueSize) ) {
329 /* see if giving up our quantum ferrets out some idle threads.
/* (The yield itself sits in an elided line between the unlock/relock.) */
331 LeaveCriticalSection(&ioMan->manLock);
333 EnterCriticalSection(&ioMan->manLock);
/* Re-test after the yield; if still under-provisioned, grow the pool. */
334 if ( (ioMan->workersIdle < ioMan->queueSize) ) {
335 /* No, go ahead and create another. */
337 LeaveCriticalSection(&ioMan->manLock);
338 NewIOWorkerThread(ioMan);
340 LeaveCriticalSection(&ioMan->manLock);
343 LeaveCriticalSection(&ioMan->manLock);
346 if (SubmitWork(ioMan->workQueue,wItem)) {
347 /* Note: the work item has potentially been consumed by a worker thread
348 * (and freed) at this point, so we cannot use wItem's requestID.
357 * Function: AddIORequest()
359 * Conduit to underlying WorkQueue's SubmitWork(); adds IO
360 * request to work queue, deciding whether or not to augment
361 * the thread pool in the process.
364 AddIORequest ( int fd,
369 CompletionProc onCompletion)
371 WorkItem* wItem = (WorkItem*)malloc(sizeof(WorkItem));
372 unsigned int reqID = ioMan->requestID++;
373 if (!ioMan || !wItem) return 0;
375 /* Fill in the blanks */
376 wItem->workKind = ( isSocket ? WORKER_FOR_SOCKET : 0 ) |
377 ( forWriting ? WORKER_WRITE : WORKER_READ );
378 wItem->workData.ioData.fd = fd;
379 wItem->workData.ioData.len = len;
380 wItem->workData.ioData.buf = buffer;
383 wItem->onCompletion = onCompletion;
384 wItem->requestID = reqID;
386 return depositWorkItem(reqID, wItem);
390 * Function: AddDelayRequest()
392 * Like AddIORequest(), but this time adding a delay request to
396 AddDelayRequest ( unsigned int msecs,
397 CompletionProc onCompletion)
399 WorkItem* wItem = (WorkItem*)malloc(sizeof(WorkItem));
400 unsigned int reqID = ioMan->requestID++;
401 if (!ioMan || !wItem) return FALSE;
403 /* Fill in the blanks */
404 wItem->workKind = WORKER_DELAY;
405 wItem->workData.delayData.msecs = msecs;
406 wItem->onCompletion = onCompletion;
407 wItem->requestID = reqID;
410 return depositWorkItem(reqID, wItem);
414 * Function: AddProcRequest()
416 * Add an asynchronous procedure request.
419 AddProcRequest ( void* proc,
421 CompletionProc onCompletion)
423 WorkItem* wItem = (WorkItem*)malloc(sizeof(WorkItem));
424 unsigned int reqID = ioMan->requestID++;
425 if (!ioMan || !wItem) return FALSE;
427 /* Fill in the blanks */
428 wItem->workKind = WORKER_DO_PROC;
429 wItem->workData.procData.proc = proc;
430 wItem->workData.procData.param = param;
431 wItem->onCompletion = onCompletion;
432 wItem->requestID = reqID;
433 wItem->abandonOp = 0;
436 return depositWorkItem(reqID, wItem);
/* Tear down the I/O manager: wake every worker via the manual-reset exit
 * event, wait until the pool has drained, then release the queue and the
 * event handle.  NOTE(review): the wait loop around the numWorkers
 * snapshot, and any freeing/clearing of 'ioMan' itself, sit in elided
 * lines of this listing. */
439 void ShutdownIOManager ( void )
443 SetEvent(ioMan->hExitEvent);
445 /* Wait for all worker threads to die. */
/* Snapshot the live-worker count under the lock. */
447 EnterCriticalSection(&ioMan->manLock);
448 num = ioMan->numWorkers;
449 LeaveCriticalSection(&ioMan->manLock);
454 FreeWorkQueue(ioMan->workQueue);
455 CloseHandle(ioMan->hExitEvent);
460 /* Keep track of WorkItems currently being serviced. */
/* Push 'wi' onto the front of the manager's active-work list (under
 * active_work_lock) so abandonWorkRequest() can locate in-flight
 * requests by ID.  Called by a worker just after fetching a request. */
463 RegisterWorkItem(IOManagerState* ioMan,
466 EnterCriticalSection(&ioMan->active_work_lock);
467 wi->link = ioMan->active_work_items;
468 ioMan->active_work_items = wi;
469 LeaveCriticalSection(&ioMan->active_work_lock);
/* Remove the entry matching wi->requestID from the active-work list
 * (matching is by request ID, not pointer identity).  NOTE(review): the
 * head-vs-interior unlink branching and the early return after a
 * successful removal sit in elided lines of this listing. */
474 DeregisterWorkItem(IOManagerState* ioMan,
477 WorkItem *ptr, *prev;
479 EnterCriticalSection(&ioMan->active_work_lock);
480 for(prev=NULL,ptr=ioMan->active_work_items;ptr;prev=ptr,ptr=ptr->link) {
481 if (wi->requestID == ptr->requestID) {
/* Match at the head of the list: advance the head pointer... */
483 ioMan->active_work_items = ptr->link;
/* ...otherwise bridge the predecessor over the removed node. */
485 prev->link = ptr->link;
487 LeaveCriticalSection(&ioMan->active_work_lock);
/* Fell off the end of the list: item was never registered (or already
 * removed) -- report, release the lock, carry on.
 * NOTE(review): requestID is unsigned; "%u" would be the matching
 * format specifier rather than "%d". */
491 fprintf(stderr, "DeregisterWorkItem: unable to locate work item %d\n", wi->requestID);
492 LeaveCriticalSection(&ioMan->active_work_lock);
497 * Function: abandonWorkRequest()
499 * Signal that a work request isn't of interest. Called by the Scheduler
500 * if a blocked Haskell thread has an exception thrown to it.
502 * Note: we're not aborting the system call that a worker might be blocked on
503 * here, just disabling the propagation of its result once its finished. We
504 * may have to go the whole hog here and switch to overlapped I/O so that we
505 * can abort blocked system calls.
/* Walk the active-work list under active_work_lock looking for reqID.
 * NOTE(review): the statement marking the matched item as abandoned
 * (presumably setting ptr->abandonOp) and the early return sit in
 * elided lines of this listing. */
508 abandonWorkRequest ( int reqID )
511 EnterCriticalSection(&ioMan->active_work_lock);
512 for(ptr=ioMan->active_work_items;ptr;ptr=ptr->link) {
/* Cast needed: reqID arrives as int but WorkItem IDs are unsigned. */
513 if (ptr->requestID == (unsigned int)reqID ) {
/* Found: (elided) flag it abandoned, then release the lock and bail. */
515 LeaveCriticalSection(&ioMan->active_work_lock);
519 /* Note: if the request ID isn't present, the worker will have
520 * finished sometime since awaitRequests() last drained the completed
521 * request table; i.e., not an error.
523 LeaveCriticalSection(&ioMan->active_work_lock);