1 /* -----------------------------------------------------------------------------
2 * $Id: Select.c,v 1.10 2000/03/20 09:42:50 andy Exp $
4 * (c) The GHC Team 1995-1999
6 * Support for concurrent non-blocking I/O and thread waiting.
8 * ---------------------------------------------------------------------------*/
10 /* we're outside the realms of POSIX here... */
11 #define NON_POSIX_SOURCE
20 # if defined(HAVE_SYS_TYPES_H)
21 # include <sys/types.h>
24 # ifdef HAVE_SYS_TIME_H
25 # include <sys/time.h>
/* Number of timer ticks elapsed since awaitEvent() last drained it.
 * Read-and-reset in awaitEvent(); presumably incremented by the RTS
 * timer interrupt handler — the incrementing site is not visible in
 * this file (TODO confirm).  The comment at the read site notes the
 * read/reset pair is not atomic, so ticks can be lost. */
28 nat ticks_since_select = 0;
30 /* Argument 'wait' says whether to wait for I/O to become available,
31 * or whether to just check and return immediately. If there are
32 * other threads ready to run, we normally do the non-waiting variety,
33 * otherwise we wait (see Schedule.c).
35 * SMP note: must be called with sched_mutex locked.
/*
 * awaitEvent
 *
 * Check for (and optionally wait for) external events — file descriptors
 * becoming ready and thread delays expiring — and move the affected
 * threads from blocked_queue onto the run queue.
 *
 *   wait == rtsTrue  : block in select() until something happens or a
 *                      runnable thread appears.
 *   wait == rtsFalse : poll with a zero timeout and return immediately.
 *
 * Locking: entered with sched_mutex held (see header comment above);
 * the lock is released around the select() call itself and re-acquired
 * before the blocked queue is walked again.
 *
 * NOTE(review): this excerpt has gaps; the comments below describe only
 * the lines visible here, and hedge where supporting code is missing.
 */
38 awaitEvent(rtsBool wait)
40 #ifdef mingw32_TARGET_OS
42 * Win32 doesn't support select(). ToDo: use MsgWaitForMultipleObjects()
43 * to achieve (similar) effect.
49 StgTSO *tso, *prev, *next;
57 #ifndef linux_TARGET_OS
/* Non-Linux select() does not report how long it actually slept, so we
 * bracket the call with gettimeofday() to measure the elapsed time. */
58 struct timeval tv_before,tv_after;
61 IF_DEBUG(scheduler,belch("Checking for threads blocked on I/O...\n"));
63 /* loop until we've woken up some threads. This loop is needed
64 * because the select timing isn't accurate, we sometimes sleep
65 * for a while but not long enough to wake up a thread in
70 /* see how long it's been since we last checked the blocked queue.
71 * ToDo: make this check atomic, so we don't lose any ticks.
/* delta accumulates elapsed time in microseconds; it is charged against
 * the delayed threads' remaining waits after the poll. */
73 delta = ticks_since_select;
74 ticks_since_select = 0;
75 delta = delta * TICK_MILLISECS * 1000;
/* min = shortest remaining delay (usecs) over all delayed threads.
 * Start at "infinity" (0x7fffffff) when we mean to block, or at 0 for a
 * pure poll so select() returns immediately. */
77 min = wait == rtsTrue ? 0x7fffffff : 0;
80 * Collect all of the fd's that we're interested in, and capture
81 * the minimum waiting time (in microseconds) for the delayed threads.
/* First pass over blocked_queue: build the read/write fd sets (setup
 * not visible in this excerpt) and compute maxfd and min. */
86 for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
89 switch (tso->why_blocked) {
/* Blocked-on-read case (presumably — the case label itself is not
 * visible here): track the highest fd for select()'s nfds argument. */
92 int fd = tso->block_info.fd;
93 maxfd = (fd > maxfd) ? fd : maxfd;
/* Blocked-on-write case (presumably — case label not visible). */
100 int fd = tso->block_info.fd;
101 maxfd = (fd > maxfd) ? fd : maxfd;
/* Delayed-thread case: compute the remaining delay in microseconds. */
108 int candidate; /* signed int is intentional */
109 #if defined(HAVE_SETITIMER)
110 candidate = tso->block_info.delay;
/* Without setitimer, block_info holds an absolute target time; the
 * subtraction can go negative if already expired — hence signed. */
112 candidate = tso->block_info.target - getourtimeofday();
/* (candidate has been clamped non-negative by code not shown here,
 * presumably — otherwise the nat cast would wrap; TODO confirm.) */
117 if ((nat)candidate < min) {
128 /* Release the scheduler lock while we do the poll.
129 * this means that someone might muck with the blocked_queue
130 * while we do this, but it shouldn't matter:
132 * - another task might poll for I/O and remove one
133 * or more threads from the blocked_queue.
134 * - more I/O threads may be added to blocked_queue.
135 * - more delayed threads may be added to blocked_queue. We'll
136 * just subtract delta from their delays after the poll.
138 * I believe none of these cases lead to trouble --SDM.
140 RELEASE_LOCK(&sched_mutex);
142 /* Check for any interesting events */
/* Convert min (microseconds) into select()'s struct timeval. */
144 tv.tv_sec = min / 1000000;
145 tv.tv_usec = min % 1000000;
147 #ifndef linux_TARGET_OS
148 gettimeofday(&tv_before, (struct timezone *) NULL);
/* Retry select() when interrupted by a signal (EINTR); any other
 * failure aborts the RTS. */
151 while (!interrupted &&
152 (numFound = select(maxfd+1, &rfd, &wfd, NULL, &tv)) < 0) {
153 if (errno != EINTR) {
154 /* fflush(stdout); */
156 barf("select failed");
/* Re-take the scheduler lock before inspecting shared state below. */
158 ACQUIRE_LOCK(&sched_mutex);
160 /* We got a signal; could be one of ours. If so, we need
161 * to start up the signal handler straight away, otherwise
162 * we could block for a long time before the signal is
/* Drop the lock again while running signal handlers, since they may
 * need to interact with the scheduler. */
165 if (signals_pending()) {
166 RELEASE_LOCK(&sched_mutex);
167 start_signal_handlers();
171 /* If new runnable threads have arrived, stop waiting for
174 if (run_queue_hd != END_TSO_QUEUE) {
175 RELEASE_LOCK(&sched_mutex);
179 RELEASE_LOCK(&sched_mutex);
182 #ifdef linux_TARGET_OS
183 /* on Linux, tv is set to indicate the amount of time not
184 * slept, so we don't need to gettimeofday() to find out.
186 delta += min - (tv.tv_sec * 1000000 + tv.tv_usec);
188 gettimeofday(&tv_after, (struct timezone *) NULL);
189 delta += (tv_after.tv_sec - tv_before.tv_sec) * 1000000 +
190 tv_after.tv_usec - tv_before.tv_usec;
194 if (delta != 0) { fprintf(stderr,"waited: %d %d %d\n", min, delta,
198 ACQUIRE_LOCK(&sched_mutex);
200 /* Step through the waiting queue, unblocking every thread that now has
201 * a file descriptor in a ready state.
203 * For the delayed threads, decrement the number of microsecs
204 * we've been blocked for. Unblock the threads that have thusly expired.
/* Second pass: wake every thread whose fd turned up ready in the fd
 * sets, or whose delay has now fully elapsed. */
208 for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
210 switch (tso->why_blocked) {
212 ready = FD_ISSET(tso->block_info.fd, &rfd);
216 ready = FD_ISSET(tso->block_info.fd, &wfd);
221 int candidate; /* signed int is intentional */
222 #if defined(HAVE_SETITIMER)
/* Charge the elapsed time (delta) against this thread's remaining
 * relative delay; saturate at zero rather than wrapping. */
223 if (tso->block_info.delay > delta) {
224 tso->block_info.delay -= delta;
227 tso->block_info.delay = 0;
/* Absolute-target variant: remaining time recomputed from the clock. */
231 candidate = tso->block_info.target - getourtimeofday();
235 if ((nat)candidate > delta) {
/* Wake the thread: mark it runnable, unlink it from blocked_queue,
 * and push it onto the run queue. */
249 IF_DEBUG(scheduler,belch("Waking up thread %d\n", tso->id));
250 tso->why_blocked = NotBlocked;
251 tso->link = END_TSO_QUEUE;
252 PUSH_ON_RUN_QUEUE(tso);
255 blocked_queue_hd = tso;
/* Restore blocked_queue's head/tail invariants after unlinking. */
263 blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
265 prev->link = END_TSO_QUEUE;
266 blocked_queue_tl = prev;
/* Keep polling until a thread is runnable (only when 'wait' is set). */
269 } while (wait && run_queue_hd == END_TSO_QUEUE);