1 /* -----------------------------------------------------------------------------
2 * $Id: Select.c,v 1.12 2000/04/03 15:24:21 rrt Exp $
4 * (c) The GHC Team 1995-1999
6 * Support for concurrent non-blocking I/O and thread waiting.
8 * ---------------------------------------------------------------------------*/
10 /* we're outside the realms of POSIX here... */
11 #define NON_POSIX_SOURCE
20 # if defined(HAVE_SYS_TYPES_H)
21 # include <sys/types.h>
24 # ifdef HAVE_SYS_TIME_H
25 # include <sys/time.h>
/* Number of timer ticks that have elapsed since the blocked_queue was
 * last polled.  awaitEvent() reads this, resets it to zero, and converts
 * it to microseconds (TICK_MILLISECS * 1000) to age the delayed threads.
 * NOTE(review): the read-and-clear in awaitEvent is not atomic, so ticks
 * arriving in between are lost (acknowledged by a ToDo there). */
nat ticks_since_select = 0;
30 /* Argument 'wait' says whether to wait for I/O to become available,
31 * or whether to just check and return immediately. If there are
32 * other threads ready to run, we normally do the non-waiting variety,
33 * otherwise we wait (see Schedule.c).
35 * SMP note: must be called with sched_mutex locked.
awaitEvent(rtsBool wait)
/* NOTE(review): this extract omits many lines of the original file
 * (case labels, FD_ZERO/FD_SET calls, else-branches and closing braces
 * are not visible here).  Only comments have been added below — no code
 * has been reconstructed.  Case identifications are inferences from the
 * block_info fields used and are marked as such. */
#ifdef mingw32_TARGET_OS
    /* Win32 doesn't support select(). ToDo: use MsgWaitForMultipleObjects()
     * to achieve (similar) effect.
     */
    StgTSO *tso, *prev, *next;
    rtsBool select_succeeded = rtsTrue;  /* cleared if the poll is abandoned early */
#ifndef linux_TARGET_OS
    /* Non-Linux: we must time the select() call ourselves with
     * gettimeofday(); Linux reports the unslept time back in 'tv'. */
    struct timeval tv_before,tv_after;

    IF_DEBUG(scheduler,belch("Checking for threads blocked on I/O...\n"));

    /* loop until we've woken up some threads.  This loop is needed
     * because the select timing isn't accurate, we sometimes sleep
     * for a while but not long enough to wake up a thread in
     */
        /* see how long it's been since we last checked the blocked queue.
         * ToDo: make this check atomic, so we don't lose any ticks.
         */
        delta = ticks_since_select;
        ticks_since_select = 0;
        /* convert elapsed ticks to microseconds */
        delta = delta * TICK_MILLISECS * 1000;

        /* When polling (wait == rtsFalse) time out immediately (min = 0);
         * when waiting, start from "infinity" and let the delayed threads
         * below shrink it to the soonest wakeup. */
        min = wait == rtsTrue ? 0x7fffffff : 0;

        /*
         * Collect all of the fd's that we're interested in, and capture
         * the minimum waiting time (in microseconds) for the delayed threads.
         */
        for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
            switch (tso->why_blocked) {
            /* NOTE(review): presumably the blocked-on-read case — label elided */
                int fd = tso->block_info.fd;
                maxfd = (fd > maxfd) ? fd : maxfd;  /* track highest fd for select() */
            /* NOTE(review): presumably the blocked-on-write case — label elided */
                int fd = tso->block_info.fd;
                maxfd = (fd > maxfd) ? fd : maxfd;
            /* NOTE(review): presumably the blocked-on-delay case — label elided */
                int candidate; /* signed int is intentional */
#if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
                candidate = tso->block_info.delay;
                /* otherwise: microseconds remaining until the target time */
                candidate = tso->block_info.target - getourtimeofday();
                if ((nat)candidate < min) {

        /* Release the scheduler lock while we do the poll.
         * this means that someone might muck with the blocked_queue
         * while we do this, but it shouldn't matter:
         *
         *   - another task might poll for I/O and remove one
         *     or more threads from the blocked_queue.
         *   - more I/O threads may be added to blocked_queue.
         *   - more delayed threads may be added to blocked_queue. We'll
         *     just subtract delta from their delays after the poll.
         *
         * I believe none of these cases lead to trouble --SDM.
         */
        RELEASE_LOCK(&sched_mutex);

        /* Check for any interesting events */
        tv.tv_sec  = min / 1000000;
        tv.tv_usec = min % 1000000;

#ifndef linux_TARGET_OS
        gettimeofday(&tv_before, (struct timezone *) NULL);

        /* Retry select() while it fails with EINTR and no RTS-level
         * interrupt is pending; any other errno is fatal. */
        while (!interrupted &&
               (numFound = select(maxfd+1, &rfd, &wfd, NULL, &tv)) < 0) {
            if (errno != EINTR) {
                /* fflush(stdout); */
                barf("select failed");
            ACQUIRE_LOCK(&sched_mutex);

            /* We got a signal; could be one of ours.  If so, we need
             * to start up the signal handler straight away, otherwise
             * we could block for a long time before the signal is
             */
            if (signals_pending()) {
                RELEASE_LOCK(&sched_mutex);
                start_signal_handlers();
                /* Don't wake up any other threads that were waiting on I/O */
                select_succeeded = rtsFalse;

            /* If new runnable threads have arrived, stop waiting for
             */
            if (run_queue_hd != END_TSO_QUEUE) {
                RELEASE_LOCK(&sched_mutex);
                select_succeeded = rtsFalse;

            RELEASE_LOCK(&sched_mutex);

#ifdef linux_TARGET_OS
        /* on Linux, tv is set to indicate the amount of time not
         * slept, so we don't need to gettimeofday() to find out.
         */
        delta += min - (tv.tv_sec * 1000000 + tv.tv_usec);
        /* non-Linux: compute the elapsed time from the two samples */
        gettimeofday(&tv_after, (struct timezone *) NULL);
        delta += (tv_after.tv_sec - tv_before.tv_sec) * 1000000 +
                 tv_after.tv_usec - tv_before.tv_usec;

        /* debug trace: report how long we actually waited */
        if (delta != 0) { fprintf(stderr,"waited: %d %d %d\n", min, delta,

        ACQUIRE_LOCK(&sched_mutex);

        /* Step through the waiting queue, unblocking every thread that now has
         * a file descriptor in a ready state.
         *
         * For the delayed threads, decrement the number of microsecs
         * we've been blocked for. Unblock the threads that have thusly expired.
         */
        for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
            switch (tso->why_blocked) {
            /* NOTE(review): read case — only ready if the poll completed */
                ready = select_succeeded && FD_ISSET(tso->block_info.fd, &rfd);
            /* NOTE(review): write case */
                ready = select_succeeded && FD_ISSET(tso->block_info.fd, &wfd);
            /* NOTE(review): delay case — age the thread by 'delta' usecs */
#if defined(HAVE_SETITIMER) || defined(mingw32_TARGET_OS)
                if (tso->block_info.delay > delta) {
                    tso->block_info.delay -= delta;
                    /* delay has expired */
                    tso->block_info.delay = 0;
                int candidate; /* signed int is intentional */
                candidate = tso->block_info.target - getourtimeofday();
                if ((nat)candidate > delta) {

            /* thread is ready: unblock it and put it on the run queue */
            IF_DEBUG(scheduler,belch("Waking up thread %d\n", tso->id));
            tso->why_blocked = NotBlocked;
            tso->link = END_TSO_QUEUE;
            PUSH_ON_RUN_QUEUE(tso);

            /* thread stays blocked: it becomes the (new) queue head */
            blocked_queue_hd = tso;

        /* fix up the tail of the rebuilt blocked queue
         * (empty-queue case vs. non-empty case — branch structure elided) */
        blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
        prev->link = END_TSO_QUEUE;
        blocked_queue_tl = prev;

    } while (wait && run_queue_hd == END_TSO_QUEUE);