1 /* ----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * API for invoking Haskell functions via the RTS
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
11 #include "OSThreads.h"
18 #include "Capability.h"
23 /* ----------------------------------------------------------------------------
24 Building Haskell objects from C datatypes.
26 TODO: Currently this code does not tag created pointers,
27 however it is not unsafe (the constructor code will do it)
29 ------------------------------------------------------------------------- */
/* rts_mkChar: box an HsChar as a C# (Czh) constructor closure on this
   Capability's local heap.  CONSTR_sizeW(0,1): zero pointer fields, one
   non-pointer word of payload. */
31 rts_mkChar (Capability *cap, HsChar c)
33 StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
34 SET_HDR(p, Czh_con_info, CCS_SYSTEM);
35 p->payload[0] = (StgClosure *)(StgWord)(StgChar)c;
/* rts_mkInt: box an HsInt as an I# (Izh) constructor closure. */
40 rts_mkInt (Capability *cap, HsInt i)
42 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
43 SET_HDR(p, Izh_con_info, CCS_SYSTEM);
44 p->payload[0] = (StgClosure *)(StgInt)i;
/* rts_mkInt8: box an HsInt8 as an I8# closure. */
49 rts_mkInt8 (Capability *cap, HsInt8 i)
51 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
52 SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
53 /* NOTE(review): despite the original "mask out the bits above the lowest 8"
   comment, no masking happens here — the (StgInt) cast sign-extends i.
   Presumably I8# stores the sign-extended representation; confirm. */
54 p->payload[0] = (StgClosure *)(StgInt)i;
/* rts_mkInt16: box an HsInt16 as an I16# closure. */
59 rts_mkInt16 (Capability *cap, HsInt16 i)
61 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
62 SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
63 /* NOTE(review): as in rts_mkInt8, no masking is actually performed;
   the cast sign-extends. Comment was stale. */
64 p->payload[0] = (StgClosure *)(StgInt)i;
/* rts_mkInt32: box an HsInt32 as an I32# closure. */
69 rts_mkInt32 (Capability *cap, HsInt32 i)
71 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
72 SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
73 p->payload[0] = (StgClosure *)(StgInt)i;
78 #ifdef sparc_HOST_ARCH
79 /* The closures returned by allocateLocal are only guaranteed to be 32 bit
80 aligned, because that's the size of pointers. SPARC v9 can't do
81 misaligned loads/stores, so we have to write the 64bit word in chunks */
/* rts_mkInt64 (SPARC variant): box an HsInt64 as an I64# closure, writing
   the payload as two 32-bit halves (big-endian order: high word first). */
84 rts_mkInt64 (Capability *cap, HsInt64 i_)
86 StgInt64 i = (StgInt64)i_;
89 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
90 SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
92 tmp = (StgInt32*)&(p->payload[0]);
94 tmp[0] = (StgInt32)((StgInt64)i >> 32);
95 tmp[1] = (StgInt32)i; /* truncate high 32 bits */
/* rts_mkInt64 (generic variant): store the 64-bit value in one aligned
   write through an llong pointer. */
103 rts_mkInt64 (Capability *cap, HsInt64 i)
106 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
107 SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
108 tmp = (llong*)&(p->payload[0]);
113 #endif /* sparc_HOST_ARCH */
/* rts_mkWord: box an HsWord as a W# (Wzh) constructor closure. */
117 rts_mkWord (Capability *cap, HsWord i)
119 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
120 SET_HDR(p, Wzh_con_info, CCS_SYSTEM);
121 p->payload[0] = (StgClosure *)(StgWord)i;
/* rts_mkWord8: box an HsWord8 as a W8# closure.  Unlike the signed
   variants, the unsigned boxers really do mask to the advertised width. */
126 rts_mkWord8 (Capability *cap, HsWord8 w)
128 /* see rts_mkInt* comments */
129 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
130 SET_HDR(p, W8zh_con_info, CCS_SYSTEM);
131 p->payload[0] = (StgClosure *)(StgWord)(w & 0xff);
/* rts_mkWord16: box an HsWord16 as a W16# closure (masked to 16 bits). */
136 rts_mkWord16 (Capability *cap, HsWord16 w)
138 /* see rts_mkInt* comments */
139 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
140 SET_HDR(p, W16zh_con_info, CCS_SYSTEM);
141 p->payload[0] = (StgClosure *)(StgWord)(w & 0xffff);
/* rts_mkWord32: box an HsWord32 as a W32# closure (masked to 32 bits;
   the mask is redundant if HsWord32 is exactly 32 bits, but harmless). */
146 rts_mkWord32 (Capability *cap, HsWord32 w)
148 /* see rts_mkInt* comments */
149 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
150 SET_HDR(p, W32zh_con_info, CCS_SYSTEM);
151 p->payload[0] = (StgClosure *)(StgWord)(w & 0xffffffff);
/* rts_mkWord64: box an HsWord64 as a W64# closure (two payload words).
   NOTE(review): no SPARC split-write variant is visible here, unlike
   rts_mkInt64 — confirm whether the alignment concern applies too. */
156 rts_mkWord64 (Capability *cap, HsWord64 w)
160 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
161 /* see rts_mkInt8 comment */
162 SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
163 tmp = (ullong*)&(p->payload[0]);
/* rts_mkFloat: box an HsFloat as an F# (Fzh) closure.  ASSIGN_FLT handles
   any alignment requirements of the target. */
169 rts_mkFloat (Capability *cap, HsFloat f)
171 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
172 SET_HDR(p, Fzh_con_info, CCS_SYSTEM);
173 ASSIGN_FLT((P_)p->payload, (StgFloat)f);
/* rts_mkDouble: box an HsDouble as a D# (Dzh) closure.  Payload is
   sizeofW(StgDouble) words (2 on 32-bit targets), hence the different
   size computation from rts_mkFloat. */
178 rts_mkDouble (Capability *cap, HsDouble d)
180 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,sizeofW(StgDouble)));
181 SET_HDR(p, Dzh_con_info, CCS_SYSTEM);
182 ASSIGN_DBL((P_)p->payload, (StgDouble)d);
/* rts_mkStablePtr: box an HsStablePtr in a StablePtr constructor closure.
   Header + one payload word, sized directly rather than via CONSTR_sizeW. */
187 rts_mkStablePtr (Capability *cap, HsStablePtr s)
189 StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
190 SET_HDR(p, StablePtr_con_info, CCS_SYSTEM);
191 p->payload[0] = (StgClosure *)s;
/* rts_mkPtr: box an HsPtr in a Ptr constructor closure. */
196 rts_mkPtr (Capability *cap, HsPtr a)
198 StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
199 SET_HDR(p, Ptr_con_info, CCS_SYSTEM);
200 p->payload[0] = (StgClosure *)a;
/* rts_mkFunPtr: box an HsFunPtr in a FunPtr constructor closure. */
205 rts_mkFunPtr (Capability *cap, HsFunPtr a)
207 StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
208 SET_HDR(p, FunPtr_con_info, CCS_SYSTEM);
209 p->payload[0] = (StgClosure *)a;
/* rts_mkBool: no allocation needed — return the shared static True/False
   closure.  cap is unused (STG_UNUSED). */
214 rts_mkBool (Capability *cap STG_UNUSED, HsBool b)
217 return (StgClosure *)True_closure;
219 return (StgClosure *)False_closure;
/* rts_mkString: build the thunk (unpackCString# s) — the String is
   unpacked lazily by the base library's unpackCString closure.
   NOTE(review): s must stay alive until the thunk is fully forced. */
224 rts_mkString (Capability *cap, char *s)
226 return rts_apply(cap, (StgClosure *)unpackCString_closure, rts_mkPtr(cap,s));
/* rts_apply: build the application thunk (f arg) using the generic
   updatable 2-field application info table stg_ap_2_upd_info. */
230 rts_apply (Capability *cap, HaskellObj f, HaskellObj arg)
234 ap = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk) + 2);
235 SET_HDR(ap, (StgInfoTable *)&stg_ap_2_upd_info, CCS_SYSTEM);
237 ap->payload[1] = arg;
238 return (StgClosure *)ap;
241 /* ----------------------------------------------------------------------------
242 Deconstructing Haskell objects
244 We would like to assert that we have the right kind of object in
245 each case, but this is problematic because in GHCi the info table
246 for the D# constructor (say) might be dynamically loaded. Hence we
247 omit these assertions for now.
248 ------------------------------------------------------------------------- */
/* rts_getChar: extract the HsChar from a C# closure.  UNTAG_CLOSURE strips
   the pointer tag bits before reading the payload (see the tagging TODO at
   the top of the construction section). */
251 rts_getChar (HaskellObj p)
253 // See comment above:
254 // ASSERT(p->header.info == Czh_con_info ||
255 // p->header.info == Czh_static_info);
256 return (StgChar)(StgWord)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getInt: extract the HsInt from an I# closure. */
260 rts_getInt (HaskellObj p)
262 // See comment above:
263 // ASSERT(p->header.info == Izh_con_info ||
264 // p->header.info == Izh_static_info);
265 return (HsInt)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getInt8: extract the HsInt8 from an I8# closure; the double cast
   first recovers the full word, then truncates to 8 bits. */
269 rts_getInt8 (HaskellObj p)
271 // See comment above:
272 // ASSERT(p->header.info == I8zh_con_info ||
273 // p->header.info == I8zh_static_info);
274 return (HsInt8)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getInt16: as rts_getInt8, truncating to 16 bits. */
278 rts_getInt16 (HaskellObj p)
280 // See comment above:
281 // ASSERT(p->header.info == I16zh_con_info ||
282 // p->header.info == I16zh_static_info);
283 return (HsInt16)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getInt32: as rts_getInt8, truncating to 32 bits. */
287 rts_getInt32 (HaskellObj p)
289 // See comment above:
290 // ASSERT(p->header.info == I32zh_con_info ||
291 // p->header.info == I32zh_static_info);
292 return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
296 #ifdef sparc_HOST_ARCH
297 /* The closures returned by allocateLocal are only guaranteed to be 32 bit
298 aligned, because that's the size of pointers. SPARC v9 can't do
299 misaligned loads/stores, so we have to read the 64bit word in chunks */
/* rts_getInt64 (SPARC variant): reassemble the HsInt64 from the two
   32-bit halves written by rts_mkInt64 (high word in tmp[0]).
   NOTE(review): `tmp[0] << 32` shifts a 32-bit value by 32 — undefined
   behavior per C11 6.5.7; the cast must happen BEFORE the shift:
   ((HsInt64)tmp[0] << 32).  Also (HsInt64)tmp[1] sign-extends, so a
   negative low half would OR 1-bits into the high word; it should be
   masked with 0xFFFFFFFF.  Flagging only — surrounding lines of this
   function are not visible here, so no in-place fix is attempted. */
302 rts_getInt64 (HaskellObj p)
305 // See comment above:
306 // ASSERT(p->header.info == I64zh_con_info ||
307 // p->header.info == I64zh_static_info);
308 tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);
310 HsInt64 i = (HsInt64)(tmp[0] << 32) | (HsInt64)tmp[1];
/* rts_getInt64 (generic variant): single aligned 64-bit read. */
317 rts_getInt64 (HaskellObj p)
320 // See comment above:
321 // ASSERT(p->header.info == I64zh_con_info ||
322 // p->header.info == I64zh_static_info);
323 tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
327 #endif /* sparc_HOST_ARCH */
/* rts_getWord: extract the HsWord from a W# closure. */
331 rts_getWord (HaskellObj p)
333 // See comment above:
334 // ASSERT(p->header.info == Wzh_con_info ||
335 // p->header.info == Wzh_static_info);
336 return (HsWord)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getWord8: extract the HsWord8 from a W8# closure (truncating cast). */
340 rts_getWord8 (HaskellObj p)
342 // See comment above:
343 // ASSERT(p->header.info == W8zh_con_info ||
344 // p->header.info == W8zh_static_info);
345 return (HsWord8)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getWord16: as rts_getWord8, truncating to 16 bits. */
349 rts_getWord16 (HaskellObj p)
351 // See comment above:
352 // ASSERT(p->header.info == W16zh_con_info ||
353 // p->header.info == W16zh_static_info);
354 return (HsWord16)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getWord32: as rts_getWord8, truncating to 32 bits. */
358 rts_getWord32 (HaskellObj p)
360 // See comment above:
361 // ASSERT(p->header.info == W32zh_con_info ||
362 // p->header.info == W32zh_static_info);
363 return (HsWord32)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getWord64: read the HsWord64 payload through an HsWord64 pointer.
   NOTE(review): no SPARC chunked-read variant is visible for the Word64
   case, unlike rts_getInt64 — confirm whether alignment is a problem. */
368 rts_getWord64 (HaskellObj p)
371 // See comment above:
372 // ASSERT(p->header.info == W64zh_con_info ||
373 // p->header.info == W64zh_static_info);
374 tmp = (HsWord64*)&(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getFloat: extract the float from an F# closure via PK_FLT (handles
   target alignment). */
379 rts_getFloat (HaskellObj p)
381 // See comment above:
382 // ASSERT(p->header.info == Fzh_con_info ||
383 // p->header.info == Fzh_static_info);
384 return (float)(PK_FLT((P_)UNTAG_CLOSURE(p)->payload));
/* rts_getDouble: extract the double from a D# closure via PK_DBL. */
388 rts_getDouble (HaskellObj p)
390 // See comment above:
391 // ASSERT(p->header.info == Dzh_con_info ||
392 // p->header.info == Dzh_static_info);
393 return (double)(PK_DBL((P_)UNTAG_CLOSURE(p)->payload));
/* rts_getStablePtr: extract the StgStablePtr from a StablePtr closure. */
397 rts_getStablePtr (HaskellObj p)
399 // See comment above:
400 // ASSERT(p->header.info == StablePtr_con_info ||
401 // p->header.info == StablePtr_static_info);
402 return (StgStablePtr)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getPtr: extract the raw pointer from a Ptr closure.
   NOTE(review): the (Capability *) cast looks like a copy-paste slip —
   this is a generic pointer and should be cast to (void *), as
   rts_getFunPtr below does.  Harmless if the return type is HsPtr
   (void *), but misleading; worth fixing when the full file is edited. */
406 rts_getPtr (HaskellObj p)
408 // See comment above:
409 // ASSERT(p->header.info == Ptr_con_info ||
410 // p->header.info == Ptr_static_info);
411 return (Capability *)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getFunPtr: extract the function pointer from a FunPtr closure. */
415 rts_getFunPtr (HaskellObj p)
417 // See comment above:
418 // ASSERT(p->header.info == FunPtr_con_info ||
419 // p->header.info == FunPtr_static_info);
420 return (void *)(UNTAG_CLOSURE(p)->payload[0]);
/* rts_getBool: decode a Bool closure by its constructor tag, read from
   the info table's srt_bitmap field (constructors reuse that slot for
   the tag, as noted below). */
424 rts_getBool (HaskellObj p)
428 info = get_itbl((StgClosure *)UNTAG_CLOSURE(p));
429 if (info->srt_bitmap == 0) { // srt_bitmap is the constructor tag
436 /* -----------------------------------------------------------------------------
438 -------------------------------------------------------------------------- */
/* pushClosure: push one word onto a TSO's stack (body elided from this view). */
440 INLINE_HEADER void pushClosure (StgTSO *tso, StgWord c) {
/* createGenThread: make a thread that evaluates `closure` to WHNF.
   Stack (top first): stg_enter_info, closure.
   NOTE(review): the two adjacent createThread calls are presumably
   alternative #ifdef branches (priority vs. non-priority scheduler)
   whose preprocessor lines are elided from this view — confirm. */
446 createGenThread (Capability *cap, nat stack_size, StgClosure *closure)
450 t = createThread (cap, stack_size, NO_PRI);
452 t = createThread (cap, stack_size);
454 pushClosure(t, (W_)closure);
455 pushClosure(t, (W_)&stg_enter_info);
/* createIOThread: make a thread that runs the IO action `closure`
   WITHOUT forcing its result (stg_noforceIO on top of the applied
   action).  Stack (top first): enter, closure, ap_v, noforceIO. */
460 createIOThread (Capability *cap, nat stack_size, StgClosure *closure)
464 t = createThread (cap, stack_size, NO_PRI);
466 t = createThread (cap, stack_size);
468 pushClosure(t, (W_)&stg_noforceIO_info);
469 pushClosure(t, (W_)&stg_ap_v_info);
470 pushClosure(t, (W_)closure);
471 pushClosure(t, (W_)&stg_enter_info);
476 * Same as above, but also evaluate the result of the IO action
477 * to whnf while we're at it.
/* createStrictIOThread: identical frame layout to createIOThread except
   stg_forceIO replaces stg_noforceIO, so the action's result is forced
   to WHNF before the thread finishes. */
481 createStrictIOThread(Capability *cap, nat stack_size, StgClosure *closure)
485 t = createThread(cap, stack_size, NO_PRI);
487 t = createThread(cap, stack_size);
489 pushClosure(t, (W_)&stg_forceIO_info);
490 pushClosure(t, (W_)&stg_ap_v_info);
491 pushClosure(t, (W_)closure);
492 pushClosure(t, (W_)&stg_enter_info);
496 /* ----------------------------------------------------------------------------
497 Evaluating Haskell expressions
498 ------------------------------------------------------------------------- */
/* rts_eval: evaluate a pure value p to WHNF on a fresh thread with the
   default initial stack size; result delivered through *ret.  Returns the
   (possibly different) Capability from scheduleWaitThread. */
501 rts_eval (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
505 tso = createGenThread(cap, RtsFlags.GcFlags.initialStkSize, p);
506 return scheduleWaitThread(tso,ret,cap);
/* rts_eval_: as rts_eval, but with a caller-supplied stack size. */
510 rts_eval_ (Capability *cap, HaskellObj p, unsigned int stack_size,
511 /*out*/HaskellObj *ret)
515 tso = createGenThread(cap, stack_size, p);
516 return scheduleWaitThread(tso,ret,cap);
520 * rts_evalIO() evaluates a value of the form (IO a), forcing the action's
521 * result to WHNF before returning.
524 rts_evalIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
528 tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
529 return scheduleWaitThread(tso,ret,cap);
533 * rts_evalStableIO() is suitable for calling from Haskell. It
534 * evaluates a value of the form (StablePtr (IO a)), forcing the
535 * action's result to WHNF before returning. The result is returned
/* rts_evalStableIO: the thread's result is re-wrapped as a StablePtr
   in *ret, but only when the run Succeeded and the caller asked for it. */
539 rts_evalStableIO (Capability *cap, HsStablePtr s, /*out*/HsStablePtr *ret)
543 SchedulerStatus stat;
545 p = (StgClosure *)deRefStablePtr(s);
546 tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
547 // async exceptions are always blocked by default in the created
548 // thread. See #1048.
549 tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
550 cap = scheduleWaitThread(tso,&r,cap);
551 stat = rts_getSchedStatus(cap);
553 if (stat == Success && ret != NULL) {
555 *ret = getStablePtr((StgPtr)r);
562 * Like rts_evalIO(), but doesn't force the action's result.
565 rts_evalLazyIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
569 tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
570 return scheduleWaitThread(tso,ret,cap);
/* rts_evalLazyIO_: as rts_evalLazyIO, with an explicit stack size. */
574 rts_evalLazyIO_ (Capability *cap, HaskellObj p, unsigned int stack_size,
575 /*out*/HaskellObj *ret)
579 tso = createIOThread(cap, stack_size, p);
580 return scheduleWaitThread(tso,ret,cap);
583 /* Convenience function for decoding the returned status. */
/* rts_checkSchedStatus: abort the process (stg_exit) with a diagnostic
   naming the call site unless the scheduler reported Success.  The
   branch conditions (Killed / Interrupted / default) are elided from
   this view; only the error arms are visible.
   NOTE(review): `site` could be const char* — the function only prints it. */
586 rts_checkSchedStatus (char* site, Capability *cap)
588 SchedulerStatus rc = cap->running_task->stat;
593 errorBelch("%s: uncaught exception",site);
594 stg_exit(EXIT_FAILURE);
596 errorBelch("%s: interrupted", site);
597 stg_exit(EXIT_FAILURE);
599 errorBelch("%s: Return code (%d) not ok",(site),(rc));
600 stg_exit(EXIT_FAILURE);
/* rts_getSchedStatus: report the status of the last thread run on this
   Capability (stored on its running Task). */
605 rts_getSchedStatus (Capability *cap)
607 return cap->running_task->stat;
/* (interior of rts_lock — the function header is outside this view)
   Entering the RTS from foreign code: bind a Task to this OS thread,
   then wait for a free Capability. */
616 // ToDo: get rid of this lock in the common case. We could store
617 // a free Task in thread-local storage, for example. That would
618 // leave just one lock on the path into the RTS: cap->lock when
619 // acquiring the Capability.
620 ACQUIRE_LOCK(&sched_mutex);
621 task = newBoundTask();
622 RELEASE_LOCK(&sched_mutex);
625 waitForReturnCapability(&cap, task);
626 return (Capability *)cap;
629 // Exiting the RTS: we hold a Capability that is not necessarily the
630 // same one that was originally returned by rts_lock(), because
631 // rts_evalIO() etc. may return a new one. Now that we have
632 // investigated the return value, we can release the Capability,
633 // and free the Task (in that order).
/* rts_unlock: release the Capability and retire the bound Task, keeping
   cap->lock held across boundTaskExiting() for the shutdown invariant
   described below. */
636 rts_unlock (Capability *cap)
640 task = cap->running_task;
641 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
643 // Now release the Capability. With the capability released, GC
644 // may happen. NB. does not try to put the current Task on the
646 // NB. keep cap->lock held while we call boundTaskExiting(). This
647 // is necessary during shutdown, where we want the invariant that
648 // after shutdownCapability(), all the Tasks associated with the
649 // Capability have completed their shutdown too. Otherwise we
650 // could have boundTaskExiting()/workerTaskStop() running at some
651 // random point in the future, which causes problems for
652 // freeTaskManager().
653 ACQUIRE_LOCK(&cap->lock);
654 releaseCapability_(cap,rtsFalse);
656 // Finally, we can release the Task to the free list.
657 boundTaskExiting(task);
658 RELEASE_LOCK(&cap->lock);