1 /* ----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2001
5 * API for invoking Haskell functions via the RTS
7 * --------------------------------------------------------------------------*/
9 #include "PosixSource.h"
11 #include "OSThreads.h"
18 #include "Capability.h"
23 /* ----------------------------------------------------------------------------
24 Building Haskell objects from C datatypes.
26 TODO: Currently this code does not tag created pointers,
27    however it is not unsafe (the constructor code will do it)
29 ------------------------------------------------------------------------- */
31 rts_mkChar (Capability *cap, HsChar c)
33 StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
34 SET_HDR(p, Czh_con_info, CCS_SYSTEM);
35 p->payload[0] = (StgClosure *)(StgWord)(StgChar)c;
40 rts_mkInt (Capability *cap, HsInt i)
42 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
43 SET_HDR(p, Izh_con_info, CCS_SYSTEM);
44 p->payload[0] = (StgClosure *)(StgInt)i;
49 rts_mkInt8 (Capability *cap, HsInt8 i)
51 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
52 SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
53 /* Make sure we mask out the bits above the lowest 8 */
54 p->payload[0] = (StgClosure *)(StgInt)i;
59 rts_mkInt16 (Capability *cap, HsInt16 i)
61 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
62 SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
63 /* Make sure we mask out the relevant bits */
64 p->payload[0] = (StgClosure *)(StgInt)i;
69 rts_mkInt32 (Capability *cap, HsInt32 i)
71 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
72 SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
73 p->payload[0] = (StgClosure *)(StgInt)i;
78 #ifdef sparc_HOST_ARCH
79 /* The closures returned by allocateLocal are only guaranteed to be 32 bit
80 aligned, because that's the size of pointers. SPARC v9 can't do
81 misaligned loads/stores, so we have to write the 64bit word in chunks */
84 rts_mkInt64 (Capability *cap, HsInt64 i_)
86 StgInt64 i = (StgInt64)i_;
89 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
90 SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
92 tmp = (StgInt32*)&(p->payload[0]);
94 tmp[0] = (StgInt32)((StgInt64)i >> 32);
95 tmp[1] = (StgInt32)i; /* truncate high 32 bits */
103 rts_mkInt64 (Capability *cap, HsInt64 i)
106 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
107 SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
108 tmp = (llong*)&(p->payload[0]);
113 #endif /* sparc_HOST_ARCH */
117 rts_mkWord (Capability *cap, HsWord i)
119 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
120 SET_HDR(p, Wzh_con_info, CCS_SYSTEM);
121 p->payload[0] = (StgClosure *)(StgWord)i;
126 rts_mkWord8 (Capability *cap, HsWord8 w)
128 /* see rts_mkInt* comments */
129 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
130 SET_HDR(p, W8zh_con_info, CCS_SYSTEM);
131 p->payload[0] = (StgClosure *)(StgWord)(w & 0xff);
136 rts_mkWord16 (Capability *cap, HsWord16 w)
138 /* see rts_mkInt* comments */
139 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
140 SET_HDR(p, W16zh_con_info, CCS_SYSTEM);
141 p->payload[0] = (StgClosure *)(StgWord)(w & 0xffff);
146 rts_mkWord32 (Capability *cap, HsWord32 w)
148 /* see rts_mkInt* comments */
149 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
150 SET_HDR(p, W32zh_con_info, CCS_SYSTEM);
151 p->payload[0] = (StgClosure *)(StgWord)(w & 0xffffffff);
156 #ifdef sparc_HOST_ARCH
157 /* The closures returned by allocateLocal are only guaranteed to be 32 bit
158 aligned, because that's the size of pointers. SPARC v9 can't do
159 misaligned loads/stores, so we have to write the 64bit word in chunks */
162 rts_mkWord64 (Capability *cap, HsWord64 w_)
164 StgWord64 w = (StgWord64)w_;
167 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
168 /* see mk_Int8 comment */
169 SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
171 tmp = (StgWord32*)&(p->payload[0]);
173 tmp[0] = (StgWord32)((StgWord64)w >> 32);
174 tmp[1] = (StgWord32)w; /* truncate high 32 bits */
181 rts_mkWord64 (Capability *cap, HsWord64 w)
185 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
186 /* see mk_Int8 comment */
187 SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
188 tmp = (ullong*)&(p->payload[0]);
197 rts_mkFloat (Capability *cap, HsFloat f)
199 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
200 SET_HDR(p, Fzh_con_info, CCS_SYSTEM);
201 ASSIGN_FLT((P_)p->payload, (StgFloat)f);
206 rts_mkDouble (Capability *cap, HsDouble d)
208 StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,sizeofW(StgDouble)));
209 SET_HDR(p, Dzh_con_info, CCS_SYSTEM);
210 ASSIGN_DBL((P_)p->payload, (StgDouble)d);
215 rts_mkStablePtr (Capability *cap, HsStablePtr s)
217 StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
218 SET_HDR(p, StablePtr_con_info, CCS_SYSTEM);
219 p->payload[0] = (StgClosure *)s;
224 rts_mkPtr (Capability *cap, HsPtr a)
226 StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
227 SET_HDR(p, Ptr_con_info, CCS_SYSTEM);
228 p->payload[0] = (StgClosure *)a;
233 rts_mkFunPtr (Capability *cap, HsFunPtr a)
235 StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
236 SET_HDR(p, FunPtr_con_info, CCS_SYSTEM);
237 p->payload[0] = (StgClosure *)a;
242 rts_mkBool (Capability *cap STG_UNUSED, HsBool b)
245 return (StgClosure *)True_closure;
247 return (StgClosure *)False_closure;
252 rts_mkString (Capability *cap, char *s)
254 return rts_apply(cap, (StgClosure *)unpackCString_closure, rts_mkPtr(cap,s));
258 rts_apply (Capability *cap, HaskellObj f, HaskellObj arg)
262 ap = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk) + 2);
263 SET_HDR(ap, (StgInfoTable *)&stg_ap_2_upd_info, CCS_SYSTEM);
265 ap->payload[1] = arg;
266 return (StgClosure *)ap;
269 /* ----------------------------------------------------------------------------
270 Deconstructing Haskell objects
272 We would like to assert that we have the right kind of object in
273 each case, but this is problematic because in GHCi the info table
274 for the D# constructor (say) might be dynamically loaded. Hence we
275 omit these assertions for now.
276 ------------------------------------------------------------------------- */
279 rts_getChar (HaskellObj p)
281 // See comment above:
282 // ASSERT(p->header.info == Czh_con_info ||
283 // p->header.info == Czh_static_info);
284 return (StgChar)(StgWord)(UNTAG_CLOSURE(p)->payload[0]);
288 rts_getInt (HaskellObj p)
290 // See comment above:
291 // ASSERT(p->header.info == Izh_con_info ||
292 // p->header.info == Izh_static_info);
293 return (HsInt)(UNTAG_CLOSURE(p)->payload[0]);
297 rts_getInt8 (HaskellObj p)
299 // See comment above:
300 // ASSERT(p->header.info == I8zh_con_info ||
301 // p->header.info == I8zh_static_info);
302 return (HsInt8)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
306 rts_getInt16 (HaskellObj p)
308 // See comment above:
309 // ASSERT(p->header.info == I16zh_con_info ||
310 // p->header.info == I16zh_static_info);
311 return (HsInt16)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
315 rts_getInt32 (HaskellObj p)
317 // See comment above:
318 // ASSERT(p->header.info == I32zh_con_info ||
319 // p->header.info == I32zh_static_info);
320 return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
324 #ifdef sparc_HOST_ARCH
325 /* The closures returned by allocateLocal are only guaranteed to be 32 bit
326 aligned, because that's the size of pointers. SPARC v9 can't do
327 misaligned loads/stores, so we have to read the 64bit word in chunks */
330 rts_getInt64 (HaskellObj p)
333 // See comment above:
334 // ASSERT(p->header.info == I64zh_con_info ||
335 // p->header.info == I64zh_static_info);
336 tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);
338 HsInt64 i = (HsInt64)((HsInt64)(tmp[0]) << 32) | (HsInt64)tmp[1];
345 rts_getInt64 (HaskellObj p)
348 // See comment above:
349 // ASSERT(p->header.info == I64zh_con_info ||
350 // p->header.info == I64zh_static_info);
351 tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
355 #endif /* sparc_HOST_ARCH */
359 rts_getWord (HaskellObj p)
361 // See comment above:
362 // ASSERT(p->header.info == Wzh_con_info ||
363 // p->header.info == Wzh_static_info);
364 return (HsWord)(UNTAG_CLOSURE(p)->payload[0]);
368 rts_getWord8 (HaskellObj p)
370 // See comment above:
371 // ASSERT(p->header.info == W8zh_con_info ||
372 // p->header.info == W8zh_static_info);
373 return (HsWord8)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
377 rts_getWord16 (HaskellObj p)
379 // See comment above:
380 // ASSERT(p->header.info == W16zh_con_info ||
381 // p->header.info == W16zh_static_info);
382 return (HsWord16)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
386 rts_getWord32 (HaskellObj p)
388 // See comment above:
389 // ASSERT(p->header.info == W32zh_con_info ||
390 // p->header.info == W32zh_static_info);
391 return (HsWord32)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
395 #ifdef sparc_HOST_ARCH
396 /* The closures returned by allocateLocal are only guaranteed to be 32 bit
397 aligned, because that's the size of pointers. SPARC v9 can't do
398 misaligned loads/stores, so we have to write the 64bit word in chunks */
401 rts_getWord64 (HaskellObj p)
404 // See comment above:
405 // ASSERT(p->header.info == I64zh_con_info ||
406 // p->header.info == I64zh_static_info);
407 tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);
409 HsInt64 i = (HsWord64)((HsWord64)(tmp[0]) << 32) | (HsWord64)tmp[1];
416 rts_getWord64 (HaskellObj p)
419 // See comment above:
420 // ASSERT(p->header.info == W64zh_con_info ||
421 // p->header.info == W64zh_static_info);
422 tmp = (HsWord64*)&(UNTAG_CLOSURE(p)->payload[0]);
430 rts_getFloat (HaskellObj p)
432 // See comment above:
433 // ASSERT(p->header.info == Fzh_con_info ||
434 // p->header.info == Fzh_static_info);
435 return (float)(PK_FLT((P_)UNTAG_CLOSURE(p)->payload));
439 rts_getDouble (HaskellObj p)
441 // See comment above:
442 // ASSERT(p->header.info == Dzh_con_info ||
443 // p->header.info == Dzh_static_info);
444 return (double)(PK_DBL((P_)UNTAG_CLOSURE(p)->payload));
448 rts_getStablePtr (HaskellObj p)
450 // See comment above:
451 // ASSERT(p->header.info == StablePtr_con_info ||
452 // p->header.info == StablePtr_static_info);
453 return (StgStablePtr)(UNTAG_CLOSURE(p)->payload[0]);
457 rts_getPtr (HaskellObj p)
459 // See comment above:
460 // ASSERT(p->header.info == Ptr_con_info ||
461 // p->header.info == Ptr_static_info);
462 return (Capability *)(UNTAG_CLOSURE(p)->payload[0]);
466 rts_getFunPtr (HaskellObj p)
468 // See comment above:
469 // ASSERT(p->header.info == FunPtr_con_info ||
470 // p->header.info == FunPtr_static_info);
471 return (void *)(UNTAG_CLOSURE(p)->payload[0]);
475 rts_getBool (HaskellObj p)
479 info = get_itbl((StgClosure *)UNTAG_CLOSURE(p));
480 if (info->srt_bitmap == 0) { // srt_bitmap is the constructor tag
487 /* -----------------------------------------------------------------------------
489 -------------------------------------------------------------------------- */
491 INLINE_HEADER void pushClosure (StgTSO *tso, StgWord c) {
497 createGenThread (Capability *cap, nat stack_size, StgClosure *closure)
501 t = createThread (cap, stack_size, NO_PRI);
503 t = createThread (cap, stack_size);
505 pushClosure(t, (W_)closure);
506 pushClosure(t, (W_)&stg_enter_info);
511 createIOThread (Capability *cap, nat stack_size, StgClosure *closure)
515 t = createThread (cap, stack_size, NO_PRI);
517 t = createThread (cap, stack_size);
519 pushClosure(t, (W_)&stg_noforceIO_info);
520 pushClosure(t, (W_)&stg_ap_v_info);
521 pushClosure(t, (W_)closure);
522 pushClosure(t, (W_)&stg_enter_info);
527 * Same as above, but also evaluate the result of the IO action
528 * to whnf while we're at it.
532 createStrictIOThread(Capability *cap, nat stack_size, StgClosure *closure)
536 t = createThread(cap, stack_size, NO_PRI);
538 t = createThread(cap, stack_size);
540 pushClosure(t, (W_)&stg_forceIO_info);
541 pushClosure(t, (W_)&stg_ap_v_info);
542 pushClosure(t, (W_)closure);
543 pushClosure(t, (W_)&stg_enter_info);
547 /* ----------------------------------------------------------------------------
548 Evaluating Haskell expressions
549 ------------------------------------------------------------------------- */
552 rts_eval (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
556 tso = createGenThread(cap, RtsFlags.GcFlags.initialStkSize, p);
557 return scheduleWaitThread(tso,ret,cap);
561 rts_eval_ (Capability *cap, HaskellObj p, unsigned int stack_size,
562 /*out*/HaskellObj *ret)
566 tso = createGenThread(cap, stack_size, p);
567 return scheduleWaitThread(tso,ret,cap);
571 * rts_evalIO() evaluates a value of the form (IO a), forcing the action's
572 * result to WHNF before returning.
575 rts_evalIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
579 tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
580 return scheduleWaitThread(tso,ret,cap);
584 * rts_evalStableIO() is suitable for calling from Haskell. It
585 * evaluates a value of the form (StablePtr (IO a)), forcing the
586 * action's result to WHNF before returning. The result is returned
590 rts_evalStableIO (Capability *cap, HsStablePtr s, /*out*/HsStablePtr *ret)
594 SchedulerStatus stat;
596 p = (StgClosure *)deRefStablePtr(s);
597 tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
598 // async exceptions are always blocked by default in the created
599 // thread. See #1048.
600 tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
601 cap = scheduleWaitThread(tso,&r,cap);
602 stat = rts_getSchedStatus(cap);
604 if (stat == Success && ret != NULL) {
606 *ret = getStablePtr((StgPtr)r);
613 * Like rts_evalIO(), but doesn't force the action's result.
616 rts_evalLazyIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
620 tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
621 return scheduleWaitThread(tso,ret,cap);
625 rts_evalLazyIO_ (Capability *cap, HaskellObj p, unsigned int stack_size,
626 /*out*/HaskellObj *ret)
630 tso = createIOThread(cap, stack_size, p);
631 return scheduleWaitThread(tso,ret,cap);
634 /* Convenience function for decoding the returned status. */
637 rts_checkSchedStatus (char* site, Capability *cap)
639 SchedulerStatus rc = cap->running_task->stat;
644 errorBelch("%s: uncaught exception",site);
645 stg_exit(EXIT_FAILURE);
647 errorBelch("%s: interrupted", site);
648 stg_exit(EXIT_FAILURE);
650 errorBelch("%s: Return code (%d) not ok",(site),(rc));
651 stg_exit(EXIT_FAILURE);
656 rts_getSchedStatus (Capability *cap)
658 return cap->running_task->stat;
667 // ToDo: get rid of this lock in the common case. We could store
668 // a free Task in thread-local storage, for example. That would
669 // leave just one lock on the path into the RTS: cap->lock when
670 // acquiring the Capability.
671 ACQUIRE_LOCK(&sched_mutex);
672 task = newBoundTask();
673 RELEASE_LOCK(&sched_mutex);
676 waitForReturnCapability(&cap, task);
677 return (Capability *)cap;
680 // Exiting the RTS: we hold a Capability that is not necessarily the
681 // same one that was originally returned by rts_lock(), because
682 // rts_evalIO() etc. may return a new one. Now that we have
683 // investigated the return value, we can release the Capability,
684 // and free the Task (in that order).
687 rts_unlock (Capability *cap)
691 task = cap->running_task;
692 ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
694 // Now release the Capability. With the capability released, GC
695 // may happen. NB. does not try to put the current Task on the
697 // NB. keep cap->lock held while we call boundTaskExiting(). This
698 // is necessary during shutdown, where we want the invariant that
699 // after shutdownCapability(), all the Tasks associated with the
700 // Capability have completed their shutdown too. Otherwise we
701 // could have boundTaskExiting()/workerTaskStop() running at some
702 // random point in the future, which causes problems for
703 // freeTaskManager().
704 ACQUIRE_LOCK(&cap->lock);
705 releaseCapability_(cap,rtsFalse);
707 // Finally, we can release the Task to the free list.
708 boundTaskExiting(task);
709 RELEASE_LOCK(&cap->lock);