SPARC NCG: Also do misaligned reads
[ghc-hetmet.git] / rts / RtsAPI.c
1 /* ----------------------------------------------------------------------------
2  *
3  * (c) The GHC Team, 1998-2001
4  *
5  * API for invoking Haskell functions via the RTS
6  *
7  * --------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11 #include "OSThreads.h"
12 #include "RtsAPI.h"
13 #include "SchedAPI.h"
14 #include "RtsFlags.h"
15 #include "RtsUtils.h"
16 #include "Prelude.h"
17 #include "Schedule.h"
18 #include "Capability.h"
19 #include "Stable.h"
20
21 #include <stdlib.h>
22
23 /* ----------------------------------------------------------------------------
24    Building Haskell objects from C datatypes.
25
26    TODO: Currently this code does not tag created pointers,
27          however it is not unsafe (the contructor code will do it)
28          just inefficient.
29    ------------------------------------------------------------------------- */
30 HaskellObj
31 rts_mkChar (Capability *cap, HsChar c)
32 {
33   StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
34   SET_HDR(p, Czh_con_info, CCS_SYSTEM);
35   p->payload[0]  = (StgClosure *)(StgWord)(StgChar)c;
36   return p;
37 }
38
39 HaskellObj
40 rts_mkInt (Capability *cap, HsInt i)
41 {
42   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
43   SET_HDR(p, Izh_con_info, CCS_SYSTEM);
44   p->payload[0]  = (StgClosure *)(StgInt)i;
45   return p;
46 }
47
48 HaskellObj
49 rts_mkInt8 (Capability *cap, HsInt8 i)
50 {
51   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
52   SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
53   /* Make sure we mask out the bits above the lowest 8 */
54   p->payload[0]  = (StgClosure *)(StgInt)i;
55   return p;
56 }
57
58 HaskellObj
59 rts_mkInt16 (Capability *cap, HsInt16 i)
60 {
61   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
62   SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
63   /* Make sure we mask out the relevant bits */
64   p->payload[0]  = (StgClosure *)(StgInt)i;
65   return p;
66 }
67
68 HaskellObj
69 rts_mkInt32 (Capability *cap, HsInt32 i)
70 {
71   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
72   SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
73   p->payload[0]  = (StgClosure *)(StgInt)i;
74   return p;
75 }
76
77
#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers. SPARC v9 can't do
   misaligned loads/stores, so we have to write the 64bit word in chunks         */

/* Box a HsInt64 as an I64# closure, storing the value as two 32-bit
   halves: high word in slot 0, low word in slot 1. */
HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i_)
{
  StgInt64 i   = (StgInt64)i_;
  StgInt32 *tmp;

  StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
  SET_HDR(p, I64zh_con_info, CCS_SYSTEM);

  /* View the 32-bit-aligned payload as two 32-bit slots. */
  tmp    = (StgInt32*)&(p->payload[0]);

  tmp[0] = (StgInt32)((StgInt64)i >> 32);
  tmp[1] = (StgInt32)i;         /* truncate high 32 bits */

  return p;
}

#else

/* Box a HsInt64 as an I64# closure with a single 64-bit store; alignment
   of the payload is sufficient on non-SPARC targets. */
HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i)
{
  llong *tmp;
  StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
  SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
  tmp  = (llong*)&(p->payload[0]);
  *tmp = (StgInt64)i;
  return p;
}

#endif /* sparc_HOST_ARCH */
114
115
116 HaskellObj
117 rts_mkWord (Capability *cap, HsWord i)
118 {
119   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
120   SET_HDR(p, Wzh_con_info, CCS_SYSTEM);
121   p->payload[0]  = (StgClosure *)(StgWord)i;
122   return p;
123 }
124
125 HaskellObj
126 rts_mkWord8 (Capability *cap, HsWord8 w)
127 {
128   /* see rts_mkInt* comments */
129   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
130   SET_HDR(p, W8zh_con_info, CCS_SYSTEM);
131   p->payload[0]  = (StgClosure *)(StgWord)(w & 0xff);
132   return p;
133 }
134
135 HaskellObj
136 rts_mkWord16 (Capability *cap, HsWord16 w)
137 {
138   /* see rts_mkInt* comments */
139   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
140   SET_HDR(p, W16zh_con_info, CCS_SYSTEM);
141   p->payload[0]  = (StgClosure *)(StgWord)(w & 0xffff);
142   return p;
143 }
144
145 HaskellObj
146 rts_mkWord32 (Capability *cap, HsWord32 w)
147 {
148   /* see rts_mkInt* comments */
149   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
150   SET_HDR(p, W32zh_con_info, CCS_SYSTEM);
151   p->payload[0]  = (StgClosure *)(StgWord)(w & 0xffffffff);
152   return p;
153 }
154
/* Box a HsWord64 as a W64# closure with a single 64-bit store.
   NOTE(review): unlike rts_mkInt64 there is no SPARC chunked variant of
   this function, although the same alignment caveat would seem to apply
   — confirm whether that is intentional. */
HaskellObj
rts_mkWord64 (Capability *cap, HsWord64 w)
{
  ullong *tmp;

  StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
  SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
  tmp  = (ullong*)&(p->payload[0]);
  *tmp = (StgWord64)w;
  return p;
}
167
168 HaskellObj
169 rts_mkFloat (Capability *cap, HsFloat f)
170 {
171   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
172   SET_HDR(p, Fzh_con_info, CCS_SYSTEM);
173   ASSIGN_FLT((P_)p->payload, (StgFloat)f);
174   return p;
175 }
176
177 HaskellObj
178 rts_mkDouble (Capability *cap, HsDouble d)
179 {
180   StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,sizeofW(StgDouble)));
181   SET_HDR(p, Dzh_con_info, CCS_SYSTEM);
182   ASSIGN_DBL((P_)p->payload, (StgDouble)d);
183   return p;
184 }
185
186 HaskellObj
187 rts_mkStablePtr (Capability *cap, HsStablePtr s)
188 {
189   StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
190   SET_HDR(p, StablePtr_con_info, CCS_SYSTEM);
191   p->payload[0]  = (StgClosure *)s;
192   return p;
193 }
194
195 HaskellObj
196 rts_mkPtr (Capability *cap, HsPtr a)
197 {
198   StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
199   SET_HDR(p, Ptr_con_info, CCS_SYSTEM);
200   p->payload[0]  = (StgClosure *)a;
201   return p;
202 }
203
204 HaskellObj
205 rts_mkFunPtr (Capability *cap, HsFunPtr a)
206 {
207   StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
208   SET_HDR(p, FunPtr_con_info, CCS_SYSTEM);
209   p->payload[0]  = (StgClosure *)a;
210   return p;
211 }
212
213 HaskellObj
214 rts_mkBool (Capability *cap STG_UNUSED, HsBool b)
215 {
216   if (b) {
217     return (StgClosure *)True_closure;
218   } else {
219     return (StgClosure *)False_closure;
220   }
221 }
222
/* Build a lazy thunk (unpackCString# s) that yields a Haskell String.
   The C string is not copied here — the thunk retains the boxed pointer,
   so 's' must remain valid until the thunk is forced. */
HaskellObj
rts_mkString (Capability *cap, char *s)
{
  return rts_apply(cap, (StgClosure *)unpackCString_closure, rts_mkPtr(cap,s));
}
228
/* Build the unevaluated application (f arg): a two-field updatable
   application thunk using the generic stg_ap_2_upd entry code.  The
   result is a thunk; nothing is evaluated here. */
HaskellObj
rts_apply (Capability *cap, HaskellObj f, HaskellObj arg)
{
    StgThunk *ap;

    ap = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk) + 2);
    SET_HDR(ap, (StgInfoTable *)&stg_ap_2_upd_info, CCS_SYSTEM);
    ap->payload[0] = f;
    ap->payload[1] = arg;
    return (StgClosure *)ap;
}
240
241 /* ----------------------------------------------------------------------------
242    Deconstructing Haskell objects
243
244    We would like to assert that we have the right kind of object in
245    each case, but this is problematic because in GHCi the info table
246    for the D# constructor (say) might be dynamically loaded.  Hence we
247    omit these assertions for now.
248    ------------------------------------------------------------------------- */
249
250 HsChar
251 rts_getChar (HaskellObj p)
252 {
253     // See comment above:
254     // ASSERT(p->header.info == Czh_con_info ||
255     //        p->header.info == Czh_static_info);
256     return (StgChar)(StgWord)(UNTAG_CLOSURE(p)->payload[0]);
257 }
258
259 HsInt
260 rts_getInt (HaskellObj p)
261 {
262     // See comment above:
263     // ASSERT(p->header.info == Izh_con_info ||
264     //        p->header.info == Izh_static_info);
265     return (HsInt)(UNTAG_CLOSURE(p)->payload[0]);
266 }
267
268 HsInt8
269 rts_getInt8 (HaskellObj p)
270 {
271     // See comment above:
272     // ASSERT(p->header.info == I8zh_con_info ||
273     //        p->header.info == I8zh_static_info);
274     return (HsInt8)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
275 }
276
277 HsInt16
278 rts_getInt16 (HaskellObj p)
279 {
280     // See comment above:
281     // ASSERT(p->header.info == I16zh_con_info ||
282     //        p->header.info == I16zh_static_info);
283     return (HsInt16)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
284 }
285
286 HsInt32
287 rts_getInt32 (HaskellObj p)
288 {
289     // See comment above:
290     // ASSERT(p->header.info == I32zh_con_info ||
291     //        p->header.info == I32zh_static_info);
292   return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
293 }
294
295
296 #ifdef sparc_HOST_ARCH
297 /* The closures returned by allocateLocal are only guaranteed to be 32 bit
298    aligned, because that's the size of pointers. SPARC v9 can't do
299    misaligned loads/stores, so we have to read the 64bit word in chunks         */
300
301 HsInt64
302 rts_getInt64 (HaskellObj p)
303 {
304     HsInt64* tmp;
305     // See comment above:
306     // ASSERT(p->header.info == I64zh_con_info ||
307     //        p->header.info == I64zh_static_info);
308     tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
309     return *tmp;
310 }
311
312 #else
313
314 HsInt64
315 rts_getInt64 (HaskellObj p)
316 {
317     HsInt32* tmp;
318     // See comment above:
319     // ASSERT(p->header.info == I64zh_con_info ||
320     //        p->header.info == I64zh_static_info);
321     tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);
322
323     HsInt64 i   = (HsInt64)(tmp[0] << 32) | (HsInt64)tmp[1];
324     return i
325 }
326
327 #endif /* sparc_HOST_ARCH */
328
329
330 HsWord
331 rts_getWord (HaskellObj p)
332 {
333     // See comment above:
334     // ASSERT(p->header.info == Wzh_con_info ||
335     //        p->header.info == Wzh_static_info);
336     return (HsWord)(UNTAG_CLOSURE(p)->payload[0]);
337 }
338
339 HsWord8
340 rts_getWord8 (HaskellObj p)
341 {
342     // See comment above:
343     // ASSERT(p->header.info == W8zh_con_info ||
344     //        p->header.info == W8zh_static_info);
345     return (HsWord8)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
346 }
347
348 HsWord16
349 rts_getWord16 (HaskellObj p)
350 {
351     // See comment above:
352     // ASSERT(p->header.info == W16zh_con_info ||
353     //        p->header.info == W16zh_static_info);
354     return (HsWord16)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
355 }
356
357 HsWord32
358 rts_getWord32 (HaskellObj p)
359 {
360     // See comment above:
361     // ASSERT(p->header.info == W32zh_con_info ||
362     //        p->header.info == W32zh_static_info);
363     return (HsWord32)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
364 }
365
366
/* Unbox a W64# closure with a single 64-bit load.
   NOTE(review): there is no SPARC chunked variant of this function,
   unlike rts_getInt64 — confirm whether the alignment caveat applies
   here too. */
HsWord64
rts_getWord64 (HaskellObj p)
{
    HsWord64* tmp;
    // See comment above:
    // ASSERT(p->header.info == W64zh_con_info ||
    //        p->header.info == W64zh_static_info);
    tmp = (HsWord64*)&(UNTAG_CLOSURE(p)->payload[0]);
    return *tmp;
}
377
378 HsFloat
379 rts_getFloat (HaskellObj p)
380 {
381     // See comment above:
382     // ASSERT(p->header.info == Fzh_con_info ||
383     //        p->header.info == Fzh_static_info);
384     return (float)(PK_FLT((P_)UNTAG_CLOSURE(p)->payload));
385 }
386
387 HsDouble
388 rts_getDouble (HaskellObj p)
389 {
390     // See comment above:
391     // ASSERT(p->header.info == Dzh_con_info ||
392     //        p->header.info == Dzh_static_info);
393     return (double)(PK_DBL((P_)UNTAG_CLOSURE(p)->payload));
394 }
395
396 HsStablePtr
397 rts_getStablePtr (HaskellObj p)
398 {
399     // See comment above:
400     // ASSERT(p->header.info == StablePtr_con_info ||
401     //        p->header.info == StablePtr_static_info);
402     return (StgStablePtr)(UNTAG_CLOSURE(p)->payload[0]);
403 }
404
405 HsPtr
406 rts_getPtr (HaskellObj p)
407 {
408     // See comment above:
409     // ASSERT(p->header.info == Ptr_con_info ||
410     //        p->header.info == Ptr_static_info);
411     return (Capability *)(UNTAG_CLOSURE(p)->payload[0]);
412 }
413
414 HsFunPtr
415 rts_getFunPtr (HaskellObj p)
416 {
417     // See comment above:
418     // ASSERT(p->header.info == FunPtr_con_info ||
419     //        p->header.info == FunPtr_static_info);
420     return (void *)(UNTAG_CLOSURE(p)->payload[0]);
421 }
422
/* Unbox a Bool closure: constructor tag 0 is False, any other tag is
   True.  For constructor info tables the srt_bitmap field holds the
   constructor tag, so it can be read directly. */
HsBool
rts_getBool (HaskellObj p)
{
    StgInfoTable *info;

    info = get_itbl((StgClosure *)UNTAG_CLOSURE(p));
    if (info->srt_bitmap == 0) { // srt_bitmap is the constructor tag
        return 0;
    } else {
        return 1;
    }
}
435
436 /* -----------------------------------------------------------------------------
437    Creating threads
438    -------------------------------------------------------------------------- */
439
/* Push one word onto tso's stack (the stack grows downwards). */
INLINE_HEADER void pushClosure   (StgTSO *tso, StgWord c) {
  tso->sp--;
  tso->sp[0] = (W_) c;
}
444
/* Create a thread whose stack holds an enter frame on top of 'closure',
   so when scheduled it simply evaluates the closure.  The GRAN build of
   createThread takes an extra priority argument. */
StgTSO *
createGenThread (Capability *cap, nat stack_size,  StgClosure *closure)
{
  StgTSO *t;
#if defined(GRAN)
  t = createThread (cap, stack_size, NO_PRI);
#else
  t = createThread (cap, stack_size);
#endif
  pushClosure(t, (W_)closure);
  pushClosure(t, (W_)&stg_enter_info);
  return t;
}
458
/* Create a thread that runs an IO action: the stack is set up (top to
   bottom) as enter / closure / ap_v / noforceIO, i.e. enter the closure,
   apply it to the void "world" argument, and return without forcing the
   action's result (contrast createStrictIOThread below). */
StgTSO *
createIOThread (Capability *cap, nat stack_size,  StgClosure *closure)
{
  StgTSO *t;
#if defined(GRAN)
  t = createThread (cap, stack_size, NO_PRI);
#else
  t = createThread (cap, stack_size);
#endif
  pushClosure(t, (W_)&stg_noforceIO_info);
  pushClosure(t, (W_)&stg_ap_v_info);
  pushClosure(t, (W_)closure);
  pushClosure(t, (W_)&stg_enter_info);
  return t;
}
474
475 /*
476  * Same as above, but also evaluate the result of the IO action
477  * to whnf while we're at it.
478  */
479
/* As createIOThread, but with stg_forceIO at the bottom of the stack so
   the IO action's result is also evaluated to WHNF before the thread
   completes. */
StgTSO *
createStrictIOThread(Capability *cap, nat stack_size,  StgClosure *closure)
{
  StgTSO *t;
#if defined(GRAN)
  t = createThread(cap, stack_size, NO_PRI);
#else
  t = createThread(cap, stack_size);
#endif
  pushClosure(t, (W_)&stg_forceIO_info);
  pushClosure(t, (W_)&stg_ap_v_info);
  pushClosure(t, (W_)closure);
  pushClosure(t, (W_)&stg_enter_info);
  return t;
}
495
496 /* ----------------------------------------------------------------------------
497    Evaluating Haskell expressions
498    ------------------------------------------------------------------------- */
499
500 Capability *
501 rts_eval (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
502 {
503     StgTSO *tso;
504     
505     tso = createGenThread(cap, RtsFlags.GcFlags.initialStkSize, p);
506     return scheduleWaitThread(tso,ret,cap);
507 }
508
509 Capability *
510 rts_eval_ (Capability *cap, HaskellObj p, unsigned int stack_size, 
511            /*out*/HaskellObj *ret)
512 {
513     StgTSO *tso;
514
515     tso = createGenThread(cap, stack_size, p);
516     return scheduleWaitThread(tso,ret,cap);
517 }
518
519 /*
520  * rts_evalIO() evaluates a value of the form (IO a), forcing the action's
521  * result to WHNF before returning.
522  */
523 Capability *
524 rts_evalIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
525 {
526     StgTSO* tso; 
527     
528     tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
529     return scheduleWaitThread(tso,ret,cap);
530 }
531
532 /*
533  * rts_evalStableIO() is suitable for calling from Haskell.  It
534  * evaluates a value of the form (StablePtr (IO a)), forcing the
535  * action's result to WHNF before returning.  The result is returned
536  * in a StablePtr.
537  */
/* Run the IO action behind stable pointer 's' (a StablePtr (IO a)),
   forcing its result to WHNF.  On Success the result is re-wrapped in a
   new stable pointer and stored in *ret (when ret is non-NULL); on any
   other status *ret is left untouched.  Suitable for calling from
   Haskell. */
Capability *
rts_evalStableIO (Capability *cap, HsStablePtr s, /*out*/HsStablePtr *ret)
{
    StgTSO* tso;
    StgClosure *p, *r;
    SchedulerStatus stat;
    
    p = (StgClosure *)deRefStablePtr(s);
    tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    // async exceptions are always blocked by default in the created
    // thread.  See #1048.
    tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
    cap = scheduleWaitThread(tso,&r,cap);
    stat = rts_getSchedStatus(cap);

    if (stat == Success && ret != NULL) {
        ASSERT(r != NULL);
        *ret = getStablePtr((StgPtr)r);
    }

    return cap;
}
560
561 /*
562  * Like rts_evalIO(), but doesn't force the action's result.
563  */
564 Capability *
565 rts_evalLazyIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
566 {
567     StgTSO *tso;
568
569     tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
570     return scheduleWaitThread(tso,ret,cap);
571 }
572
573 Capability *
574 rts_evalLazyIO_ (Capability *cap, HaskellObj p, unsigned int stack_size, 
575                  /*out*/HaskellObj *ret)
576 {
577     StgTSO *tso;
578
579     tso = createIOThread(cap, stack_size, p);
580     return scheduleWaitThread(tso,ret,cap);
581 }
582
583 /* Convenience function for decoding the returned status. */
584
/* Convenience function for decoding the returned status: returns
   normally on Success, otherwise reports an error mentioning 'site' and
   terminates the process via stg_exit (which is why no 'break' follows
   the non-Success cases). */
void
rts_checkSchedStatus (char* site, Capability *cap)
{
    SchedulerStatus rc = cap->running_task->stat;
    switch (rc) {
    case Success:
        return;
    case Killed:
        errorBelch("%s: uncaught exception",site);
        stg_exit(EXIT_FAILURE);
    case Interrupted:
        errorBelch("%s: interrupted", site);
        stg_exit(EXIT_FAILURE);
    default:
        errorBelch("%s: Return code (%d) not ok",(site),(rc));  
        stg_exit(EXIT_FAILURE);
    }
}
603
/* Return the scheduler status of the Task currently running on cap. */
SchedulerStatus
rts_getSchedStatus (Capability *cap)
{
    return cap->running_task->stat;
}
609
/* Enter the RTS from foreign code: bind a Task to the calling OS thread
   (under sched_mutex) and block until a free Capability is acquired.
   Must be paired with rts_unlock(). */
Capability *
rts_lock (void)
{
    Capability *cap;
    Task *task;

    // ToDo: get rid of this lock in the common case.  We could store
    // a free Task in thread-local storage, for example.  That would
    // leave just one lock on the path into the RTS: cap->lock when
    // acquiring the Capability.
    ACQUIRE_LOCK(&sched_mutex);
    task = newBoundTask();
    RELEASE_LOCK(&sched_mutex);

    cap = NULL;
    waitForReturnCapability(&cap, task);
    return (Capability *)cap;
}
628
629 // Exiting the RTS: we hold a Capability that is not necessarily the
630 // same one that was originally returned by rts_lock(), because
631 // rts_evalIO() etc. may return a new one.  Now that we have
632 // investigated the return value, we can release the Capability,
633 // and free the Task (in that order).
634
/* Leave the RTS: release the Capability and retire the calling thread's
   bound Task, in that order, holding cap->lock across both (see the
   shutdown-ordering comment below). */
void
rts_unlock (Capability *cap)
{
    Task *task;

    task = cap->running_task;
    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    // Now release the Capability.  With the capability released, GC
    // may happen.  NB. does not try to put the current Task on the
    // worker queue.
    // NB. keep cap->lock held while we call boundTaskExiting().  This
    // is necessary during shutdown, where we want the invariant that
    // after shutdownCapability(), all the Tasks associated with the
    // Capability have completed their shutdown too.  Otherwise we
    // could have boundTaskExiting()/workerTaskStop() running at some
    // random point in the future, which causes problems for
    // freeTaskManager().
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap,rtsFalse);

    // Finally, we can release the Task to the free list.
    boundTaskExiting(task);
    RELEASE_LOCK(&cap->lock);
}