[project @ 1999-12-29 12:17:36 by simonpj]
diff --git a/ghc/rts/PrimOps.hc b/ghc/rts/PrimOps.hc
index 0a18aaf..1f48c3b 100644
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: PrimOps.hc,v 1.31 1999/10/13 16:39:23 simonmar Exp $
+ * $Id: PrimOps.hc,v 1.36 1999/12/08 14:21:52 simonmar Exp $
  *
  * (c) The GHC Team, 1998-1999
  *
@@ -55,7 +55,7 @@ W_ GHC_ZCCReturnable_static_info[0];
  */
 
 /*------ All Regs available */
-#ifdef REG_R8
+#if defined(REG_R8)
 # define RET_P(a)     R1.w = (W_)(a); JMP_(ENTRY_CODE(Sp[0]));
 # define RET_N(a)     RET_P(a)
 
@@ -80,15 +80,60 @@ W_ GHC_ZCCReturnable_static_info[0];
         R4.w = (W_)(d); R5.w = (W_)(e); R6.w = (W_)(f); \
        JMP_(ENTRY_CODE(Sp[0]));
 
-#else
-
-#if defined(REG_R7) || defined(REG_R6) || defined(REG_R5) || \
-    defined(REG_R4) || defined(REG_R3) || defined(REG_R2)
+#elif defined(REG_R7) || defined(REG_R6) || defined(REG_R5) || \
+      defined(REG_R4) || defined(REG_R3)
 # error RET_n macros not defined for this setup.
-#else
+
+/*------ 2 Registers available */
+#elif defined(REG_R2)
+
+# define RET_P(a)     R1.w = (W_)(a); JMP_(ENTRY_CODE(Sp[0]));
+# define RET_N(a)     RET_P(a)
+
+# define RET_PP(a,b)   R1.w = (W_)(a); R2.w = (W_)(b); \
+                      JMP_(ENTRY_CODE(Sp[0]));
+# define RET_NN(a,b)   RET_PP(a,b)
+# define RET_NP(a,b)   RET_PP(a,b)
+
+# define RET_PPP(a,b,c) \
+       R1.w = (W_)(a); R2.w = (W_)(b); Sp[-1] = (W_)(c); Sp -= 1; \
+       JMP_(ENTRY_CODE(Sp[1]));
+# define RET_NNP(a,b,c) \
+       R1.w = (W_)(a); R2.w = (W_)(b); Sp[-1] = (W_)(c); Sp -= 1; \
+       JMP_(ENTRY_CODE(Sp[1]));
+
+# define RET_NNNP(a,b,c,d)                     \
+       R1.w = (W_)(a);                         \
+        R2.w = (W_)(b);                        \
+    /*  Sp[-3] = ARGTAG(1); */                 \
+        Sp[-2] = (W_)(c);                      \
+        Sp[-1] = (W_)(d);                      \
+        Sp -= 3;                               \
+        JMP_(ENTRY_CODE(Sp[3]));
+
+# define RET_NPNP(a,b,c,d)                     \
+       R1.w = (W_)(a);                         \
+        R2.w = (W_)(b);                        \
+    /*  Sp[-3] = ARGTAG(1); */                 \
+        Sp[-2] = (W_)(c);                      \
+        Sp[-1] = (W_)(d);                      \
+        Sp -= 3;                               \
+        JMP_(ENTRY_CODE(Sp[3]));
+
+# define RET_NNPNNP(a,b,c,d,e,f)               \
+        R1.w = (W_)(a);                                \
+       R2.w = (W_)(b);                         \
+       Sp[-6] = (W_)(c);                       \
+       /* Sp[-5] = ARGTAG(1); */               \
+       Sp[-4] = (W_)(d);                       \
+       /* Sp[-3] = ARGTAG(1); */               \
+       Sp[-2] = (W_)(e);                       \
+       Sp[-1] = (W_)(f);                       \
+       Sp -= 6;                                \
+       JMP_(ENTRY_CODE(Sp[6]));
 
 /*------ 1 Register available */
-#ifdef REG_R1
+#elif defined(REG_R1)
 # define RET_P(a)     R1.w = (W_)(a); JMP_(ENTRY_CODE(Sp[0]));
 # define RET_N(a)     RET_P(a)
 
@@ -176,9 +221,6 @@ W_ GHC_ZCCReturnable_static_info[0];
 
 #endif
 
-#endif
-#endif
-
 /*-----------------------------------------------------------------------------
   Array Primitives
 
@@ -662,10 +704,13 @@ FN_(name)                                                         \
   FE_                                                                  \
 }
 
-GMP_TAKE2_RET1(plusIntegerzh_fast,  mpz_add);
-GMP_TAKE2_RET1(minusIntegerzh_fast, mpz_sub);
-GMP_TAKE2_RET1(timesIntegerzh_fast, mpz_mul);
-GMP_TAKE2_RET1(gcdIntegerzh_fast,   mpz_gcd);
+GMP_TAKE2_RET1(plusIntegerzh_fast,     mpz_add);
+GMP_TAKE2_RET1(minusIntegerzh_fast,    mpz_sub);
+GMP_TAKE2_RET1(timesIntegerzh_fast,    mpz_mul);
+GMP_TAKE2_RET1(gcdIntegerzh_fast,      mpz_gcd);
+GMP_TAKE2_RET1(quotIntegerzh_fast,     mpz_tdiv_q);
+GMP_TAKE2_RET1(remIntegerzh_fast,      mpz_tdiv_r);
+GMP_TAKE2_RET1(divExactIntegerzh_fast, mpz_divexact);
 
 GMP_TAKE2_RET2(quotRemIntegerzh_fast, mpz_tdiv_qr);
 GMP_TAKE2_RET2(divModIntegerzh_fast,  mpz_fdiv_qr);
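
The three new wrappers just pass different GMP entry points to the same GMP_TAKE2_RET1 boilerplate: truncating quotient, truncating remainder, and exact division. A small standalone program (assuming libgmp is available; link with -lgmp) showing what those GMP calls compute:

#include <stdio.h>
#include <gmp.h>

/* Standalone GMP check, independent of the RTS wrappers above.
 * Build with: cc gmpdemo.c -lgmp */
int main(void)
{
    mpz_t n, d, q, r;

    mpz_init_set_si(n, -7);
    mpz_init_set_si(d, 2);
    mpz_init(q);
    mpz_init(r);

    mpz_tdiv_q(q, n, d);       /* quotIntegerzh: truncates towards zero -> -3 */
    mpz_tdiv_r(r, n, d);       /* remIntegerzh:  same sign as n         -> -1 */
    gmp_printf("quot=%Zd rem=%Zd\n", q, r);

    mpz_set_si(n, 12);
    mpz_set_si(d, 4);
    mpz_divexact(q, n, d);     /* divExactIntegerzh: d must divide n exactly */
    gmp_printf("divExact=%Zd\n", q);

    mpz_clear(n); mpz_clear(d); mpz_clear(q); mpz_clear(r);
    return 0;
}
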
@@ -748,6 +793,7 @@ FN_(forkzh_fast)
   /* create it right now, return ThreadID in R1 */
   R1.t = RET_STGCALL2(StgTSO *, createIOThread, 
                      RtsFlags.GcFlags.initialStkSize, R1.cl);
+  STGCALL1(scheduleThread, R1.t);
       
   /* switch at the earliest opportunity */ 
   context_switch = 1;
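
forkzh_fast now creates the TSO and then hands it to the scheduler explicitly with scheduleThread before requesting an early context switch. A standalone sketch of that create/enqueue/switch shape (plain C; the thread and queue types and the function names here are stand-ins, not the RTS's createIOThread/scheduleThread):

#include <stdlib.h>

/* Standalone sketch only: Thread, RunQueue, create_io_thread and
 * schedule_thread stand in for the RTS's TSOs, createIOThread and
 * scheduleThread; they are not the real definitions. */
typedef struct Thread { struct Thread *link; void *closure; } Thread;
typedef struct { Thread *head, *tail; } RunQueue;

static RunQueue run_queue;
static volatile int context_switch;        /* polled by the scheduler */

static Thread *create_io_thread(size_t stack_size, void *closure)
{
    Thread *t = calloc(1, sizeof *t);
    (void)stack_size;                      /* stack size ignored in the sketch */
    t->closure = closure;
    return t;                              /* created but not yet runnable */
}

static void schedule_thread(Thread *t)     /* append to the run queue */
{
    if (run_queue.tail) run_queue.tail->link = t;
    else                run_queue.head = t;
    run_queue.tail = t;
}

static Thread *fork_prim(void *closure)
{
    Thread *t = create_io_thread(1024, closure);
    schedule_thread(t);        /* the new STGCALL1(scheduleThread, R1.t) */
    context_switch = 1;        /* switch at the earliest opportunity */
    return t;                  /* the ThreadId handed back in R1 */
}
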
@@ -763,43 +809,6 @@ FN_(yieldzh_fast)
   FE_
 }
 
-FN_(killThreadzh_fast)
-{
-  FB_
-  /* args: R1.p = TSO to kill, R2.p = Exception */
-
-  /* The thread is dead, but the TSO sticks around for a while.  That's why
-   * we don't have to explicitly remove it from any queues it might be on.
-   */
-
-  /* We might have killed ourselves.  In which case, better be *very*
-   * careful.  If the exception killed us, then return to the scheduler.
-   * If the exception went to a catch frame, we'll just continue from
-   * the handler.
-   */
-  if (R1.t == CurrentTSO) {
-       SaveThreadState();      /* inline! */
-       STGCALL2(raiseAsync, R1.t, R2.cl);
-       if (CurrentTSO->whatNext == ThreadKilled) {
-               R1.w = ThreadYielding;
-               JMP_(StgReturn);
-       }
-       LoadThreadState();
-       if (CurrentTSO->whatNext == ThreadEnterGHC) {
-               R1.w = Sp[0];
-               Sp++;
-               JMP_(GET_ENTRY(R1.cl));
-       } else {
-               barf("killThreadzh_fast");
-       }
-  } else {
-       STGCALL2(raiseAsync, R1.t, R2.cl);
-  }
-
-  JMP_(ENTRY_CODE(Sp[0]));
-  FE_
-}
-
 FN_(newMVarzh_fast)
 {
   StgMVar *mvar;
@@ -826,16 +835,23 @@ FN_(takeMVarzh_fast)
 {
   StgMVar *mvar;
   StgClosure *val;
+  const StgInfoTable *info;
 
   FB_
   /* args: R1 = MVar closure */
 
   mvar = (StgMVar *)R1.p;
 
+#ifdef SMP
+  info = LOCK_CLOSURE(mvar);
+#else
+  info = GET_INFO(mvar);
+#endif
+
   /* If the MVar is empty, put ourselves on its blocking queue,
    * and wait until we're woken up.
    */
-  if (GET_INFO(mvar) != &FULL_MVAR_info) {
+  if (info == &EMPTY_MVAR_info) {
     if (mvar->head == (StgTSO *)&END_TSO_QUEUE_closure) {
       mvar->head = CurrentTSO;
     } else {
@@ -846,13 +862,21 @@ FN_(takeMVarzh_fast)
     CurrentTSO->block_info.closure = (StgClosure *)mvar;
     mvar->tail = CurrentTSO;
 
+#ifdef SMP
+    /* unlock the MVar */
+    mvar->header.info = &EMPTY_MVAR_info;
+#endif
     BLOCK(R1_PTR, takeMVarzh_fast);
   }
 
-  SET_INFO(mvar,&EMPTY_MVAR_info);
   val = mvar->value;
   mvar->value = (StgClosure *)&END_TSO_QUEUE_closure;
 
+  /* do this last... we might have locked the MVar in the SMP case,
+   * and writing the info pointer will unlock it.
+   */
+  SET_INFO(mvar,&EMPTY_MVAR_info);
+
   TICK_RET_UNBOXED_TUP(1);
   RET_P(val);
   FE_
@@ -861,17 +885,24 @@ FN_(takeMVarzh_fast)
 FN_(putMVarzh_fast)
 {
   StgMVar *mvar;
+  const StgInfoTable *info;
 
   FB_
   /* args: R1 = MVar, R2 = value */
 
   mvar = (StgMVar *)R1.p;
-  if (GET_INFO(mvar) == &FULL_MVAR_info) {
+
+#ifdef SMP
+  info = LOCK_CLOSURE(mvar);
+#else
+  info = GET_INFO(mvar);
+#endif
+
+  if (info == &FULL_MVAR_info) {
     fprintf(stderr, "putMVar#: MVar already full.\n");
     stg_exit(EXIT_FAILURE);
   }
   
-  SET_INFO(mvar,&FULL_MVAR_info);
   mvar->value = R2.cl;
 
   /* wake up the first thread on the queue, it will continue with the
@@ -885,6 +916,9 @@ FN_(putMVarzh_fast)
     }
   }
 
+  /* unlocks the MVar in the SMP case */
+  SET_INFO(mvar,&FULL_MVAR_info);
+
   /* ToDo: yield here for better communication performance? */
   JMP_(ENTRY_CODE(Sp[0]));
   FE_
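
In the SMP build, takeMVarzh_fast and putMVarzh_fast above now treat the MVar's info pointer as its lock: LOCK_CLOSURE grabs it, and the final SET_INFO is deliberately the last write because storing the info pointer is what releases the lock. A standalone sketch of that discipline using C11 atomics (illustration only; LOCK_CLOSURE's real definition lives in the RTS headers and is not reproduced here):

#include <stdatomic.h>
#include <stddef.h>

/* Standalone sketch of the protocol only.  empty_info/full_info/
 * locked_info stand in for the EMPTY_MVAR/FULL_MVAR info tables and
 * the RTS's "locked" sentinel. */
typedef struct { _Atomic(const void *) info; void *value; } Closure;

static const int empty_info = 0, full_info = 1, locked_info = 2;

static const void *lock_closure(Closure *c)
{
    const void *i;
    do {
        i = atomic_exchange(&c->info, (const void *)&locked_info);
    } while (i == (const void *)&locked_info);   /* spin while locked */
    return i;                  /* tells the caller EMPTY vs FULL */
}

static void unlock_closure(Closure *c, const void *new_info)
{
    atomic_store(&c->info, new_info);   /* writing the info ptr unlocks */
}

/* takeMVar-like step: only touch the payload while the closure is locked */
static void *take(Closure *mvar)
{
    const void *i = lock_closure(mvar);
    void *val = NULL;
    if (i == (const void *)&full_info) {
        val = mvar->value;
        mvar->value = NULL;
        unlock_closure(mvar, &empty_info);   /* do this last, as above */
    } else {
        unlock_closure(mvar, i);             /* empty: the RTS would block */
    }
    return val;
}
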
@@ -932,7 +966,9 @@ FN_(waitReadzh_fast)
     ASSERT(CurrentTSO->why_blocked == NotBlocked);
     CurrentTSO->why_blocked = BlockedOnRead;
     CurrentTSO->block_info.fd = R1.i;
-    PUSH_ON_BLOCKED_QUEUE(CurrentTSO);
+    ACQUIRE_LOCK(&sched_mutex);
+    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
+    RELEASE_LOCK(&sched_mutex);
     JMP_(stg_block_noregs);
   FE_
 }
@@ -944,7 +980,9 @@ FN_(waitWritezh_fast)
     ASSERT(CurrentTSO->why_blocked == NotBlocked);
     CurrentTSO->why_blocked = BlockedOnWrite;
     CurrentTSO->block_info.fd = R1.i;
-    PUSH_ON_BLOCKED_QUEUE(CurrentTSO);
+    ACQUIRE_LOCK(&sched_mutex);
+    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
+    RELEASE_LOCK(&sched_mutex);
     JMP_(stg_block_noregs);
   FE_
 }
@@ -956,12 +994,16 @@ FN_(delayzh_fast)
     ASSERT(CurrentTSO->why_blocked == NotBlocked);
     CurrentTSO->why_blocked = BlockedOnDelay;
 
+    ACQUIRE_LOCK(&sched_mutex);
+
     /* Add on ticks_since_select, since these will be subtracted at
      * the next awaitEvent call.
      */
     CurrentTSO->block_info.delay = R1.i + ticks_since_select;
 
-    PUSH_ON_BLOCKED_QUEUE(CurrentTSO);
+    APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
+
+    RELEASE_LOCK(&sched_mutex);
     JMP_(stg_block_noregs);
   FE_
 }
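
waitReadzh_fast, waitWritezh_fast and delayzh_fast all gain the same bracketing: the append to the shared blocked-thread queue is now protected by sched_mutex. A standalone pthreads sketch of that pattern (the names mirror the RTS's, but ACQUIRE_LOCK/RELEASE_LOCK, the real queue and the TSO type are not shown here):

#include <pthread.h>
#include <stddef.h>

/* Standalone pthreads sketch; sched_mutex and blocked_queue_* mirror
 * the RTS names, but this is not the RTS's locking macro or TSO type. */
typedef struct Tso { struct Tso *link; int blocked_on_fd; } Tso;

static pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
static Tso *blocked_queue_hd = NULL, *blocked_queue_tl = NULL;

static void append_to_blocked_queue(Tso *tso)
{
    pthread_mutex_lock(&sched_mutex);      /* ACQUIRE_LOCK(&sched_mutex)  */
    tso->link = NULL;
    if (blocked_queue_hd == NULL)
        blocked_queue_hd = tso;
    else
        blocked_queue_tl->link = tso;
    blocked_queue_tl = tso;
    pthread_mutex_unlock(&sched_mutex);    /* RELEASE_LOCK(&sched_mutex)  */
}
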