update submodules for GHC.HetMet.GArrow -> Control.GArrow renaming
[ghc-hetmet.git] / rts / Exception.cmm
index c2f0dde..24da1c6 100644
@@ -13,9 +13,7 @@
 #include "Cmm.h"
 #include "RaiseAsync.h"
 
-#ifdef __PIC__
-import ghczmprim_GHCziBool_True_closure;
-#endif
+import ghczmprim_GHCziTypes_True_closure;
 
 /* -----------------------------------------------------------------------------
    Exception Primitives
@@ -23,12 +21,12 @@ import ghczmprim_GHCziBool_True_closure;
    A thread can request that asynchronous exceptions not be delivered
    ("blocked") for the duration of an I/O computation.  The primitive
    
-       blockAsyncExceptions# :: IO a -> IO a
+       maskAsyncExceptions# :: IO a -> IO a
 
    is used for this purpose.  During a blocked section, asynchronous
    exceptions may be unblocked again temporarily:
 
-       unblockAsyncExceptions# :: IO a -> IO a
+       unmaskAsyncExceptions# :: IO a -> IO a
 
    Furthermore, asynchronous exceptions are blocked automatically during
    the execution of an exception handler.  Both of these primitives
@@ -41,40 +39,36 @@ import ghczmprim_GHCziBool_True_closure;
    the threads waiting to deliver exceptions to that thread.
 
    NB. there's a bug in here.  If a thread is inside an
-   unsafePerformIO, and inside blockAsyncExceptions# (there is an
-   unblockAsyncExceptions_ret on the stack), and it is blocked in an
+   unsafePerformIO, and inside maskAsyncExceptions# (there is an
+   unmaskAsyncExceptions_ret on the stack), and it is blocked in an
    interruptible operation, and it receives an exception, then the
    unsafePerformIO thunk will be updated with a stack object
-   containing the unblockAsyncExceptions_ret frame.  Later, when
+   containing the unmaskAsyncExceptions_ret frame.  Later, when
    someone else evaluates this thunk, the blocked exception state is
    not restored.
 
    -------------------------------------------------------------------------- */
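For orientation, here is a minimal sketch of how these primitives surface at the source level, roughly following the GHC.IO wrappers of this era (simplified: the real `mask` also consults `getMaskingState#` so that its `restore` argument can put back the caller's state):

```haskell
{-# LANGUAGE MagicHash #-}
import GHC.Exts (maskAsyncExceptions#, unmaskAsyncExceptions#)
import GHC.IO   (IO (..))

-- Simplified wrappers around the primops: enter/leave the masked
-- state for the duration of the wrapped IO action.
block, unblock :: IO a -> IO a
block   (IO io) = IO (maskAsyncExceptions# io)
unblock (IO io) = IO (unmaskAsyncExceptions# io)

main :: IO ()
main = block (putStrLn "async exceptions are masked here")
```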
 
-INFO_TABLE_RET( stg_unblockAsyncExceptionszh_ret, RET_SMALL )
+
+INFO_TABLE_RET(stg_unmaskAsyncExceptionszh_ret, RET_SMALL)
 {
     CInt r;
 
-    StgTSO_flags(CurrentTSO) = StgTSO_flags(CurrentTSO) & 
-       ~(TSO_BLOCKEX::I32|TSO_INTERRUPTIBLE::I32);
+    StgTSO_flags(CurrentTSO) = %lobits32(
+      TO_W_(StgTSO_flags(CurrentTSO)) & ~(TSO_BLOCKEX|TSO_INTERRUPTIBLE));
 
     /* Eagerly raise a blocked exception, if there is one */
     if (StgTSO_blocked_exceptions(CurrentTSO) != END_TSO_QUEUE) {
+
+        STK_CHK_GEN( WDS(2), R1_PTR, stg_unmaskAsyncExceptionszh_ret_info);
         /* 
          * We have to be very careful here, as in killThread#, since
          * we are about to raise an async exception in the current
          * thread, which might result in the thread being killed.
          */
-
-#ifndef REG_R1
-        /*
-         * raiseAsync assumes that the stack is in ThreadRunGHC state,
-         * i.e. with a return address on the top.  In unreg mode, the
-         * return value for IO is on top of the return address, so we
-         * need to make a small adjustment here.
-         */
-        Sp_adj(1);
-#endif
+        Sp_adj(-2);
+        Sp(1) = R1;
+        Sp(0) = stg_gc_unpt_r1_info;
         SAVE_THREAD_STATE();
         (r) = foreign "C" maybePerformBlockedException (MyCapability() "ptr", 
                                                      CurrentTSO "ptr") [R1];
@@ -88,76 +82,129 @@ INFO_TABLE_RET( stg_unblockAsyncExceptionszh_ret, RET_SMALL )
                 jump %ENTRY_CODE(Sp(0));
             }
         }
-#ifndef REG_R1
-        /* 
-         * Readjust stack in unregisterised mode if we didn't raise an
-         * exception, see above
-         */
         else {
-            Sp_adj(-1);
+            /*
+             * The thread might have been removed from the
+             * blocked_exception list by someone else in the meantime.
+             * Just restore the stack pointer and continue.
+             */
+            Sp_adj(2);
         }
-#endif
     }
 
-#ifdef REG_R1
     Sp_adj(1);
     jump %ENTRY_CODE(Sp(0));
-#else
-    Sp(1) = Sp(0);
-    Sp_adj(1);
-    jump %ENTRY_CODE(Sp(1));
-#endif
 }
 
-INFO_TABLE_RET( stg_blockAsyncExceptionszh_ret, RET_SMALL )
+INFO_TABLE_RET(stg_maskAsyncExceptionszh_ret, RET_SMALL)
 {
     StgTSO_flags(CurrentTSO) = 
-       StgTSO_flags(CurrentTSO) | TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32;
+       %lobits32(
+        TO_W_(StgTSO_flags(CurrentTSO))
+          | TSO_BLOCKEX | TSO_INTERRUPTIBLE
+      );
 
-#ifdef REG_R1
     Sp_adj(1);
     jump %ENTRY_CODE(Sp(0));
-#else
-    Sp(1) = Sp(0);
+}
+
+INFO_TABLE_RET(stg_maskUninterruptiblezh_ret, RET_SMALL)
+{
+    StgTSO_flags(CurrentTSO) = 
+       %lobits32(
+       (TO_W_(StgTSO_flags(CurrentTSO))
+          | TSO_BLOCKEX)
+          & ~TSO_INTERRUPTIBLE
+       );
+
     Sp_adj(1);
-    jump %ENTRY_CODE(Sp(1));
-#endif
+    jump %ENTRY_CODE(Sp(0));
 }
 
-blockAsyncExceptionszh_fast
+stg_maskAsyncExceptionszh
 {
     /* Args: R1 :: IO a */
-    STK_CHK_GEN( WDS(2)/* worst case */, R1_PTR, blockAsyncExceptionszh_fast);
+    STK_CHK_GEN( WDS(1)/* worst case */, R1_PTR, stg_maskAsyncExceptionszh);
 
     if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) == 0) {
-       
-       StgTSO_flags(CurrentTSO) = 
-          StgTSO_flags(CurrentTSO) | TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32;
+        /* avoid growing the stack unnecessarily */
+        if (Sp(0) == stg_maskAsyncExceptionszh_ret_info) {
+            Sp_adj(1);
+        } else {
+            Sp_adj(-1);
+            Sp(0) = stg_unmaskAsyncExceptionszh_ret_info;
+        }
+    } else {
+        if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) == 0) {
+            Sp_adj(-1);
+            Sp(0) = stg_maskUninterruptiblezh_ret_info;
+        }
+    }
 
-       /* avoid growing the stack unnecessarily */
-       if (Sp(0) == stg_blockAsyncExceptionszh_ret_info) {
-           Sp_adj(1);
-       } else {
-           Sp_adj(-1);
-           Sp(0) = stg_unblockAsyncExceptionszh_ret_info;
-       }
+    StgTSO_flags(CurrentTSO) = %lobits32(
+        TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX | TSO_INTERRUPTIBLE);
+
+    TICK_UNKNOWN_CALL();
+    TICK_SLOW_CALL_v();
+    jump stg_ap_v_fast;
+}
+
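The "avoid growing the stack" branch above exploits the fact that masking is idempotent at the source level: if the frame on top is already a mask-restoring frame, it can be consumed instead of pushing a matching unmask frame. This is observable with the public API (a sketch):

```haskell
import Control.Exception (getMaskingState, mask_)

-- Nested mask_ calls leave the masking state unchanged; the inner
-- call neither deepens nor alters the mask.
main :: IO ()
main = mask_ (mask_ (print =<< getMaskingState))
-- prints: MaskedInterruptible
```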
+stg_maskUninterruptiblezh
+{
+    /* Args: R1 :: IO a */
+    STK_CHK_GEN( WDS(1)/* worst case */, R1_PTR, stg_maskUninterruptiblezh);
+
+    if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) == 0) {
+        /* avoid growing the stack unnecessarily */
+        if (Sp(0) == stg_maskUninterruptiblezh_ret_info) {
+            Sp_adj(1);
+        } else {
+            Sp_adj(-1);
+            Sp(0) = stg_unmaskAsyncExceptionszh_ret_info;
+        }
+    } else {
+        if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0) {
+            Sp_adj(-1);
+            Sp(0) = stg_maskAsyncExceptionszh_ret_info;
+        }
     }
+
+    StgTSO_flags(CurrentTSO) = %lobits32(
+        (TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX) & ~TSO_INTERRUPTIBLE);
+
     TICK_UNKNOWN_CALL();
     TICK_SLOW_CALL_v();
     jump stg_ap_v_fast;
 }
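This primitive backs `uninterruptibleMask`; a simplified source-level wrapper might look like the following (assuming a GHC.IO-style definition; the real combinator also hands the caller a restore function):

```haskell
{-# LANGUAGE MagicHash #-}
import GHC.Exts (maskUninterruptible#)
import GHC.IO   (IO (..))

-- Simplified: run the action in the masked, non-interruptible state,
-- so that even interruptible operations cannot receive async
-- exceptions inside it.
blockUninterruptible :: IO a -> IO a
blockUninterruptible (IO io) = IO (maskUninterruptible# io)

main :: IO ()
main = blockUninterruptible (putStrLn "uninterruptibly masked here")
```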
 
-unblockAsyncExceptionszh_fast
+stg_unmaskAsyncExceptionszh
 {
     CInt r;
+    W_ level;
 
     /* Args: R1 :: IO a */
-    STK_CHK_GEN( WDS(2), R1_PTR, unblockAsyncExceptionszh_fast);
+    STK_CHK_GEN( WDS(4), R1_PTR, stg_unmaskAsyncExceptionszh);
+    /* 4 words: one for the unblock frame, three for setting up the
+     * stack to call maybePerformBlockedException() below.
+     */
 
+    /* If exceptions are already unblocked, there's nothing to do */
     if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) != 0) {
 
-       StgTSO_flags(CurrentTSO) = StgTSO_flags(CurrentTSO) & 
-          ~(TSO_BLOCKEX::I32|TSO_INTERRUPTIBLE::I32);
+       /* avoid growing the stack unnecessarily */
+       if (Sp(0) == stg_unmaskAsyncExceptionszh_ret_info) {
+           Sp_adj(1);
+       } else {
+           Sp_adj(-1);
+            if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0) {
+                Sp(0) = stg_maskAsyncExceptionszh_ret_info;
+            } else {
+                Sp(0) = stg_maskUninterruptiblezh_ret_info;
+            }
+       }
+
+       StgTSO_flags(CurrentTSO) = %lobits32(
+            TO_W_(StgTSO_flags(CurrentTSO)) & ~(TSO_BLOCKEX|TSO_INTERRUPTIBLE));
 
         /* Eagerly raise a blocked exception, if there is one */
         if (StgTSO_blocked_exceptions(CurrentTSO) != END_TSO_QUEUE) {
@@ -165,7 +212,18 @@ unblockAsyncExceptionszh_fast
              * We have to be very careful here, as in killThread#, since
              * we are about to raise an async exception in the current
              * thread, which might result in the thread being killed.
+             *
+             * Now, if we are to raise an exception in the current
+             * thread, there might be an update frame above us on the
+             * stack due to unsafePerformIO.  Hence, the stack must
+             * make sense, because it is about to be snapshotted into
+             * an AP_STACK.
              */
+            Sp_adj(-3);
+            Sp(2) = stg_ap_v_info;
+            Sp(1) = R1;
+            Sp(0) = stg_enter_info;
+
             SAVE_THREAD_STATE();
             (r) = foreign "C" maybePerformBlockedException (MyCapability() "ptr", 
                                                      CurrentTSO "ptr") [R1];
@@ -178,16 +236,12 @@ unblockAsyncExceptionszh_fast
                    ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
                    jump %ENTRY_CODE(Sp(0));
                }
+            } else {
+                /* we'll just call R1 directly, below */
+                Sp_adj(3);
             }
         }
 
-       /* avoid growing the stack unnecessarily */
-       if (Sp(0) == stg_unblockAsyncExceptionszh_ret_info) {
-           Sp_adj(1);
-       } else {
-           Sp_adj(-1);
-           Sp(0) = stg_blockAsyncExceptionszh_ret_info;
-       }
     }
     TICK_UNKNOWN_CALL();
     TICK_SLOW_CALL_v();
@@ -195,7 +249,19 @@ unblockAsyncExceptionszh_fast
 }
 
 
-killThreadzh_fast
+stg_getMaskingStatezh
+{
+    /* args: none */
+    /* 
+       returns: 0 == unmasked,
+                1 == masked, non-interruptible,
+                2 == masked, interruptible
+    */
+    RET_N(((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) != 0) +
+          ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0));
+}
+
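The three return values map directly onto the MaskingState constructors that Control.Exception.getMaskingState decodes them into; a quick check using the public API (a sketch):

```haskell
import Control.Exception
    (MaskingState (..), getMaskingState, mask_, uninterruptibleMask_)

-- 0 -> Unmasked, 1 -> MaskedUninterruptible, 2 -> MaskedInterruptible
main :: IO ()
main = do
    print =<< getMaskingState                         -- Unmasked
    mask_                (print =<< getMaskingState)  -- MaskedInterruptible
    uninterruptibleMask_ (print =<< getMaskingState)  -- MaskedUninterruptible
```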
+stg_killThreadzh
 {
     /* args: R1 = TSO to kill, R2 = Exception */
 
@@ -206,7 +272,10 @@ killThreadzh_fast
     target = R1;
     exception = R2;
     
-    STK_CHK_GEN( WDS(3), R1_PTR & R2_PTR, killThreadzh_fast);
+    /* Needs 3 words because throwToSingleThreaded uses some stack */
+    STK_CHK_GEN( WDS(3), R1_PTR & R2_PTR, stg_killThreadzh);
+    /* We call allocate in throwTo(), so better check for GC */
+    MAYBE_GC(R1_PTR & R2_PTR, stg_killThreadzh);
 
     /* 
      * We might have killed ourselves.  In which case, better be *very*
@@ -214,12 +283,15 @@ killThreadzh_fast
      * If the exception went to a catch frame, we'll just continue from
      * the handler.
      */
-  loop:
-    if (StgTSO_what_next(target) == ThreadRelocated::I16) {
-        target = StgTSO_link(target);
-        goto loop;
-    }
     if (target == CurrentTSO) {
+        /*
+         * So what should happen if a thread calls "throwTo self" inside
+         * unsafePerformIO, and later the closure is evaluated by another
+         * thread?  Presumably it should behave as if throwTo just returned,
+         * and then continue from there.  See #3279, #3288.  This is what
+         * happens: on resumption, we will just jump to the next frame on
+         * the stack, which is the return point for stg_killThreadzh.
+         */
        SAVE_THREAD_STATE();
        /* ToDo: what if the current thread is blocking exceptions? */
        foreign "C" throwToSingleThreaded(MyCapability() "ptr", 
@@ -233,27 +305,22 @@ killThreadzh_fast
        }
     } else {
        W_ out;
-       W_ retcode;
-       out = BaseReg + OFFSET_StgRegTable_rmp_tmp_w;
-       
-       (retcode) = foreign "C" throwTo(MyCapability() "ptr",
-                                     CurrentTSO "ptr",
-                                     target "ptr",
-                                     exception "ptr",
-                                     out "ptr") [R1,R2];
-       
-       switch [THROWTO_SUCCESS .. THROWTO_BLOCKED] (retcode) {
+       W_ msg;
+       out = Sp - WDS(1); /* ok to re-use stack space here */
 
-       case THROWTO_SUCCESS: {
+       (msg) = foreign "C" throwTo(MyCapability() "ptr",
+                                    CurrentTSO "ptr",
+                                    target "ptr",
+                                    exception "ptr") [R1,R2];
+       
+        if (msg == NULL) {
            jump %ENTRY_CODE(Sp(0));
-       }
-
-       case THROWTO_BLOCKED: {
-           R3 = W_[out];
-           // we must block, and call throwToReleaseTarget() before returning
+       } else {
+            StgTSO_why_blocked(CurrentTSO) = BlockedOnMsgThrowTo;
+            StgTSO_block_info(CurrentTSO) = msg;
+           // we must block, and unlock the message before returning
            jump stg_block_throwto;
        }
-       }
     }
 }
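At the source level this is Control.Concurrent.throwTo (killThread is throwTo applied to ThreadKilled), and the blocking behaviour above is visible there: the call returns only once the exception has been delivered or the target has finished. A small sketch:

```haskell
import Control.Concurrent (forkIO, killThread, threadDelay)

main :: IO ()
main = do
    tid <- forkIO (threadDelay 1000000)  -- target sleeps for 1s
    killThread tid  -- blocks until ThreadKilled is actually delivered
    putStrLn "target thread has received the exception"
```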
 
@@ -261,11 +328,7 @@ killThreadzh_fast
    Catch frames
    -------------------------------------------------------------------------- */
 
-#ifdef REG_R1
 #define SP_OFF 0
-#else
-#define SP_OFF 1
-#endif
 
 /* Catch frames are very similar to update frames, but when entering
  * one we just pop the frame off the stack and perform the correct
@@ -276,21 +339,11 @@ INFO_TABLE_RET(stg_catch_frame, CATCH_FRAME,
 #if defined(PROFILING)
   W_ unused1, W_ unused2,
 #endif
-  W_ unused3, "ptr" W_ unused4)
-#ifdef REG_R1
+  W_ unused3, P_ unused4)
    {
       Sp = Sp + SIZEOF_StgCatchFrame;
       jump %ENTRY_CODE(Sp(SP_OFF));
    }
-#else
-   {
-      W_ rval;
-      rval = Sp(0);
-      Sp = Sp + SIZEOF_StgCatchFrame;
-      Sp(0) = rval;
-      jump %ENTRY_CODE(Sp(SP_OFF));
-   }
-#endif
 
 /* -----------------------------------------------------------------------------
  * The catch infotable
@@ -306,20 +359,21 @@ INFO_TABLE(stg_catch,2,0,FUN,"catch","catch")
 {
   R2 = StgClosure_payload(R1,1); /* h */
   R1 = StgClosure_payload(R1,0); /* x */
-  jump catchzh_fast;
+  jump stg_catchzh;
 }
 
-catchzh_fast
+stg_catchzh
 {
     /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
-    STK_CHK_GEN(SIZEOF_StgCatchFrame + WDS(1), R1_PTR & R2_PTR, catchzh_fast);
+    STK_CHK_GEN(SIZEOF_StgCatchFrame + WDS(1), R1_PTR & R2_PTR, stg_catchzh);
   
     /* Set up the catch frame */
     Sp = Sp - SIZEOF_StgCatchFrame;
     SET_HDR(Sp,stg_catch_frame_info,W_[CCCS]);
     
     StgCatchFrame_handler(Sp) = R2;
-    StgCatchFrame_exceptions_blocked(Sp) = TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX;
+    StgCatchFrame_exceptions_blocked(Sp) = 
+        TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE);
     TICK_CATCHF_PUSHED();
 
     /* Apply R1 to the realworld token */
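Recording both bits (rather than just TSO_BLOCKEX, as before) lets stg_raisezh below run the handler masked while inheriting the interruptibility of the context that pushed the catch frame. The masked handler is observable from Haskell (a sketch):

```haskell
import Control.Exception

-- The handler of 'catch' runs with async exceptions masked; caught
-- from an unmasked context, the state is MaskedInterruptible.
main :: IO ()
main =
    throwIO (userError "boom") `catch` \e -> do
        print (e :: IOException)
        print =<< getMaskingState  -- MaskedInterruptible
```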
@@ -335,28 +389,28 @@ catchzh_fast
  *
  *   raise = {err} \n {} -> raise#{err}
  *
- * It is used in raisezh_fast to update thunks on the update list
+ * It is used in stg_raisezh to update thunks on the update list
  * -------------------------------------------------------------------------- */
 
 INFO_TABLE(stg_raise,1,0,THUNK_1_0,"raise","raise")
 {
   R1 = StgThunk_payload(R1,0);
-  jump raisezh_fast;
+  jump stg_raisezh;
 }
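Thunks overwritten with this closure re-raise the exception each time they are forced. At the source level this is the lazy throw/raise# path, in contrast to the strict raiseIO# at the end of this file. A sketch:

```haskell
import Control.Exception

-- 'throw' can appear in pure code; forcing the value raises the
-- exception, here caught as a synchronous ErrorCall.
boom :: Int
boom = throw (ErrorCall "forced")

main :: IO ()
main = print (boom + 1) `catch` \e -> print (e :: ErrorCall)
```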
 
 section "data" {
   no_break_on_exception: W_[1];
 }
 
-INFO_TABLE_RET(stg_raise_ret, RET_SMALL, "ptr" W_ arg1)
+INFO_TABLE_RET(stg_raise_ret, RET_SMALL, P_ arg1)
 {
   R1 = Sp(1);
   Sp = Sp + WDS(2);
   W_[no_break_on_exception] = 1;  
-  jump raisezh_fast;
+  jump stg_raisezh;
 }
 
-raisezh_fast
+stg_raisezh
 {
     W_ handler;
     W_ frame_type;
@@ -377,9 +431,9 @@ raisezh_fast
 #endif
     
 retry_pop_stack:
-    StgTSO_sp(CurrentTSO) = Sp;
+    SAVE_THREAD_STATE();
     (frame_type) = foreign "C" raiseExceptionHelper(BaseReg "ptr", CurrentTSO "ptr", exception "ptr") [];
-    Sp = StgTSO_sp(CurrentTSO);
+    LOAD_THREAD_STATE();
     if (frame_type == ATOMICALLY_FRAME) {
       /* The exception has reached the edge of a memory transaction.  Check that 
        * the transaction is valid.  If not then perhaps the exception should
@@ -393,7 +447,7 @@ retry_pop_stack:
       W_ r;
       trec = StgTSO_trec(CurrentTSO);
       (r) = foreign "C" stmValidateNestOfTransactions(trec "ptr") [];
-      ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
+      outer  = StgTRecHeader_enclosing_trec(trec);
       foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
       foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
 
@@ -433,15 +487,14 @@ retry_pop_stack:
             // deadlock if an exception is raised in InteractiveUI,
             // for example.  Perhaps the stop_on_exception flag should
             // be per-thread.
-            W_[rts_stop_on_exception] = 0;
+            CInt[rts_stop_on_exception] = 0;
             ("ptr" ioAction) = foreign "C" deRefStablePtr (W_[rts_breakpoint_io_action] "ptr") [];
-            Sp = Sp - WDS(7);
-            Sp(6) = exception;
-            Sp(5) = stg_raise_ret_info;
-            Sp(4) = stg_noforceIO_info;    // required for unregisterised
+            Sp = Sp - WDS(6);
+            Sp(5) = exception;
+            Sp(4) = stg_raise_ret_info;
             Sp(3) = exception;             // the AP_STACK
-            Sp(2) = ghczmprim_GHCziBool_True_closure; // dummy breakpoint info
-            Sp(1) = ghczmprim_GHCziBool_True_closure; // True <=> a breakpoint
+            Sp(2) = ghczmprim_GHCziTypes_True_closure; // dummy breakpoint info
+            Sp(1) = ghczmprim_GHCziTypes_True_closure; // True <=> a breakpoint
             R1 = ioAction;
             jump RET_LBL(stg_ap_pppv);
         }
@@ -453,8 +506,10 @@ retry_pop_stack:
         * We will leave the stack in a GC'able state, see the stg_stop_thread
         * entry code in StgStartup.cmm.
         */
-       Sp = CurrentTSO + TSO_OFFSET_StgTSO_stack 
-               + WDS(TO_W_(StgTSO_stack_size(CurrentTSO))) - WDS(2);
+        W_ stack;
+        stack = StgTSO_stackobj(CurrentTSO);
+        Sp = stack + OFFSET_StgStack_stack
+                + WDS(TO_W_(StgStack_stack_size(stack))) - WDS(2);
        Sp(1) = exception;      /* save the exception */
        Sp(0) = stg_enter_info; /* so that GC can traverse this stack */
        StgTSO_what_next(CurrentTSO) = ThreadKilled::I16;
@@ -477,7 +532,7 @@ retry_pop_stack:
      *
      * If exceptions were unblocked, arrange that they are unblocked
      * again after executing the handler by pushing an
-     * unblockAsyncExceptions_ret stack frame.
+     * unmaskAsyncExceptions_ret stack frame.
      *
      * If we've reached an STM catch frame then roll back the nested
      * transaction we were using.
@@ -486,14 +541,14 @@ retry_pop_stack:
     frame = Sp;
     if (frame_type == CATCH_FRAME) {
       Sp = Sp + SIZEOF_StgCatchFrame;
-      if (StgCatchFrame_exceptions_blocked(frame) == 0) {
-        Sp_adj(-1);
-        Sp(0) = stg_unblockAsyncExceptionszh_ret_info;
+      if ((StgCatchFrame_exceptions_blocked(frame) & TSO_BLOCKEX) == 0) {
+          Sp_adj(-1);
+          Sp(0) = stg_unmaskAsyncExceptionszh_ret_info;
       }
     } else {
       W_ trec, outer;
       trec = StgTSO_trec(CurrentTSO);
-      ("ptr" outer) = foreign "C" stmGetEnclosingTRec(trec "ptr") [];
+      outer  = StgTRecHeader_enclosing_trec(trec);
       foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
       foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
       StgTSO_trec(CurrentTSO) = outer;
@@ -501,9 +556,18 @@ retry_pop_stack:
     }
 
     /* Ensure that async exceptions are blocked when running the handler.
+     * The interruptible state is inherited from the context of the
+     * catch frame.
     */
-    StgTSO_flags(CurrentTSO) = 
-       StgTSO_flags(CurrentTSO) | TSO_BLOCKEX::I32 | TSO_INTERRUPTIBLE::I32;
+    StgTSO_flags(CurrentTSO) = %lobits32(
+       TO_W_(StgTSO_flags(CurrentTSO)) | TSO_BLOCKEX);
+    if ((StgCatchFrame_exceptions_blocked(frame) & TSO_INTERRUPTIBLE) == 0) {
+        StgTSO_flags(CurrentTSO) = %lobits32(
+            TO_W_(StgTSO_flags(CurrentTSO)) & ~TSO_INTERRUPTIBLE);
+    } else {
+        StgTSO_flags(CurrentTSO) = %lobits32(
+            TO_W_(StgTSO_flags(CurrentTSO)) | TSO_INTERRUPTIBLE);
+    }
 
     /* Call the handler, passing the exception value and a realworld
      * token as arguments.
@@ -517,8 +581,8 @@ retry_pop_stack:
     jump RET_LBL(stg_ap_pv);
 }
 
-raiseIOzh_fast
+stg_raiseIOzh
 {
   /* Args :: R1 :: Exception */
-  jump raisezh_fast;
+  jump stg_raisezh;
 }
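stg_raiseIOzh backs the raiseIO# primop, which is what throwIO is built on (roughly throwIO e = IO (raiseIO# (toException e))): the exception is raised when the IO action runs, not when its result is forced. A minimal sketch:

```haskell
import Control.Exception (ErrorCall (..), throwIO)

-- Raising precisely in IO: the action itself throws, so ordering
-- with respect to other IO effects is well defined.
main :: IO ()
main = throwIO (ErrorCall "raised via raiseIO#")
```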