[commit: ghc] ghc-lwc2: Fixed the races in stg_atomicSwitch. Once the atomicSwitch transaction has succeeded, we spin-wait until any capability running in parallel that might be giving up control of the target TSO we are switching to has had time to perform the necessary cleanup. (f24ff1e)

Sivaramakrishnan Krishnamoorthy Chandrasekaran t-sichan at microsoft.com
Mon May 6 21:41:06 CEST 2013


Repository : http://darcs.haskell.org/ghc.git/

On branch  : ghc-lwc2

https://github.com/ghc/ghc/commit/f24ff1ea92a77ad09485dbd2425716489202c4b2

>---------------------------------------------------------------

commit f24ff1ea92a77ad09485dbd2425716489202c4b2
Author: KC Sivaramakrishnan <chandras at cs.purdue.edu>
Date:   Mon May 6 15:35:31 2013 -0400

    Fixed the races in stg_atomicSwitch. Once the atomicSwitch transaction
    has succeeded, we spin-wait until any capability running in parallel that
    might be giving up control of the target TSO we are switching to has had
    time to perform the necessary cleanup.

>---------------------------------------------------------------

 rts/PrimOps.cmm                     | 55 ++++++++++++++++++++-----------------
 rts/Threads.c                       |  6 +++-
 tests/Benchmarks/Sieve/sieve-lwc.hs |  2 +-
 3 files changed, 36 insertions(+), 27 deletions(-)

diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index 32821cf..4ce22bf 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -679,14 +679,36 @@ again: MAYBE_GC(again);
     if (valid != 0) {
       /* Transaction was valid: commit succeeded */
 
-      //KC -- operating on CurrentTSO is unsafe since the transaction has
-      //been committed and mucking with CurrentTSO info violates atomicity.
+			/* Spin wait till target tso is completely blocked. This is needed to
+			 * protect entry into the critical section on CurrentTSO that follows. At
+			 * the end of the critical section, target tso's state will be changed
+			 * from NotBlocked to something else. */
+			if (CurrentTSO != tso) {
+			  retry:
+			  if (StgTSO_why_blocked (tso) == NotBlocked::I16) {
+					IF_DEBUG (scheduler, ccall debugBelch ("stg_atomicSwitch: spinning\n"));
+					goto retry;
+				}
+        ASSERT (StgTSO_why_blocked(tso) != NotBlocked::I16); //Thread is blocked
+			}
 
-      //XXX KC -- Start unsafe section
+			/* --- Begin critical section on CurrentTSO --- */
 
-      if (CurrentTSO != tso) {
-        ASSERT (StgTSO_why_blocked(tso) != NotBlocked::I16); //Thread is blocked
+			StgTSO_trec(CurrentTSO) = NO_TREC;
+      StgStack_sp(StgTSO_stackobj(CurrentTSO)) = Sp;
+
+			/* Swap release ULS flag: If the current thread needs to give up the
+			 * scheduler, we make sure that the target thread (thread being switched
+			 * to), inherits that flag. When we enter the RTS schedule loop at the
+			 * end of this block, scheduleHandleThreadSwitch takes care of releasing
+			 * the scheduler. Swapping the flag to ensure that the requests are not
+			 * lost in case both threads need to lose thier schedulers (Unlikely?).
+			 */
+			tmpReleaseULS = StgTSO_release_ULS(CurrentTSO);
+			StgTSO_release_ULS(CurrentTSO) = StgTSO_release_ULS(tso);
+			StgTSO_release_ULS(tso) = tmpReleaseULS;
 
+      if (CurrentTSO != tso) {
         //Set the current thread status
         if (switch_reason == 1) {
           StgTSO_why_blocked(CurrentTSO) = Yielded::I16;
@@ -711,27 +733,14 @@ again: MAYBE_GC(again);
           }
         }
       }
-      StgTSO_trec(CurrentTSO) = NO_TREC;
-      StgStack_sp(StgTSO_stackobj(CurrentTSO)) = Sp;
-
-			/* Swap release ULS flag: If the current thread needs to give up the
-			 * scheduler, we make sure that the target thread (thread being switched
-			 * to), inherits that flag. When we enter the RTS schedule loop at the
-			 * end of this block, scheduleHandleThreadSwitch takes care of releasing
-			 * the scheduler. Swapping the flag to ensure that the requests are not
-			 * lost in case both threads need to lose thier schedulers (Unlikely?).
-			 */
-			tmpReleaseULS = StgTSO_release_ULS(CurrentTSO);
-			StgTSO_release_ULS(CurrentTSO) = StgTSO_release_ULS(tso);
-			StgTSO_release_ULS(tso) = tmpReleaseULS;
 
-      //XXX KC -- End unsafe section
+			/* --- End critical section on CurrentTSO --- */
 
       CurrentTSO = tso;
-      StgTSO_why_blocked(CurrentTSO) = NotBlocked::I16;
+      StgTSO_why_blocked(tso) = NotBlocked::I16;
       ("ptr" limit)  = ccall tso_SpLim (tso "ptr");
       SpLim = limit;
-      Sp = StgStack_sp(StgTSO_stackobj(CurrentTSO));
+      Sp = StgStack_sp(StgTSO_stackobj(tso));
 
       //Enter the scheduler. This is necessary for several reasons:
       //  - tso might be bound, or current task might be bound in which case we
@@ -873,10 +882,6 @@ stg_newSContzh ( gcptr closure )
 				closure "ptr");
   StgTSO_why_blocked (threadid) = Yielded::I16;
 
-  // context switch soon, but not immediately: we don't want every
-  // newSCont to force a context-switch.
-  // Capability_context_switch(MyCapability()) = 1 :: CInt;
-
   return (threadid);
 }
 
diff --git a/rts/Threads.c b/rts/Threads.c
index 63ca915..22b3032 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -334,7 +334,10 @@ tryWakeupThread (Capability *cap, StgTSO *tso)
         goto unblock2;
 
     case ThreadMigrating:
-      goto unblock2;
+      if (hasHaskellScheduler (tso))
+        goto unblock1;
+      else
+        goto unblock2;
 
     case BlockedInHaskell:
     case Yielded:
@@ -348,6 +351,7 @@ unblock1:
   /* XXX KC -- upcall threads needs to be appended to the run queue and would
    * not have an attached upcall, which would make getResumeThreadUpcall fail.
    * But is this correct? */
+    barf ("This branch is not expected to be taken?");
     goto unblock2;
   }
   tso->why_blocked = Yielded;
diff --git a/tests/Benchmarks/Sieve/sieve-lwc.hs b/tests/Benchmarks/Sieve/sieve-lwc.hs
index 1260ebf..475a8ce 100644
--- a/tests/Benchmarks/Sieve/sieve-lwc.hs
+++ b/tests/Benchmarks/Sieve/sieve-lwc.hs
@@ -42,6 +42,6 @@ main = do
 linkFilter :: MVar Int -> MVar Int -> IO (MVar Int)
 linkFilter mIn mOut = do
   prime <- takeMVar mIn
-  debugPrint $ show prime
+  putStrLn $ show prime
   forkIO $ primeFilter mIn mOut prime
   return mOut





More information about the ghc-commits mailing list