[commit: ghc] ghc-lwc2: Fixes for safe-foreign calls. tso->release_ULS added to indicate to the thread resuming after a safe-foreign function call that its user-level scheduler has already been resumed. Hence, the next time this thread enters the scheduler loop, it gives up its scheduler. (b18199d)

Sivaramakrishnan Krishnamoorthy Chandrasekaran t-sichan at microsoft.com
Sat May 4 20:39:07 CEST 2013


Repository : http://darcs.haskell.org/ghc.git/

On branch  : ghc-lwc2

https://github.com/ghc/ghc/commit/b18199d60c1ccd0c8b02c1f4596b8580700c5854

>---------------------------------------------------------------

commit b18199d60c1ccd0c8b02c1f4596b8580700c5854
Author: KC Sivaramakrishnan <chandras at cs.purdue.edu>
Date:   Fri May 3 23:26:05 2013 -0400

    Fixes for safe-foreign calls. tso->release_ULS added to indicate to
    the thread resuming after a safe-foreign function call that its
    user-level scheduler has already been resumed. Hence, the next time this
    thread enters the scheduler loop, it gives up its scheduler.

>---------------------------------------------------------------

 includes/rts/storage/TSO.h |  1 +
 rts/Schedule.c             | 39 ++++++++++++++++++---------------------
 rts/Threads.c              |  1 +
 rts/Upcalls.c              | 15 +++++++++++++--
 4 files changed, 33 insertions(+), 23 deletions(-)

diff --git a/includes/rts/storage/TSO.h b/includes/rts/storage/TSO.h
index 15450b3..05470b4 100644
--- a/includes/rts/storage/TSO.h
+++ b/includes/rts/storage/TSO.h
@@ -131,6 +131,7 @@ typedef struct StgTSO_ {
     StgWord32               dirty;          /* non-zero => dirty */
     StgWord32               is_sleeping;
     StgWord32               is_upcall_thread;
+    StgWord32               release_ULS;
     struct InCall_*         bound;
     struct Capability_*     cap;
 
diff --git a/rts/Schedule.c b/rts/Schedule.c
index edf6872..47f0dc6 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -960,8 +960,7 @@ scheduleResumeBlockedOnForeignCall(Capability *cap USED_IF_THREADS)
     //Safely work
     ACQUIRE_LOCK (&cap->lock);
     incall = cap->suspended_ccalls_hd;
-    if (rtsFalse &&
-        incall && //incall is not NULL
+    if (incall && //incall is not NULL
         incall->uls_stat == UserLevelSchedulerBlocked) {
 
         debugTrace (DEBUG_sched, "resuming scheduler associated with task %p"
@@ -1177,11 +1176,12 @@ schedulePostRunThread (Capability *cap, StgTSO *t)
             debugTrace(DEBUG_sched | DEBUG_stm,
                        "trec %p found wasting its time", t);
 
-            // strip the stack back to the ATOMICALLY_FRAME, aborting the (nested)
-            // transaction, and saving the stack of any partially-evaluated thunks on
-            // the heap.
-            //XXX KC -- We do not need to add an upcall since t is the current running
-            //thread.
+            // strip the stack back to the ATOMICALLY_FRAME, aborting the
+            // (nested) transaction, and saving the stack of any
+            // partially-evaluated thunks on the heap.
+
+            //XXX KC -- We do not need to add an upcall since t is the current
+            //running thread.
             throwToSingleThreaded_(cap, t, NULL, rtsTrue);
             //            ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
         }
@@ -1316,9 +1316,9 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
     if (cap->context_switch != 0) {
         cap->context_switch = 0;
         if (hasHaskellScheduler (t)) {
-            pushUpcallNonReturning (cap, getSwitchToNextThreadUpcall (cap, t));
-            pushUpcallReturning (cap, getResumeThreadUpcall (cap, t));
-            t->why_blocked = Yielded;
+          pushUpcallNonReturning (cap, getSwitchToNextThreadUpcall (cap, t));
+          pushUpcallReturning (cap, getResumeThreadUpcall (cap, t));
+          t->why_blocked = Yielded;
         }
         else
             appendToRunQueue(cap,t);
@@ -2457,10 +2457,6 @@ resumeThread (void *task_)
         }
     }
 
-    /* We might have GC'd, mark the TSO dirty again */
-    dirty_TSO(cap,tso);
-    dirty_STACK(cap,tso->stackobj);
-    IF_DEBUG(sanity, checkTSO(tso));
 
 #if defined(THREADED_RTS)
     //Check whether a worker has resumed our scheduler
@@ -2468,16 +2464,17 @@ resumeThread (void *task_)
         //Evaluate the unblock action on the upcall thread
         debugTrace (DEBUG_sched, "cap %d: resumeThread: ULS for thread %d already resumed. errno=%d.",
                     (int)cap->no, tso->id, errno);
-
-        tso->why_blocked = Yielded;
-        tso->saved_errno = saved_errno;
-        pushUpcallReturning (cap, getResumeThreadUpcall (cap, tso));
-
-        tso = prepareUpcallThread (cap, (StgTSO*)END_TSO_QUEUE);
-        saved_errno = tso->saved_errno;
+        //Mark this task to release the scheduler
+        tso->release_ULS = rtsTrue;
+        cap->context_switch = 1;
     }
 #endif
 
+    /* We might have GC'd, mark the TSO dirty again */
+    dirty_TSO(cap,tso);
+    dirty_STACK(cap,tso->stackobj);
+    IF_DEBUG(sanity, checkTSO(tso));
+
     traceEventRunThread(cap, tso);
 
     cap->r.rCurrentTSO = tso;
diff --git a/rts/Threads.c b/rts/Threads.c
index 23d4000..9155cda 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -110,6 +110,7 @@ createThread(Capability *cap, W_ size)
   tso->dirty = 1;
   tso->is_upcall_thread = 0;
   tso->is_sleeping = rtsFalse;
+  tso->release_ULS = rtsFalse;
   tso->_link = END_TSO_QUEUE;
 
   tso->saved_errno = 0;
diff --git a/rts/Upcalls.c b/rts/Upcalls.c
index 7de17fa..ef01d4d 100644
--- a/rts/Upcalls.c
+++ b/rts/Upcalls.c
@@ -35,6 +35,9 @@ pushUpcallReturning (Capability* cap, Upcall uc)
 void
 pushUpcallNonReturning (Capability* cap, Upcall uc)
 {
+  if ((StgClosure*)uc == (StgClosure*)defaultUpcall_closure)
+    return; //See getSwitchToNextThreadUpcall
+
   if (!pushWSDeque (cap->upcall_queue_non_returning, uc))
     barf ("pushUpcall overflow!!");
   debugTrace (DEBUG_sched, "Adding new non returning upcall %p (queue size = %d)",
@@ -67,8 +70,16 @@ getSwitchToNextThreadUpcall (Capability* cap, StgTSO* t)
   ASSERT (!t->is_upcall_thread);
   ASSERT (t->yield_control_action != (StgClosure*)defaultUpcall_closure);
 
-  p = rts_apply (cap, (StgClosure*)yieldControlActionRts_closure,
-                 rts_mkSCont (cap, t));
+  if (t->release_ULS) {
+    debugTrace (DEBUG_sched, "cap %d: returning dummy switch to next thread upcall",
+                cap->no);
+    p = (StgClosure*)defaultUpcall_closure;
+    t->release_ULS = rtsFalse;
+  }
+  else {
+   p = rts_apply (cap, (StgClosure*)yieldControlActionRts_closure,
+                  rts_mkSCont (cap, t));
+  }
 
   debugTrace (DEBUG_sched, "cap %d: getSwitchToNextThreadupcall(%p) for thread %d",
               cap->no, (void*)p, t->id);





More information about the ghc-commits mailing list