[Git][ghc/ghc][master] 3 commits: rts/Trace: Ensure that debugTrace arguments are used

Marge Bot (@marge-bot) gitlab at gitlab.haskell.org
Thu Jun 29 22:47:08 UTC 2023



Marge Bot pushed to branch master at Glasgow Haskell Compiler / GHC


Commits:
7c7d1f66 by Ben Gamari at 2023-06-29T18:46:48-04:00
rts/Trace: Ensure that debugTrace arguments are used

As debugTrace is a macro, we must take care to ensure that its
arguments are still seen as used by the compiler, lest we see
unused-variable warnings.

- - - - -
cb92051e by Ben Gamari at 2023-06-29T18:46:48-04:00
rts: Various warning fixes

- - - - -
dec81dd1 by Ben Gamari at 2023-06-29T18:46:48-04:00
hadrian: Ignore warnings in unix and semaphore-compat

- - - - -


8 changed files:

- hadrian/src/Flavour.hs
- rts/Schedule.c
- rts/Sparks.c
- rts/Trace.h
- rts/TraverseHeap.c
- rts/sm/GC.c
- rts/sm/NonMoving.c
- rts/sm/NonMovingMark.c


Changes:

=====================================
hadrian/src/Flavour.hs
=====================================
@@ -128,9 +128,13 @@ werror =
         ? notStage0
         ? mconcat
           [ arg "-Werror"
-          , flag CrossCompiling
-              ? package unix
+            -- unix has many unused imports
+          , package unix
               ? mconcat [arg "-Wwarn=unused-imports", arg "-Wwarn=unused-top-binds"]
+            -- semaphore-compat relies on sem_getvalue as provided by unix, which is
+            -- not implemented on Darwin and therefore throws a deprecation warning
+          , package semaphoreCompat
+              ? mconcat [arg "-Wwarn=deprecations"]
           ]
     , builder Ghc
         ? package rts


=====================================
rts/Schedule.c
=====================================
@@ -1160,9 +1160,11 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
             barf("allocation of %ld bytes too large (GHC should have complained at compile-time)", (long)cap->r.rHpAlloc);
         }
 
+#if defined(DEBUG)
         debugTrace(DEBUG_sched,
                    "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n",
                    (long)t->id, what_next_strs[t->what_next], blocks);
+#endif
 
         // don't do this if the nursery is (nearly) full, we'll GC first.
         if (cap->r.rCurrentNursery->link != NULL ||
@@ -1231,9 +1233,11 @@ scheduleHandleYield( Capability *cap, StgTSO *t, uint32_t prev_what_next )
     // Shortcut if we're just switching evaluators: just run the thread.  See
     // Note [avoiding threadPaused] in Interpreter.c.
     if (t->what_next != prev_what_next) {
+#if defined(DEBUG)
         debugTrace(DEBUG_sched,
                    "--<< thread %ld (%s) stopped to switch evaluators",
                    (long)t->id, what_next_strs[t->what_next]);
+#endif
         return true;
     }
 
@@ -1806,7 +1810,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
                 }
             }
         }
-        debugTrace(DEBUG_sched, "%d idle caps", n_idle_caps);
+        debugTrace(DEBUG_sched, "%d idle caps, %d failed grabs", n_idle_caps, n_failed_trygrab_idles);
 
         for (i=0; i < n_capabilities; i++) {
             NONATOMIC_ADD(&getCapability(i)->idle, 1);
@@ -2643,7 +2647,6 @@ void
 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability **pcap)
 {
     Task *task;
-    DEBUG_ONLY( StgThreadID id );
     Capability *cap;
 
     cap = *pcap;
@@ -2662,8 +2665,9 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability **pcap)
 
     appendToRunQueue(cap,tso);
 
-    DEBUG_ONLY( id = tso->id );
-    debugTrace(DEBUG_sched, "new bound thread (%" FMT_StgThreadID ")", id);
+    DEBUG_ONLY(
+        debugTrace(DEBUG_sched, "new bound thread (%" FMT_StgThreadID ")", (StgThreadID) tso->id);
+    );
 
     // As the TSO is bound and on the run queue, schedule() will run the TSO.
     cap = schedule(cap,task);
@@ -2671,7 +2675,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability **pcap)
     ASSERT(task->incall->rstat != NoStatus);
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
-    debugTrace(DEBUG_sched, "bound thread (%" FMT_StgThreadID ") finished", id);
+    debugTrace(DEBUG_sched, "bound thread (%" FMT_StgThreadID ") finished", (StgThreadID) tso->id);
     *pcap = cap;
 }
 
@@ -2793,9 +2797,6 @@ exitScheduler (bool wait_foreign USED_IF_THREADS)
 
     shutdownCapabilities(task, wait_foreign);
 
-    // debugBelch("n_failed_trygrab_idles = %d, n_idle_caps = %d\n",
-    //            n_failed_trygrab_idles, n_idle_caps);
-
     exitMyTask();
 }
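
The #if defined(DEBUG) guards added to these call sites follow from the rts/Trace.h change further down in this patch: debugTrace now compiles its arguments in every build, so anything they reference has to exist even when DEBUG is off. Here that is presumably the what_next_strs table, which appears to be available only in DEBUG builds; rts/sm/GC.c below gains the same guard for the same reason. A standalone sketch of the situation, with hypothetical names rather than the actual RTS definitions:

    #include <stdio.h>

    /* A trace helper whose arguments are always compiled, analogous to the
     * reworked debugTrace; every name here is hypothetical. */
    static void trace_msg(const char *msg)
    {
    #if defined(DEBUG)
        fprintf(stderr, "trace: %s\n", msg);
    #else
        (void)msg;                      /* trace output compiled out */
    #endif
    }

    #if defined(DEBUG)
    /* A lookup table that exists only in DEBUG builds, in the spirit of
     * what_next_strs in Schedule.c. */
    static const char *state_strs[] = { "run", "yield", "block" };
    #endif

    static void report(int state)
    {
    #if defined(DEBUG)
        /* The guard is still needed: without it the reference to state_strs
         * would fail to compile in non-DEBUG builds, even though the trace
         * itself prints nothing there. */
        trace_msg(state_strs[state]);
    #else
        (void)state;
    #endif
    }

    int main(void)
    {
        trace_msg("scheduler loop starting");
        report(1);
        return 0;
    }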
 


=====================================
rts/Sparks.c
=====================================
@@ -119,11 +119,10 @@ pruneSparkQueue (bool nonmovingMarkFinished, Capability *cap)
 {
     SparkPool *pool;
     StgClosurePtr spark, tmp, *elements;
-    uint32_t n, pruned_sparks; // stats only
+    uint32_t pruned_sparks; // stats only
     StgInt botInd,oldBotInd,currInd; // indices in array (always < size)
     const StgInfoTable *info;
 
-    n = 0;
     pruned_sparks = 0;
 
     pool = cap->sparks;
@@ -216,7 +215,6 @@ pruneSparkQueue (bool nonmovingMarkFinished, Capability *cap)
               if (closure_SHOULD_SPARK(tmp)) {
                   elements[botInd] = tmp; // keep entry (new address)
                   botInd++;
-                  n++;
               } else {
                   pruned_sparks++; // discard spark
                   cap->spark_stats.fizzled++;
@@ -246,7 +244,6 @@ pruneSparkQueue (bool nonmovingMarkFinished, Capability *cap)
                   if (closure_SHOULD_SPARK(spark)) {
                       elements[botInd] = spark; // keep entry (new address)
                       botInd++;
-                      n++;
                   } else {
                       pruned_sparks++; // discard spark
                       cap->spark_stats.fizzled++;
@@ -264,7 +261,6 @@ pruneSparkQueue (bool nonmovingMarkFinished, Capability *cap)
                   // isAlive() also ignores static closures (see GCAux.c)
                   elements[botInd] = spark; // keep entry (new address)
                   botInd++;
-                  n++;
               } else {
                   pruned_sparks++; // discard spark
                   cap->spark_stats.fizzled++;


=====================================
rts/Trace.h
=====================================
@@ -235,26 +235,25 @@ void traceThreadLabel_(Capability *cap,
                        char       *label,
                        size_t      len);
 
+
+#if defined(DEBUG)
+#define DEBUG_RTS 1
+#else
+#define DEBUG_RTS 0
+#endif
+
 /*
  * Emit a debug message (only when DEBUG is defined)
  */
-#if defined(DEBUG)
 #define debugTrace(class, msg, ...)             \
-    if (RTS_UNLIKELY(class)) {                  \
+    if (DEBUG_RTS && RTS_UNLIKELY(class)) {     \
         trace_(msg, ##__VA_ARGS__);             \
     }
-#else
-#define debugTrace(class, str, ...) /* nothing */
-#endif
 
-#if defined(DEBUG)
-#define debugTraceCap(class, cap, msg, ...)      \
-    if (RTS_UNLIKELY(class)) {                  \
+#define debugTraceCap(class, cap, msg, ...)     \
+    if (DEBUG_RTS && RTS_UNLIKELY(class)) {     \
         traceCap_(cap, msg, ##__VA_ARGS__);     \
     }
-#else
-#define debugTraceCap(class, cap, str, ...) /* nothing */
-#endif
 
 /*
  * Emit a message/event describing the state of a thread
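
This hunk is the heart of the first commit: rather than defining debugTrace away entirely in non-DEBUG builds, the body is kept behind the constant DEBUG_RTS. The arguments are therefore always parsed, so values computed solely for tracing count as used and stop triggering -Wunused-* warnings, while constant folding removes the guarded branch whenever DEBUG_RTS is 0. A minimal standalone sketch of the pattern, using a hypothetical my_trace macro rather than the RTS definitions (##__VA_ARGS__ is the same GNU extension Trace.h already relies on):

    #include <stdio.h>

    #if defined(DEBUG)
    #define MY_DEBUG 1
    #else
    #define MY_DEBUG 0
    #endif

    /* Old style: in non-DEBUG builds the macro expanded to nothing, so a
     * variable passed only to it looked unused and drew warnings.
     * New style: the call is always compiled, but behind a constant guard
     * that the optimiser folds away when MY_DEBUG is 0. */
    #define my_trace(msg, ...)                              \
        do {                                                \
            if (MY_DEBUG) {                                 \
                fprintf(stderr, msg "\n", ##__VA_ARGS__);   \
            }                                               \
        } while (0)

    int main(void)
    {
        int pruned = 3;                         /* only consumed by the trace */
        my_trace("pruned %d sparks", pruned);   /* counts as a use in every build */
        return 0;
    }

Compiling the sketch with and without -DDEBUG should produce no unused-variable warnings in either configuration, which is the point of the change.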


=====================================
rts/TraverseHeap.c
=====================================
@@ -48,7 +48,7 @@ static void debug(const char *s, ...)
     va_end(ap);
 }
 #else
-#define debug(...)
+static void debug(const char *s STG_UNUSED, ...) {}
 #endif
 
 // number of blocks allocated for one stack
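
Here the same goal is reached with a different tool: the empty debug(...) macro is replaced by a no-op variadic function, so arguments passed to it still count as used, and STG_UNUSED keeps the now-unreferenced format parameter from warning. A standalone sketch of that alternative, substituting __attribute__((unused)) for STG_UNUSED and using hypothetical names:

    #include <stdarg.h>
    #include <stdio.h>

    #if defined(DEBUG)
    static void debug_msg(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }
    #else
    /* No-op replacement: unlike an empty macro, calls to it keep their
     * arguments, so nothing passed here looks unused to the compiler. */
    static void debug_msg(const char *fmt __attribute__((unused)), ...) {}
    #endif

    int main(void)
    {
        int depth = 3;                    /* only used for tracing */
        debug_msg("traversal depth: %d\n", depth);
        return 0;
    }

Unlike the constant-guard macro in rts/Trace.h, the arguments here are nominally evaluated at run time in non-DEBUG builds, though a call this trivial is normally optimised away.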


=====================================
rts/sm/GC.c
=====================================
@@ -691,6 +691,7 @@ GarbageCollect (struct GcConfig config,
         }
         copied +=  mut_list_size;
 
+#if defined(DEBUG)
         debugTrace(DEBUG_gc,
                    "mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d TVARs, %d TVAR_WATCH_QUEUEs, %d TREC_CHUNKs, %d TREC_HEADERs, %d others)",
                    (unsigned long)(mut_list_size * sizeof(W_)),
@@ -702,6 +703,7 @@ GarbageCollect (struct GcConfig config,
                    mutlist_scav_stats.n_TREC_CHUNK,
                    mutlist_scav_stats.n_TREC_HEADER,
                    mutlist_scav_stats.n_OTHERS);
+#endif
     }
 
     bdescr *next, *prev;


=====================================
rts/sm/NonMoving.c
=====================================
@@ -901,14 +901,12 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
     // updated their snapshot pointers and move them to the sweep list.
     for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
         struct NonmovingSegment *filled = nonmovingHeap.allocators[alloca_idx].saved_filled;
-        uint32_t n_filled = 0;
         if (filled) {
             struct NonmovingSegment *seg = filled;
             while (true) {
                 // Set snapshot
                 nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
                 SET_SEGMENT_STATE(seg, FILLED_SWEEPING);
-                n_filled++;
                 if (seg->link) {
                     seg = seg->link;
                 } else {
@@ -1161,24 +1159,20 @@ void assert_in_nonmoving_heap(StgPtr p)
         }
 
         // Search active segments
-        int seg_idx = 0;
         struct NonmovingSegment *seg = alloca->active;
         while (seg) {
             if (p >= (P_)seg && p < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
                 return;
             }
-            seg_idx++;
             seg = seg->link;
         }
 
         // Search filled segments
-        seg_idx = 0;
         seg = alloca->filled;
         while (seg) {
             if (p >= (P_)seg && p < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
                 return;
             }
-            seg_idx++;
             seg = seg->link;
         }
     }


=====================================
rts/sm/NonMovingMark.c
=====================================
@@ -268,7 +268,7 @@ void nonmovingMarkInit() {
 #endif
 }
 
-#if defined(THREADED_RTS) && defined(DEBUG)
+#if defined(THREADED_RTS)
 static uint32_t markQueueLength(MarkQueue *q);
 #endif
 static void init_mark_queue_(MarkQueue *queue);
@@ -985,7 +985,7 @@ void freeMarkQueue (MarkQueue *queue)
     freeChain_lock(queue->blocks);
 }
 
-#if defined(THREADED_RTS) && defined(DEBUG)
+#if defined(THREADED_RTS)
 static uint32_t
 markQueueLength (MarkQueue *q)
 {
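
markQueueLength is now compiled whenever THREADED_RTS is, not only under DEBUG, presumably because it is referenced from debugTrace arguments, which after the rts/Trace.h change are parsed in every build; the forward declaration in the first hunk is relaxed to match. A small standalone illustration of that constraint (hypothetical names, not the RTS code):

    #include <stdio.h>

    /* helper_len used to be needed only in DEBUG builds; once it is referenced
     * from an always-compiled (if possibly dead) branch, it must be defined in
     * every configuration or the reference fails to compile. */
    static unsigned helper_len(const int *xs, unsigned n)
    {
        unsigned len = 0;
        for (unsigned i = 0; i < n; i++) {
            if (xs[i] != 0) {
                len++;
            }
        }
        return len;
    }

    int main(void)
    {
        int xs[] = { 1, 0, 2 };
        if (0) {                           /* stands in for a disabled trace */
            printf("queue length: %u\n", helper_len(xs, 3));
        }
        return 0;
    }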



View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/compare/d7ef1704aeba451bd3e0efbdaaab2638ee1f0bc8...dec81dd1fd0475dde4929baae625d155387300bb
