[commit: ghc] master: Rts: Consistently use StgWord for sizes of bitmaps (43b3bab)

git at git.haskell.org
Tue Apr 29 19:35:17 UTC 2014


Repository : ssh://git@git.haskell.org/ghc

On branch  : master
Link       : http://ghc.haskell.org/trac/ghc/changeset/43b3bab38eafef8c02a09fb4ff1e757f5cba6073/ghc

>---------------------------------------------------------------

commit 43b3bab38eafef8c02a09fb4ff1e757f5cba6073
Author: Arash Rouhani <rarash at student.chalmers.se>
Date:   Thu Feb 13 15:28:11 2014 +0100

    Rts: Consistently use StgWord for sizes of bitmaps
    
    There is a longer discussion in issue #8742, but the main motivation is
    that this change allows a follow-up patch to reuse the function
    scavenge_small_bitmap without changing the .o-file output.
    
    Similarly, I changed the types in rts/sm/Compact.c so that I can create
    a STATIC_INLINE function for the duplicated code block:
    
            while (size > 0) {
                if ((bitmap & 1) == 0) {
                    thread((StgClosure **)p);
                }
                p++;
                bitmap = bitmap >> 1;
                size--;
            }


>---------------------------------------------------------------

43b3bab38eafef8c02a09fb4ff1e757f5cba6073
 includes/rts/storage/ClosureMacros.h |    5 +++++
 rts/sm/Compact.c                     |   14 +++++++-------
 rts/sm/Scav.c                        |   16 ++++++++--------
 3 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/includes/rts/storage/ClosureMacros.h b/includes/rts/storage/ClosureMacros.h
index 92b78de..3407b71 100644
--- a/includes/rts/storage/ClosureMacros.h
+++ b/includes/rts/storage/ClosureMacros.h
@@ -338,6 +338,11 @@ EXTERN_INLINE StgWord bco_sizeW ( StgBCO *bco );
 EXTERN_INLINE StgWord bco_sizeW ( StgBCO *bco )
 { return bco->size; }
 
+/*
+ * TODO: Consider switching the return type from 'nat' to 'StgWord' (#8742)
+ *
+ * (Also for 'closure_sizeW' below)
+ */
 EXTERN_INLINE nat closure_sizeW_ (StgClosure *p, StgInfoTable *info);
 EXTERN_INLINE nat
 closure_sizeW_ (StgClosure *p, StgInfoTable *info)
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index 8ae72a9..3731dd6 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -183,7 +183,7 @@ loop:
 // A word-aligned memmove will be faster for small objects than libc's or gcc's.
 // Remember, the two regions *might* overlap, but: to <= from.
 STATIC_INLINE void
-move(StgPtr to, StgPtr from, W_ size)
+move(StgPtr to, StgPtr from, StgWord size)
 {
     for(; size > 0; --size) {
 	*to++ = *from++;
@@ -225,7 +225,7 @@ thread_static( StgClosure* p )
 }
 
 STATIC_INLINE void
-thread_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, W_ size )
+thread_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
 {
     W_ i, b;
     StgWord bitmap;
@@ -252,7 +252,7 @@ thread_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
 {
     StgPtr p;
     StgWord bitmap;
-    W_ size;
+    StgWord size;
 
     p = (StgPtr)args;
     switch (fun_info->f.fun_type) {
@@ -287,7 +287,7 @@ thread_stack(StgPtr p, StgPtr stack_end)
 {
     const StgRetInfoTable* info;
     StgWord bitmap;
-    W_ size;
+    StgWord size;
     
     // highly similar to scavenge_stack, but we do pointer threading here.
     
@@ -327,7 +327,6 @@ thread_stack(StgPtr p, StgPtr stack_end)
 
 	case RET_BCO: {
 	    StgBCO *bco;
-	    nat size;
 	    
 	    p++;
 	    bco = (StgBCO *)*p;
@@ -773,7 +772,7 @@ update_fwd_compact( bdescr *blocks )
 #endif
     bdescr *bd, *free_bd;
     StgInfoTable *info;
-    nat size;
+    StgWord size;
     StgWord iptr;
 
     bd = blocks;
@@ -858,7 +857,8 @@ update_bkwd_compact( generation *gen )
 #endif
     bdescr *bd, *free_bd;
     StgInfoTable *info;
-    W_ size, free_blocks;
+    StgWord size;
+    W_ free_blocks;
     StgWord iptr;
 
     bd = free_bd = gen->old_blocks;
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index c35444b..5cf4cfa 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -32,7 +32,7 @@ static void scavenge_stack (StgPtr p, StgPtr stack_end);
 
 static void scavenge_large_bitmap (StgPtr p, 
 				   StgLargeBitmap *large_bitmap, 
-				   nat size );
+				   StgWord size );
 
 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
 # define evacuate(a) evacuate1(a)
@@ -178,7 +178,7 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
 {
     StgPtr p;
     StgWord bitmap;
-    nat size;
+    StgWord size;
 
     p = (StgPtr)args;
     switch (fun_info->f.fun_type) {
@@ -1498,7 +1498,7 @@ scavenge_one(StgPtr p)
       { 
 	StgPtr start = gen->scan;
 	bdescr *start_bd = gen->scan_bd;
-	nat size = 0;
+	StgWord size = 0;
 	scavenge(&gen);
 	if (start_bd != gen->scan_bd) {
 	  size += (P_)BLOCK_ROUND_UP(start) - start;
@@ -1745,7 +1745,7 @@ scavenge_static(void)
    -------------------------------------------------------------------------- */
 
 static void
-scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
+scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
 {
     nat i, j, b;
     StgWord bitmap;
@@ -1766,7 +1766,7 @@ scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
 }
 
 STATIC_INLINE StgPtr
-scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
+scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
 {
     while (size > 0) {
 	if ((bitmap & 1) == 0) {
@@ -1790,7 +1790,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
 {
   const StgRetInfoTable* info;
   StgWord bitmap;
-  nat size;
+  StgWord size;
 
   /* 
    * Each time around this loop, we are looking at a chunk of stack
@@ -1874,7 +1874,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
 
     case RET_BCO: {
 	StgBCO *bco;
-	nat size;
+	StgWord size;
 
 	p++;
 	evacuate((StgClosure **)p);
@@ -1889,7 +1889,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
       // large bitmap (> 32 entries, or > 64 on a 64-bit machine) 
     case RET_BIG:
     {
-	nat size;
+	StgWord size;
 
 	size = GET_LARGE_BITMAP(&info->i)->size;
 	p++;


