This is the mail archive of the glibc-cvs@sourceware.org mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

GNU C Library master sources branch master updated. glibc-2.20-556-g22971c3


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".

The branch, master has been updated
       via  22971c35e2de34ec3e1b02e9bceebcba2ead7bfe (commit)
      from  2ec2d7032ff9220da1577c37d41ae85c0721ad66 (commit)

The revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.

- Log -----------------------------------------------------------------
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=22971c35e2de34ec3e1b02e9bceebcba2ead7bfe

commit 22971c35e2de34ec3e1b02e9bceebcba2ead7bfe
Author: H.J. Lu <hjl.tools@gmail.com>
Date:   Fri Jan 23 14:48:40 2015 -0800

    Use uint64_t and (uint64_t) 1 for 64-bit int
    
    This patch replaces unsigned long int and 1UL with uint64_t and
    (uint64_t) 1 to support ILP32 targets like x32.
    
    	[BZ #17870]
    	* nptl/sem_post.c (__new_sem_post): Replace unsigned long int
    	with uint64_t.
    	* nptl/sem_waitcommon.c (__sem_wait_cleanup): Replace 1UL with
    	(uint64_t) 1.
    	(__new_sem_wait_slow): Replace unsigned long int with uint64_t.
    	Replace 1UL with (uint64_t) 1.
    	* sysdeps/nptl/internaltypes.h (new_sem): Replace unsigned long
    	int with uint64_t.

diff --git a/ChangeLog b/ChangeLog
index 051a7c4..a59266f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2015-01-23  H.J. Lu  <hongjiu.lu@intel.com>
+
+	[BZ #17870]
+	* nptl/sem_post.c (__new_sem_post): Replace unsigned long int
+	with uint64_t.
+	* nptl/sem_waitcommon.c (__sem_wait_cleanup): Replace 1UL with
+	(uint64_t) 1.
+	(__new_sem_wait_slow): Replace unsigned long int with uint64_t.
+	Replace 1UL with (uint64_t) 1.
+	* sysdeps/nptl/internaltypes.h (new_sem): Replace unsigned long
+	int with uint64_t.
+
 2015-01-23  Roland McGrath  <roland@hack.frob.com>
 
 	* inet/if_index.c (if_nameindex): Add missing libc_hidden_weak.
diff --git a/NEWS b/NEWS
index fd6da90..0ce4352 100644
--- a/NEWS
+++ b/NEWS
@@ -18,7 +18,7 @@ Version 2.21
   17664, 17665, 17668, 17682, 17702, 17717, 17719, 17722, 17723, 17724,
   17725, 17732, 17733, 17744, 17745, 17746, 17747, 17748, 17775, 17777,
   17780, 17781, 17782, 17791, 17793, 17796, 17797, 17803, 17806, 17834,
-  17844, 17848
+  17844, 17848, 17870
 
 * A new semaphore algorithm has been implemented in generic C code for all
   machines. Previous custom assembly implementations of semaphore were
diff --git a/nptl/sem_post.c b/nptl/sem_post.c
index 9162e4c..6e495ed 100644
--- a/nptl/sem_post.c
+++ b/nptl/sem_post.c
@@ -65,7 +65,7 @@ __new_sem_post (sem_t *sem)
      added tokens before (the release sequence includes atomic RMW operations
      by other threads).  */
   /* TODO Use atomic_fetch_add to make it scale better than a CAS loop?  */
-  unsigned long int d = atomic_load_relaxed (&isem->data);
+  uint64_t d = atomic_load_relaxed (&isem->data);
   do
     {
       if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)
diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c
index 96848d7..c60daa3 100644
--- a/nptl/sem_waitcommon.c
+++ b/nptl/sem_waitcommon.c
@@ -187,7 +187,7 @@ __sem_wait_cleanup (void *arg)
 
 #if __HAVE_64B_ATOMICS
   /* Stop being registered as a waiter.  See below for MO.  */
-  atomic_fetch_add_relaxed (&sem->data, -(1UL << SEM_NWAITERS_SHIFT));
+  atomic_fetch_add_relaxed (&sem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 #else
   __sem_wait_32_finish (sem);
 #endif
@@ -263,8 +263,8 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 #if __HAVE_64B_ATOMICS
   /* Add a waiter.  Relaxed MO is sufficient because we can rely on the
      ordering provided by the RMW operations we use.  */
-  unsigned long d = atomic_fetch_add_relaxed (&sem->data,
-      1UL << SEM_NWAITERS_SHIFT);
+  uint64_t d = atomic_fetch_add_relaxed (&sem->data,
+      (uint64_t) 1 << SEM_NWAITERS_SHIFT);
 
   pthread_cleanup_push (__sem_wait_cleanup, sem);
 
@@ -304,7 +304,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 	      err = -1;
 	      /* Stop being registered as a waiter.  */
 	      atomic_fetch_add_relaxed (&sem->data,
-		  -(1UL << SEM_NWAITERS_SHIFT));
+		  -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 	      break;
 	    }
 	  /* Relaxed MO is sufficient; see below.  */
@@ -320,7 +320,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 	     up-to-date value; the futex_wait or the CAS perform the real
 	     work.  */
 	  if (atomic_compare_exchange_weak_acquire (&sem->data,
-	      &d, d - 1 - (1UL << SEM_NWAITERS_SHIFT)))
+	      &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
 	    {
 	      err = 0;
 	      break;
diff --git a/sysdeps/nptl/internaltypes.h b/sysdeps/nptl/internaltypes.h
index 7c0d240..8f5cfa4 100644
--- a/sysdeps/nptl/internaltypes.h
+++ b/sysdeps/nptl/internaltypes.h
@@ -155,7 +155,7 @@ struct new_sem
 # endif
 # define SEM_NWAITERS_SHIFT 32
 # define SEM_VALUE_MASK (~(unsigned int)0)
-  unsigned long int data;
+  uint64_t data;
   int private;
   int pad;
 #else

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                    |   12 ++++++++++++
 NEWS                         |    2 +-
 nptl/sem_post.c              |    2 +-
 nptl/sem_waitcommon.c        |   10 +++++-----
 sysdeps/nptl/internaltypes.h |    2 +-
 5 files changed, 20 insertions(+), 8 deletions(-)


hooks/post-receive
-- 
GNU C Library master sources


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]