nptl: Add EOVERFLOW checks for futex calls
author    Adhemerval Zanella <adhemerval.zanella@linaro.org>
          Thu, 26 Nov 2020 13:54:04 +0000 (10:54 -0300)
committer Adhemerval Zanella <adhemerval.zanella@linaro.org>
          Fri, 27 Nov 2020 12:59:13 +0000 (09:59 -0300)
Some futex-internal calls require an additional check for EOVERFLOW (as
indicated by [1] [2] [3]).  For both the mutex and rwlock code, EOVERFLOW is
handled as ETIMEDOUT, since it indicates to the caller that the blocking
operation could not be issued.

For mutexes it avoids a possible issue where PTHREAD_MUTEX_ROBUST_* might
assume that EOVERFLOW means the futex call succeeded, and for
PTHREAD_MUTEX_PP_* it avoids a potential infinite busy loop.  For rwlocks
and semaphores, it also avoids potential infinite busy loops.

Checked on x86_64-linux-gnu and i686-linux-gnu, although EOVERFLOW is not
possible with current usage (since all timeouts on 32-bit architectures
with 32-bit time_t support fit within the range of 32-bit time_t).

[1] https://sourceware.org/pipermail/libc-alpha/2020-November/120079.html
[2] https://sourceware.org/pipermail/libc-alpha/2020-November/120080.html
[3] https://sourceware.org/pipermail/libc-alpha/2020-November/120127.html
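The change applies the same pattern at every affected futex wait site:
whenever the wait can report EOVERFLOW, fold it into the ETIMEDOUT path and
propagate the actual error value instead of hard-coding ETIMEDOUT or
retrying.  The following standalone sketch is not glibc code; wait_stub and
lock_with_timeout are hypothetical stand-ins for __futex_abstimed_wait64
and its callers, used only to illustrate the control flow.

/* Sketch: fold EOVERFLOW into the timeout path instead of looping.  */
#include <errno.h>
#include <stdio.h>

/* Stand-in for the futex wait: pretend the timeout could not be
   represented, so the kernel reported EOVERFLOW.  */
static int
wait_stub (void)
{
  return EOVERFLOW;
}

/* Returns 0 on acquisition, or an errno value on timeout/overflow.  */
static int
lock_with_timeout (void)
{
  for (;;)
    {
      int err = wait_stub ();
      /* Treat EOVERFLOW like ETIMEDOUT: the blocking operation could not
         be issued, so report it to the caller rather than retrying.  */
      if (err == ETIMEDOUT || err == EOVERFLOW)
        return err;
      if (err == 0)
        return 0;
      /* EINTR/EAGAIN: reload state and retry (elided in this sketch).  */
    }
}

int
main (void)
{
  int r = lock_with_timeout ();
  printf ("lock_with_timeout returned %d (%s)\n", r,
          r == EOVERFLOW ? "EOVERFLOW"
          : r == ETIMEDOUT ? "ETIMEDOUT" : "acquired");
  return 0;
}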

nptl/pthread_cond_wait.c
nptl/pthread_mutex_timedlock.c
nptl/pthread_rwlock_common.c
nptl/sem_waitcommon.c

nptl/pthread_cond_wait.c
index 685dbca32f8b8510b0a681f29393cac0b467b295..02d11c61dbc0e5fccb5554c3c4b59caa7c98eb5a 100644
@@ -506,7 +506,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 
          __pthread_cleanup_pop (&buffer, 0);
 
-         if (__glibc_unlikely (err == ETIMEDOUT))
+         if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
            {
              __condvar_dec_grefs (cond, g, private);
              /* If we timed out, we effectively cancel waiting.  Note that
@@ -515,7 +515,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
                 __condvar_quiesce_and_switch_g1 and us trying to acquire
                 the lock during cancellation is not possible.  */
              __condvar_cancel_waiting (cond, seq, g, private);
-             result = ETIMEDOUT;
+             result = err;
              goto done;
            }
          else
nptl/pthread_mutex_timedlock.c
index 74adffe790dad4c10902e36ecfdd73b0c641ee70..6c72a36b2b8ca60db492f7946473b78550ee5abd 100644
@@ -270,7 +270,7 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
              oldval, clockid, abstime,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
-         if (err == ETIMEDOUT)
+         if (err == ETIMEDOUT || err == EOVERFLOW)
            return err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
@@ -550,8 +550,8 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                    int e = __futex_abstimed_wait64 (
                      (unsigned int *) &mutex->__data.__lock, ceilval | 2,
                      clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
-                   if (e == ETIMEDOUT)
-                     return ETIMEDOUT;
+                   if (e == ETIMEDOUT || e == EOVERFLOW)
+                     return e;
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
nptl/pthread_rwlock_common.c
index 4c9f582d3daefec6648b26dc8bcda711fbdcc452..9ef432c4740d0a496e3fb2938b0ce486c2684a55 100644
@@ -334,7 +334,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
                                                     private);
                  /* We ignore EAGAIN and EINTR.  On time-outs, we can just
                     return because we don't need to clean up anything.  */
-                 if (err == ETIMEDOUT)
+                 if (err == ETIMEDOUT || err == EOVERFLOW)
                    return err;
                }
              /* It makes sense to not break out of the outer loop here
@@ -460,7 +460,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
          int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
                                             1 | PTHREAD_RWLOCK_FUTEX_USED,
                                             clockid, abstime, private);
-         if (err == ETIMEDOUT)
+         if (err == ETIMEDOUT || err == EOVERFLOW)
            {
              /* If we timed out, we need to unregister.  If no read phase
                 has been installed while we waited, we can just decrement
@@ -479,7 +479,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
                  if (atomic_compare_exchange_weak_relaxed
                      (&rwlock->__data.__readers, &r,
                       r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
-                   return ETIMEDOUT;
+                   return err;
                  /* TODO Back-off.  */
                }
              /* Use the acquire MO fence to mirror the steps taken in the
@@ -730,7 +730,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
          int err = __futex_abstimed_wait64 (&rwlock->__data.__writers_futex,
                                             1 | PTHREAD_RWLOCK_FUTEX_USED,
                                             clockid, abstime, private);
-         if (err == ETIMEDOUT)
+         if (err == ETIMEDOUT || err == EOVERFLOW)
            {
              if (prefer_writer)
                {
@@ -758,7 +758,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
                }
              /* We cleaned up and cannot have stolen another waiting writer's
                 futex wake-up, so just return.  */
-             return ETIMEDOUT;
+             return err;
            }
          /* If we got interrupted (EINTR) or the futex word does not have the
             expected value (EAGAIN), retry after reloading __readers.  */
@@ -829,7 +829,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
          int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
                                             PTHREAD_RWLOCK_FUTEX_USED,
                                             clockid, abstime, private);
-         if (err == ETIMEDOUT)
+         if (err == ETIMEDOUT || err == EOVERFLOW)
            {
              if (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP)
                {
@@ -861,7 +861,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
                              if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
                                futex_wake (&rwlock->__data.__writers_futex,
                                            1, private);
-                             return ETIMEDOUT;
+                             return err;
                            }
                          /* TODO Back-off.  */
                        }
nptl/sem_waitcommon.c
index 6dd4eb97cb592343e1cc651b3413e74af6561239..0ac1f139bd49f16be971709c0d68393d9b1338e2 100644
@@ -191,7 +191,7 @@ __new_sem_wait_slow64 (struct new_sem *sem, clockid_t clockid,
             documentation.  Before Linux 2.6.22, EINTR was also returned on
             spurious wake-ups; we only support more recent Linux versions,
             so do not need to consider this here.)  */
-         if (err == ETIMEDOUT || err == EINTR)
+         if (err == ETIMEDOUT || err == EINTR || err == EOVERFLOW)
            {
              __set_errno (err);
              err = -1;