PATCH: Cast to int32 first when casting pointer to int64
H.J. Lu
hjl.tools@gmail.com
Wed Nov 21 23:07:00 GMT 2012
On Wed, Nov 21, 2012 at 9:27 AM, Joseph S. Myers
<joseph@codesourcery.com> wrote:
> On Wed, 21 Nov 2012, H.J. Lu wrote:
>
>> > I'm seeing large numbers of "cast from pointer to integer of different
>> > size" warnings on x86 builds, around uses of these macros. I suspect this
>> > patch is responsible - if code, even in part of an "if" statement that
>> > won't be executed, contains a pointer-to-integer cast, you need to cast to
>> > an integer of the same size as the pointer, and possibly then from there to
>> > a type of the desired final width.
>> >
>>
>> This is a known bug in GCC and there is no workaround.
>
> It's not at all clear it's a bug, and it should certainly be possible to
> work around it using the same sort of trick as in
> tgmath.h:__tgmath_real_type_sub to create a typedef that is uint64_t when
> the argument is 64-bit and uint32_t otherwise.
>
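To make the problem concrete before getting to the fix, here is a minimal
illustration (not from the glibc tree) of the warning and of the two-step
cast through a pointer-sized integer.  Built with "gcc -m32", the first
function warns and the second does not:

/* Illustration only.  On a target with 32-bit pointers, casting a pointer
   straight to uint64_t triggers "cast from pointer to integer of different
   size"; the warning is issued for the cast as written, even if it sits in
   an "if" branch that can never execute.  Widening from uintptr_t first
   avoids it.  */
#include <stdint.h>

uint64_t
widen_bad (void *p)
{
  return (uint64_t) p;               /* warns on ia32/x32 */
}

uint64_t
widen_good (void *p)
{
  return (uint64_t) (uintptr_t) p;   /* pointer -> uintptr_t -> uint64_t */
}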
Thanks for the pointer. I copied __tgmath_real_type_sub, although I
don't quite understand how it works. Here is the patch I tested on x32
and ia32. There are no differences in the generated code, and it
eliminates all "cast from pointer to integer of different size" warnings.
OK to install?
Thanks.
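For reference, here is my reading of the trick: the intermediate type must
be pointer-sized only when the argument really is a pointer (a blanket
(uintptr_t) cast would truncate a 64-bit integer argument on ia32/x32), so
__real_type evaluates to __intptr_t for pointer arguments and to the
argument's own type otherwise.  Below is a stand-alone sketch of the trick,
assuming GCC; the names mirror the macros added to include/sys/types.h,
with <stdint.h>'s intptr_t standing in for the internal __intptr_t:

/* Stand-alone sketch; build with e.g. "gcc -m32 -Wall" and no
   pointer/integer size warning should appear.  */
#include <stdint.h>
#include <stdio.h>

/* 1 if TYPE is a pointer type, 0 otherwise (GCC classifies pointers as 5).  */
#define pointer_type(type) (__builtin_classify_type ((type) 0) == 5)

/* Evaluates, inside __typeof__, to intptr_t when P is 1 and to T when P
   is 0.  The trick: a constant 0 cast to void * is a null pointer
   constant, so the conditional expression takes the other arm's pointer
   type, while (void *) 1 is not and forces the result to void *.  */
#define real_type_sub(T, P) \
  __typeof__ (*(0 ? (__typeof__ (0 ? (T *) 0 : (void *) (P))) 0 \
                  : (__typeof__ (0 ? (intptr_t *) 0 : (void *) (!(P)))) 0))

/* The type of EXPR itself, except that pointer types become intptr_t.  */
#define real_type(expr) \
  real_type_sub (__typeof__ ((__typeof__ (expr)) 0), \
                 pointer_type (__typeof__ (expr)))

int
main (void)
{
  int x = -1;
  int *p = &x;
  uint64_t big = 0x123456789abcULL;

  /* Pointer: real_type (p) is intptr_t, so the widening goes through a
     pointer-sized integer and stays warning-free on ia32/x32.  */
  uint64_t a = (uint64_t) (real_type (p)) p;

  /* Non-pointers keep their own type, so a 64-bit argument is not
     truncated the way an unconditional (uintptr_t) cast would be.  */
  uint64_t b = (uint64_t) (real_type (x)) x;
  uint64_t c = (uint64_t) (real_type (big)) big;

  printf ("%llx %llx %llx\n", (unsigned long long) a,
          (unsigned long long) b, (unsigned long long) c);
  return 0;
}

So for a pointer argument (uint64_t) (__real_type (value)) (value) casts
through a pointer-sized integer, and for anything else it just re-casts the
value to its own type, which matches the unchanged generated code noted
above.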
--
H.J.
---
2012-11-21  H.J. Lu  <hongjiu.lu@intel.com>
* elf/dl-load.c (_dl_map_object_from_fd): Cast to uintptr_t
before casting to void *.
* include/sys/types.h (__pointer_type): New macro.
(__real_type_sub): Likewise.
(__real_type): Likewise.
* sysdeps/x86_64/bits/atomic.h
(__arch_c_compare_and_exchange_val_64_acq): Cast to __real_type
before casting to atomic64_t.
(atomic_exchange_acq): Likewise.
(__arch_exchange_and_add_body): Likewise.
(__arch_add_body): Likewise.
(atomic_add_negative): Likewise.
(atomic_add_zero): Likewise.
nptl/
2012-11-21  H.J. Lu  <hongjiu.lu@intel.com>
* unwind.c (__pthread_unwind): Pass address of unwind_cleanup
to THREAD_SETMEM.
* sysdeps/i386/tls.h (THREAD_SETMEM): Cast to __real_type before
casting to uint64_t.
(THREAD_SETMEM_NC): Likewise.
* sysdeps/x86_64/tls.h (THREAD_SETMEM): Likewise.
(THREAD_SETMEM_NC): Likewise.
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 7bf0c12..702e3c6 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1351,7 +1351,8 @@ cannot allocate TLS data structures for initial thread");
&& ((size_t) (c->mapend - c->mapstart + c->mapoff)
>= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
/* Found the program header in this segment. */
- l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
+ l->l_phdr = (void *) (uintptr_t) (c->mapstart + header->e_phoff
+ - c->mapoff);
if (c->allocend > c->dataend)
{
diff --git a/include/sys/types.h b/include/sys/types.h
index 716732f..4430419 100644
--- a/include/sys/types.h
+++ b/include/sys/types.h
@@ -1 +1,14 @@
#include <posix/sys/types.h>
+
+/* 1 if 'type' is a pointer type, 0 otherwise. */
+#define __pointer_type(type) (__builtin_classify_type ((type) 0) == 5)
+
+/* The real type for T, where P is 1 if T is a pointer. */
+#define __real_type_sub(T, P) \
+ __typeof__ (*(0 ? (__typeof__ (0 ? (T *) 0 : (void *) (P))) 0 \
+ : (__typeof__ (0 ? (__intptr_t *) 0 : (void *) (!(P)))) 0))
+
+/* The real type for EXPR. */
+#define __real_type(expr) \
+ __real_type_sub(__typeof__ ((__typeof__ (expr)) 0), \
+ __pointer_type (__typeof__ (expr)))
diff --git a/nptl/sysdeps/i386/tls.h b/nptl/sysdeps/i386/tls.h
index 65497cf..82032ab 100644
--- a/nptl/sysdeps/i386/tls.h
+++ b/nptl/sysdeps/i386/tls.h
@@ -343,7 +343,7 @@ union user_desc_init
\
asm volatile ("movl %%eax,%%gs:%P1\n\t" \
"movl %%edx,%%gs:%P2" : \
- : "A" ((uint64_t) (value)), \
+ : "A" ((uint64_t) (__real_type (value)) (value)), \
"i" (offsetof (struct pthread, member)), \
"i" (offsetof (struct pthread, member) + 4)); \
}})
@@ -370,7 +370,7 @@ union user_desc_init
\
asm volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
"movl %%edx,%%gs:4+%P1(,%2,8)" : \
- : "A" ((uint64_t) (value)), \
+ : "A" ((uint64_t) (__real_type (value)) (value)), \
"i" (offsetof (struct pthread, member)), \
"r" (idx)); \
}})
diff --git a/nptl/sysdeps/x86_64/tls.h b/nptl/sysdeps/x86_64/tls.h
index f838916..7fa5646 100644
--- a/nptl/sysdeps/x86_64/tls.h
+++ b/nptl/sysdeps/x86_64/tls.h
@@ -256,7 +256,7 @@ typedef struct
abort (); \
\
asm volatile ("movq %q0,%%fs:%P1" : \
- : IMM_MODE ((uint64_t) value), \
+ : IMM_MODE ((uint64_t) (__real_type (value)) value), \
"i" (offsetof (struct pthread, member))); \
}})
@@ -281,7 +281,7 @@ typedef struct
abort (); \
\
asm volatile ("movq %q0,%%fs:%P1(,%q2,8)" : \
- : IMM_MODE ((uint64_t) value), \
+ : IMM_MODE ((uint64_t) (__real_type (value)) value), \
"i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
}})
diff --git a/nptl/unwind.c b/nptl/unwind.c
index 7ccb213..aedd037 100644
--- a/nptl/unwind.c
+++ b/nptl/unwind.c
@@ -124,7 +124,7 @@ __pthread_unwind (__pthread_unwind_buf_t *buf)
/* This is not a catchable exception, so don't provide any details about
the exception type. We do need to initialize the field though. */
THREAD_SETMEM (self, exc.exception_class, 0);
- THREAD_SETMEM (self, exc.exception_cleanup, unwind_cleanup);
+ THREAD_SETMEM (self, exc.exception_cleanup, &unwind_cleanup);
_Unwind_ForcedUnwind (&self->exc, unwind_stop, ibuf);
#else
diff --git a/sysdeps/x86_64/bits/atomic.h b/sysdeps/x86_64/bits/atomic.h
index 100943f..f59a258 100644
--- a/sysdeps/x86_64/bits/atomic.h
+++ b/sysdeps/x86_64/bits/atomic.h
@@ -101,8 +101,9 @@ typedef uintmax_t uatomic_max_t;
"lock\n" \
"0:\tcmpxchgq %q2, %1" \
: "=a" (ret), "=m" (*mem) \
- : "q" ((atomic64_t) (newval)), "m" (*mem), \
- "0" ((atomic64_t) (oldval)), \
+ : "q" ((atomic64_t) (__real_type (newval)) (newval)), \
+ "m" (*mem), \
+ "0" ((atomic64_t) (__real_type (oldval)) (oldval)), \
"i" (offsetof (tcbhead_t, multiple_threads))); \
ret; })
@@ -125,7 +126,8 @@ typedef uintmax_t uatomic_max_t;
else \
__asm __volatile ("xchgq %q0, %1" \
: "=r" (result), "=m" (*mem) \
- : "0" ((atomic64_t) (newvalue)), "m" (*mem)); \
+ : "0" ((atomic64_t) (__real_type (newvalue)) (newvalue)), \
+ "m" (*mem)); \
result; })
@@ -149,7 +151,8 @@ typedef uintmax_t uatomic_max_t;
else \
__asm __volatile (lock "xaddq %q0, %1" \
: "=r" (result), "=m" (*mem) \
- : "0" ((atomic64_t) (value)), "m" (*mem), \
+ : "0" ((atomic64_t) (__real_type (value)) (value)), \
+ "m" (*mem), \
"i" (offsetof (tcbhead_t, multiple_threads))); \
result; })
@@ -187,7 +190,8 @@ typedef uintmax_t uatomic_max_t;
else \
__asm __volatile (lock "addq %q1, %0" \
: "=m" (*mem) \
- : "ir" ((atomic64_t) (value)), "m" (*mem), \
+ : "ir" ((atomic64_t) (__real_type (value)) (value)), \
+ "m" (*mem), \
"i" (offsetof (tcbhead_t, multiple_threads))); \
} while (0)
@@ -218,7 +222,8 @@ typedef uintmax_t uatomic_max_t;
else \
__asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
: "=m" (*mem), "=qm" (__result) \
- : "ir" ((atomic64_t) (value)), "m" (*mem)); \
+ : "ir" ((atomic64_t) (__real_type (value)) (value)), \
+ "m" (*mem)); \
__result; })
@@ -239,7 +244,8 @@ typedef uintmax_t uatomic_max_t;
else \
__asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
: "=m" (*mem), "=qm" (__result) \
- : "ir" ((atomic64_t) (value)), "m" (*mem)); \
+ : "ir" ((atomic64_t) (__real_type (value)) (value)), \
+ "m" (*mem)); \
__result; })