The shifts are protected by tests, so there was no risk.
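A minimal sketch, not taken from the patch, of the warning class these changes address: a value is shifted at 32-bit width and only afterwards widened to 64 bits. With an 8-bit immediate and a shift of at most 24, the intermediate result cannot overflow 32 bits, which is what makes the shifts safe in practice.

#include <assert.h>
#include <stdint.h>

int main(void)
{
   uint32_t imm8  = 0xFFu;                 /* worst-case 8-bit immediate */
   uint64_t wide1 = imm8 << 24;            /* shifted in 32 bits, then widened */
   uint64_t wide2 = (uint64_t)imm8 << 24;  /* shifted in 64 bits from the start */
   assert(wide1 == wide2);                 /* both are 0xFF000000: no bits lost */
   return 0;
}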
op == Iop_Sar32 ? binop(op, mkexpr(sVs[i]), mkU8(size-1))
: size == 32 ? mkU32(0) : mkU64(0)
));
+ } else {
+ res[i] = IRTemp_INVALID;
}
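The conditional above selects a fallback result: a shift by size-1 when the operation is Iop_Sar32, and a zero constant otherwise. A rough C analogue, an assumption about the hunk's intent rather than a quote from it, is that an arithmetic right shift by width-1 leaves only replicated sign bits, while the logical-shift cases collapse to zero.

#include <assert.h>
#include <stdint.h>

int main(void)
{
   /* Assumes an implementation with arithmetic right shift for signed
      values, which is what Iop_Sar32 denotes at the IR level. */
   int32_t neg = -5, pos = 5;
   assert((neg >> 31) == -1);   /* sign bit replicated into every position */
   assert((pos >> 31) == 0);    /* non-negative value collapses to zero */
   return 0;
}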
switch (size) {
case 32:
vassert(len >= 1 && len <= 6);
ULong levels = // (zeroes(6 - len) << (6-len)) | ones(len);
- (1 << len) - 1;
+ (1U << len) - 1;
vassert(levels >= 1 && levels <= 63);
if (immediate && ((imms & levels) == levels)) {
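As a side check on the levels computation, purely illustrative and not part of the patch: (1U << len) - 1 sets exactly the len low bits, matching the commented pseudocode (for example len == 3 gives 0b000111 == 7), and for len in 1..6 it stays within the 1..63 range the vassert demands.

#include <assert.h>

int main(void)
{
   for (unsigned len = 1; len <= 6; len++) {
      unsigned levels = (1U << len) - 1;   /* len low bits set */
      assert(levels >= 1 && levels <= 63);
   }
   return 0;
}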
case 0:
testimm8 = False; imm64 = Replicate32x2(imm8); break;
case 1:
- testimm8 = True; imm64 = Replicate32x2(imm8 << 8); break;
+ testimm8 = True; imm64 = Replicate32x2(imm8 << 8UL); break;
case 2:
- testimm8 = True; imm64 = Replicate32x2(imm8 << 16); break;
+ testimm8 = True; imm64 = Replicate32x2(imm8 << 16UL); break;
case 3:
- testimm8 = True; imm64 = Replicate32x2(imm8 << 24); break;
+ testimm8 = True; imm64 = Replicate32x2(imm8 << 24UL); break;
case 4:
testimm8 = False; imm64 = Replicate16x4(imm8); break;
case 5:
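Replicate32x2 and Replicate16x4 are not defined in this excerpt; as an assumption based on their names and the ARM AdvSIMDExpandImm pseudocode, they duplicate a 32-bit (respectively 16-bit) value across a 64-bit word, as sketched below. Since a shift's result type follows its promoted left operand, an 8-bit imm8 shifted by at most 24 still fits in 32 bits, which is presumably what the opening remark means by the shifts carrying no risk.

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for the real helpers, which are not shown here. */
static uint64_t replicate32x2(uint32_t x) { return ((uint64_t)x << 32) | x; }
static uint64_t replicate16x4(uint16_t x)
{
   uint64_t w = x;
   return (w << 48) | (w << 32) | (w << 16) | w;
}

int main(void)
{
   uint32_t imm8 = 0xABu;   /* an 8-bit immediate placed at byte 1 of each lane */
   assert(replicate32x2(imm8 << 8) == 0x0000AB000000AB00ULL);
   assert(replicate16x4((uint16_t)imm8) == 0x00AB00AB00AB00ABULL);
   return 0;
}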
HChar* HG_(strdup) ( const HChar* cc, const HChar* s );
static inline Bool HG_(is_sane_ThreadId) ( ThreadId coretid ) {
- return coretid >= 0 && coretid < VG_N_THREADS;
+ return coretid < VG_N_THREADS;
}
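For context on the is_sane_ThreadId change: ThreadId is an unsigned integer type in Valgrind, so the dropped "coretid >= 0" test was always true (the kind of comparison -Wtype-limits and static analysers flag), and removing it does not change behaviour. A minimal sketch with hypothetical stand-ins for ThreadId and VG_N_THREADS:

#include <assert.h>

typedef unsigned int ThreadId;   /* stand-in; the real typedef is elsewhere */
#define VG_N_THREADS 500         /* hypothetical bound, for illustration only */

static int is_sane_tid(ThreadId tid)
{
   /* "tid >= 0" would be vacuously true for an unsigned type, so only
      the upper bound carries any information. */
   return tid < VG_N_THREADS;
}

int main(void)
{
   assert(is_sane_tid(1));
   assert(!is_sane_tid(VG_N_THREADS));
   return 0;
}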
# else
// On other platforms, just skip one Addr.
lc_sig_skipped_szB += sizeof(Addr);
+ // PJF: asserts are always on
+ // coverity[ASSERT_SIDE_EFFECT:FALSE]
tl_assert(bad_scanned_addr >= VG_ROUNDUP(start, sizeof(Addr)));
+ // coverity[ASSERT_SIDE_EFFECT:FALSE]
tl_assert(bad_scanned_addr < VG_ROUNDDN(start+len, sizeof(Addr)));
ptr = bad_scanned_addr + sizeof(Addr); // Unaddressable - skip it.
#endif
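The two asserts bound bad_scanned_addr to the word-aligned part of the scanned range before the pointer is advanced past the unaddressable word. A small sketch, assuming VG_ROUNDUP and VG_ROUNDDN round to a power-of-two alignment in the usual mask-based way (their real definitions are not shown here):

#include <assert.h>
#include <stdint.h>

/* Assumed semantics of the rounding macros, for illustration only. */
#define ROUNDDN(p, a) ((uintptr_t)(p) & ~((uintptr_t)(a) - 1))
#define ROUNDUP(p, a) ROUNDDN((uintptr_t)(p) + (a) - 1, (a))

int main(void)
{
   /* A scan window [start, start+len) and an unaddressable word inside it. */
   uintptr_t start = 0x1003, len = 0x40, bad = 0x1010;
   assert(bad >= ROUNDUP(start, sizeof(void*)));
   assert(bad <  ROUNDDN(start + len, sizeof(void*)));
   uintptr_t next = bad + sizeof(void*);   /* skip past it, as the code above does */
   (void)next;
   return 0;
}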