sourceware.org Git - valgrind.git/commitdiff
coverity: use some event tags and ensure some shift expression size promotion
author     Paul Floyd <pjfloyd@wanadoo.fr>
           Sat, 30 Sep 2023 15:35:54 +0000 (17:35 +0200)
committer  Paul Floyd <pjfloyd@wanadoo.fr>
           Sat, 30 Sep 2023 15:35:54 +0000 (17:35 +0200)
The shifts are protected by tests so there was no risk.
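
For background, here is a minimal standalone sketch (not part of the commit) of
the promotion issue Coverity reports on expressions like (1 << len): the
literal 1 has type int, so the shift is computed at 32-bit signed width and is
only widened to the wider unsigned type afterwards.

    #include <stdio.h>

    typedef unsigned long long ULong;

    int main(void)
    {
        unsigned len = 6;   /* the commit's dbm_DecodeBitMasks asserts 1 <= len <= 6 */

        /* The literal 1 is a signed int, so this shift happens at int
           width; for len >= 31 the shift itself would be undefined
           behaviour, regardless of the 64-bit destination. */
        ULong risky = (1 << len) - 1;

        /* Unsigned operand, matching the fix below; 1ULL would be
           needed if len could ever reach 32..63. */
        ULong fixed = (1U << len) - 1;

        printf("%llu %llu\n", risky, fixed);
        return 0;
    }

Since len is asserted to stay within 1..6, the old code could never actually
overflow, which is what "protected by tests" means above; the 1U spelling just
makes the width explicit to the checker.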

VEX/priv/guest_amd64_toIR.c
VEX/priv/guest_arm64_toIR.c
helgrind/hg_basics.h
memcheck/mc_leakcheck.c

diff --git a/VEX/priv/guest_amd64_toIR.c b/VEX/priv/guest_amd64_toIR.c
index 3d4780363df0680d787bc25d344603a6189aa4a6..78d80fe8dc4303bfd2ce924c770d9c4baf74176f 100644
--- a/VEX/priv/guest_amd64_toIR.c
+++ b/VEX/priv/guest_amd64_toIR.c
@@ -23248,6 +23248,8 @@ static ULong dis_AVX_var_shiftV_byE ( const VexAbiInfo* vbi,
                     op == Iop_Sar32 ? binop(op, mkexpr(sVs[i]), mkU8(size-1))
                                     : size == 32 ? mkU32(0) : mkU64(0)
          ));
+      } else {
+         res[i] = IRTemp_INVALID;
       }
    switch (size) {
       case 32:
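
The two added lines close a path on which res[i] was previously never written.
A minimal sketch of the pattern (hypothetical names; SENTINEL stands in for
VEX's IRTemp_INVALID):

    #define SENTINEL (-1)   /* stand-in for IRTemp_INVALID */

    /* Assign every slot on every path, so a later reader of res[] can
       never see an indeterminate value -- the condition Coverity's
       uninitialized-value checker complains about. */
    void fill(int res[], int n, int (*wanted)(int))
    {
        for (int i = 0; i < n; i++) {
            if (wanted(i))
                res[i] = i;          /* the real computation */
            else
                res[i] = SENTINEL;   /* defined, recognizably unused */
        }
    }

Consumers can then test for the sentinel explicitly instead of relying on the
guard condition being re-evaluated identically at every use site.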
diff --git a/VEX/priv/guest_arm64_toIR.c b/VEX/priv/guest_arm64_toIR.c
index a6f5272b9ca1321240c358caecfb7ce60d394518..e6aaf896c91ba7bf304dfe1dd856e20cbc000d11 100644
--- a/VEX/priv/guest_arm64_toIR.c
+++ b/VEX/priv/guest_arm64_toIR.c
@@ -2385,7 +2385,7 @@ Bool dbm_DecodeBitMasks ( /*OUT*/ULong* wmask, /*OUT*/ULong* tmask,
 
    vassert(len >= 1 && len <= 6);
    ULong levels = // (zeroes(6 - len) << (6-len)) | ones(len);
-                  (1 << len) - 1;
+                  (1U << len) - 1;
    vassert(levels >= 1 && levels <= 63);
 
    if (immediate && ((imms & levels) == levels)) { 
@@ -8045,11 +8045,11 @@ static Bool AdvSIMDExpandImm ( /*OUT*/ULong* res,
       case 0:
          testimm8 = False; imm64 = Replicate32x2(imm8); break;
       case 1:
-         testimm8 = True; imm64 = Replicate32x2(imm8 << 8); break;
+         testimm8 = True; imm64 = Replicate32x2(imm8 << 8UL); break;
       case 2:
-         testimm8 = True; imm64 = Replicate32x2(imm8 << 16); break;
+         testimm8 = True; imm64 = Replicate32x2(imm8 << 16UL); break;
       case 3:
-         testimm8 = True; imm64 = Replicate32x2(imm8 << 24); break;
+         testimm8 = True; imm64 = Replicate32x2(imm8 << 24UL); break;
       case 4:
           testimm8 = False; imm64 = Replicate16x4(imm8); break;
       case 5:
diff --git a/helgrind/hg_basics.h b/helgrind/hg_basics.h
index 1698fca151d071b59784ca67d0ce8b061383e7b6..534866b2ff91de25f7e1d72b1612a4a6ed4fa0a0 100644
--- a/helgrind/hg_basics.h
+++ b/helgrind/hg_basics.h
@@ -42,7 +42,7 @@ void   HG_(free)   ( void* p );
 HChar* HG_(strdup) ( const HChar* cc, const HChar* s );
 
 static inline Bool HG_(is_sane_ThreadId) ( ThreadId coretid ) {
-   return coretid >= 0 && coretid < VG_N_THREADS;
+   return coretid < VG_N_THREADS;
 }
 
 
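ThreadId is an unsigned integer type in Valgrind, so the dropped
coretid >= 0 comparison was always true, and Coverity reports such checks as
having no effect. A standalone sketch, with an illustrative VG_N_THREADS value
(not the real one):

    typedef unsigned int ThreadId;   /* unsigned, as in Valgrind */
    #define VG_N_THREADS 500         /* illustrative value only */

    static inline int is_sane_tid(ThreadId coretid)
    {
        /* coretid >= 0 is a tautology for an unsigned type, which is
           exactly what the checker flags; the upper bound suffices. */
        return coretid < VG_N_THREADS;
    }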
diff --git a/memcheck/mc_leakcheck.c b/memcheck/mc_leakcheck.c
index b06e77f605c51befff42655e6e756bcf6a224a4b..83a2b74e2a1e5e6ed3e704998ea72577b5168f5b 100644
--- a/memcheck/mc_leakcheck.c
+++ b/memcheck/mc_leakcheck.c
@@ -1123,7 +1123,10 @@ lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite,
 #     else
       // On other platforms, just skip one Addr.
       lc_sig_skipped_szB += sizeof(Addr);
+      // PJF asserts are always on
+      // coverity[ASSERT_SIDE_EFFECT:FALSE]
       tl_assert(bad_scanned_addr >= VG_ROUNDUP(start, sizeof(Addr)));
+      // coverity[ASSERT_SIDE_EFFECT:FALSE]
       tl_assert(bad_scanned_addr < VG_ROUNDDN(start+len, sizeof(Addr)));
       ptr = bad_scanned_addr + sizeof(Addr); // Unaddressable, - skip it.
 #endif
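
These comments are the "event tags" named in the subject line: a
coverity[tag] annotation placed on the line immediately above a flagged
statement dismisses the single event reported there, and the :FALSE suffix
records it as a false positive. A minimal sketch of the same pattern, using
the standard assert in place of tl_assert:

    #include <assert.h>

    void scan_check(unsigned long addr, unsigned long lo, unsigned long hi)
    {
        /* tl_assert is always compiled in ("asserts are always on"), so
           the ASSERT_SIDE_EFFECT report does not apply here; each tag
           silences exactly one event on the line that follows it. */
        // coverity[ASSERT_SIDE_EFFECT:FALSE]
        assert(addr >= lo);
        // coverity[ASSERT_SIDE_EFFECT:FALSE]
        assert(addr < hi);
    }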