nameIRegOrZR(is64, dd), nameIRegOrZR(is64, nn), shift);
return True;
}
- if (!is64 && immS >= 0 && immS <= 30
+ if (!is64 && immS <= 30
&& immR == immS + 1 && opc == BITS2(1,0)) {
// 32-bit shift left
UInt shift = 32 - immR;
vassert(0);
}
- vassert(ix >= 0 && ix <= 7);
+ vassert(ix <= 7);
putIReg64orZR(
dd,
UInt tabent;
for (tabent = 0; tabent <= len; tabent++) {
- vassert(tabent >= 0 && tabent < 4);
+ vassert(tabent < 4);
IRTemp bias = newTempV128();
assign(bias,
mkexpr(tabent == 0 ? allZero : allXX[tabent-1]));
if (bitU == 1 && (immh & 8) == 8 && opcode == BITS5(0,1,0,1,0)) {
/* -------- 1,1xxx,01010 SLI d_d_#imm -------- */
UInt sh = immhb - 64;
- vassert(sh >= 0 && sh < 64);
+ vassert(sh < 64);
if (sh == 0) {
putQReg128(dd, unop(Iop_ZeroHI64ofV128, getQReg128(nn)));
} else {
case BITS4(0,1,1,1): ks = 2; break;
default: vassert(0);
}
- vassert(ks >= 0 && ks <= 2);
+ vassert(ks <= 2);
UInt mm = 32; // invalid
UInt ix = 16; // invalid
switch (size) {
Bool isAcc = opcode == BITS5(0,0,1,1,0);
Bool ok = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
if (!ok || (bitQ == 0 && size == X11)) return False;
- vassert(size >= 0 && size <= 3);
+ vassert(size <= 3);
UInt lanebits = 8 << size;
vassert(shift >= 1 && shift <= lanebits);
IROp op = isU ? mkVecRSHU(size) : mkVecRSHS(size);
case BITS4(1,0,1,1): ks = 2; break;
default: vassert(0);
}
- vassert(ks >= 0 && ks <= 2);
+ vassert(ks <= 2);
if (size == X00 || size == X11) return False;
vassert(size <= 2);
IRTemp vecN, vecM, vecD, res, sat1q, sat1n, sat2q, sat2n;
case BITS4(0,1,1,1): ks = 2; break;
default: vassert(0);
}
- vassert(ks >= 0 && ks <= 2);
+ vassert(ks <= 2);
Bool is2 = bitQ == 1;
UInt mm = 32; // invalid
UInt ix = 16; // invalid
vassert(typeOfIRTemp(irsb->tyenv, t_dep1 == Ity_I32));
vassert(typeOfIRTemp(irsb->tyenv, t_dep2 == Ity_I32));
vassert(typeOfIRTemp(irsb->tyenv, t_ndep == Ity_I32));
- // strictly unsigned cc_op must always be >= 0, keeong for readability
+ // cc_op is strictly unsigned so it is always >= 0; the check is kept for readability
vassert(cc_op >= ARMG_CC_OP_COPY && cc_op < ARMG_CC_OP_NUMBER);
if (guardT == IRTemp_INVALID) {
/* unconditional */
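The comment above states the rationale behind this whole series of assertion changes: for an unsigned value, the ">= 0" half of such checks is tautologically true, and compilers can diagnose it (GCC's -Wtype-limits, for example). A minimal standalone illustration in C, using hypothetical names that are not taken from the Valgrind sources:

    #include <stdio.h>

    /* cc_op stands in for any unsigned selector value. */
    static void check ( unsigned int cc_op )
    {
       /* Tautological for an unsigned operand; GCC warns
          "comparison of unsigned expression >= 0 is always true"
          under -Wextra / -Wtype-limits. */
       if (cc_op >= 0)
          printf("always reached: cc_op = %u\n", cc_op);
    }

    int main ( void )
    {
       check(0u);
       return 0;
    }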
else if ( inst == XVI8GER4SPP )
result[j] = clampS64toS32(sum + acc_word[j]);
+ // @todo PJF Coverity complains that if none of the above ifs are true
+ // then result gets used uninitialized
} else {
result[j] = 0;
}
addInstr(env, mk_iMOVsd_RR( hregAMD64_RSP(), r_vecRetAddr ));
}
- vassert(n_args >= 0 && n_args <= 6);
+ vassert(n_args <= 6);
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
if (UNLIKELY(arg->tag == Iex_GSPTR)) {
addInstr(env, AMD64Instr_Set64(cc, res));
return res;
+ // PJF old debug code? - unreachable
+ /*
ppIRExpr(e);
vpanic("iselCondCode_R(amd64)");
+ */
}
/* --------- Reg or imm-8x4 operands --------- */
static UInt ROR32 ( UInt x, UInt sh ) {
- vassert(sh >= 0 && sh < 32);
+ vassert(sh < 32);
if (sh == 0)
return x;
else
}
RRegLRState;
-/* v is always unsigned, wish we could static assert that */
+/* v and r are always unsigned, wish we could static assert that */
#define IS_VALID_VREGNO(v) ((v) < n_vregs)
-#define IS_VALID_RREGNO(r) ((r) >= 0 && (r) < n_rregs)
+#define IS_VALID_RREGNO(r) ((r) < n_rregs)
#define FREE_VREG(v) \
do { \
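For the static assert wished for in the comment above, a minimal sketch of one way to express it, assuming C11 _Static_assert and the GCC/Clang __typeof__ extension; ASSERT_UNSIGNED and the trimmed-down n_vregs below are illustrative only, not existing names in these sources:

    /* Converting -1 to an unsigned type yields that type's maximum value,
       which is greater than 0; for a signed type the value stays negative,
       so the assertion fails at compile time. */
    #define ASSERT_UNSIGNED(x) \
       _Static_assert((__typeof__(x))-1 > 0, #x " must have an unsigned type")

    static unsigned int n_vregs;

    static void example ( unsigned int v )
    {
       ASSERT_UNSIGNED(v);        /* accepted: v is unsigned */
       ASSERT_UNSIGNED(n_vregs);  /* accepted: n_vregs is unsigned */
       /* ASSERT_UNSIGNED((int)v); would be rejected at compile time */
       (void)v;
    }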
static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO,
ISelEnv* env, IRTemp tmp )
{
- vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
vassert(! hregIsInvalid(env->vregmapMedLo[tmp]));
*vrLO = env->vregmapLo[tmp];
case S390_VEC_FLOAT_COMPARE_EQUAL:
return s390_emit_VFCE(buf, v1, v2, v3, s390_getM_from_size(size), 0, 0);
case S390_VEC_FLOAT_COMPARE_LESS_OR_EQUAL:
+ // PJF I assume that CHE is compare higher or equal so the order needs swapping
+ // coverity[SWAPPED_ARGUMENTS:FALSE]
return s390_emit_VFCHE(buf, v1, v3, v2, s390_getM_from_size(size), 0, 0);
case S390_VEC_FLOAT_COMPARE_LESS:
+ // PJF as above but this time compare higher
+ // coverity[SWAPPED_ARGUMENTS:FALSE]
return s390_emit_VFCH(buf, v1, v3, v2, s390_getM_from_size(size), 0, 0);
default:
static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp )
{
- vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
return env->vregmap[tmp];
}
if (from) {
/* Prohibit corruption by array overrun */
- CLG_ASSERT((0 <= jmp) && (jmp <= from->bb->cjmp_count));
+ CLG_ASSERT(jmp <= from->bb->cjmp_count);
jcc->next_from = from->jmp[jmp].jcc_list;
from->jmp[jmp].jcc_list = jcc;
}
DiOffT img_szB = ML_(img_size)(img);
DiOffT curr_off = 0;
while (1) {
- vg_assert(curr_off >= 0 && curr_off <= img_szB);
+ vg_assert(curr_off <= img_szB);
if (curr_off == img_szB) break;
DiOffT avail = img_szB - curr_off;
vg_assert(avail > 0 && avail <= img_szB);
{
UInt cfsi_m_ix;
- vg_assert(pos >= 0 && pos < di->cfsi_used);
+ vg_assert(pos < di->cfsi_used);
switch (di->sizeof_cfsi_m_ix) {
case 1: cfsi_m_ix = ((UChar*) di->cfsi_m_ix)[pos]; break;
case 2: cfsi_m_ix = ((UShort*) di->cfsi_m_ix)[pos]; break;
SizeT new_size;
ExeContext** new_ec_htab;
- vg_assert(ec_htab_size_idx >= 0 && ec_htab_size_idx < N_EC_PRIMES);
+ vg_assert(ec_htab_size_idx < N_EC_PRIMES);
if (ec_htab_size_idx == N_EC_PRIMES-1)
return; /* out of primes - can't resize further */
//--------------------------------------------------------------
VG_(debugLog)(1, "main", "Initialise scheduler (phase 1)\n");
tid_main = VG_(scheduler_init_phase1)();
- vg_assert(tid_main >= 0 && tid_main < VG_N_THREADS
+ vg_assert(tid_main < VG_N_THREADS
&& tid_main != VG_INVALID_THREADID);
/* Tell the tool about tid_main */
VG_TRACK( pre_thread_ll_create, VG_INVALID_THREADID, tid_main );
Superblock * sb;
SizeT pos = min + (max - min)/2;
- vg_assert(pos >= 0 && pos < a->sblocks_used);
+ vg_assert(pos < a->sblocks_used);
sb = a->sblocks[pos];
if ((Block*)&sb->payload_bytes[0] <= b
&& b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
{
vki_sigset_t savedmask;
- vg_assert(tid >= 0 && tid < VG_N_THREADS);
+ vg_assert(tid < VG_N_THREADS);
VG_(cleanup_thread)(&VG_(threads)[tid].arch);
VG_(threads)[tid].tid = tid;
Bool havePatt, haveInput;
const HChar *currPatt, *currInput;
tailcall:
- vg_assert(nPatt >= 0 && nPatt < 1000000); /* arbitrary */
- vg_assert(inputCompleter
- || (nInput >= 0 && nInput < 1000000)); /* arbitrary */
- vg_assert(ixPatt >= 0 && ixPatt <= nPatt);
- vg_assert(ixInput >= 0 && (inputCompleter || ixInput <= nInput));
+ vg_assert(nPatt < 1000000); /* arbitrary */
+ vg_assert(inputCompleter || (nInput < 1000000)); /* arbitrary */
+ vg_assert(ixPatt <= nPatt);
+ vg_assert(inputCompleter || ixInput <= nInput);
havePatt = ixPatt < nPatt;
haveInput = inputCompleter ?
ThreadState *VG_(get_ThreadState)(ThreadId tid)
{
- vg_assert(tid >= 0 && tid < VG_N_THREADS);
+ vg_assert(tid < VG_N_THREADS);
vg_assert(VG_(threads)[tid].tid == tid);
return &VG_(threads)[tid];
}
tres = LibVEX_Translate ( &vta );
vg_assert(tres.status == VexTransOK);
- vg_assert(tres.n_sc_extents >= 0 && tres.n_sc_extents <= 3);
+ vg_assert(tres.n_sc_extents <= 3);
vg_assert(tmpbuf_used <= N_TMPBUF);
vg_assert(tmpbuf_used > 0);
// Point an htt entry to the tt slot
HTTno htti = HASH_TT(entry);
- vg_assert(htti >= 0 && htti < N_HTTES_PER_SECTOR);
+ vg_assert(htti < N_HTTES_PER_SECTOR);
while (True) {
if (sectors[y].htt[htti] == HTT_EMPTY
|| sectors[y].htt[htti] == HTT_DELETED)
all sectors and avoids multiple expensive % operations. */
n_full_lookups++;
kstart = HASH_TT(guest_addr);
- vg_assert(kstart >= 0 && kstart < N_HTTES_PER_SECTOR);
+ vg_assert(kstart < N_HTTES_PER_SECTOR);
/* Search in all the sectors, using sector_search_order[] as a
heuristic guide as to what order to visit the sectors. */
links = NULL;
while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
tl_assert(links);
- tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
+ tl_assert(links->inns < univ_laog_cardinality);
univ_laog_seen[links->inns] = True;
- tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
+ tl_assert(links->outs < univ_laog_cardinality);
univ_laog_seen[links->outs] = True;
links = NULL;
}
WordVec* wv;
tl_assert(wsu);
wv = do_ix2vec( wsu, ws );
- tl_assert(wv->size >= 0);
return wv->size;
}
if (HG_DEBUG) VG_(printf)("getPayloadWS %s %d\n", wsu->cc, (Int)ws);
tl_assert(wsu);
wv = do_ix2vec( wsu, ws );
- tl_assert(wv->size >= 0);
*nWords = wv->size;
*words = wv->words;
}
if (0)
VG_(printf)("scache wback line %d\n", (Int)wix);
- tl_assert(wix >= 0 && wix < N_WAY_NENT);
+ tl_assert(wix < N_WAY_NENT);
tag = cache_shmem.tags0[wix];
cl = &cache_shmem.lyns0[wix];
if (0)
VG_(printf)("scache fetch line %d\n", (Int)wix);
- tl_assert(wix >= 0 && wix < N_WAY_NENT);
+ tl_assert(wix < N_WAY_NENT);
tag = cache_shmem.tags0[wix];
cl = &cache_shmem.lyns0[wix];
tl_assert(acc_thr);
tl_assert(acc_thr->hgthread);
tl_assert(acc_thr->hgthread->hbthr == acc_thr);
- tl_assert(HG_(clo_history_level) >= 0 && HG_(clo_history_level) <= 2);
+ tl_assert(HG_(clo_history_level) <= 2);
if (HG_(clo_history_level) == 1) {
Bool found;
void zsm_swrite64 ( Addr a, SVal svNew ) {
CacheLine* cl;
UWord cloff, tno;
+ SizeT i;
//UWord toff;
stats__cline_swrite64s++;
if (UNLIKELY(!aligned64(a))) goto slowcase;
stats__nia_cache_queries++;
i = nia % N_NIA_TO_ECU_CACHE;
- tl_assert(i >= 0 && i < N_NIA_TO_ECU_CACHE);
+ tl_assert(i < N_NIA_TO_ECU_CACHE);
if (LIKELY( nia_to_ecu_cache[i].nia0 == nia ))
return nia_to_ecu_cache[i].ecu0;