static IRTemp math_PINSRW_128 ( IRTemp v128, IRTemp u16, UInt imm8 )
{
- vassert(imm8 >= 0 && imm8 <= 7);
+ vassert(imm8 <= 7);
// Create a V128 value which has the selected word in the
// specified lane, and zeroes everywhere else.
static IRTemp math_PINSRB_128 ( IRTemp v128, IRTemp u8, UInt imm8 )
{
- vassert(imm8 >= 0 && imm8 <= 15);
+ vassert(imm8 <= 15);
// Create a V128 value which has the selected byte in the
// specified lane, and zeroes everywhere else.
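/* Editor's note: a scalar sketch (not from the patch, names hypothetical)
   of the insert-into-lane scheme both helpers use: build a value with the
   new element in lane imm8 and zeroes elsewhere, clear that lane in the
   original, and OR the two together.  Shown on 64 bits with four 16-bit
   lanes; the real code does the same on V128. */
static ULong insert_u16_lane64 ( ULong v64, UShort u16, UInt imm8 )
{
   vassert(imm8 <= 3);
   ULong mask = 0xFFFFULL << (16 * imm8);
   ULong elem = ((ULong)u16) << (16 * imm8);
   return (v64 & ~mask) | elem;
}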
/* Be careful of these (1ULL << (S+1)) - 1 expressions, and the
same below with d. S can be 63 in which case we have an out of
range and hence undefined shift. */
- vassert(S >= 0 && S <= 63);
+ vassert(S <= 63);
vassert(esize >= (S+1));
ULong elem_s = // Zeroes(esize-(S+1)):Ones(S+1)
//(1ULL << (S+1)) - 1;
}
}
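/* Editor's note: a sketch (helper name hypothetical, not part of the
   patch) of the shift-safe idiom the comment above alludes to.
   (1ULL << (S+1)) - 1 is undefined when S == 63, since the shift count
   would be 64; splitting it into two in-range shifts yields Ones(S+1)
   for any S in 0 .. 63 -- the same trick as the
   ((1 << wm1) - 1) + (1 << wm1) mask computation further down. */
static ULong mkOnes ( UInt S )
{
   vassert(S <= 63);
   return ((1ULL << S) - 1) + (1ULL << S);   // Ones(S+1), no 64-bit shift
}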
- if (is64 && immS >= 0 && immS <= 62
+ if (is64 && immS <= 62
&& immR == immS + 1 && opc == BITS2(1,0)) {
// 64-bit shift left
UInt shift = 64 - immR;
UInt sz = INSN(11,10);
UInt nn = INSN(9,5);
UInt dd = INSN(4,0);
- vassert(sz >= 0 && sz <= 3);
+ vassert(sz <= 3);
if ((bitSF == 0 && sz <= BITS2(1,0))
|| (bitSF == 1 && sz == BITS2(1,1))) {
UInt ix = (bitC == 1 ? 4 : 0) | sz;
return;
}
- vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2);
+ vassert(laneSzBlg2 <= 2);
IROp doubler = Iop_INVALID, halver = Iop_INVALID;
math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
return;
}
- vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2);
+ vassert(laneSzBlg2 <= 2);
IROp doubler = Iop_INVALID, halver = Iop_INVALID;
math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
return;
}
- vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2);
+ vassert(laneSzBlg2 <= 2);
IROp doubler = Iop_INVALID, halver = Iop_INVALID;
math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
return;
}
- vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2);
+ vassert(laneSzBlg2 <= 2);
IROp doubler = Iop_INVALID, halver = Iop_INVALID;
math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
return;
}
- vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2);
+ vassert(laneSzBlg2 <= 2);
IROp doubler = Iop_INVALID, halver = Iop_INVALID;
math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
return;
}
- vassert(laneSzBlg2 >= 0 && laneSzBlg2 <= 2);
+ vassert(laneSzBlg2 <= 2);
IROp doubler = Iop_INVALID, halver = Iop_INVALID;
math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
static IRTemp math_TBL_TBX ( IRTemp tab[4], UInt len, IRTemp src,
IRTemp oor_values )
{
- vassert(len >= 0 && len <= 3);
+ vassert(len <= 3);
/* Generate some useful constants as concisely as possible. */
IRTemp half15 = newTemp(Ity_I64);
/* Saturation has occurred if any of the shifted-out bits are
different from the top bit of the original value. */
UInt rshift = laneBits - 1 - shift;
- vassert(rshift >= 0 && rshift < laneBits-1);
+ vassert(rshift < laneBits-1);
/* qDiff1 is the shifted out bits, and the top bit of the original
value, preceded by zeroes. */
assign(*qDiff1, binop(mkVecSHRN(size), mkexpr(src), mkU8(rshift)));
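/* Editor's note: a scalar sketch (not part of the patch) of the
   saturation test described above, specialised to 8-bit lanes.  qDiff1
   holds the shifted-out bits plus the top bit; the comparison value is
   what those bits look like when no saturation occurred, i.e. the top
   bit replicated.  Saturation happened iff they differ. */
static Bool shl_saturates_8 ( Char x, UInt shift /* 1 .. 7 */ )
{
   UInt  rshift = 8 - 1 - shift;
   UChar qDiff1 = (UChar)((UChar)x >> rshift);
   UChar qDiff2 = (UChar)((x < 0 ? 0xFF : 0x00) >> rshift);
   return qDiff1 != qDiff2;
}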
if (bitU == 0 && (immh & 8) == 8 && opcode == BITS5(0,1,0,1,0)) {
/* -------- 0,1xxx,01010 SHL d_d_#imm -------- */
UInt sh = immhb - 64;
- vassert(sh >= 0 && sh < 64);
+ vassert(sh < 64);
putQReg128(dd,
unop(Iop_ZeroHI64ofV128,
sh == 0 ? getQReg128(nn)
case BITS4(1,0,1,1): ks = 2; break;
default: vassert(0);
}
- vassert(ks >= 0 && ks <= 2);
+ vassert(ks <= 2);
if (size == X00 || size == X11) return False;
vassert(size <= 2);
IRTemp vecN, vecM, vecD, res, sat1q, sat1n, sat2q, sat2n;
Bool isAcc = opcode == BITS5(0,0,0,1,0);
Bool ok = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
if (!ok || (bitQ == 0 && size == X11)) return False;
- vassert(size >= 0 && size <= 3);
+ vassert(size <= 3);
UInt lanebits = 8 << size;
vassert(shift >= 1 && shift <= lanebits);
IROp op = isU ? mkVecSHRN(size) : mkVecSARN(size);
case BITS4(1,0,1,0): ks = 2; break;
default: vassert(0);
}
- vassert(ks >= 0 && ks <= 2);
+ vassert(ks <= 2);
if (size == X11) return False;
vassert(size <= 2);
Bool isU = bitU == 1;
case BITS4(0,1,1,0): ks = 2; break;
default: vassert(0);
}
- vassert(ks >= 0 && ks <= 2);
+ vassert(ks <= 2);
Bool isU = bitU == 1;
Bool is2 = bitQ == 1;
UInt mm = 32; // invalid
/* So, generate either an unconditional or a conditional write to
the reg. */
ASSERT_IS_THUMB;
- vassert(iregNo >= 0 && iregNo <= 14);
+ vassert(iregNo <= 14);
if (guardT == IRTemp_INVALID) {
/* unconditional write */
llPutIReg( iregNo, e );
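/* Editor's note: a sketch of the conditional arm (following the usual
   IR idiom rather than quoting lines from this patch): when a guard is
   present, fold it into an ITE so the register keeps its old value
   whenever the guard evaluates to zero. */
} else {
   /* conditional write */
   llPutIReg( iregNo,
              IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
                          e, llGetIReg(iregNo) ) );
}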
- vassert(typeOfIRTemp(irsb->tyenv, t_dep1 == Ity_I32));
- vassert(typeOfIRTemp(irsb->tyenv, t_dep2 == Ity_I32));
- vassert(typeOfIRTemp(irsb->tyenv, t_ndep == Ity_I32));
+ vassert(typeOfIRTemp(irsb->tyenv, t_dep1) == Ity_I32);
+ vassert(typeOfIRTemp(irsb->tyenv, t_dep2) == Ity_I32);
+ vassert(typeOfIRTemp(irsb->tyenv, t_ndep) == Ity_I32);
+ // cc_op is unsigned, so cc_op >= 0 always holds; keeping the full check for readability
vassert(cc_op >= ARMG_CC_OP_COPY && cc_op < ARMG_CC_OP_NUMBER);
if (guardT == IRTemp_INVALID) {
/* unconditional */
IRTemp tmp = newTemp(Ity_I32);
IRTemp res = newTemp(Ity_I32);
UInt mask = ((1 << wm1) - 1) + (1 << wm1);
- vassert(msb >= 0 && msb <= 31);
+ vassert(msb <= 31);
vassert(mask != 0); // guaranteed by msb being in 0 .. 31 inclusive
assign(src, getIRegA(rN));
IRTemp tmp = newTemp(Ity_I32);
IRTemp res = newTemp(Ity_I32);
UInt mask = ((1 << wm1) - 1) + (1 << wm1);
- vassert(msb >= 0 && msb <= 31);
+ vassert(msb <= 31);
vassert(mask != 0); // guaranteed by msb being in 0 .. 31 inclusive
assign(src, getIRegT(rN));
{
U128* pU128_dst;
- vassert( (acc >= 0) && (acc < 8) );
- vassert( (reg >= 0) && (reg < 4) );
+ vassert(acc < 8);
+ vassert(reg < 4);
pU128_dst = (U128*) (((UChar*)gst) + offset + acc*4*sizeof(U128)
+ reg*sizeof(U128));
static IRExpr* /* :: Ity_V128 */ getACC ( UInt index, UInt reg,
Bool ACC_mapped_on_VSR)
{
- vassert( (index >= 0) && (index < 8) );
- vassert( (reg >= 0) && (reg < 4) );
+ vassert(index < 8);
+ vassert(reg < 4);
return IRExpr_Get( base_acc_addr( ACC_mapped_on_VSR )
+ ACC_offset( index, reg), Ity_V128 );
d->fxState[3].fx = AT_fx;
d->fxState[3].size = sizeof(U128);
- vassert( (AT >= 0) && (AT < 8));
+ vassert(AT < 8);
acc_base_address = base_acc_addr( ACC_mapped_on_VSR );
/* Convert the segment selector onto a table index */
seg_selector >>= 3;
- vassert(seg_selector >= 0 && seg_selector < 8192);
+ vassert(seg_selector < 8192);
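/* Editor's note: worked example, not from the patch.  An x86 segment
   selector is 16 bits: bits 1:0 hold the RPL, bit 2 the table indicator
   (0 = GDT, 1 = LDT), bits 15:3 the table index.  E.g. selector 0x2B:
   RPL = 3, TI = 0, index = 0x2B >> 3 = 5.  After the >>= 3 above the
   index therefore fits in 13 bits, i.e. is < 8192, which is exactly
   what the tightened assertion states. */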
if (tiBit == 0) {
static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO,
ISelEnv* env, IRTemp tmp )
{
- vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
vassert(! hregIsInvalid(env->vregmapHI[tmp]));
*vrLO = env->vregmap[tmp];
never see IRExpr_VECRET() at this point, since the return-type
check above should ensure all those cases use the slow scheme
instead. */
- vassert(n_args >= 0 && n_args <= 6);
+ vassert(n_args <= 6);
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg))) {
am->ARMam1.RRS.base = base;
am->ARMam1.RRS.index = index;
am->ARMam1.RRS.shift = shift;
- vassert(0 <= shift && shift <= 3);
+ vassert(shift <= 3);
return am;
}
ri84->tag = ARMri84_I84;
ri84->ARMri84.I84.imm8 = imm8;
ri84->ARMri84.I84.imm4 = imm4;
- vassert(imm8 >= 0 && imm8 <= 255);
- vassert(imm4 >= 0 && imm4 <= 15);
+ vassert(imm8 <= 255);
+ vassert(imm4 <= 15);
return ri84;
}
ARMRI84* ARMRI84_R ( HReg reg ) {
/*---------------------------------------------------------*/
static UInt ROR32 ( UInt x, UInt sh ) {
- vassert(sh >= 0 && sh < 32);
+ vassert(sh < 32);
if (sh == 0)
return x;
else
return (x << (32-sh)) | (x >> sh);
}
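/* Editor's note: quick sanity check of the rotate (not from the patch):
   ROR32(0x80000001, 1) == 0xC0000000, and ROR32(x, 0) == x. */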
RRegLRState;
-#define IS_VALID_VREGNO(v) ((v) >= 0 && (v) < n_vregs)
+/* v is always unsigned; it would be nice to static-assert that */
+#define IS_VALID_VREGNO(v) ((v) < n_vregs)
#define IS_VALID_RREGNO(r) ((r) >= 0 && (r) < n_rregs)
#define FREE_VREG(v) \
vassert(rRS < 0x20);
vassert(rRT < 0x20);
vassert(opc2 <= 0x3F);
- vassert(sa >= 0 && sa <= 0x3F);
+ vassert(sa <= 0x3F);
theInstr = ((opc1 << 26) | (rRS << 21) | (rRT << 16) | (rRD << 11) |
((sa & 0x1F) << 6) | (opc2));
as that is a handy way to sign extend the lower 32
bits into the upper 32 bits. */
if (mode64)
- vassert(n >= 0 && n < 32);
+ vassert(n < 32);
else
vassert(n > 0 && n < 32);
p = mkFormX(p, 31, r_srcL, r_dst, n, 824, 0, endness_host);
HReg* vrLo, ISelEnv* env, IRTemp tmp )
{
vassert(!env->mode64);
- vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
vassert(! hregIsInvalid(env->vregmapMedLo[tmp]));
*vrHi = env->vregmapHi[tmp];
static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp )
{
- vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
vassert(! hregIsInvalid(env->vregmapHI[tmp]));
*vrLO = env->vregmap[tmp];
inline
IRType typeOfIRTemp ( const IRTypeEnv* env, IRTemp tmp )
{
- vassert(tmp >= 0);
vassert(tmp < env->types_used);
return env->types[tmp];
}
power of two. Then, increase the associativity by that
factor. Finally, re-calculate the total size so as to make
sure it divides exactly between the sets. */
- tl_assert(old_nSets >= 0);
UInt new_nSets = floor_power_of_2 ( old_nSets );
tl_assert(new_nSets > 0 && new_nSets < old_nSets);
Double factor = (Double)old_nSets / (Double)new_nSets;
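/* Editor's note: a sketch of floor_power_of_2 under the assumption
   (implied by the surrounding comment and asserts) that it yields the
   largest power of two not exceeding its argument, e.g. 6 -> 4, 8 -> 8. */
static UInt floor_power_of_2 ( UInt x )
{
   tl_assert(x > 0);
   UInt p = 1;
   while (p <= x / 2)
      p *= 2;
   return p;
}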
seg->hasR = seg->hasW = seg->hasX = seg->hasT
= seg->isCH = False;
#if defined(VGO_freebsd)
- seg->ignore_offset = False;
+ seg->isFF = False;
+ seg->ignore_offset = False;
#endif
}
DiOffT off_orig = off;
vg_assert(img != NULL);
vg_assert(img->ces_used <= CACHE_N_ENTRIES);
- vg_assert(entNo >= 0 && entNo < img->ces_used);
+ vg_assert(entNo < img->ces_used);
vg_assert(off < img->real_size);
CEnt* ce = img->ces[entNo];
vg_assert(ce != NULL);
{
Int i, r;
EClassNo eclasses[3];
- vg_assert(tteno >= 0 && tteno < N_TTES_PER_SECTOR);
+ vg_assert(tteno < N_TTES_PER_SECTOR);
TTEntryH* tteH = &sec->ttH[tteno];
r = vexGuestExtents_to_eclasses( eclasses, tteH );
i = sectors[sNo].empty_tt_list;
sectors[sNo].empty_tt_list = sectors[sNo].ttC[i].usage.next_empty_tte;
- vg_assert (i >= 0 && i < N_TTES_PER_SECTOR);
+ vg_assert (i < N_TTES_PER_SECTOR);
return i;
}
WCache* _cache = &(_zzcache); \
tl_assert(_cache->dynMax >= 1); \
tl_assert(_cache->dynMax <= N_WCACHE_STAT_MAX); \
- tl_assert(_cache->inUse >= 0); \
tl_assert(_cache->inUse <= _cache->dynMax); \
if (_cache->inUse > 0) { \
if (_cache->ent[0].arg1 == _arg1 \
static WordVec* new_WV_of_size ( WordSetU* wsu, UWord sz )
{
WordVec* wv;
- tl_assert(sz >= 0);
wv = wsu->alloc( wsu->cc, sizeof(WordVec) );
wv->owner = wsu;
wv->words = NULL;
from a and b in order, where thrid is the next ThrID
occurring in either a or b, and tyma/b are the relevant
scalar timestamps, taking into account implicit zeroes. */
- tl_assert(ia >= 0 && ia <= useda);
- tl_assert(ib >= 0 && ib <= usedb);
+ tl_assert(ia <= useda);
+ tl_assert(ib <= usedb);
if (ia == useda && ib == usedb) {
/* both empty - done */
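/* Editor's note: minimal sketch (hypothetical names and types, not the
   patch) of the merge shape described above: walk two arrays sorted by
   ThrID, at each step emitting the smaller ThrID and treating an entry
   missing from one side as an implicit zero timestamp. */
static void merge_sketch ( const UInt* thra, const ULong* tya, UWord useda,
                           const UInt* thrb, const ULong* tyb, UWord usedb )
{
   UWord ia = 0, ib = 0;
   while (ia < useda || ib < usedb) {
      UInt  thrid;
      ULong tyma = 0, tymb = 0;                   /* implicit zeroes */
      if (ib == usedb || (ia < useda && thra[ia] < thrb[ib])) {
         thrid = thra[ia]; tyma = tya[ia]; ia++;  /* in a only */
      } else if (ia == useda || thrb[ib] < thra[ia]) {
         thrid = thrb[ib]; tymb = tyb[ib]; ib++;  /* in b only */
      } else {
         thrid = thra[ia]; tyma = tya[ia];        /* in both */
         tymb  = tyb[ib]; ia++; ib++;
      }
      /* ... combine (thrid, tyma, tymb) into the output VTS ... */
      (void)thrid; (void)tyma; (void)tymb;
   }
}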
can't set the threshold value smaller than it. */
tl_assert(nFreed <= nTab);
nLive = nTab - nFreed;
- tl_assert(nLive >= 0 && nLive <= nTab);
+ tl_assert(nLive <= nTab);
vts_next_GC_at = 2 * nLive;
if (vts_next_GC_at < nTab)
vts_next_GC_at = nTab;
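/* Editor's note: worked example, not from the patch.  With nTab == 1000
   and nFreed == 800: nLive == 200, so 2 * nLive == 400 < nTab, and the
   threshold is clamped up to nTab == 1000, ensuring it never drops
   below the current table size. */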
UWord setno = (a >> OC_BITS_PER_LINE) & (OC_N_SETS - 1);
UWord tagmask = ~((1 << OC_BITS_PER_LINE) - 1);
UWord tag = a & tagmask;
- tl_assert(setno >= 0 && setno < OC_N_SETS);
+ tl_assert(setno < OC_N_SETS);
/* we already tried line == 0; skip therefore. */
for (line = 1; line < OC_LINES_PER_SET; line++) {
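/* Editor's note: worked example with illustrative constants (not from
   the patch): taking OC_BITS_PER_LINE = 5 and OC_N_SETS = 4096, address
   a = 0x12345678 gives setno = (a >> 5) & 0xFFF = 0x2B3, tagmask = ~0x1F,
   tag = 0x12345660.  setno is (a >> 5) masked down to 12 bits, hence
   always < OC_N_SETS, which is all the tightened assertion needs. */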