Addr VG_(thread_get_stack_max)(ThreadId tid)
{
- vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
+ vg_assert(tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
vg_assert(VG_(threads)[tid].status != VgTs_Empty);
return VG_(threads)[tid].client_stack_highest_byte;
}
SizeT VG_(thread_get_stack_size)(ThreadId tid)
{
- vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
+ vg_assert(tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
vg_assert(VG_(threads)[tid].status != VgTs_Empty);
return VG_(threads)[tid].client_stack_szB;
}
Addr VG_(thread_get_altstack_min)(ThreadId tid)
{
- vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
+ vg_assert(tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
vg_assert(VG_(threads)[tid].status != VgTs_Empty);
return (Addr)VG_(threads)[tid].altstack.ss_sp;
}
SizeT VG_(thread_get_altstack_size)(ThreadId tid)
{
- vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
+ vg_assert(tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
vg_assert(VG_(threads)[tid].status != VgTs_Empty);
return VG_(threads)[tid].altstack.ss_size;
}
cszB = sizeof(Superblock) + sb->n_payload_bytes;
// removes sb from superblock list.
- for (i = 0; i < a->sblocks_used; i++) {
+ for (i = 0U; i < a->sblocks_used; i++) {
if (a->sblocks[i] == sb)
break;
}
- vg_assert(i >= 0 && i < a->sblocks_used);
+ vg_assert(i < a->sblocks_used);
for (j = i; j < a->sblocks_used; j++)
a->sblocks[j] = a->sblocks[j+1];
a->sblocks_used--;
if (sr_isError(res))
return 0;
+ // clang-tidy and coverity both warn about buf here (possibly-uninitialized / narrowing), but on this path sr_isError(res) was false, so the IPC_STAT call above has filled buf in — the warnings appear to be false positives.
return buf.sem_nsems;
# elif defined(__NR_semsys) /* Solaris */
struct vki_semid_ds buf;
void VG_(clear_syscallInfo) ( ThreadId tid )
{
vg_assert(syscallInfo);
- vg_assert(tid >= 0 && tid < VG_N_THREADS);
+ vg_assert(tid < VG_N_THREADS);
VG_(memset)( & syscallInfo[tid], 0, sizeof( syscallInfo[tid] ));
syscallInfo[tid].status.what = SsIdle;
}
Bool VG_(is_in_syscall) ( ThreadId tid )
{
- vg_assert(tid >= 0 && tid < VG_N_THREADS);
+ vg_assert(tid < VG_N_THREADS);
return (syscallInfo && syscallInfo[tid].status.what != SsIdle);
}
Bool VG_(is_in_kernel_restart_syscall) ( ThreadId tid )
{
- vg_assert(tid >= 0 && tid < VG_N_THREADS);
+ vg_assert(tid < VG_N_THREADS);
return (syscallInfo && ((syscallInfo[tid].flags & SfKernelRestart) != 0));
}
Word VG_(is_in_syscall_no) (ThreadId tid )
{
- vg_assert(tid >= 0 && tid < VG_N_THREADS);
+ vg_assert(tid < VG_N_THREADS);
return syscallInfo[tid].orig_args.sysno;
}
address range which does not fall cleanly within any specific bin.
Note that ECLASS_SHIFT + ECLASS_WIDTH must be < 32.
ECLASS_N must fit in a EclassNo. */
-#define ECLASS_SHIFT 13
-#define ECLASS_WIDTH 9
-#define ECLASS_MISC (1 << ECLASS_WIDTH)
-#define ECLASS_N (1 + ECLASS_MISC)
+#define ECLASS_SHIFT 13U
+#define ECLASS_WIDTH 9U
+#define ECLASS_MISC (1U << ECLASS_WIDTH)
+#define ECLASS_N (1U + ECLASS_MISC)
STATIC_ASSERT(ECLASS_SHIFT + ECLASS_WIDTH < 32);
typedef UShort EClassNo;
sizeof(HostExtent));
/* Add an entry in the sector_search_order */
- for (i = 0; i < n_sectors; i++) {
+ for (i = 0U; i < n_sectors; i++) {
if (sector_search_order[i] == INV_SNO)
break;
}
- vg_assert(i >= 0 && i < n_sectors);
+ vg_assert(i < n_sectors);
sector_search_order[i] = sno;
if (VG_(clo_verbosity) > 2)
/*-------------------------------------------------------------*/
/* forward */
-static void unredir_discard_translations( Addr, ULong );
+static void unredir_discard_translations( Addr /*guest_start*/, ULong /*range*/);
/* Stuff for deleting translations which intersect with a given
address range. Unfortunately, to make this run at a reasonable
" FAST, ec = %d\n", ec);
/* Fast scheme */
- vg_assert(ec >= 0 && ec < ECLASS_MISC);
+ vg_assert(ec < ECLASS_MISC);
for (sno = 0; sno < n_sectors; sno++) {
sec = &sectors[sno];
#define UNREDIR_SZB 1000
#define N_UNREDIR_TT 500
-#define N_UNREDIR_TCQ (N_UNREDIR_TT * UNREDIR_SZB / sizeof(ULong))
+#define N_UNREDIR_TCQ (N_UNREDIR_TT * UNREDIR_SZB / (Int)sizeof(ULong))
typedef
struct {
int ret;
char c;
char c1 = '\0';
- char c2;
+ char c2 = '\0';
unsigned char csum = 0;
// Look for first '$' (start of packet) or error.
the barrier, so need to mess with dep edges in the same way
as if the barrier had filled up normally. */
present = VG_(sizeXA)(bar->waiting);
- tl_assert(present >= 0 && present <= bar->size);
+ tl_assert(present <= bar->size);
if (newcount <= present) {
bar->size = present; /* keep the cross_sync call happy */
do_barrier_cross_sync_and_empty(bar);
unsigned short sem_nsems; /* no. of semaphores in array */
vki_time_t sem_otime; /* last semop time */
vki_time_t sem_ctime; /* last change time */
- long sem_pad2;
- long sem_pad3[4];
};
struct vki_sembuf {
if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
OCacheLine* line;
UWord lineoff = oc_line_offset(a);
- tl_assert(lineoff >= 0
- && lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
+ tl_assert(lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
line = find_OCacheLine( a );
line->u.main.descr[lineoff+0] = 0;
line->u.main.descr[lineoff+1] = 0;
--partial-loads-ok needs to be enabled by default on all platforms.
Not doing so causes lots of false errors. */
Bool MC_(clo_partial_loads_ok) = True;
-Long MC_(clo_freelist_vol) = 20*1000*1000LL;
-Long MC_(clo_freelist_big_blocks) = 1*1000*1000LL;
+Long MC_(clo_freelist_vol) = 20LL*1000LL*1000LL;
+Long MC_(clo_freelist_big_blocks) = 1LL*1000LL*1000LL;
LeakCheckMode MC_(clo_leak_check) = LC_Summary;
VgRes MC_(clo_leak_resolution) = Vg_HighRes;
UInt MC_(clo_show_leak_kinds) = R2S(Possible) | R2S(Unreached);