
[PATCH 3/4] ari, btrace: avoid unsigned long long


Fix the ARI warning about the use of unsigned long long.  We can't use
ULONGEST, since it is defined as unsigned long on 32-bit systems.  Use
unsigned long to hold the buffer size inside GDB and __u64 when
interfacing with the kernel.
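
Not part of the patch, just an illustrative, self-contained sketch of the
narrowing pattern the diff below relies on: a __u64 value coming from the
kernel is converted to the narrower type used inside GDB only after a
round-trip check confirms the conversion is lossless.  It assumes a Linux
system where <linux/types.h> provides __u64; the helper name
narrow_to_ulong is made up for this example.

#include <linux/types.h>	/* For __u64.  */
#include <stdio.h>

/* Narrow a kernel-provided __u64 VALUE into *RESULT, failing if the
   value does not fit into an unsigned long.  Returns 0 on success,
   -1 on overflow.  */

static int
narrow_to_ulong (__u64 value, unsigned long *result)
{
  unsigned long narrowed = (unsigned long) value;

  /* If the round trip changes the value, it did not fit.  */
  if ((__u64) narrowed != value)
    return -1;

  *result = narrowed;
  return 0;
}

int
main (void)
{
  unsigned long size;

  if (narrow_to_ulong ((__u64) 4096, &size) == 0)
    printf ("size = %lu\n", size);

  return 0;
}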

2015-07-08  Markus Metzger <markus.t.metzger@intel.com>

gdb/
	* nat/linux-btrace.c (perf_event_read): Change the type of DATA_HEAD.
	(perf_event_read_all): Change the type of SIZE and DATA_HEAD.
	(perf_event_read_bts): Change the type of SIZE and READ.
	(linux_enable_bts): Change the type of SIZE, PAGES, DATA_SIZE,
	and DATA_OFFSET.  Move DATA_SIZE declaration.  Restrict the buffer size
	to UINT_MAX.  Check for overflows when using DATA_HEAD from the perf
	mmap page.
	(linux_enable_pt): Change the type of PAGES and SIZE.  Restrict the
	buffer size to UINT_MAX.
	(linux_read_bts): Change the type of BUFFER_SIZE, SIZE, DATA_HEAD, and
	DATA_TAIL.
	* nat/linux-btrace.h (struct perf_event_buffer) <size, data_head>
	<last_head>: Change type.
---
 gdb/nat/linux-btrace.c | 70 ++++++++++++++++++++++++++++++++------------------
 gdb/nat/linux-btrace.h |  6 ++---
 2 files changed, 48 insertions(+), 28 deletions(-)

diff --git a/gdb/nat/linux-btrace.c b/gdb/nat/linux-btrace.c
index b6e13d3..cc1e5a5 100644
--- a/gdb/nat/linux-btrace.c
+++ b/gdb/nat/linux-btrace.c
@@ -111,12 +111,13 @@ perf_event_new_data (const struct perf_event_buffer *pev)
    The caller is responsible for freeing the memory.  */
 
 static gdb_byte *
-perf_event_read (const struct perf_event_buffer *pev, unsigned long data_head,
+perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
 		 unsigned long size)
 {
   const gdb_byte *begin, *end, *start, *stop;
   gdb_byte *buffer;
-  unsigned long data_tail, buffer_size;
+  unsigned long buffer_size;
+  __u64 data_tail;
 
   if (size == 0)
     return NULL;
@@ -151,13 +152,14 @@ static void
 perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
 		     unsigned long *psize)
 {
-  unsigned long data_head, size;
+  unsigned long size;
+  __u64 data_head;
 
   data_head = *pev->data_head;
 
   size = pev->size;
   if (data_head < size)
-    size = data_head;
+    size = (unsigned long) data_head;
 
   *data = perf_event_read (pev, data_head, size);
   *psize = size;
@@ -270,11 +272,11 @@ perf_event_sample_ok (const struct perf_event_sample *sample)
 static VEC (btrace_block_s) *
 perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
 		     const uint8_t *end, const uint8_t *start,
-		     unsigned long long size)
+		     unsigned long size)
 {
   VEC (btrace_block_s) *btrace = NULL;
   struct perf_event_sample sample;
-  unsigned long long read = 0;
+  unsigned long read = 0;
   struct btrace_block block = { 0, 0 };
   struct regcache *regcache;
 
@@ -642,7 +644,8 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
   struct perf_event_mmap_page *header;
   struct btrace_target_info *tinfo;
   struct btrace_tinfo_bts *bts;
-  unsigned long long size, pages, data_offset, data_size;
+  unsigned long size, pages;
+  __u64 data_offset;
   int pid, pg;
 
   tinfo = xzalloc (sizeof (*tinfo));
@@ -674,7 +677,7 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
     goto err_out;
 
   /* Convert the requested size in bytes to pages (rounding up).  */
-  pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
+  pages = (((unsigned long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
   /* We need at least one page.  */
   if (pages == 0)
     pages = 1;
@@ -692,12 +695,13 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
       size_t length;
 
       size = pages * PAGE_SIZE;
-      length = size + PAGE_SIZE;
 
-      /* Check for overflows.  */
-      if ((unsigned long long) length < size)
+      /* Don't ask for more than we can represent in the configuration.  */
+      if (UINT_MAX < size)
 	continue;
 
+      length = size + PAGE_SIZE;
+
       /* The number of pages we request needs to be a power of two.  */
       header = mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0);
       if (header != MAP_FAILED)
@@ -708,23 +712,33 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
     goto err_file;
 
   data_offset = PAGE_SIZE;
-  data_size = size;
 
 #if defined (PERF_ATTR_SIZE_VER5)
   if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
     {
+      __u64 data_size;
+
       data_offset = header->data_offset;
       data_size = header->data_size;
+
+      size = (unsigned int) data_size;
+
+      /* Check for overflows.  */
+      if ((__u64) size != data_size)
+	{
+	  munmap ((void *) header, (size_t) size + PAGE_SIZE);
+	  goto err_file;
+	}
     }
 #endif /* defined (PERF_ATTR_SIZE_VER5) */
 
   bts->header = header;
   bts->bts.mem = ((const uint8_t *) header) + data_offset;
-  bts->bts.size = data_size;
+  bts->bts.size = size;
   bts->bts.data_head = &header->data_head;
-  bts->bts.last_head = 0;
+  bts->bts.last_head = 0ull;
 
-  tinfo->conf.bts.size = data_size;
+  tinfo->conf.bts.size = (unsigned int) size;
   return tinfo;
 
  err_file:
@@ -746,7 +760,7 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
   struct perf_event_mmap_page *header;
   struct btrace_target_info *tinfo;
   struct btrace_tinfo_pt *pt;
-  unsigned long long pages, size;
+  unsigned long pages, size;
   int pid, pg, errcode, type;
 
   if (conf->size == 0)
@@ -788,7 +802,7 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
   header->aux_offset = header->data_offset + header->data_size;
 
   /* Convert the requested size in bytes to pages (rounding up).  */
-  pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
+  pages = (((unsigned long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
   /* We need at least one page.  */
   if (pages == 0)
     pages = 1;
@@ -806,12 +820,12 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
       size_t length;
 
       size = pages * PAGE_SIZE;
-      length = size;
 
-      /* Check for overflows.  */
-      if ((unsigned long long) length < size)
+      /* Don't ask for more than we can represent in the configuration.  */
+      if (UINT_MAX < size)
 	continue;
 
+      length = size;
       header->aux_size = size;
 
       pt->pt.mem = mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
@@ -827,7 +841,7 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
   pt->pt.size = size;
   pt->pt.data_head = &header->aux_head;
 
-  tinfo->conf.pt.size = size;
+  tinfo->conf.pt.size = (unsigned int) size;
   return tinfo;
 
  err_conf:
@@ -938,7 +952,8 @@ linux_read_bts (struct btrace_data_bts *btrace,
 {
   struct perf_event_buffer *pevent;
   const uint8_t *begin, *end, *start;
-  unsigned long long data_head, data_tail, buffer_size, size;
+  unsigned long buffer_size, size;
+  __u64 data_head, data_tail;
   unsigned int retries = 5;
 
   pevent = &tinfo->variant.bts.bts;
@@ -961,6 +976,8 @@ linux_read_bts (struct btrace_data_bts *btrace,
 
       if (type == BTRACE_READ_DELTA)
 	{
+	  __u64 data_size;
+
 	  /* Determine the number of bytes to read and check for buffer
 	     overflows.  */
 
@@ -971,9 +988,12 @@ linux_read_bts (struct btrace_data_bts *btrace,
 	    return BTRACE_ERR_OVERFLOW;
 
 	  /* If the buffer is smaller than the trace delta, we overflowed.  */
-	  size = data_head - data_tail;
-	  if (buffer_size < size)
+	  data_size = data_head - data_tail;
+	  if (buffer_size < data_size)
 	    return BTRACE_ERR_OVERFLOW;
+
+	  /* DATA_SIZE <= BUFFER_SIZE, so it fits into unsigned long.  */
+	  size = (unsigned long) data_size;
 	}
       else
 	{
@@ -982,7 +1002,7 @@ linux_read_bts (struct btrace_data_bts *btrace,
 
 	  /* Adjust the size if the buffer has not overflowed, yet.  */
 	  if (data_head < size)
-	    size = data_head;
+	    size = (unsigned long) data_head;
 	}
 
       /* Data_head keeps growing; the buffer itself is circular.  */
diff --git a/gdb/nat/linux-btrace.h b/gdb/nat/linux-btrace.h
index b680bf5..5fcaf79 100644
--- a/gdb/nat/linux-btrace.h
+++ b/gdb/nat/linux-btrace.h
@@ -38,13 +38,13 @@ struct perf_event_buffer
   const uint8_t *mem;
 
   /* The size of the mapped memory in bytes.  */
-  unsigned long long size;
+  unsigned long size;
 
   /* A pointer to the data_head field for this buffer. */
-  volatile unsigned long long *data_head;
+  volatile __u64 *data_head;
 
   /* The data_head value from the last read.  */
-  unsigned long long last_head;
+  __u64 last_head;
 };
 
 /* Branch trace target information for BTS tracing.  */
-- 
1.8.3.1

