[patch v8 22/24] btrace, gdbserver: read branch trace incrementally
- From: Markus Metzger <markus.t.metzger@intel.com>
- To: jan.kratochvil@redhat.com
- Cc: gdb-patches@sourceware.org, Pedro Alves <palves@redhat.com>
- Date: Thu, 12 Dec 2013 10:15:45 +0100
- Subject: [patch v8 22/24] btrace, gdbserver: read branch trace incrementally
- Authentication-results: sourceware.org; auth=none
- References: <1386839747-8860-1-git-send-email-markus.t.metzger@intel.com>
Read branch trace data incrementally and extend the current trace rather than
discarding it and reading the entire trace buffer each time.
If the branch trace buffer overflowed, we cannot extend the current trace, so
we discard it and start anew by reading the entire branch trace buffer.
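For illustration only (not part of the patch), here is a minimal standalone
sketch of that strategy: try a delta read first and fall back to a full read
when the delta read fails, e.g. because the trace buffer overflowed.  The enum
values mirror btrace_error and btrace_read_type from the patch; read_trace is
a hypothetical stand-in for target_read_btrace.

#include <stdio.h>

enum btrace_error { BTRACE_ERR_NONE, BTRACE_ERR_OVERFLOW };
enum btrace_read_type { BTRACE_READ_ALL, BTRACE_READ_DELTA };

/* Hypothetical reader: pretend the buffer overflowed so that delta
   reads fail while full reads still succeed.  */
static enum btrace_error
read_trace (enum btrace_read_type type)
{
  if (type == BTRACE_READ_DELTA)
    return BTRACE_ERR_OVERFLOW;
  return BTRACE_ERR_NONE;
}

int
main (void)
{
  enum btrace_error err;

  /* First try to extend the trace we already have.  */
  err = read_trace (BTRACE_READ_DELTA);
  if (err != BTRACE_ERR_NONE)
    {
      /* The delta read failed; discard the old trace and read the
         entire buffer instead.  */
      printf ("delta read failed, falling back to a full read\n");
      err = read_trace (BTRACE_READ_ALL);
    }

  return err == BTRACE_ERR_NONE ? 0 : 1;
}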
Reviewed-by: Eli Zaretskii
CC: Pedro Alves <palves@redhat.com>
2013-12-12 Markus Metzger <markus.t.metzger@intel.com>
* common/linux-btrace.c (perf_event_read_bts, linux_read_btrace):
Support delta reads.
(linux_disable_btrace): Change return type.
* common/linux-btrace.h (linux_read_btrace): Change parameters
and return type to allow error reporting. Update users.
(linux_disable_btrace): Change return type. Update users.
* common/btrace-common.h (btrace_read_type) <BTRACE_READ_DELTA>:
New.
(btrace_error): New.
(btrace_block) <begin>: Comment on BEGIN == 0.
* btrace.c (btrace_compute_ftrace): Start from the end of
the current trace.
(btrace_stitch_trace, btrace_clear_history): New.
(btrace_fetch): Read delta trace, return if replaying.
(btrace_clear): Move clear history code to btrace_clear_history.
(parse_xml_btrace): Throw an error if parsing failed.
* target.h (struct target_ops) <to_read_btrace>: Change parameters
and return type to allow error reporting.
(target_read_btrace): Change parameters and return type to allow
error reporting.
* target.c (target_read_btrace): Update.
* remote.c (remote_read_btrace): Support delta reads. Pass
errors on.
gdbserver/
* target.h (target_ops) <read_btrace>: Change parameters and
return type to allow error reporting.
* server.c (handle_qxfer_btrace): Support delta reads. Pass
trace reading errors on.
* linux-low.c (linux_low_read_btrace): Pass trace reading
errors on.
(linux_low_disable_btrace): New.
---
gdb/NEWS | 4 ++
gdb/amd64-linux-nat.c | 6 +-
gdb/btrace.c | 155 ++++++++++++++++++++++++++++++++++++++++-----
gdb/common/btrace-common.h | 27 +++++++-
gdb/common/linux-btrace.c | 97 +++++++++++++++++++---------
gdb/common/linux-btrace.h | 15 +++--
gdb/doc/gdb.texinfo | 8 +++
gdb/gdbserver/linux-low.c | 36 +++++++++--
gdb/gdbserver/server.c | 11 +++-
gdb/gdbserver/target.h | 9 ++-
gdb/i386-linux-nat.c | 6 +-
gdb/remote.c | 23 ++++---
gdb/target.c | 9 +--
gdb/target.h | 15 +++--
14 files changed, 330 insertions(+), 91 deletions(-)
diff --git a/gdb/NEWS b/gdb/NEWS
index e0ac447..f7927f3 100644
--- a/gdb/NEWS
+++ b/gdb/NEWS
@@ -245,6 +245,10 @@ qXfer:libraries-svr4:read's annex
necessary for library list updating, resulting in significant
speedup.
+qXfer:btrace:read's annex
+ The qXfer:btrace:read packet supports a new annex 'delta' to read
+ branch trace incrementally.
+
* New features in the GDB remote stub, GDBserver
** GDBserver now supports target-assisted range stepping. Currently
diff --git a/gdb/amd64-linux-nat.c b/gdb/amd64-linux-nat.c
index b1676ac..d9acda3 100644
--- a/gdb/amd64-linux-nat.c
+++ b/gdb/amd64-linux-nat.c
@@ -1173,10 +1173,10 @@ amd64_linux_enable_btrace (ptid_t ptid)
static void
amd64_linux_disable_btrace (struct btrace_target_info *tinfo)
{
- int errcode = linux_disable_btrace (tinfo);
+ enum btrace_error errcode = linux_disable_btrace (tinfo);
- if (errcode != 0)
- error (_("Could not disable branch tracing: %s."), safe_strerror (errcode));
+ if (errcode != BTRACE_ERR_NONE)
+ error (_("Could not disable branch tracing."));
}
/* Teardown branch tracing. */
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 1f35796..b25f882 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -599,9 +599,9 @@ btrace_compute_ftrace (struct btrace_thread_info *btinfo,
DEBUG ("compute ftrace");
gdbarch = target_gdbarch ();
- begin = NULL;
- end = NULL;
- level = INT_MAX;
+ begin = btinfo->begin;
+ end = btinfo->end;
+ level = begin != NULL ? -btinfo->level : INT_MAX;
blk = VEC_length (btrace_block_s, btrace);
while (blk != 0)
@@ -717,27 +717,157 @@ btrace_teardown (struct thread_info *tp)
btrace_clear (tp);
}
+/* Adjust the block trace in order to stitch old and new trace together.
+ BTRACE is the new delta trace between the last and the current stop.
+ BTINFO is the old branch trace until the last stop.
+ May modify BTRACE as well as the existing trace in BTINFO.
+ Return 0 on success, -1 otherwise. */
+
+static int
+btrace_stitch_trace (VEC (btrace_block_s) **btrace,
+ const struct btrace_thread_info *btinfo)
+{
+ struct btrace_function *last_bfun;
+ struct btrace_insn *last_insn;
+ btrace_block_s *first_new_block;
+
+ /* If we don't have trace, there's nothing to do. */
+ if (VEC_empty (btrace_block_s, *btrace))
+ return 0;
+
+ last_bfun = btinfo->end;
+ gdb_assert (last_bfun != NULL);
+
+ /* Beware that block trace starts with the most recent block, so the
+ chronologically first block in the new trace is the last block in
+ the new trace's block vector. */
+ first_new_block = VEC_last (btrace_block_s, *btrace);
+ last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
+
+ /* If the current PC at the end of the block is the same as in our current
+ trace, there are two explanations:
+ 1. we executed the instruction and some branch brought us back.
+ 2. we have not made any progress.
+ In the first case, the delta trace vector should contain at least two
+ entries.
+ In the second case, the delta trace vector should contain exactly one
+ entry for the partial block containing the current PC. Remove it. */
+ if (first_new_block->end == last_insn->pc
+ && VEC_length (btrace_block_s, *btrace) == 1)
+ {
+ VEC_pop (btrace_block_s, *btrace);
+ return 0;
+ }
+
+ DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
+ core_addr_to_string_nz (first_new_block->end));
+
+ /* Do a simple sanity check to make sure we don't accidentally end up
+ with a bad block. This should not occur in practice. */
+ if (first_new_block->end < last_insn->pc)
+ {
+ warning (_("Error while trying to read delta trace. Falling back to "
+ "a full read."));
+ return -1;
+ }
+
+ /* We adjust the last block to start at the end of our current trace. */
+ gdb_assert (first_new_block->begin == 0);
+ first_new_block->begin = last_insn->pc;
+
+ /* We simply pop the last insn so we can insert it again as part of
+ the normal branch trace computation.
+ Since instruction iterators are based on indices in the instructions
+ vector, we don't leave any pointers dangling. */
+ DEBUG ("pruning insn at %s for stitching",
+ ftrace_print_insn_addr (last_insn));
+
+ VEC_pop (btrace_insn_s, last_bfun->insn);
+
+ /* The instructions vector may become empty temporarily if this has
+ been the only instruction in this function segment.
+ This violates the invariant but will be remedied shortly. */
+ return 0;
+}
+
+/* Clear the branch trace histories in BTINFO. */
+
+static void
+btrace_clear_history (struct btrace_thread_info *btinfo)
+{
+ xfree (btinfo->insn_history);
+ xfree (btinfo->call_history);
+ xfree (btinfo->replay);
+
+ btinfo->insn_history = NULL;
+ btinfo->call_history = NULL;
+ btinfo->replay = NULL;
+}
+
/* See btrace.h. */
void
btrace_fetch (struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
+ struct btrace_target_info *tinfo;
VEC (btrace_block_s) *btrace;
struct cleanup *cleanup;
+ int errcode;
DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ btrace = NULL;
btinfo = &tp->btrace;
- if (btinfo->target == NULL)
+ tinfo = btinfo->target;
+ if (tinfo == NULL)
+ return;
+
+ /* There's no way we could get new trace while replaying.
+ On the other hand, delta trace would return a partial record with the
+ current PC, which is the replay PC, not the last PC, as expected. */
+ if (btinfo->replay != NULL)
return;
- btrace = target_read_btrace (btinfo->target, BTRACE_READ_NEW);
cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
+ /* Let's first try to extend the trace we already have. */
+ if (btinfo->end != NULL)
+ {
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
+ if (errcode == 0)
+ {
+ /* Success. Let's try to stitch the traces together. */
+ errcode = btrace_stitch_trace (&btrace, btinfo);
+ }
+ else
+ {
+ /* We failed to read delta trace. Let's try to read new trace. */
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
+
+ /* If we got any new trace, discard what we have. */
+ if (errcode == 0 && !VEC_empty (btrace_block_s, btrace))
+ btrace_clear (tp);
+ }
+
+ /* If we were not able to read the trace, we start over. */
+ if (errcode != 0)
+ {
+ btrace_clear (tp);
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
+ }
+ }
+ else
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
+
+ /* If we were not able to read the branch trace, signal an error. */
+ if (errcode != 0)
+ error (_("Failed to read branch trace."));
+
+ /* Compute the trace, provided we have any. */
if (!VEC_empty (btrace_block_s, btrace))
{
- btrace_clear (tp);
+ btrace_clear_history (btinfo);
btrace_compute_ftrace (btinfo, btrace);
}
@@ -772,13 +902,7 @@ btrace_clear (struct thread_info *tp)
btinfo->begin = NULL;
btinfo->end = NULL;
- xfree (btinfo->insn_history);
- xfree (btinfo->call_history);
- xfree (btinfo->replay);
-
- btinfo->insn_history = NULL;
- btinfo->call_history = NULL;
- btinfo->replay = NULL;
+ btrace_clear_history (btinfo);
}
/* See btrace.h. */
@@ -870,10 +994,7 @@ parse_xml_btrace (const char *buffer)
errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
buffer, &btrace);
if (errcode != 0)
- {
- do_cleanups (cleanup);
- return NULL;
- }
+ error (_("Error parsing branch trace."));
/* Keep parse results. */
discard_cleanups (cleanup);
diff --git a/gdb/common/btrace-common.h b/gdb/common/btrace-common.h
index 1986868..4bab92e 100644
--- a/gdb/common/btrace-common.h
+++ b/gdb/common/btrace-common.h
@@ -42,7 +42,9 @@
asynchronous, e.g. interrupts. */
struct btrace_block
{
- /* The address of the first byte of the first instruction in the block. */
+ /* The address of the first byte of the first instruction in the block.
+ The address may be zero if we do not know the beginning of this block,
+ such as for the first block in a delta trace. */
CORE_ADDR begin;
/* The address of the first byte of the last instruction in the block. */
@@ -67,7 +69,28 @@ enum btrace_read_type
BTRACE_READ_ALL,
/* Send all available trace, if it changed. */
- BTRACE_READ_NEW
+ BTRACE_READ_NEW,
+
+ /* Send the trace since the last request. This will fail if the trace
+ buffer overflowed. */
+ BTRACE_READ_DELTA
+};
+
+/* Enumeration of btrace errors. */
+
+enum btrace_error
+{
+ /* No error. Everything is OK. */
+ BTRACE_ERR_NONE,
+
+ /* An unknown error. */
+ BTRACE_ERR_UNKNOWN,
+
+ /* Branch tracing is not supported on this system. */
+ BTRACE_ERR_NOT_SUPPORTED,
+
+ /* The branch trace buffer overflowed; no delta read possible. */
+ BTRACE_ERR_OVERFLOW
};
#endif /* BTRACE_COMMON_H */
diff --git a/gdb/common/linux-btrace.c b/gdb/common/linux-btrace.c
index 801505b..4996a25 100644
--- a/gdb/common/linux-btrace.c
+++ b/gdb/common/linux-btrace.c
@@ -172,11 +172,11 @@ perf_event_sample_ok (const struct perf_event_sample *sample)
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
- const uint8_t *end, const uint8_t *start)
+ const uint8_t *end, const uint8_t *start, size_t size)
{
VEC (btrace_block_s) *btrace = NULL;
struct perf_event_sample sample;
- size_t read = 0, size = (end - begin);
+ size_t read = 0;
struct btrace_block block = { 0, 0 };
struct regcache *regcache;
@@ -252,6 +252,13 @@ perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
block.end = psample->bts.from;
}
+ /* Push the last block (i.e. the first one of inferior execution), as well.
+ We don't know where it ends, but we know where it starts. If we're
+ reading delta trace, we can fill in the start address later on.
+ Otherwise we will prune it. */
+ block.begin = 0;
+ VEC_safe_push (btrace_block_s, btrace, &block);
+
return btrace;
}
@@ -476,7 +483,7 @@ linux_enable_btrace (ptid_t ptid)
/* See linux-btrace.h. */
-int
+enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
int errcode;
@@ -484,12 +491,12 @@ linux_disable_btrace (struct btrace_target_info *tinfo)
errno = 0;
errcode = munmap (tinfo->buffer, perf_event_mmap_size (tinfo));
if (errcode != 0)
- return errno;
+ return BTRACE_ERR_UNKNOWN;
close (tinfo->file);
xfree (tinfo);
- return 0;
+ return BTRACE_ERR_NONE;
}
/* Check whether the branch trace has changed. */
@@ -504,21 +511,24 @@ linux_btrace_has_changed (struct btrace_target_info *tinfo)
/* See linux-btrace.h. */
-VEC (btrace_block_s) *
-linux_read_btrace (struct btrace_target_info *tinfo,
+enum btrace_error
+linux_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *tinfo,
enum btrace_read_type type)
{
- VEC (btrace_block_s) *btrace = NULL;
volatile struct perf_event_mmap_page *header;
const uint8_t *begin, *end, *start;
- unsigned long data_head, retries = 5;
- size_t buffer_size;
+ unsigned long data_head, data_tail, retries = 5;
+ size_t buffer_size, size;
+ /* For delta reads, we return at least the partial last block containing
+ the current PC. */
if (type == BTRACE_READ_NEW && !linux_btrace_has_changed (tinfo))
- return NULL;
+ return BTRACE_ERR_NONE;
header = perf_event_header (tinfo);
buffer_size = perf_event_buffer_size (tinfo);
+ data_tail = tinfo->data_head;
/* We may need to retry reading the trace. See below. */
while (retries--)
@@ -526,23 +536,45 @@ linux_read_btrace (struct btrace_target_info *tinfo,
data_head = header->data_head;
/* Delete any leftover trace from the previous iteration. */
- VEC_free (btrace_block_s, btrace);
+ VEC_free (btrace_block_s, *btrace);
- /* If there's new trace, let's read it. */
- if (data_head != tinfo->data_head)
+ if (type == BTRACE_READ_DELTA)
{
- /* Data_head keeps growing; the buffer itself is circular. */
- begin = perf_event_buffer_begin (tinfo);
- start = begin + data_head % buffer_size;
-
- if (data_head <= buffer_size)
- end = start;
- else
- end = perf_event_buffer_end (tinfo);
+ /* Determine the number of bytes to read and check for buffer
+ overflows. */
+
+ /* Check for data head overflows. We might be able to recover from
+ those but they are very unlikely and it's not really worth the
+ effort, I think. */
+ if (data_head < data_tail)
+ return BTRACE_ERR_OVERFLOW;
+
+ /* If the buffer is smaller than the trace delta, we overflowed. */
+ size = data_head - data_tail;
+ if (buffer_size < size)
+ return BTRACE_ERR_OVERFLOW;
+ }
+ else
+ {
+ /* Read the entire buffer. */
+ size = buffer_size;
- btrace = perf_event_read_bts (tinfo, begin, end, start);
+ /* Adjust the size if the buffer has not overflowed, yet. */
+ if (data_head < size)
+ size = data_head;
}
+ /* Data_head keeps growing; the buffer itself is circular. */
+ begin = perf_event_buffer_begin (tinfo);
+ start = begin + data_head % buffer_size;
+
+ if (data_head <= buffer_size)
+ end = start;
+ else
+ end = perf_event_buffer_end (tinfo);
+
+ *btrace = perf_event_read_bts (tinfo, begin, end, start, size);
+
/* The stopping thread notifies its ptracer before it is scheduled out.
On multi-core systems, the debugger might therefore run while the
kernel might be writing the last branch trace records.
@@ -554,7 +586,13 @@ linux_read_btrace (struct btrace_target_info *tinfo,
tinfo->data_head = data_head;
- return btrace;
+ /* Prune the incomplete last block (i.e. the first one of inferior execution)
+ if we're not doing a delta read. There is no way of filling in its zeroed
+ BEGIN element. */
+ if (!VEC_empty (btrace_block_s, *btrace) && type != BTRACE_READ_DELTA)
+ VEC_pop (btrace_block_s, *btrace);
+
+ return BTRACE_ERR_NONE;
}
#else /* !HAVE_LINUX_PERF_EVENT_H */
@@ -577,19 +615,20 @@ linux_enable_btrace (ptid_t ptid)
/* See linux-btrace.h. */
-int
+enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
- return ENOSYS;
+ return BTRACE_ERR_NOT_SUPPORTED;
}
/* See linux-btrace.h. */
-VEC (btrace_block_s) *
-linux_read_btrace (struct btrace_target_info *tinfo,
+enum btrace_error
+linux_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *tinfo,
enum btrace_read_type type)
{
- return NULL;
+ return BTRACE_ERR_NOT_SUPPORTED;
}
#endif /* !HAVE_LINUX_PERF_EVENT_H */
diff --git a/gdb/common/linux-btrace.h b/gdb/common/linux-btrace.h
index d4e8402..0f0412c 100644
--- a/gdb/common/linux-btrace.h
+++ b/gdb/common/linux-btrace.h
@@ -61,17 +61,18 @@ struct btrace_target_info
int ptr_bits;
};
-/* Check whether branch tracing is supported. */
+/* See to_supports_btrace in target.h. */
extern int linux_supports_btrace (void);
-/* Enable branch tracing for @ptid. */
+/* See to_enable_btrace in target.h. */
extern struct btrace_target_info *linux_enable_btrace (ptid_t ptid);
-/* Disable branch tracing and deallocate @tinfo. */
-extern int linux_disable_btrace (struct btrace_target_info *tinfo);
+/* See to_disable_btrace in target.h. */
+extern enum btrace_error linux_disable_btrace (struct btrace_target_info *ti);
-/* Read branch trace data. */
-extern VEC (btrace_block_s) *linux_read_btrace (struct btrace_target_info *,
- enum btrace_read_type);
+/* See to_read_btrace in target.h. */
+extern enum btrace_error linux_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *btinfo,
+ enum btrace_read_type type);
#endif /* LINUX_BTRACE_H */
diff --git a/gdb/doc/gdb.texinfo b/gdb/doc/gdb.texinfo
index 684053a..2161715 100644
--- a/gdb/doc/gdb.texinfo
+++ b/gdb/doc/gdb.texinfo
@@ -39906,6 +39906,14 @@ Returns all available branch trace.
@item new
Returns all available branch trace if the branch trace changed since
the last read request.
+
+@item delta
+Returns the new branch trace since the last read request. Adds a new
+block to the end of the trace that begins at zero and ends at the source
+location of the first branch in the trace buffer. This extra block is
+used to stitch traces together.
+
+If the trace buffer overflowed, returns an error indicating the overflow.
@end table
This packet is not probed by default; the remote stub must request it
diff --git a/gdb/gdbserver/linux-low.c b/gdb/gdbserver/linux-low.c
index 770ee16..679b352 100644
--- a/gdb/gdbserver/linux-low.c
+++ b/gdb/gdbserver/linux-low.c
@@ -5705,7 +5705,7 @@ linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
#ifdef HAVE_LINUX_BTRACE
-/* Enable branch tracing. */
+/* See to_enable_btrace target method. */
static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
@@ -5725,17 +5725,39 @@ linux_low_enable_btrace (ptid_t ptid)
return tinfo;
}
-/* Read branch trace data as btrace xml document. */
+/* See to_disable_btrace target method. */
-static void
+static int
+linux_low_disable_btrace (struct btrace_target_info *tinfo)
+{
+ enum btrace_error err;
+
+ err = linux_disable_btrace (tinfo);
+ return (err == BTRACE_ERR_NONE ? 0 : -1);
+}
+
+/* See to_read_btrace target method. */
+
+static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
int type)
{
VEC (btrace_block_s) *btrace;
struct btrace_block *block;
+ enum btrace_error err;
int i;
- btrace = linux_read_btrace (tinfo, type);
+ btrace = NULL;
+ err = linux_read_btrace (&btrace, tinfo, type);
+ if (err != BTRACE_ERR_NONE)
+ {
+ if (err == BTRACE_ERR_OVERFLOW)
+ buffer_grow_str0 (buffer, "E.Overflow.");
+ else
+ buffer_grow_str0 (buffer, "E.Generic Error.");
+
+ return -1;
+ }
buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
@@ -5744,9 +5766,11 @@ linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
paddress (block->begin), paddress (block->end));
- buffer_grow_str (buffer, "</btrace>\n");
+ buffer_grow_str0 (buffer, "</btrace>\n");
VEC_free (btrace_block_s, btrace);
+
+ return 0;
}
#endif /* HAVE_LINUX_BTRACE */
@@ -5819,7 +5843,7 @@ static struct target_ops linux_target_ops = {
#ifdef HAVE_LINUX_BTRACE
linux_supports_btrace,
linux_low_enable_btrace,
- linux_disable_btrace,
+ linux_low_disable_btrace,
linux_low_read_btrace,
#else
NULL,
diff --git a/gdb/gdbserver/server.c b/gdb/gdbserver/server.c
index 1a110e0..5903edf 100644
--- a/gdb/gdbserver/server.c
+++ b/gdb/gdbserver/server.c
@@ -1348,7 +1348,7 @@ handle_qxfer_btrace (const char *annex,
{
static struct buffer cache;
struct thread_info *thread;
- int type;
+ int type, result;
if (the_target->read_btrace == NULL || writebuf != NULL)
return -2;
@@ -1380,6 +1380,8 @@ handle_qxfer_btrace (const char *annex,
type = BTRACE_READ_ALL;
else if (strcmp (annex, "new") == 0)
type = BTRACE_READ_NEW;
+ else if (strcmp (annex, "delta") == 0)
+ type = BTRACE_READ_DELTA;
else
{
strcpy (own_buf, "E.Bad annex.");
@@ -1390,7 +1392,12 @@ handle_qxfer_btrace (const char *annex,
{
buffer_free (&cache);
- target_read_btrace (thread->btrace, &cache, type);
+ result = target_read_btrace (thread->btrace, &cache, type);
+ if (result != 0)
+ {
+ memcpy (own_buf, cache.buffer, cache.used_size);
+ return -3;
+ }
}
else if (offset > cache.used_size)
{
diff --git a/gdb/gdbserver/target.h b/gdb/gdbserver/target.h
index c5e6fee..6969b5c 100644
--- a/gdb/gdbserver/target.h
+++ b/gdb/gdbserver/target.h
@@ -356,12 +356,15 @@ struct target_ops
information struct for reading and for disabling branch trace. */
struct btrace_target_info *(*enable_btrace) (ptid_t ptid);
- /* Disable branch tracing. */
+ /* Disable branch tracing.
+ Returns zero on success, non-zero otherwise. */
int (*disable_btrace) (struct btrace_target_info *tinfo);
/* Read branch trace data into buffer. We use an int to specify the type
- to break a cyclic dependency. */
- void (*read_btrace) (struct btrace_target_info *, struct buffer *, int type);
+ to break a cyclic dependency.
+ Return 0 on success; print an error message into BUFFER and return -1,
+ otherwise. */
+ int (*read_btrace) (struct btrace_target_info *, struct buffer *, int type);
/* Return true if target supports range stepping. */
int (*supports_range_stepping) (void);
diff --git a/gdb/i386-linux-nat.c b/gdb/i386-linux-nat.c
index c2f4fcc..f90bfaa 100644
--- a/gdb/i386-linux-nat.c
+++ b/gdb/i386-linux-nat.c
@@ -1084,10 +1084,10 @@ i386_linux_enable_btrace (ptid_t ptid)
static void
i386_linux_disable_btrace (struct btrace_target_info *tinfo)
{
- int errcode = linux_disable_btrace (tinfo);
+ enum btrace_error errcode = linux_disable_btrace (tinfo);
- if (errcode != 0)
- error (_("Could not disable branch tracing: %s."), safe_strerror (errcode));
+ if (errcode != BTRACE_ERR_NONE)
+ error (_("Could not disable branch tracing."));
}
/* Teardown branch tracing. */
diff --git a/gdb/remote.c b/gdb/remote.c
index 5fe1c53..318966b 100644
--- a/gdb/remote.c
+++ b/gdb/remote.c
@@ -11428,13 +11428,14 @@ remote_teardown_btrace (struct btrace_target_info *tinfo)
/* Read the branch trace. */
-static VEC (btrace_block_s) *
-remote_read_btrace (struct btrace_target_info *tinfo,
+static enum btrace_error
+remote_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *tinfo,
enum btrace_read_type type)
{
struct packet_config *packet = &remote_protocol_packets[PACKET_qXfer_btrace];
struct remote_state *rs = get_remote_state ();
- VEC (btrace_block_s) *btrace = NULL;
+ struct cleanup *cleanup;
const char *annex;
char *xml;
@@ -11453,6 +11454,9 @@ remote_read_btrace (struct btrace_target_info *tinfo,
case BTRACE_READ_NEW:
annex = "new";
break;
+ case BTRACE_READ_DELTA:
+ annex = "delta";
+ break;
default:
internal_error (__FILE__, __LINE__,
_("Bad branch tracing read type: %u."),
@@ -11461,15 +11465,14 @@ remote_read_btrace (struct btrace_target_info *tinfo,
xml = target_read_stralloc (&current_target,
TARGET_OBJECT_BTRACE, annex);
- if (xml != NULL)
- {
- struct cleanup *cleanup = make_cleanup (xfree, xml);
+ if (xml == NULL)
+ return BTRACE_ERR_UNKNOWN;
- btrace = parse_xml_btrace (xml);
- do_cleanups (cleanup);
- }
+ cleanup = make_cleanup (xfree, xml);
+ *btrace = parse_xml_btrace (xml);
+ do_cleanups (cleanup);
- return btrace;
+ return BTRACE_ERR_NONE;
}
static int
diff --git a/gdb/target.c b/gdb/target.c
index 7b41795c..f6d8a0f 100644
--- a/gdb/target.c
+++ b/gdb/target.c
@@ -4214,18 +4214,19 @@ target_teardown_btrace (struct btrace_target_info *btinfo)
/* See target.h. */
-VEC (btrace_block_s) *
-target_read_btrace (struct btrace_target_info *btinfo,
+enum btrace_error
+target_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *btinfo,
enum btrace_read_type type)
{
struct target_ops *t;
for (t = current_target.beneath; t != NULL; t = t->beneath)
if (t->to_read_btrace != NULL)
- return t->to_read_btrace (btinfo, type);
+ return t->to_read_btrace (btrace, btinfo, type);
tcomplain ();
- return NULL;
+ return BTRACE_ERR_NOT_SUPPORTED;
}
/* See target.h. */
diff --git a/gdb/target.h b/gdb/target.h
index 97ba1bc..72f5910 100644
--- a/gdb/target.h
+++ b/gdb/target.h
@@ -828,9 +828,13 @@ struct target_ops
be attempting to talk to a remote target. */
void (*to_teardown_btrace) (struct btrace_target_info *tinfo);
- /* Read branch trace data. */
- VEC (btrace_block_s) *(*to_read_btrace) (struct btrace_target_info *,
- enum btrace_read_type);
+ /* Read branch trace data for the thread indicated by BTINFO into DATA.
+ DATA is cleared before new trace is added.
+ The branch trace will start with the most recent block and continue
+ towards older blocks. */
+ enum btrace_error (*to_read_btrace) (VEC (btrace_block_s) **data,
+ struct btrace_target_info *btinfo,
+ enum btrace_read_type type);
/* Stop trace recording. */
void (*to_stop_recording) (void);
@@ -1962,8 +1966,9 @@ extern void target_disable_btrace (struct btrace_target_info *btinfo);
extern void target_teardown_btrace (struct btrace_target_info *btinfo);
/* See to_read_btrace in struct target_ops. */
-extern VEC (btrace_block_s) *target_read_btrace (struct btrace_target_info *,
- enum btrace_read_type);
+extern enum btrace_error target_read_btrace (VEC (btrace_block_s) **,
+ struct btrace_target_info *,
+ enum btrace_read_type);
/* See to_stop_recording in struct target_ops. */
extern void target_stop_recording (void);
--
1.8.3.1