[PATCH] btrace: Store btrace_insn in an std::vector
- From: Simon Marchi <simon dot marchi at ericsson dot com>
- To: <gdb-patches at sourceware dot org>
- Cc: <markus dot t dot metzger at intel dot com>, <tim dot wiederhake at intel dot com>, Simon Marchi <simon dot marchi at ericsson dot com>
- Date: Sun, 3 Sep 2017 11:05:30 +0200
- Subject: [PATCH] btrace: Store btrace_insn in an std::vector
Because it contains a field of non-POD type (flags), objects of type
btrace_insn should be allocated with new and destroyed with delete rather
than with the malloc-based VEC macros.  Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector, which constructs and destroys its
elements properly.
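The INSN field of struct btrace_function thus changes as follows (this is
only a summary of the btrace.h hunk below):

    VEC (btrace_insn_s) *insn = NULL;

becomes

    std::vector<btrace_insn> insn;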
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
---
gdb/btrace.c | 59 +++++++++++++++++++------------------------
gdb/btrace.h | 6 +----
gdb/python/py-record-btrace.c | 2 +-
gdb/record-btrace.c | 18 ++++---------
4 files changed, 33 insertions(+), 52 deletions(-)
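For reviewers, here is a self-contained sketch of the std::vector access
patterns the patch switches to, with the VEC operations they replace noted
alongside.  The struct definitions below are simplified stand-ins, not the
real declarations from btrace.h:

    #include <vector>

    /* Simplified stand-ins for the real GDB types (see btrace.h), here
       only to make the sketch self-contained.  */
    struct btrace_insn { unsigned long pc; };
    struct btrace_function { std::vector<btrace_insn> insn; };

    static unsigned long
    sketch (btrace_function *bfun, const btrace_insn &insn)
    {
      bfun->insn.push_back (insn);            /* was VEC_safe_push  */

      unsigned int size = bfun->insn.size (); /* was VEC_length     */
      unsigned long sum = 0;

      if (!bfun->insn.empty ())               /* was !VEC_empty     */
        sum += bfun->insn.back ().pc;         /* was VEC_last       */

      for (const btrace_insn &i : bfun->insn) /* was VEC_iterate    */
        sum += i.pc;

      bfun->insn.pop_back ();                 /* was VEC_pop        */

      return sum + size;
    }

Explicit freeing (VEC_free) is no longer needed; the vector is destroyed
together with its enclosing btrace_function.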
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 2a83e1b..6d2a9c4 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -131,7 +131,7 @@ ftrace_debug (const struct btrace_function *bfun, const char *prefix)
level = bfun->level;
ibegin = bfun->insn_offset;
- iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
+ iend = ibegin + bfun->insn.size ();
DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
prefix, fun, file, level, ibegin, iend);
@@ -149,7 +149,7 @@ ftrace_call_num_insn (const struct btrace_function* bfun)
if (bfun->errcode != 0)
return 1;
- return VEC_length (btrace_insn_s, bfun->insn);
+ return bfun->insn.size ();
}
/* Return the function segment with the given NUMBER or NULL if no such segment
@@ -390,15 +390,13 @@ ftrace_find_call (struct btrace_thread_info *btinfo,
{
for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
{
- struct btrace_insn *last;
-
/* Skip gaps. */
if (bfun->errcode != 0)
continue;
- last = VEC_last (btrace_insn_s, bfun->insn);
+ btrace_insn &last = bfun->insn.back ();
- if (last->iclass == BTRACE_INSN_CALL)
+ if (last.iclass == BTRACE_INSN_CALL)
break;
}
@@ -528,7 +526,7 @@ ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
{
/* We hijack the previous function segment if it was empty. */
bfun = &btinfo->functions.back ();
- if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
+ if (bfun->errcode != 0 || !bfun->insn.empty ())
bfun = ftrace_new_function (btinfo, NULL, NULL);
}
@@ -550,7 +548,6 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
struct bound_minimal_symbol bmfun;
struct minimal_symbol *mfun;
struct symbol *fun;
- struct btrace_insn *last;
struct btrace_function *bfun;
/* Try to determine the function we're in. We use both types of symbols
@@ -575,9 +572,9 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
/* Check the last instruction, if we have one.
We do this check first, since it allows us to fill in the call stack
links in addition to the normal flow links. */
- last = NULL;
- if (!VEC_empty (btrace_insn_s, bfun->insn))
- last = VEC_last (btrace_insn_s, bfun->insn);
+ btrace_insn *last = NULL;
+ if (!bfun->insn.empty ())
+ last = &bfun->insn.back ();
if (last != NULL)
{
@@ -648,10 +645,9 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
/* Add the instruction at PC to BFUN's instructions. */
static void
-ftrace_update_insns (struct btrace_function *bfun,
- const struct btrace_insn *insn)
+ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
- VEC_safe_push (btrace_insn_s, bfun->insn, insn);
+ bfun->insn.push_back (insn);
if (record_debug > 1)
ftrace_debug (bfun, "update insn");
@@ -747,7 +743,7 @@ ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
really part of the trace. If it contains just this one instruction, we
ignore the segment. */
struct btrace_function *last = &btinfo->functions.back();
- if (VEC_length (btrace_insn_s, last->insn) != 1)
+ if (last->insn.size () != 1)
level = std::min (level, last->level);
DEBUG_FTRACE ("setting global level offset: %d", -level);
@@ -1104,7 +1100,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
insn.iclass = ftrace_classify_insn (gdbarch, pc);
insn.flags = 0;
- ftrace_update_insns (bfun, &insn);
+ ftrace_update_insns (bfun, insn);
/* We're done once we pushed the instruction at the end. */
if (block->end == pc)
@@ -1682,7 +1678,6 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
struct btrace_function *last_bfun;
- struct btrace_insn *last_insn;
btrace_block_s *first_new_block;
btinfo = &tp->btrace;
@@ -1694,7 +1689,7 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
/* If the existing trace ends with a gap, we just glue the traces
together. We need to drop the last (i.e. chronologically first) block
of the new trace, though, since we can't fill in the start address.*/
- if (VEC_empty (btrace_insn_s, last_bfun->insn))
+ if (last_bfun->insn.empty ())
{
VEC_pop (btrace_block_s, btrace->blocks);
return 0;
@@ -1704,7 +1699,7 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
chronologically first block in the new trace is the last block in
the new trace's block vector. */
first_new_block = VEC_last (btrace_block_s, btrace->blocks);
- last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
+ const btrace_insn &last_insn = last_bfun->insn.back ();
/* If the current PC at the end of the block is the same as in our current
trace, there are two explanations:
@@ -1714,19 +1709,19 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
entries.
In the second case, the delta trace vector should contain exactly one
entry for the partial block containing the current PC. Remove it. */
- if (first_new_block->end == last_insn->pc
+ if (first_new_block->end == last_insn.pc
&& VEC_length (btrace_block_s, btrace->blocks) == 1)
{
VEC_pop (btrace_block_s, btrace->blocks);
return 0;
}
- DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
+ DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
core_addr_to_string_nz (first_new_block->end));
/* Do a simple sanity check to make sure we don't accidentally end up
with a bad block. This should not occur in practice. */
- if (first_new_block->end < last_insn->pc)
+ if (first_new_block->end < last_insn.pc)
{
warning (_("Error while trying to read delta trace. Falling back to "
"a full read."));
@@ -1735,16 +1730,16 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
/* We adjust the last block to start at the end of our current trace. */
gdb_assert (first_new_block->begin == 0);
- first_new_block->begin = last_insn->pc;
+ first_new_block->begin = last_insn.pc;
/* We simply pop the last insn so we can insert it again as part of
the normal branch trace computation.
Since instruction iterators are based on indices in the instructions
vector, we don't leave any pointers dangling. */
DEBUG ("pruning insn at %s for stitching",
- ftrace_print_insn_addr (last_insn));
+ ftrace_print_insn_addr (&last_insn));
- VEC_pop (btrace_insn_s, last_bfun->insn);
+ last_bfun->insn.pop_back ();
/* The instructions vector may become empty temporarily if this has
been the only instruction in this function segment.
@@ -1755,7 +1750,7 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
of just that one instruction. If we remove it, we might turn the now
empty btrace function segment into a gap. But we don't want gaps at
the beginning. To avoid this, we remove the entire old trace. */
- if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
+ if (last_bfun->number == 1 && last_bfun->insn.empty ())
btrace_clear (tp);
return 0;
@@ -1980,8 +1975,6 @@ btrace_clear (struct thread_info *tp)
reinit_frame_cache ();
btinfo = &tp->btrace;
- for (auto &bfun : btinfo->functions)
- VEC_free (btrace_insn_s, bfun.insn);
btinfo->functions.clear ();
btinfo->ngaps = 0;
@@ -2337,11 +2330,11 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
return NULL;
/* The index is within the bounds of this function's instruction vector. */
- end = VEC_length (btrace_insn_s, bfun->insn);
+ end = bfun->insn.size ();
gdb_assert (0 < end);
gdb_assert (index < end);
- return VEC_index (btrace_insn_s, bfun->insn, index);
+ return &bfun->insn[index];
}
/* See btrace.h. */
@@ -2387,7 +2380,7 @@ btrace_insn_end (struct btrace_insn_iterator *it,
error (_("No trace."));
bfun = &btinfo->functions.back ();
- length = VEC_length (btrace_insn_s, bfun->insn);
+ length = bfun->insn.size ();
/* The last function may either be a gap or it contains the current
instruction, which is one past the end of the execution trace; ignore
@@ -2416,7 +2409,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
unsigned int end, space, adv;
- end = VEC_length (btrace_insn_s, bfun->insn);
+ end = bfun->insn.size ();
/* An empty function segment represents a gap in the trace. We count
it as one instruction. */
@@ -2509,7 +2502,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
/* We point to one after the last instruction in the new function. */
bfun = prev;
- index = VEC_length (btrace_insn_s, bfun->insn);
+ index = bfun->insn.size ();
/* An empty function segment represents a gap in the trace. We count
it as one instruction. */
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 9fde919..df6e895 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -81,10 +81,6 @@ struct btrace_insn
btrace_insn_flags flags;
};
-/* A vector of branch trace instructions. */
-typedef struct btrace_insn btrace_insn_s;
-DEF_VEC_O (btrace_insn_s);
-
/* Flags for btrace function segments. */
enum btrace_function_flag
{
@@ -161,7 +157,7 @@ struct btrace_function
/* The instructions in this function segment.
The instruction vector will be empty if the function segment
represents a decode error. */
- VEC (btrace_insn_s) *insn = NULL;
+ std::vector<btrace_insn> insn;
/* The error code of a decode error that led to a gap.
Must be zero unless INSN is empty; non-zero otherwise. */
diff --git a/gdb/python/py-record-btrace.c b/gdb/python/py-record-btrace.c
index cd2be9f..220990b 100644
--- a/gdb/python/py-record-btrace.c
+++ b/gdb/python/py-record-btrace.c
@@ -376,7 +376,7 @@ recpy_bt_func_instructions (PyObject *self, void *closure)
if (func == NULL)
return NULL;
- len = VEC_length (btrace_insn_s, func->insn);
+ len = func->insn.size ();
/* Gaps count as one instruction. */
if (len == 0)
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 6ac8573..d35800b 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -891,7 +891,7 @@ btrace_call_history_insn_range (struct ui_out *uiout,
{
unsigned int begin, end, size;
- size = VEC_length (btrace_insn_s, bfun->insn);
+ size = bfun->insn.size ();
gdb_assert (size > 0);
begin = bfun->insn_offset;
@@ -911,10 +911,8 @@ static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
int *pbegin, int *pend)
{
- struct btrace_insn *insn;
struct symtab *symtab;
struct symbol *sym;
- unsigned int idx;
int begin, end;
begin = INT_MAX;
@@ -926,11 +924,11 @@ btrace_compute_src_line_range (const struct btrace_function *bfun,
symtab = symbol_symtab (sym);
- for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
+ for (const btrace_insn &insn : bfun->insn)
{
struct symtab_and_line sal;
- sal = find_pc_line (insn->pc, 0);
+ sal = find_pc_line (insn.pc, 0);
if (sal.symtab != symtab || sal.line == 0)
continue;
@@ -1615,7 +1613,6 @@ record_btrace_frame_prev_register (struct frame_info *this_frame,
{
const struct btrace_frame_cache *cache;
const struct btrace_function *bfun, *caller;
- const struct btrace_insn *insn;
struct btrace_call_iterator it;
struct gdbarch *gdbarch;
CORE_ADDR pc;
@@ -1638,15 +1635,10 @@ record_btrace_frame_prev_register (struct frame_info *this_frame,
caller = btrace_call_get (&it);
if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
- {
- insn = VEC_index (btrace_insn_s, caller->insn, 0);
- pc = insn->pc;
- }
+ pc = caller->insn.front ().pc;
else
{
- insn = VEC_last (btrace_insn_s, caller->insn);
- pc = insn->pc;
-
+ pc = caller->insn.back ().pc;
pc += gdb_insn_length (gdbarch, pc);
}
--
2.7.4