[PATCH v3 07/12] btrace: Remove struct btrace_thread_info::{begin,end}.
- From: Tim Wiederhake <tim dot wiederhake at intel dot com>
- To: gdb-patches at sourceware dot org
- Cc: markus dot t dot metzger at intel dot com
- Date: Tue, 9 May 2017 08:55:24 +0200
- Subject: [PATCH v3 07/12] btrace: Remove struct btrace_thread_info::{begin,end}.
- References: <1494312929-22749-1-git-send-email-tim.wiederhake@intel.com>
These are no longer needed and might hold invalid addresses once we change the
vector of function segment pointers into a vector of function segment objects,
where a reallocation of the vector changes the address of its elements.
2017-05-09 Tim Wiederhake <tim.wiederhake@intel.com>
gdb/ChangeLog:
* btrace.c (ftrace_new_function, ftrace_new_call, ftrace_new_tailcall,
ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
ftrace_update_function, ftrace_compute_global_level_offset,
btrace_compute_ftrace_bts, ftrace_add_pt, btrace_compute_ftrace_pt,
btrace_stitch_bts, btrace_fetch, btrace_clear, btrace_insn_number,
btrace_insn_end, btrace_is_empty): Remove references to
btrace_thread_info::begin and btrace_thread_info::end.
* btrace.h (struct btrace_thread_info): Remove BEGIN and END.
* record-btrace.c (record_btrace_start_replaying): Remove reference to
btrace_thread_info::begin.
---
gdb/btrace.c | 194 +++++++++++++++++++++++++---------------------------
gdb/btrace.h | 9 ---
gdb/record-btrace.c | 2 +-
3 files changed, 94 insertions(+), 111 deletions(-)
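For readers skimming the diff below: the recurring pattern is that queries
previously answered by the cached BEGIN/END pointers are now answered by the
functions vector itself, via empty (), back (), or an index.  A condensed
stand-alone sketch of that idiom (the type and function names here are
illustrative, not the actual GDB API):

#include <vector>

struct segment { int number; int level; };

struct thread_trace
{
  std::vector<segment *> functions;
};

/* Formerly answered by "tt->end != NULL"; now the vector itself knows.  */
static bool
has_trace (const thread_trace *tt)
{
  return !tt->functions.empty ();
}

/* Formerly "tt->end"; now simply the last element of the vector.  */
static segment *
last_segment (thread_trace *tt)
{
  return tt->functions.empty () ? nullptr : tt->functions.back ();
}

int
main ()
{
  thread_trace tt;
  segment s1 {1, 0}, s2 {2, 1};
  tt.functions.push_back (&s1);
  tt.functions.push_back (&s2);
  return (has_trace (&tt) && last_segment (&tt)->number == 2) ? 0 : 1;
}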
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 1bd11f0..1e45c09 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -212,16 +212,14 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *bfun, *prev;
+ struct btrace_function *bfun;
- prev = btinfo->end;
bfun = XCNEW (struct btrace_function);
bfun->msym = mfun;
bfun->sym = fun;
- bfun->flow.prev = prev;
- if (prev == NULL)
+ if (btinfo->functions.empty ())
{
/* Start counting at one. */
bfun->number = 1;
@@ -229,8 +227,11 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
}
else
{
+ struct btrace_function *prev = btinfo->functions.back ();
+
gdb_assert (prev->flow.next == NULL);
prev->flow.next = bfun;
+ bfun->flow.prev = prev;
bfun->number = prev->number + 1;
bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
@@ -238,7 +239,7 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
}
btinfo->functions.push_back (bfun);
- return btinfo->end = bfun;
+ return bfun;
}
/* Update the UP field of a function segment. */
@@ -286,10 +287,11 @@ ftrace_new_call (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *caller = btinfo->end;
+ const unsigned int length = btinfo->functions.size ();
struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
- bfun->up = caller;
+ if (length != 0)
+ bfun->up = btinfo->functions[length - 1];
bfun->level += 1;
ftrace_debug (bfun, "new call");
@@ -306,10 +308,11 @@ ftrace_new_tailcall (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *caller = btinfo->end;
+ const unsigned int length = btinfo->functions.size ();
struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
- bfun->up = caller;
+ if (length != 0)
+ bfun->up = btinfo->functions[length - 1];
bfun->level += 1;
bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
@@ -385,7 +388,7 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *prev = btinfo->end;
+ struct btrace_function *prev = btinfo->functions.back ();
struct btrace_function *bfun, *caller;
bfun = ftrace_new_function (btinfo, mfun, fun);
@@ -465,7 +468,7 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *prev = btinfo->end;
+ struct btrace_function *prev = btinfo->functions.back ();
struct btrace_function *bfun;
/* This is an unexplained function switch. We can't really be sure about the
@@ -487,15 +490,17 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
{
- struct btrace_function *prev = btinfo->end;
struct btrace_function *bfun;
- /* We hijack prev if it was empty. */
- if (prev != NULL && prev->errcode == 0
- && VEC_empty (btrace_insn_s, prev->insn))
- bfun = prev;
- else
+ if (btinfo->functions.empty ())
bfun = ftrace_new_function (btinfo, NULL, NULL);
+ else
+ {
+ /* We hijack the previous function segment if it was empty. */
+ bfun = btinfo->functions.back ();
+ if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
+ bfun = ftrace_new_function (btinfo, NULL, NULL);
+ }
bfun->errcode = errcode;
@@ -516,7 +521,7 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
struct minimal_symbol *mfun;
struct symbol *fun;
struct btrace_insn *last;
- struct btrace_function *bfun = btinfo->end;
+ struct btrace_function *bfun;
/* Try to determine the function we're in. We use both types of symbols
to avoid surprises when we sometimes get a full symbol and sometimes
@@ -528,8 +533,13 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
if (fun == NULL && mfun == NULL)
DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
- /* If we didn't have a function or if we had a gap before, we create one. */
- if (bfun == NULL || bfun->errcode != 0)
+ /* If we didn't have a function, we create one. */
+ if (btinfo->functions.empty ())
+ return ftrace_new_function (btinfo, mfun, fun);
+
+ /* If we had a gap before, we create a function. */
+ bfun = btinfo->functions.back ();
+ if (bfun->errcode != 0)
return ftrace_new_function (btinfo, mfun, fun);
/* Check the last instruction, if we have one.
@@ -685,26 +695,27 @@ ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
- struct btrace_function *bfun, *end;
+ unsigned int i, length;
int level;
if (btinfo == NULL)
return;
- bfun = btinfo->begin;
- if (bfun == NULL)
+ if (btinfo->functions.empty ())
return;
- /* The last function segment contains the current instruction, which is not
- really part of the trace. If it contains just this one instruction, we
- stop when we reach it; otherwise, we let the below loop run to the end. */
- end = btinfo->end;
- if (VEC_length (btrace_insn_s, end->insn) > 1)
- end = NULL;
-
level = INT_MAX;
- for (; bfun != end; bfun = bfun->flow.next)
- level = std::min (level, bfun->level);
+ length = btinfo->functions.size ();
+ for (const auto &bfun: btinfo->functions)
+ {
+ /* The last function segment contains the current instruction, which is
+ not really part of the trace. If it contains just this one
+ instruction, we ignore the segment. */
+ if (bfun->number == length && VEC_length (btrace_insn_s, bfun->insn) == 1)
+ continue;
+
+ level = std::min (level, bfun->level);
+ }
DEBUG_FTRACE ("setting global level offset: %d", -level);
btinfo->level = -level;
@@ -986,18 +997,19 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
VEC (bfun_s) **gaps)
{
struct btrace_thread_info *btinfo;
- struct btrace_function *begin, *end;
struct gdbarch *gdbarch;
unsigned int blk;
int level;
gdbarch = target_gdbarch ();
btinfo = &tp->btrace;
- begin = btinfo->begin;
- end = btinfo->end;
- level = begin != NULL ? -btinfo->level : INT_MAX;
blk = VEC_length (btrace_block_s, btrace->blocks);
+ if (btinfo->functions.empty ())
+ level = INT_MAX;
+ else
+ level = -btinfo->level;
+
while (blk != 0)
{
btrace_block_s *block;
@@ -1010,6 +1022,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
for (;;)
{
+ struct btrace_function *bfun;
struct btrace_insn insn;
int size;
@@ -1017,27 +1030,23 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
if (block->end < pc)
{
/* Indicate the gap in the trace. */
- end = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
- if (begin == NULL)
- begin = end;
+ bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, bfun);
warning (_("Recorded trace may be corrupted at instruction "
- "%u (pc = %s)."), end->insn_offset - 1,
+ "%u (pc = %s)."), bfun->insn_offset - 1,
core_addr_to_string_nz (pc));
break;
}
- end = ftrace_update_function (btinfo, pc);
- if (begin == NULL)
- begin = end;
+ bfun = ftrace_update_function (btinfo, pc);
/* Maintain the function level offset.
For all but the last block, we do it here. */
if (blk != 0)
- level = std::min (level, end->level);
+ level = std::min (level, bfun->level);
size = 0;
TRY
@@ -1054,7 +1063,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
insn.iclass = ftrace_classify_insn (gdbarch, pc);
insn.flags = 0;
- ftrace_update_insns (end, &insn);
+ ftrace_update_insns (bfun, &insn);
/* We're done once we pushed the instruction at the end. */
if (block->end == pc)
@@ -1065,12 +1074,12 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
{
/* Indicate the gap in the trace. We just added INSN so we're
not at the beginning. */
- end = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
+ bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, bfun);
warning (_("Recorded trace may be incomplete at instruction %u "
- "(pc = %s)."), end->insn_offset - 1,
+ "(pc = %s)."), bfun->insn_offset - 1,
core_addr_to_string_nz (pc));
break;
@@ -1085,13 +1094,10 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
and is not really part of the execution history, it shouldn't
affect the level. */
if (blk == 0)
- level = std::min (level, end->level);
+ level = std::min (level, bfun->level);
}
}
- btinfo->begin = begin;
- btinfo->end = end;
-
/* LEVEL is the minimal function level of all btrace function segments.
Define the global level offset to -LEVEL so all function levels are
normalized to start at zero. */
@@ -1148,16 +1154,13 @@ pt_btrace_insn (const struct pt_insn &insn)
static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
struct pt_insn_decoder *decoder,
- struct btrace_function **pbegin,
- struct btrace_function **pend, int *plevel,
+ int *plevel,
VEC (bfun_s) **gaps)
{
- struct btrace_function *begin, *end, *upd;
+ struct btrace_function *bfun;
uint64_t offset;
int errcode;
- begin = *pbegin;
- end = *pend;
for (;;)
{
struct pt_insn insn;
@@ -1178,7 +1181,7 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
break;
/* Look for gaps in the trace - unless we're at the beginning. */
- if (begin != NULL)
+ if (!btinfo->functions.empty ())
{
/* Tracing is disabled and re-enabled each time we enter the
kernel. Most times, we continue from the same instruction we
@@ -1187,64 +1190,53 @@ ftrace_add_pt (struct btrace_thread_info *btinfo,
from some other instruction. Indicate this as a trace gap. */
if (insn.enabled)
{
- *pend = end = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
+ bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, bfun);
pt_insn_get_offset (decoder, &offset);
warning (_("Non-contiguous trace at instruction %u (offset "
"= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
- end->insn_offset - 1, offset, insn.ip);
+ bfun->insn_offset - 1, offset, insn.ip);
}
}
/* Indicate trace overflows. */
if (insn.resynced)
{
- *pend = end = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
- if (begin == NULL)
- *pbegin = begin = end;
+ bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, bfun);
pt_insn_get_offset (decoder, &offset);
warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
- ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
+ ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1,
offset, insn.ip);
}
- upd = ftrace_update_function (btinfo, insn.ip);
- if (upd != end)
- {
- *pend = end = upd;
-
- if (begin == NULL)
- *pbegin = begin = upd;
- }
+ bfun = ftrace_update_function (btinfo, insn.ip);
/* Maintain the function level offset. */
- *plevel = std::min (*plevel, end->level);
+ *plevel = std::min (*plevel, bfun->level);
btrace_insn btinsn = pt_btrace_insn (insn);
- ftrace_update_insns (end, &btinsn);
+ ftrace_update_insns (bfun, &btinsn);
}
if (errcode == -pte_eos)
break;
/* Indicate the gap in the trace. */
- *pend = end = ftrace_new_gap (btinfo, errcode);
- if (begin == NULL)
- *pbegin = begin = end;
+ bfun = ftrace_new_gap (btinfo, errcode);
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, bfun);
pt_insn_get_offset (decoder, &offset);
warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
- ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
+ ", pc = 0x%" PRIx64 "): %s."), errcode, bfun->insn_offset - 1,
offset, insn.ip, pt_errstr (pt_errcode (errcode)));
}
}
@@ -1326,7 +1318,10 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
return;
btinfo = &tp->btrace;
- level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
+ if (btinfo->functions.empty ())
+ level = INT_MAX;
+ else
+ level = -btinfo->level;
pt_config_init(&config);
config.begin = btrace->data;
@@ -1359,17 +1354,18 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
error (_("Failed to configure the Intel Processor Trace decoder: "
"%s."), pt_errstr (pt_errcode (errcode)));
- ftrace_add_pt (btinfo, decoder, &btinfo->begin, &btinfo->end, &level,
- gaps);
+ ftrace_add_pt (btinfo, decoder, &level, gaps);
}
CATCH (error, RETURN_MASK_ALL)
{
/* Indicate a gap in the trace if we quit trace processing. */
- if (error.reason == RETURN_QUIT && btinfo->end != NULL)
+ if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
{
- btinfo->end = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
+ struct btrace_function *bfun;
+
+ bfun = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
- VEC_safe_push (bfun_s, *gaps, btinfo->end);
+ VEC_safe_push (bfun_s, *gaps, bfun);
}
btrace_finalize_ftrace_pt (decoder, tp, level);
@@ -1596,10 +1592,11 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
btrace_block_s *first_new_block;
btinfo = &tp->btrace;
- last_bfun = btinfo->end;
- gdb_assert (last_bfun != NULL);
+ gdb_assert (!btinfo->functions.empty ());
gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
+ last_bfun = btinfo->functions.back ();
+
/* If the existing trace ends with a gap, we just glue the traces
together. We need to drop the last (i.e. chronologically first) block
of the new trace, though, since we can't fill in the start address.*/
@@ -1664,7 +1661,7 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
of just that one instruction. If we remove it, we might turn the now
empty btrace function segment into a gap. But we don't want gaps at
the beginning. To avoid this, we remove the entire old trace. */
- if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
+ if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
btrace_clear (tp);
return 0;
@@ -1827,7 +1824,7 @@ btrace_fetch (struct thread_info *tp)
make_cleanup_btrace_data (&btrace);
/* Let's first try to extend the trace we already have. */
- if (btinfo->end != NULL)
+ if (!btinfo->functions.empty ())
{
errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
if (errcode == 0)
@@ -1896,8 +1893,6 @@ btrace_clear (struct thread_info *tp)
}
btinfo->functions.clear ();
- btinfo->begin = NULL;
- btinfo->end = NULL;
btinfo->ngaps = 0;
/* Must clear the maint data before - it depends on BTINFO->DATA. */
@@ -2286,10 +2281,7 @@ void
btrace_insn_begin (struct btrace_insn_iterator *it,
const struct btrace_thread_info *btinfo)
{
- const struct btrace_function *bfun;
-
- bfun = btinfo->begin;
- if (bfun == NULL)
+ if (btinfo->functions.empty ())
error (_("No trace."));
it->btinfo = btinfo;
@@ -2306,10 +2298,10 @@ btrace_insn_end (struct btrace_insn_iterator *it,
const struct btrace_function *bfun;
unsigned int length;
- bfun = btinfo->end;
- if (bfun == NULL)
+ if (btinfo->functions.empty ())
error (_("No trace."));
+ bfun = btinfo->functions.back ();
length = VEC_length (btrace_insn_s, bfun->insn);
/* The last function may either be a gap or it contains the current
@@ -2752,7 +2744,7 @@ btrace_is_empty (struct thread_info *tp)
btinfo = &tp->btrace;
- if (btinfo->begin == NULL)
+ if (btinfo->functions.empty ())
return 1;
btrace_insn_begin (&begin, btinfo);
diff --git a/gdb/btrace.h b/gdb/btrace.h
index ca79667..b3dbe82 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -331,15 +331,6 @@ struct btrace_thread_info
/* The raw branch trace data for the below branch trace. */
struct btrace_data data;
- /* The current branch trace for this thread (both inclusive).
-
- The last instruction of END is the current instruction, which is not
- part of the execution history.
- Both will be NULL if there is no branch trace available. If there is
- branch trace available, both will be non-NULL. */
- struct btrace_function *begin;
- struct btrace_function *end;
-
/* Vector of pointer to decoded function segments. These are in execution
order with the first element == BEGIN and the last element == END. */
std::vector<btrace_function *> functions;
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index ec940f6..5c230e7 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1908,7 +1908,7 @@ record_btrace_start_replaying (struct thread_info *tp)
replay = NULL;
/* We can't start replaying without trace. */
- if (btinfo->begin == NULL)
+ if (btinfo->functions.empty ())
return NULL;
/* GDB stores the current frame_id when stepping in order to detects steps
--
2.7.4