author     Tim Wiederhake <tim.wiederhake@intel.com>  2017-05-30 12:47:37 +0200
committer  Tim Wiederhake <tim.wiederhake@intel.com>  2017-05-30 12:49:25 +0200
commit     08c3f6d234761d92b50d3c56d3cb4b82fdd58c77 (patch)
tree       f75f360cd18d35994ef8d12c75c00b86a1ad8972 /gdb
parent     btrace: Remove bfun_s vector. (diff)
btrace: Store function segments as objects.
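This commit changes btrace_thread_info::functions from a vector of heap-allocated pointers into a vector of btrace_function objects that are addressed by their 1-based segment NUMBER. A minimal, self-contained sketch of that lookup scheme (the struct and function below are hypothetical stand-ins, not the real GDB declarations):

#include <vector>

/* Hypothetical stand-in for struct btrace_function, reduced to the fields
   needed to illustrate the numbering scheme.  */
struct sketch_function
{
  unsigned int number;  /* 1-based position in the vector.  */
  int level;
};

/* Segment NUMBER lives at index NUMBER - 1; zero and out-of-range numbers
   yield NULL, mirroring ftrace_find_call_by_number in the patch below.  */
static sketch_function *
find_by_number (std::vector<sketch_function> &functions, unsigned int number)
{
  if (number == 0 || number > functions.size ())
    return nullptr;

  return &functions[number - 1];
}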
Diffstat (limited to 'gdb')
-rw-r--r--  gdb/ChangeLog         18
-rw-r--r--  gdb/btrace.c          96
-rw-r--r--  gdb/btrace.h          23
-rw-r--r--  gdb/record-btrace.c    2
4 files changed, 82 insertions, 57 deletions
diff --git a/gdb/ChangeLog b/gdb/ChangeLog
index 08795f50a48..f46a2837f49 100644
--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
@@ -1,5 +1,23 @@
2017-05-30 Tim Wiederhake <tim.wiederhake@intel.com>
+ * btrace.c (ftrace_find_call_by_number): New function.
+ (ftrace_new_function): Store objects, not pointers.
+ (ftrace_find_call_by_number, ftrace_new_return, ftrace_new_switch,
+ ftrace_new_gap, ftrace_update_function,
ftrace_compute_global_level_offset, btrace_stitch_bts, btrace_clear,
btrace_insn_get, btrace_insn_get_error, btrace_insn_end,
btrace_insn_next, btrace_insn_prev, btrace_find_insn_by_number,
+ btrace_ends_with_single_insn, btrace_call_get): Account for
+ btrace_thread_info::functions now storing objects.
* btrace.h (struct btrace_function): Add constructor.
(struct btrace_function) <prev, next, up, insn, errcode, flags>:
Initialize with default values.
(struct btrace_thread_info) <functions>: Make std::vector.
+ * record-btrace.c (record_btrace_frame_sniffer): Account for
+ btrace_thread_info::functions now storing objects.
+
+2017-05-30 Tim Wiederhake <tim.wiederhake@intel.com>
+
* btrace.c: Remove typedef bfun_s.
(ftrace_new_gap): Directly add gaps to the list of gaps.
(btrace_bridge_gaps, btrace_compute_ftrace_bts, pt_btrace_insn_flags,
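Because the segments now live inside a std::vector, appending a new segment may reallocate the vector and invalidate every pointer and reference into it; this is why ftrace_new_return and ftrace_new_switch in the hunks below re-fetch PREV via ftrace_find_call_by_number only after calling ftrace_new_function. A minimal sketch of that ordering, using hypothetical names (struct segment and append_segment are illustrations, not GDB code):

#include <vector>

struct segment
{
  unsigned int number = 0;  /* 1-based segment number.  */
  unsigned int up = 0;      /* Number of the caller segment, 0 if none.  */
};

/* Append a segment and only then look up its predecessor by number.
   Caching &functions.back () before the append would risk a dangling
   pointer, since emplace_back may reallocate the storage.  */
static segment *
append_segment (std::vector<segment> &functions)
{
  functions.emplace_back ();
  segment &bfun = functions.back ();
  bfun.number = functions.size ();

  if (bfun.number > 1)
    bfun.up = functions[bfun.number - 2].number;

  return &bfun;
}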
diff --git a/gdb/btrace.c b/gdb/btrace.c
index ebac069a668..34572b0787d 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -156,13 +156,25 @@ ftrace_call_num_insn (const struct btrace_function* bfun)
exists. BTINFO is the branch trace information for the current thread. */
static struct btrace_function *
+ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
+ unsigned int number)
+{
+ if (number == 0 || number > btinfo->functions.size ())
+ return NULL;
+
+ return &btinfo->functions[number - 1];
+}
+
+/* A const version of the function above. */
+
+static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
unsigned int number)
{
if (number == 0 || number > btinfo->functions.size ())
return NULL;
- return btinfo->functions[number - 1];
+ return &btinfo->functions[number - 1];
}
/* Return non-zero if BFUN does not match MFUN and FUN,
@@ -214,37 +226,34 @@ ftrace_function_switched (const struct btrace_function *bfun,
/* Allocate and initialize a new branch trace function segment at the end of
the trace.
BTINFO is the branch trace information for the current thread.
- MFUN and FUN are the symbol information we have for this function. */
+ MFUN and FUN are the symbol information we have for this function.
+ This invalidates all struct btrace_function pointers currently held. */
static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *bfun;
-
- bfun = XCNEW (struct btrace_function);
-
- bfun->msym = mfun;
- bfun->sym = fun;
+ int level;
+ unsigned int number, insn_offset;
if (btinfo->functions.empty ())
{
- /* Start counting at one. */
- bfun->number = 1;
- bfun->insn_offset = 1;
+ /* Start counting NUMBER and INSN_OFFSET at one. */
+ level = 0;
+ number = 1;
+ insn_offset = 1;
}
else
{
- struct btrace_function *prev = btinfo->functions.back ();
-
- bfun->number = prev->number + 1;
- bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
- bfun->level = prev->level;
+ const struct btrace_function *prev = &btinfo->functions.back ();
+ level = prev->level;
+ number = prev->number + 1;
+ insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
}
- btinfo->functions.push_back (bfun);
- return bfun;
+ btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
+ return &btinfo->functions.back ();
}
/* Update the UP field of a function segment. */
@@ -406,10 +415,10 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *prev = btinfo->functions.back ();
- struct btrace_function *bfun, *caller;
+ struct btrace_function *prev, *bfun, *caller;
bfun = ftrace_new_function (btinfo, mfun, fun);
+ prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
/* It is important to start at PREV's caller. Otherwise, we might find
PREV itself, if PREV is a recursive function. */
@@ -488,12 +497,12 @@ ftrace_new_switch (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *prev = btinfo->functions.back ();
- struct btrace_function *bfun;
+ struct btrace_function *prev, *bfun;
/* This is an unexplained function switch. We can't really be sure about the
call stack, yet the best I can think of right now is to preserve it. */
bfun = ftrace_new_function (btinfo, mfun, fun);
+ prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
bfun->up = prev->up;
bfun->flags = prev->flags;
@@ -518,7 +527,7 @@ ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
else
{
/* We hijack the previous function segment if it was empty. */
- bfun = btinfo->functions.back ();
+ bfun = &btinfo->functions.back ();
if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
bfun = ftrace_new_function (btinfo, NULL, NULL);
}
@@ -559,7 +568,7 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
return ftrace_new_function (btinfo, mfun, fun);
/* If we had a gap before, we create a function. */
- bfun = btinfo->functions.back ();
+ bfun = &btinfo->functions.back ();
if (bfun->errcode != 0)
return ftrace_new_function (btinfo, mfun, fun);
@@ -732,12 +741,12 @@ ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
unsigned int length = btinfo->functions.size() - 1;
for (unsigned int i = 0; i < length; ++i)
- level = std::min (level, btinfo->functions[i]->level);
+ level = std::min (level, btinfo->functions[i].level);
/* The last function segment contains the current instruction, which is not
really part of the trace. If it contains just this one instruction, we
ignore the segment. */
- struct btrace_function *last = btinfo->functions.back();
+ struct btrace_function *last = &btinfo->functions.back();
if (VEC_length (btrace_insn_s, last->insn) != 1)
level = std::min (level, last->level);
@@ -1607,7 +1616,7 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
gdb_assert (!btinfo->functions.empty ());
gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
- last_bfun = btinfo->functions.back ();
+ last_bfun = &btinfo->functions.back ();
/* If the existing trace ends with a gap, we just glue the traces
together. We need to drop the last (i.e. chronologically first) block
@@ -1899,10 +1908,7 @@ btrace_clear (struct thread_info *tp)
btinfo = &tp->btrace;
for (auto &bfun : btinfo->functions)
- {
- VEC_free (btrace_insn_s, bfun->insn);
- xfree (bfun);
- }
+ VEC_free (btrace_insn_s, bfun.insn);
btinfo->functions.clear ();
btinfo->ngaps = 0;
@@ -2251,7 +2257,7 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
unsigned int index, end;
index = it->insn_index;
- bfun = it->btinfo->functions[it->call_index];
+ bfun = &it->btinfo->functions[it->call_index];
/* Check if the iterator points to a gap in the trace. */
if (bfun->errcode != 0)
@@ -2270,10 +2276,7 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
- const struct btrace_function *bfun;
-
- bfun = it->btinfo->functions[it->call_index];
- return bfun->errcode;
+ return it->btinfo->functions[it->call_index].errcode;
}
/* See btrace.h. */
@@ -2281,10 +2284,7 @@ btrace_insn_get_error (const struct btrace_insn_iterator *it)
unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
- const struct btrace_function *bfun;
-
- bfun = it->btinfo->functions[it->call_index];
- return bfun->insn_offset + it->insn_index;
+ return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
/* See btrace.h. */
@@ -2313,7 +2313,7 @@ btrace_insn_end (struct btrace_insn_iterator *it,
if (btinfo->functions.empty ())
error (_("No trace."));
- bfun = btinfo->functions.back ();
+ bfun = &btinfo->functions.back ();
length = VEC_length (btrace_insn_s, bfun->insn);
/* The last function may either be a gap or it contains the current
@@ -2335,7 +2335,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
const struct btrace_function *bfun;
unsigned int index, steps;
- bfun = it->btinfo->functions[it->call_index];
+ bfun = &it->btinfo->functions[it->call_index];
steps = 0;
index = it->insn_index;
@@ -2417,7 +2417,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
const struct btrace_function *bfun;
unsigned int index, steps;
- bfun = it->btinfo->functions[it->call_index];
+ bfun = &it->btinfo->functions[it->call_index];
steps = 0;
index = it->insn_index;
@@ -2495,12 +2495,12 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
return 0;
lower = 0;
- bfun = btinfo->functions[lower];
+ bfun = &btinfo->functions[lower];
if (number < bfun->insn_offset)
return 0;
upper = btinfo->functions.size () - 1;
- bfun = btinfo->functions[upper];
+ bfun = &btinfo->functions[upper];
if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
return 0;
@@ -2509,7 +2509,7 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
{
const unsigned int average = lower + (upper - lower) / 2;
- bfun = btinfo->functions[average];
+ bfun = &btinfo->functions[average];
if (number < bfun->insn_offset)
{
@@ -2543,7 +2543,7 @@ btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
if (btinfo->functions.empty ())
return false;
- bfun = btinfo->functions.back ();
+ bfun = &btinfo->functions.back ();
if (bfun->errcode != 0)
return false;
@@ -2558,7 +2558,7 @@ btrace_call_get (const struct btrace_call_iterator *it)
if (it->index >= it->btinfo->functions.size ())
return NULL;
- return it->btinfo->functions[it->index];
+ return &it->btinfo->functions[it->index];
}
/* See btrace.h. */
diff --git a/gdb/btrace.h b/gdb/btrace.h
index ed5fe914b4b..9fde919b063 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -135,6 +135,13 @@ enum btrace_pt_error
We do not allow function segments without instructions otherwise. */
struct btrace_function
{
+ btrace_function (struct minimal_symbol *msym_, struct symbol *sym_,
+ unsigned int number_, unsigned int insn_offset_, int level_)
+ : msym (msym_), sym (sym_), insn_offset (insn_offset_), number (number_),
+ level (level_)
+ {
+ }
+
/* The full and minimal symbol for the function. Both may be NULL. */
struct minimal_symbol *msym;
struct symbol *sym;
@@ -143,22 +150,22 @@ struct btrace_function
the same function. If a function calls another function, the former will
have at least two segments: one before the call and another after the
return. Will be zero if there is no such function segment. */
- unsigned int prev;
- unsigned int next;
+ unsigned int prev = 0;
+ unsigned int next = 0;
/* The function segment number of the directly preceding function segment in
a (fake) call stack. Will be zero if there is no such function segment in
the record. */
- unsigned int up;
+ unsigned int up = 0;
/* The instructions in this function segment.
The instruction vector will be empty if the function segment
represents a decode error. */
- VEC (btrace_insn_s) *insn;
+ VEC (btrace_insn_s) *insn = NULL;
/* The error code of a decode error that led to a gap.
Must be zero unless INSN is empty; non-zero otherwise. */
- int errcode;
+ int errcode = 0;
/* The instruction number offset for the first instruction in this
function segment.
@@ -180,7 +187,7 @@ struct btrace_function
int level;
/* A bit-vector of btrace_function_flag. */
- btrace_function_flags flags;
+ btrace_function_flags flags = 0;
};
/* A branch trace instruction iterator. */
@@ -325,10 +332,10 @@ struct btrace_thread_info
/* The raw branch trace data for the below branch trace. */
struct btrace_data data;
- /* Vector of pointer to decoded function segments in execution flow order.
+ /* Vector of decoded function segments in execution flow order.
Note that the numbering for btrace function segments starts with 1, so
function segment i will be at index (i - 1). */
- std::vector<btrace_function *> functions;
+ std::vector<btrace_function> functions;
/* The function level offset. When added to each function's LEVEL,
this normalizes the function levels such that the smallest level
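The btrace.h hunks above pair a constructor for the symbol, number, offset and level fields with in-class default initializers for the remaining members, so the emplace_back in ftrace_new_function produces a fully initialized segment without the zero-fill that XCNEW previously provided. A condensed sketch of that initialization pattern, with hypothetical names (segment_sketch is not the real struct):

#include <vector>

struct segment_sketch
{
  segment_sketch (unsigned int number_, unsigned int insn_offset_, int level_)
    : insn_offset (insn_offset_), number (number_), level (level_)
  {
  }

  unsigned int up = 0;       /* Defaulted, like prev/next/up in the patch.  */
  int errcode = 0;           /* Defaulted, like errcode and flags.  */
  unsigned int insn_offset;  /* Set by the constructor.  */
  unsigned int number;
  int level;
};

static void
append (std::vector<segment_sketch> &functions)
{
  /* Constructs the element in place; the defaulted members need no
     explicit zeroing at the call site.  */
  functions.emplace_back (functions.size () + 1, 1, 0);
}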
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 55bc7a1a3a8..b216f1f9cad 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1681,7 +1681,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
replay = tp->btrace.replay;
if (replay != NULL)
- bfun = replay->btinfo->functions[replay->call_index];
+ bfun = &replay->btinfo->functions[replay->call_index];
}
else
{