return 0;
}
+static bool alloc_lbr_stitch(struct thread *thread)
+{
+ if (thread->lbr_stitch)
+ return true;
+
+ thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
+ if (!thread->lbr_stitch)
+ goto err;
+
+ return true;
+
+err:
+ pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
+ thread->lbr_stitch_enable = false;
+ return false;
+}
+
/*
 * Resolve LBR callstack chain sample
* Return:
{
struct ip_callchain *chain = sample->callchain;
int chain_nr = min(max_stack, (int)chain->nr), i;
+ struct lbr_stitch *lbr_stitch;
u64 branch_from = 0;
int err;
if (i == chain_nr)
return 0;
+ if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
+ alloc_lbr_stitch(thread)) {
+ lbr_stitch = thread->lbr_stitch;
+
+ memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
+ }
+
if (callchain_param.order == ORDER_CALLEE) {
/* Add kernel ip */
err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
#include <linux/refcount.h>
#include <linux/rbtree.h>
#include <linux/list.h>
+#include <linux/zalloc.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <strlist.h>
#include <intlist.h>
#include "rwsem.h"
+#include "event.h"
struct addr_location;
struct map;
struct thread_stack;
struct unwind_libunwind_ops;
+struct lbr_stitch {
+ struct perf_sample prev_sample;
+};
+
struct thread {
union {
struct rb_node rb_node;
/* LBR call stack stitch */
bool lbr_stitch_enable;
+ struct lbr_stitch *lbr_stitch;
};
struct machine;
return false;
}
+static inline void thread__free_stitch_list(struct thread *thread)
+{
+ zfree(&thread->lbr_stitch);
+}
+
#endif /* __PERF_THREAD_H */
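
For readers unfamiliar with the pattern, here is a small self-contained sketch of the lazy allocate / free lifecycle the hunks above add. It mirrors the names from this patch but uses plain libc stand-ins (calloc()/free()/fprintf()) for the perf helpers zalloc(), zfree() and pr_warning(), so it compiles and runs on its own; it is an illustration, not the patched perf code.

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct lbr_stitch { int dummy; };	/* stand-in for the real struct */

	struct thread {
		bool lbr_stitch_enable;
		struct lbr_stitch *lbr_stitch;
	};

	/* Same shape as alloc_lbr_stitch(): allocate once on first use,
	 * reuse afterwards, and disable the feature if allocation fails. */
	static bool alloc_lbr_stitch(struct thread *thread)
	{
		if (thread->lbr_stitch)
			return true;

		thread->lbr_stitch = calloc(1, sizeof(*thread->lbr_stitch));
		if (!thread->lbr_stitch)
			goto err;

		return true;

	err:
		fprintf(stderr, "Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
		thread->lbr_stitch_enable = false;
		return false;
	}

	/* Same shape as thread__free_stitch_list(): release and clear the
	 * pointer, which is what zfree() does in perf. */
	static void thread__free_stitch_list(struct thread *thread)
	{
		free(thread->lbr_stitch);
		thread->lbr_stitch = NULL;
	}

	int main(void)
	{
		struct thread t = { .lbr_stitch_enable = true };

		if (t.lbr_stitch_enable && alloc_lbr_stitch(&t))
			printf("stitch state ready\n");

		thread__free_stitch_list(&t);
		return 0;
	}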