Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8c3addc..df515cd 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -33,6 +33,7 @@
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size
+#include "cgroup.h"
#include <linux/ctype.h>
#include <symbol/kallsyms.h>
@@ -42,6 +43,11 @@
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
+static struct dso *machine__kernel_dso(struct machine *machine)
+{
+ return machine->vmlinux_map->dso;
+}
+
static void dsos__init(struct dsos *dsos)
{
INIT_LIST_HEAD(&dsos->head);
@@ -81,7 +87,7 @@
int err = -ENOMEM;
memset(machine, 0, sizeof(*machine));
- map_groups__init(&machine->kmaps, machine);
+ maps__init(&machine->kmaps, machine);
RB_CLEAR_NODE(&machine->rb_node);
dsos__init(&machine->dsos);
@@ -212,7 +218,7 @@
return;
machine__destroy_kernel_maps(machine);
- map_groups__exit(&machine->kmaps);
+ maps__exit(&machine->kmaps);
dsos__exit(&machine->dsos);
machine__exit_vdso(machine);
zfree(&machine->root_dir);
@@ -407,28 +413,28 @@
if (!leader)
goto out_err;
- if (!leader->mg)
- leader->mg = map_groups__new(machine);
+ if (!leader->maps)
+ leader->maps = maps__new(machine);
- if (!leader->mg)
+ if (!leader->maps)
goto out_err;
- if (th->mg == leader->mg)
+ if (th->maps == leader->maps)
return;
- if (th->mg) {
+ if (th->maps) {
/*
* Maps are created from MMAP events which provide the pid and
* tid. Consequently there should never be any maps on a thread
* with an unknown pid. Just print an error if there are.
*/
- if (!map_groups__empty(th->mg))
+ if (!maps__empty(th->maps))
pr_err("Discarding thread maps for %d:%d\n",
th->pid_, th->tid);
- map_groups__put(th->mg);
+ maps__put(th->maps);
}
- th->mg = map_groups__get(leader->mg);
+ th->maps = maps__get(leader->maps);
out_put:
thread__put(leader);
return;
@@ -531,14 +537,13 @@
rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
/*
- * We have to initialize map_groups separately
- * after rb tree is updated.
+ * We have to initialize maps separately after rb tree is updated.
*
* The reason is that we call machine__findnew_thread
- * within thread__init_map_groups to find the thread
+ * within thread__init_maps to find the thread
* leader and that would screw up the rb tree.
*/
- if (thread__init_map_groups(th, machine)) {
+ if (thread__init_maps(th, machine)) {
rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
thread__put(th);
@@ -650,6 +655,22 @@
return err;
}
+int machine__process_cgroup_event(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct cgroup *cgrp;
+
+ if (dump_trace)
+ perf_event__fprintf_cgroup(event, stdout);
+
+ cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
+ if (cgrp == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
int machine__process_lost_event(struct machine *machine __maybe_unused,
union perf_event *event, struct perf_sample *sample __maybe_unused)
{
@@ -682,6 +703,7 @@
dso__set_module_info(dso, m, machine);
dso__set_long_name(dso, strdup(filename), true);
+ dso->kernel = DSO_SPACE__KERNEL;
}
dso__get(dso);
@@ -719,17 +741,36 @@
struct perf_sample *sample __maybe_unused)
{
struct symbol *sym;
- struct map *map;
+ struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);
- map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
if (!map) {
- map = dso__new_map(event->ksymbol.name);
- if (!map)
+ struct dso *dso = dso__new(event->ksymbol.name);
+
+ if (dso) {
+ dso->kernel = DSO_SPACE__KERNEL;
+ map = map__new2(0, dso);
+ }
+
+ if (!dso || !map) {
+ dso__put(dso);
return -ENOMEM;
+ }
+
+ if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
+ map->dso->binary_type = DSO_BINARY_TYPE__OOL;
+ map->dso->data.file_size = event->ksymbol.len;
+ dso__set_loaded(map->dso);
+ }
map->start = event->ksymbol.addr;
map->end = map->start + event->ksymbol.len;
- map_groups__insert(&machine->kmaps, map);
+ maps__insert(&machine->kmaps, map);
+ dso__set_loaded(dso);
+
+ if (is_bpf_image(event->ksymbol.name)) {
+ dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
+ dso__set_long_name(dso, "", false);
+ }
}
sym = symbol__new(map->map_ip(map, map->start),
@@ -745,11 +786,20 @@
union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
+ struct symbol *sym;
struct map *map;
- map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
- if (map)
- map_groups__remove(&machine->kmaps, map);
+ map = maps__find(&machine->kmaps, event->ksymbol.addr);
+ if (!map)
+ return 0;
+
+ if (map != machine->vmlinux_map)
+ maps__remove(&machine->kmaps, map);
+ else {
+ sym = dso__find_symbol(map->dso, map->map_ip(map, map->start));
+ if (sym)
+ dso__delete_symbol(map->dso, sym);
+ }
return 0;
}
@@ -767,20 +817,57 @@
return machine__process_ksymbol_register(machine, event, sample);
}
-struct map *machine__findnew_module_map(struct machine *machine, u64 start,
- const char *filename)
+int machine__process_text_poke(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct map *map = maps__find(&machine->kmaps, event->text_poke.addr);
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ if (dump_trace)
+ perf_event__fprintf_text_poke(event, machine, stdout);
+
+ if (!event->text_poke.new_len)
+ return 0;
+
+ if (cpumode != PERF_RECORD_MISC_KERNEL) {
+ pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
+ return 0;
+ }
+
+ if (map && map->dso) {
+ u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
+ int ret;
+
+ /*
+ * Kernel maps might be changed when loading symbols so loading
+ * must be done prior to using kernel maps.
+ */
+ map__load(map);
+ ret = dso__data_write_cache_addr(map->dso, map, machine,
+ event->text_poke.addr,
+ new_bytes,
+ event->text_poke.new_len);
+ if (ret != event->text_poke.new_len)
+ pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
+ event->text_poke.addr);
+ } else {
+ pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
+ event->text_poke.addr);
+ }
+
+ return 0;
+}
+
+static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
+ const char *filename)
{
struct map *map = NULL;
- struct dso *dso = NULL;
struct kmod_path m;
+ struct dso *dso;
if (kmod_path__parse_name(&m, filename))
return NULL;
- map = map_groups__find_by_name(&machine->kmaps, m.name);
- if (map)
- goto out;
-
dso = machine__findnew_module_dso(machine, &m, filename);
if (dso == NULL)
goto out;
@@ -789,9 +876,9 @@
if (map == NULL)
goto out;
- map_groups__insert(&machine->kmaps, map);
+ maps__insert(&machine->kmaps, map);
- /* Put the map here because map_groups__insert alread got it */
+ /* Put the map here because maps__insert already got it */
map__put(map);
out:
/* put the dso here, corresponding to machine__findnew_module_dso */
@@ -836,7 +923,7 @@
{
int i;
size_t printed = 0;
- struct dso *kdso = machine__kernel_map(machine)->dso;
+ struct dso *kdso = machine__kernel_dso(machine);
if (kdso->has_build_id) {
char filename[PATH_MAX];
@@ -887,14 +974,14 @@
vmlinux_name = symbol_conf.vmlinux_name;
kernel = machine__findnew_kernel(machine, vmlinux_name,
- "[kernel]", DSO_TYPE_KERNEL);
+ "[kernel]", DSO_SPACE__KERNEL);
} else {
if (symbol_conf.default_guest_vmlinux_name)
vmlinux_name = symbol_conf.default_guest_vmlinux_name;
kernel = machine__findnew_kernel(machine, vmlinux_name,
"[guest.kernel]",
- DSO_TYPE_GUEST_KERNEL);
+ DSO_SPACE__KERNEL_GUEST);
}
if (kernel != NULL && (!kernel->has_build_id))
@@ -973,10 +1060,9 @@
kmap = map__kmap(map);
- kmap->kmaps = &machine->kmaps;
strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
- map_groups__insert(&machine->kmaps, map);
+ maps__insert(&machine->kmaps, map);
pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
kmap->name, map->start, map->end);
@@ -1021,8 +1107,7 @@
int machine__map_x86_64_entry_trampolines(struct machine *machine,
struct dso *kernel)
{
- struct map_groups *kmaps = &machine->kmaps;
- struct maps *maps = &kmaps->maps;
+ struct maps *kmaps = &machine->kmaps;
int nr_cpus_avail, cpu;
bool found = false;
struct map *map;
@@ -1032,14 +1117,14 @@
* In the vmlinux case, pgoff is a virtual address which must now be
* mapped to a vmlinux offset.
*/
- for (map = maps__first(maps); map; map = map__next(map)) {
+ maps__for_each_entry(kmaps, map) {
struct kmap *kmap = __map__kmap(map);
struct map *dest_map;
if (!kmap || !is_entry_trampoline(kmap->name))
continue;
- dest_map = map_groups__find(kmaps, map->pgoff);
+ dest_map = maps__find(kmaps, map->pgoff);
if (dest_map != map)
map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
found = true;
@@ -1084,9 +1169,6 @@
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
- struct kmap *kmap;
- struct map *map;
-
/* In case the kernel map is being renewed, destroy the previous one */
machine__destroy_kernel_maps(machine);
@@ -1095,14 +1177,7 @@
return -1;
machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
- map = machine__kernel_map(machine);
- kmap = map__kmap(map);
- if (!kmap)
- return -1;
-
- kmap->kmaps = &machine->kmaps;
- map_groups__insert(&machine->kmaps, map);
-
+ maps__insert(&machine->kmaps, machine->vmlinux_map);
return 0;
}
@@ -1115,7 +1190,7 @@
return;
kmap = map__kmap(map);
- map_groups__remove(&machine->kmaps, map);
+ maps__remove(&machine->kmaps, map);
if (kmap && kmap->ref_reloc_sym) {
zfree((char **)&kmap->ref_reloc_sym->name);
zfree(&kmap->ref_reloc_sym);
@@ -1210,7 +1285,7 @@
* kernel, with modules between them, fixup the end of all
* sections.
*/
- map_groups__fixup_end(&machine->kmaps);
+ maps__fixup_end(&machine->kmaps);
}
return ret;
@@ -1261,11 +1336,10 @@
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}
-static int map_groups__set_module_path(struct map_groups *mg, const char *path,
- struct kmod_path *m)
+static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
char *long_name;
- struct map *map = map_groups__find_by_name(mg, m->name);
+ struct map *map = maps__find_by_name(maps, m->name);
if (map == NULL)
return 0;
@@ -1289,8 +1363,7 @@
return 0;
}
-static int map_groups__set_modules_path_dir(struct map_groups *mg,
- const char *dir_name, int depth)
+static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
struct dirent *dent;
DIR *dir = opendir(dir_name);
@@ -1322,8 +1395,7 @@
continue;
}
- ret = map_groups__set_modules_path_dir(mg, path,
- depth + 1);
+ ret = maps__set_modules_path_dir(maps, path, depth + 1);
if (ret < 0)
goto out;
} else {
@@ -1334,7 +1406,7 @@
goto out;
if (m.kmod)
- ret = map_groups__set_module_path(mg, path, &m);
+ ret = maps__set_module_path(maps, path, &m);
zfree(&m.name);
@@ -1361,7 +1433,7 @@
machine->root_dir, version);
free(version);
- return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
+ return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
u64 *size __maybe_unused,
@@ -1379,7 +1451,7 @@
if (arch__fix_module_text_start(&start, &size, name) < 0)
return -1;
- map = machine__findnew_module_map(machine, start, name);
+ map = machine__addnew_module_map(machine, start, name);
if (map == NULL)
return -1;
map->end = start + size;
@@ -1434,11 +1506,11 @@
struct map *map = machine__kernel_map(machine);
map__get(map);
- map_groups__remove(&machine->kmaps, map);
+ maps__remove(&machine->kmaps, map);
machine__set_kernel_mmap(machine, start, end);
- map_groups__insert(&machine->kmaps, map);
+ maps__insert(&machine->kmaps, map);
map__put(map);
}
@@ -1518,8 +1590,7 @@
static int machine__process_extra_kernel_map(struct machine *machine,
union perf_event *event)
{
- struct map *kernel_map = machine__kernel_map(machine);
- struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
+ struct dso *kernel = machine__kernel_dso(machine);
struct extra_kernel_map xm = {
.start = event->mmap.start,
.end = event->mmap.start + event->mmap.len,
@@ -1538,7 +1609,7 @@
union perf_event *event)
{
struct map *map;
- enum dso_kernel_type kernel_type;
+ enum dso_space_type dso_space;
bool is_kernel_mmap;
/* If we have maps from kcore then we do not need or want any others */
@@ -1546,17 +1617,17 @@
return 0;
if (machine__is_host(machine))
- kernel_type = DSO_TYPE_KERNEL;
+ dso_space = DSO_SPACE__KERNEL;
else
- kernel_type = DSO_TYPE_GUEST_KERNEL;
+ dso_space = DSO_SPACE__KERNEL_GUEST;
is_kernel_mmap = memcmp(event->mmap.filename,
machine->mmap_name,
strlen(machine->mmap_name) - 1) == 0;
if (event->mmap.filename[0] == '/' ||
(!is_kernel_mmap && event->mmap.filename[0] == '[')) {
- map = machine__findnew_module_map(machine, event->mmap.start,
- event->mmap.filename);
+ map = machine__addnew_module_map(machine, event->mmap.start,
+ event->mmap.filename);
if (map == NULL)
goto out_problem;
@@ -1608,7 +1679,7 @@
if (kernel == NULL)
goto out_problem;
- kernel->kernel = kernel_type;
+ kernel->kernel = dso_space;
if (__machine__create_kernel_maps(machine, kernel) < 0) {
dso__put(kernel);
goto out_problem;
@@ -1651,6 +1722,12 @@
{
struct thread *thread;
struct map *map;
+ struct dso_id dso_id = {
+ .maj = event->mmap2.maj,
+ .min = event->mmap2.min,
+ .ino = event->mmap2.ino,
+ .ino_generation = event->mmap2.ino_generation,
+ };
int ret = 0;
if (dump_trace)
@@ -1671,10 +1748,7 @@
map = map__new(machine, event->mmap2.start,
event->mmap2.len, event->mmap2.pgoff,
- event->mmap2.maj,
- event->mmap2.min, event->mmap2.ino,
- event->mmap2.ino_generation,
- event->mmap2.prot,
+ &dso_id, event->mmap2.prot,
event->mmap2.flags,
event->mmap2.filename, thread);
@@ -1727,9 +1801,7 @@
map = map__new(machine, event->mmap.start,
event->mmap.len, event->mmap.pgoff,
- 0, 0, 0, 0, prot, 0,
- event->mmap.filename,
- thread);
+ NULL, prot, 0, event->mmap.filename, thread);
if (map == NULL)
goto out_problem_map;
@@ -1885,6 +1957,8 @@
ret = machine__process_mmap_event(machine, event, sample); break;
case PERF_RECORD_NAMESPACES:
ret = machine__process_namespaces_event(machine, event, sample); break;
+ case PERF_RECORD_CGROUP:
+ ret = machine__process_cgroup_event(machine, event, sample); break;
case PERF_RECORD_MMAP2:
ret = machine__process_mmap2_event(machine, event, sample); break;
case PERF_RECORD_FORK:
@@ -1906,6 +1980,8 @@
ret = machine__process_ksymbol(machine, event, sample); break;
case PERF_RECORD_BPF_EVENT:
ret = machine__process_bpf(machine, event, sample); break;
+ case PERF_RECORD_TEXT_POKE:
+ ret = machine__process_text_poke(machine, event, sample); break;
default:
ret = -1;
break;
@@ -1939,8 +2015,9 @@
ams->addr = ip;
ams->al_addr = al.addr;
- ams->sym = al.sym;
- ams->map = al.map;
+ ams->ms.maps = al.maps;
+ ams->ms.sym = al.sym;
+ ams->ms.map = al.map;
ams->phys_addr = 0;
}
@@ -1956,8 +2033,9 @@
ams->addr = addr;
ams->al_addr = al.addr;
- ams->sym = al.sym;
- ams->map = al.map;
+ ams->ms.maps = al.maps;
+ ams->ms.sym = al.sym;
+ ams->ms.map = al.map;
ams->phys_addr = phys_addr;
}
@@ -1977,8 +2055,9 @@
return mi;
}
-static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
+static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
+ struct map *map = ms->map;
char *srcline = NULL;
if (!map || callchain_param.key == CCKEY_FUNCTION)
@@ -1990,7 +2069,7 @@
bool show_addr = callchain_param.key == CCKEY_ADDRESS;
srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
- sym, show_sym, show_addr, ip);
+ ms->sym, show_sym, show_addr, ip);
srcline__tree_insert(&map->dso->srclines, ip, srcline);
}
@@ -2013,6 +2092,7 @@
struct iterations *iter,
u64 branch_from)
{
+ struct map_symbol ms;
struct addr_location al;
int nr_loop_iter = 0;
u64 iter_cycles = 0;
@@ -2071,8 +2151,11 @@
iter_cycles = iter->cycles;
}
- srcline = callchain_srcline(al.map, al.sym, al.addr);
- return callchain_cursor_append(cursor, ip, al.map, al.sym,
+ ms.maps = al.maps;
+ ms.map = al.map;
+ ms.sym = al.sym;
+ srcline = callchain_srcline(&ms, al.addr);
+ return callchain_cursor_append(cursor, ip, &ms,
branch, flags, nr_loop_iter,
iter_cycles, branch_from, srcline);
}
@@ -2082,15 +2165,16 @@
{
unsigned int i;
const struct branch_stack *bs = sample->branch_stack;
+ struct branch_entry *entries = perf_sample__branch_entries(sample);
struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
if (!bi)
return NULL;
for (i = 0; i < bs->nr; i++) {
- ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
- ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
- bi[i].flags = bs->entries[i].flags;
+ ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
+ ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
+ bi[i].flags = entries[i].flags;
}
return bi;
}
@@ -2159,6 +2243,303 @@
return nr;
}
+static int lbr_callchain_add_kernel_ip(struct thread *thread,
+ struct callchain_cursor *cursor,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ u64 branch_from,
+ bool callee, int end)
+{
+ struct ip_callchain *chain = sample->callchain;
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ int err, i;
+
+ if (callee) {
+ for (i = 0; i < end + 1; i++) {
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, chain->ips[i],
+ false, NULL, NULL, branch_from);
+ if (err)
+ return err;
+ }
+ return 0;
+ }
+
+ for (i = end; i >= 0; i--) {
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, chain->ips[i],
+ false, NULL, NULL, branch_from);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void save_lbr_cursor_node(struct thread *thread,
+ struct callchain_cursor *cursor,
+ int idx)
+{
+ struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+
+ if (!lbr_stitch)
+ return;
+
+ if (cursor->pos == cursor->nr) {
+ lbr_stitch->prev_lbr_cursor[idx].valid = false;
+ return;
+ }
+
+ if (!cursor->curr)
+ cursor->curr = cursor->first;
+ else
+ cursor->curr = cursor->curr->next;
+ memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
+ sizeof(struct callchain_cursor_node));
+
+ lbr_stitch->prev_lbr_cursor[idx].valid = true;
+ cursor->pos++;
+}
+
+static int lbr_callchain_add_lbr_ip(struct thread *thread,
+ struct callchain_cursor *cursor,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ u64 *branch_from,
+ bool callee)
+{
+ struct branch_stack *lbr_stack = sample->branch_stack;
+ struct branch_entry *entries = perf_sample__branch_entries(sample);
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ int lbr_nr = lbr_stack->nr;
+ struct branch_flags *flags;
+ int err, i;
+ u64 ip;
+
+ /*
+ * curr and pos are not used during the writing session; they are
+ * cleared in callchain_cursor_commit() when the writing session is
+ * closed. Here they are used to track the current cursor node.
+ */
+ if (thread->lbr_stitch) {
+ cursor->curr = NULL;
+ cursor->pos = cursor->nr;
+ if (cursor->nr) {
+ cursor->curr = cursor->first;
+ for (i = 0; i < (int)(cursor->nr - 1); i++)
+ cursor->curr = cursor->curr->next;
+ }
+ }
+
+ if (callee) {
+ /* Add LBR ip from first entries.to */
+ ip = entries[0].to;
+ flags = &entries[0].flags;
+ *branch_from = entries[0].from;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL,
+ *branch_from);
+ if (err)
+ return err;
+
+ /*
+ * The number of cursor nodes increases, so move the current cursor
+ * node along. There is no need to save the cursor node for entry 0,
+ * since it's impossible to stitch the whole LBR stack of the
+ * previous sample.
+ */
+ if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
+ if (!cursor->curr)
+ cursor->curr = cursor->first;
+ else
+ cursor->curr = cursor->curr->next;
+ cursor->pos++;
+ }
+
+ /* Add LBR ip from entries.from one by one. */
+ for (i = 0; i < lbr_nr; i++) {
+ ip = entries[i].from;
+ flags = &entries[i].flags;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL,
+ *branch_from);
+ if (err)
+ return err;
+ save_lbr_cursor_node(thread, cursor, i);
+ }
+ return 0;
+ }
+
+ /* Add LBR ip from entries.from one by one. */
+ for (i = lbr_nr - 1; i >= 0; i--) {
+ ip = entries[i].from;
+ flags = &entries[i].flags;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL,
+ *branch_from);
+ if (err)
+ return err;
+ save_lbr_cursor_node(thread, cursor, i);
+ }
+
+ /* Add LBR ip from first entries.to */
+ ip = entries[0].to;
+ flags = &entries[0].flags;
+ *branch_from = entries[0].from;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL,
+ *branch_from);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
+ struct callchain_cursor *cursor)
+{
+ struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct callchain_cursor_node *cnode;
+ struct stitch_list *stitch_node;
+ int err;
+
+ list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
+ cnode = &stitch_node->cursor;
+
+ err = callchain_cursor_append(cursor, cnode->ip,
+ &cnode->ms,
+ cnode->branch,
+ &cnode->branch_flags,
+ cnode->nr_loop_iter,
+ cnode->iter_cycles,
+ cnode->branch_from,
+ cnode->srcline);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static struct stitch_list *get_stitch_node(struct thread *thread)
+{
+ struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct stitch_list *stitch_node;
+
+ if (!list_empty(&lbr_stitch->free_lists)) {
+ stitch_node = list_first_entry(&lbr_stitch->free_lists,
+ struct stitch_list, node);
+ list_del(&stitch_node->node);
+
+ return stitch_node;
+ }
+
+ return malloc(sizeof(struct stitch_list));
+}
+
+static bool has_stitched_lbr(struct thread *thread,
+ struct perf_sample *cur,
+ struct perf_sample *prev,
+ unsigned int max_lbr,
+ bool callee)
+{
+ struct branch_stack *cur_stack = cur->branch_stack;
+ struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
+ struct branch_stack *prev_stack = prev->branch_stack;
+ struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
+ struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ int i, j, nr_identical_branches = 0;
+ struct stitch_list *stitch_node;
+ u64 cur_base, distance;
+
+ if (!cur_stack || !prev_stack)
+ return false;
+
+ /* Find the physical index of the base-of-stack for current sample. */
+ cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
+
+ distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
+ (max_lbr + prev_stack->hw_idx - cur_base);
+ /* Previous sample has shorter stack. Nothing can be stitched. */
+ if (distance + 1 > prev_stack->nr)
+ return false;
+
+ /*
+ * Check if there are identical LBRs between two samples.
+ * Identical LBRs must have the same from, to and flags values. Also,
+ * they have to be saved in the same LBR registers (same physical
+ * index).
+ *
+ * Start from the base-of-stack of the current sample.
+ */
+ for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
+ if ((prev_entries[i].from != cur_entries[j].from) ||
+ (prev_entries[i].to != cur_entries[j].to) ||
+ (prev_entries[i].flags.value != cur_entries[j].flags.value))
+ break;
+ nr_identical_branches++;
+ }
+
+ if (!nr_identical_branches)
+ return false;
+
+ /*
+ * Save the LBRs between the base-of-stack of the previous sample and
+ * the base-of-stack of the current sample into lbr_stitch->lists.
+ * These LBRs will be stitched later.
+ */
+ for (i = prev_stack->nr - 1; i > (int)distance; i--) {
+
+ if (!lbr_stitch->prev_lbr_cursor[i].valid)
+ continue;
+
+ stitch_node = get_stitch_node(thread);
+ if (!stitch_node)
+ return false;
+
+ memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
+ sizeof(struct callchain_cursor_node));
+
+ if (callee)
+ list_add(&stitch_node->node, &lbr_stitch->lists);
+ else
+ list_add_tail(&stitch_node->node, &lbr_stitch->lists);
+ }
+
+ return true;
+}
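+/*
+ * A worked example of the index arithmetic above, using assumed
+ * values (these numbers are illustrative only, not from the patch):
+ * take max_lbr = 32, a current sample with nr = 4 and hw_idx = 10,
+ * and a previous sample with hw_idx = 15. The current base-of-stack
+ * sits in physical register 10 - 4 + 1 = 7, which the code keeps
+ * positive as cur_base = 32 - 4 + 10 + 1 = 39. Since 15 < cur_base,
+ * the wrapped form applies: distance = 32 + 15 - 39 = 8, so
+ * prev_entries[8] occupies the same physical register (15 - 8 = 7)
+ * as cur_entries[3]. Provided prev_stack->nr >= distance + 1 = 9,
+ * there is overlap, and the identical-branch scan starts exactly
+ * there (i = distance, j = cur_stack->nr - 1).
+ */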
+
+static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
+{
+ if (thread->lbr_stitch)
+ return true;
+
+ thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
+ if (!thread->lbr_stitch)
+ goto err;
+
+ thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
+ if (!thread->lbr_stitch->prev_lbr_cursor)
+ goto free_lbr_stitch;
+
+ INIT_LIST_HEAD(&thread->lbr_stitch->lists);
+ INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
+
+ return true;
+
+free_lbr_stitch:
+ zfree(&thread->lbr_stitch);
+err:
+ pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
+ thread->lbr_stitch_enable = false;
+ return false;
+}
+
/*
* Resolve LBR callstack chain sample
* Return:
@@ -2171,12 +2552,16 @@
struct perf_sample *sample,
struct symbol **parent,
struct addr_location *root_al,
- int max_stack)
+ int max_stack,
+ unsigned int max_lbr)
{
+ bool callee = (callchain_param.order == ORDER_CALLEE);
struct ip_callchain *chain = sample->callchain;
int chain_nr = min(max_stack, (int)chain->nr), i;
- u8 cpumode = PERF_RECORD_MISC_USER;
- u64 ip, branch_from = 0;
+ struct lbr_stitch *lbr_stitch;
+ bool stitched_lbr = false;
+ u64 branch_from = 0;
+ int err;
for (i = 0; i < chain_nr; i++) {
if (chain->ips[i] == PERF_CONTEXT_USER)
@@ -2184,72 +2569,65 @@
}
/* LBR only affects the user callchain */
- if (i != chain_nr) {
- struct branch_stack *lbr_stack = sample->branch_stack;
- int lbr_nr = lbr_stack->nr, j, k;
- bool branch;
- struct branch_flags *flags;
- /*
- * LBR callstack can only get user call chain.
- * The mix_chain_nr is kernel call chain
- * number plus LBR user call chain number.
- * i is kernel call chain number,
- * 1 is PERF_CONTEXT_USER,
- * lbr_nr + 1 is the user call chain number.
- * For details, please refer to the comments
- * in callchain__printf
- */
- int mix_chain_nr = i + 1 + lbr_nr + 1;
+ if (i == chain_nr)
+ return 0;
- for (j = 0; j < mix_chain_nr; j++) {
- int err;
- branch = false;
- flags = NULL;
+ if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
+ (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
+ lbr_stitch = thread->lbr_stitch;
- if (callchain_param.order == ORDER_CALLEE) {
- if (j < i + 1)
- ip = chain->ips[j];
- else if (j > i + 1) {
- k = j - i - 2;
- ip = lbr_stack->entries[k].from;
- branch = true;
- flags = &lbr_stack->entries[k].flags;
- } else {
- ip = lbr_stack->entries[0].to;
- branch = true;
- flags = &lbr_stack->entries[0].flags;
- branch_from =
- lbr_stack->entries[0].from;
- }
- } else {
- if (j < lbr_nr) {
- k = lbr_nr - j - 1;
- ip = lbr_stack->entries[k].from;
- branch = true;
- flags = &lbr_stack->entries[k].flags;
- }
- else if (j > lbr_nr)
- ip = chain->ips[i + 1 - (j - lbr_nr)];
- else {
- ip = lbr_stack->entries[0].to;
- branch = true;
- flags = &lbr_stack->entries[0].flags;
- branch_from =
- lbr_stack->entries[0].from;
- }
- }
+ stitched_lbr = has_stitched_lbr(thread, sample,
+ &lbr_stitch->prev_sample,
+ max_lbr, callee);
- err = add_callchain_ip(thread, cursor, parent,
- root_al, &cpumode, ip,
- branch, flags, NULL,
- branch_from);
- if (err)
- return (err < 0) ? err : 0;
+ if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
+ list_replace_init(&lbr_stitch->lists,
+ &lbr_stitch->free_lists);
}
- return 1;
+ memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
}
- return 0;
+ if (callee) {
+ /* Add kernel ip */
+ err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
+ parent, root_al, branch_from,
+ true, i);
+ if (err)
+ goto error;
+
+ err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
+ root_al, &branch_from, true);
+ if (err)
+ goto error;
+
+ if (stitched_lbr) {
+ err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
+ if (err)
+ goto error;
+ }
+
+ } else {
+ if (stitched_lbr) {
+ err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
+ if (err)
+ goto error;
+ }
+ err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
+ root_al, &branch_from, false);
+ if (err)
+ goto error;
+
+ /* Add kernel ip */
+ err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
+ parent, root_al, branch_from,
+ false, i);
+ if (err)
+ goto error;
+ }
+ return 1;
+
+error:
+ return (err < 0) ? err : 0;
}
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
@@ -2282,6 +2660,7 @@
int max_stack)
{
struct branch_stack *branch = sample->branch_stack;
+ struct branch_entry *entries = perf_sample__branch_entries(sample);
struct ip_callchain *chain = sample->callchain;
int chain_nr = 0;
u8 cpumode = PERF_RECORD_MISC_USER;
@@ -2292,9 +2671,12 @@
if (chain)
chain_nr = chain->nr;
- if (perf_evsel__has_branch_callstack(evsel)) {
+ if (evsel__has_branch_callstack(evsel)) {
+ struct perf_env *env = evsel__env(evsel);
+
err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
- root_al, max_stack);
+ root_al, max_stack,
+ !env ? 0 : env->max_branches);
if (err)
return (err < 0) ? err : 0;
}
@@ -2329,7 +2711,7 @@
for (i = 0; i < nr; i++) {
if (callchain_param.order == ORDER_CALLEE) {
- be[i] = branch->entries[i];
+ be[i] = entries[i];
if (chain == NULL)
continue;
@@ -2348,7 +2730,7 @@
be[i].from >= chain->ips[first_call] - 8)
first_call++;
} else
- be[i] = branch->entries[branch->nr - i - 1];
+ be[i] = entries[branch->nr - i - 1];
}
memset(iter, 0, sizeof(struct iterations) * nr);
@@ -2420,9 +2802,10 @@
return 0;
}
-static int append_inlines(struct callchain_cursor *cursor,
- struct map *map, struct symbol *sym, u64 ip)
+static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
+ struct symbol *sym = ms->sym;
+ struct map *map = ms->map;
struct inline_node *inline_node;
struct inline_list *ilist;
u64 addr;
@@ -2443,8 +2826,12 @@
}
list_for_each_entry(ilist, &inline_node->val, list) {
- ret = callchain_cursor_append(cursor, ip, map,
- ilist->symbol, false,
+ struct map_symbol ilist_ms = {
+ .maps = ms->maps,
+ .map = map,
+ .sym = ilist->symbol,
+ };
+ ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
NULL, 0, 0, 0, ilist->srcline);
if (ret != 0)
@@ -2460,22 +2847,21 @@
const char *srcline = NULL;
u64 addr = entry->ip;
- if (symbol_conf.hide_unresolved && entry->sym == NULL)
+ if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
return 0;
- if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
+ if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
return 0;
/*
* Convert entry->ip from a virtual address to an offset in
* its corresponding binary.
*/
- if (entry->map)
- addr = map__map_ip(entry->map, entry->ip);
+ if (entry->ms.map)
+ addr = map__map_ip(entry->ms.map, entry->ip);
- srcline = callchain_srcline(entry->map, entry->sym, addr);
- return callchain_cursor_append(cursor, entry->ip,
- entry->map, entry->sym,
+ srcline = callchain_srcline(&entry->ms, addr);
+ return callchain_cursor_append(cursor, entry->ip, &entry->ms,
false, NULL, 0, 0, 0, srcline);
}
@@ -2701,9 +3087,14 @@
return addr_cpumode;
}
+struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
+{
+ return dsos__findnew_id(&machine->dsos, filename, id);
+}
+
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
- return dsos__findnew(&machine->dsos, filename);
+ return machine__findnew_dso_id(machine, filename, NULL);
}
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
@@ -2719,3 +3110,15 @@
*addrp = map->unmap_ip(map, sym->start);
return sym->name;
}
+
+int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
+{
+ struct dso *pos;
+ int err = 0;
+
+ list_for_each_entry(pos, &machine->dsos.head, node) {
+ if (fn(pos, machine, priv))
+ err = -1;
+ }
+ return err;
+}
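
A minimal caller for the new iterator might look like the sketch below. It
assumes machine__dso_t is the int (*)(struct dso *, struct machine *, void *)
callback type declared alongside this function in machine.h; the callback name
and its use of stdout are illustrative only, not part of this patch:

	static int print_dso_name(struct dso *dso,
				  struct machine *machine __maybe_unused,
				  void *priv)
	{
		FILE *fp = priv;

		/* Returning non-zero makes machine__for_each_dso() return -1. */
		fprintf(fp, "%s\n", dso->long_name);
		return 0;
	}

	/* ... given a valid struct machine *machine ... */
	machine__for_each_dso(machine, print_dso_name, stdout);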