// SPDX-License-Identifier: GPL-2.0
#include "symbol.h"
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include "dso.h"
#include "map.h"
#include "map_symbol.h"
#include "thread.h"
#include "vdso.h"
#include "build-id.h"
#include "debug.h"
#include "machine.h"
#include <linux/string.h>
#include <linux/zalloc.h>
#include "srcline.h"
#include "namespaces.h"
#include "unwind.h"
#include "srccode.h"
#include "ui/ui.h"

static void __maps__insert(struct maps *maps, struct map *map);
static void __maps__insert_name(struct maps *maps, struct map *map);

static inline int is_anon_memory(const char *filename, u32 flags)
{
	return flags & MAP_HUGETLB ||
	       !strcmp(filename, "//anon") ||
	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strncmp(filename, "/SYSV", 5) ||
	       !strcmp(filename, "[heap]");
}

static inline int is_android_lib(const char *filename)
{
	return !strncmp(filename, "/data/app-lib", 13) ||
	       !strncmp(filename, "/system/lib", 11);
}

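/*
 * Rewrite an on-device Android library path to the host-side copy, roughly:
 *   /data/app-lib/<pkg>/libfoo.so -> $APK_PATH/libs/$APP_ABI/libfoo.so
 *   /system/lib/libfoo.so -> $NDK_ROOT/platforms/$APP_PLATFORM/arch-<abi>/usr/lib/libfoo.so
 * based on the APP_ABI, APK_PATH, NDK_ROOT and APP_PLATFORM environment
 * variables. Returns false when the path cannot be rewritten.
 */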
static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (!strncmp(filename, "/data/app-lib", 13)) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (!strncmp(filename, "/system/lib/", 12)) {
		char *ndk, *app;
		const char *arch;
		int ndk_length, app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length +
			     app_length + lib_length
			     + strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			 "%.*s/platforms/%.*s/arch-%s/usr/lib/%s",
			 ndk_length, ndk, app_length, app, arch, libname);

		return true;
	}
	return false;
}

void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->start = start;
	map->end = end;
	map->pgoff = pgoff;
	map->reloc = 0;
	map->dso = dso__get(dso);
	map->map_ip = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups = NULL;
	map->erange_warned = false;
	refcount_set(&map->refcnt, 1);
}

struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename, flags);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;
		nsi = nsinfo__get(thread->nsinfo);

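		/*
		 * Anonymous or dso-less executable mappings fall back to the
		 * JIT map convention, /tmp/perf-<pid>.map, resolved through
		 * the target's namespace info.
		 */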
		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
			snprintf(newfilename, sizeof(newfilename),
				 "/tmp/perf-%d.map", nsi->pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			/* The vdso maps are always on the host and not the
			 * container. Ensure that we don't use setns to look
			 * them up.
			 */
			nnsi = nsinfo__copy(nsi);
			if (nnsi) {
				nsinfo__put(nsi);
				nnsi->need_setns = false;
				nsi = nnsi;
			}
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso(machine, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (!(prot & PROT_EXEC))
				dso__set_loaded(dso);
		}
		dso->nsinfo = nsi;
		dso__put(dso);
	}
	return map;
out_delete:
	nsinfo__put(nsi);
	free(map);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, start, 0, 0, dso);
	}

	return map;
}

/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
 */
bool __map__is_kernel(const struct map *map)
{
	return machine__kernel_map(map->groups->machine) == map;
}

bool __map__is_extra_kernel_map(const struct map *map)
{
	struct kmap *kmap = __map__kmap((struct map *)map);

	return kmap && kmap->name[0];
}

bool __map__is_bpf_prog(const struct map *map)
{
	const char *name;

	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return true;

	/*
	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
	 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
	 * guess the type based on name.
	 */
	name = map->dso->short_name;
	return name && (strstr(name, "bpf_prog_") == name);
}

bool map__has_symbols(const struct map *map)
{
	return dso__has_symbols(map->dso);
}

static void map__exit(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	dso__zput(map->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}

void map__put(struct map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		map__delete(map);
}

void map__fixup_start(struct map *map)
{
	struct rb_root_cached *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_first_cached(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root_cached *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_last(&symbols->rb_root);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *map)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso))
		return 0;

	nr = dso__load(map->dso, map);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_debug("%s with build id %s not found", name, sbuild_id);
		} else
			pr_debug("Failed to open %s", name);

		pr_debug(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_debug("%.*s was updated (is prelink enabled?). "
				 "Restart the long running apps that use it!\n",
				 (int)real_len, name);
		} else {
			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

struct symbol *map__find_symbol(struct map *map, u64 addr)
{
	if (map__load(map) < 0)
		return NULL;

	return dso__find_symbol(map->dso, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
	if (map__load(map) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso))
		dso__sort_by_name(map->dso);

	return dso__find_symbol_by_name(map->dso, name);
}

struct map *map__clone(struct map *from)
{
	struct map *map = memdup(from, sizeof(*map));

	if (map != NULL) {
		refcount_set(&map->refcnt, 1);
		RB_CLEAR_NODE(&map->rb_node);
		dso__get(map->dso);
		map->groups = NULL;
	}

	return map;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	char buf[symbol_conf.pad_output_len_dso + 1];
	const char *dsoname = "[unknown]";

	if (map && map->dso) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else
			dsoname = map->dso->name;
	}

	if (symbol_conf.pad_output_len_dso) {
		scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname);
		dsoname = buf;
	}

	return fprintf(fp, "%s", dsoname);
}

char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
	if (map == NULL)
		return SRCLINE_UNKNOWN;
	return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	int ret = 0;

	if (map && map->dso) {
		char *srcline = map__srcline(map, addr, NULL);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}

int map__fprintf_srccode(struct map *map, u64 addr,
			 FILE *fp,
			 struct srccode_state *state)
{
	char *srcfile;
	int ret = 0;
	unsigned line;
	int len;
	char *srccode;

	if (!map || !map->dso)
		return 0;
	srcfile = get_srcline_split(map->dso,
				    map__rip_2objdump(map, addr),
				    &line);
	if (!srcfile)
		return 0;

	/* Avoid redundant printing */
	if (state &&
	    state->srcfile &&
	    !strcmp(state->srcfile, srcfile) &&
	    state->line == line) {
		free(srcfile);
		return 0;
	}

	srccode = find_sourceline(srcfile, line, &len);
	if (!srccode)
		goto out_free_line;

	ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);

	if (state) {
		state->srcfile = srcfile;
		state->line = line;
	}
	return ret;

out_free_line:
	free(srcfile);
	return ret;
}


void srccode_state_free(struct srccode_state *state)
{
	zfree(&state->srcfile);
	state->line = 0;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
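 *
 * Roughly, following the branches below: if the dso's symbols needed no
 * adjustment the address is returned unchanged; dsos flagged dso->rel
 * subtract the mapping's pgoff; DSO_TYPE_USER kernel dsos add
 * dso->text_offset; everything else is unmapped back to an absolute address
 * with the runtime relocation (map->reloc) removed.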
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	struct kmap *kmap = __map__kmap(map);

	/*
	 * vmlinux does not have program headers for PTI entry trampolines and
	 * kcore may not either. However the trampoline object code is on the
	 * main kernel map, so just use that instead.
	 */
	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);

		if (kernel_map)
			map = kernel_map;
	}

	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return rip + map->dso->text_offset;

	return map->unmap_ip(map, rip) - map->reloc;
}

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
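 *
 * Roughly the inverse of map__rip_2objdump() branch by branch: pgoff is
 * added back for dso->rel, dso->text_offset is subtracted again for
 * DSO_TYPE_USER kernel dsos, and map->reloc is re-applied otherwise.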
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return map->unmap_ip(map, ip - map->dso->text_offset);

	return ip + map->reloc;
}

static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	maps->names = RB_ROOT;
	init_rwsem(&maps->lock);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	maps__init(&mg->maps);
	mg->machine = machine;
	refcount_set(&mg->refcnt, 1);
}

void map_groups__insert(struct map_groups *mg, struct map *map)
{
	maps__insert(&mg->maps, map);
	map->groups = mg;
}

static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

static void __maps__purge_names(struct maps *maps)
{
	struct rb_root *root = &maps->names;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node_name);

		next = rb_next(&pos->rb_node_name);
		rb_erase_init(&pos->rb_node_name, root);
		map__put(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(&maps->lock);
	__maps__purge(maps);
	__maps__purge_names(maps);
	up_write(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
	maps__exit(&mg->maps);
}

bool map_groups__empty(struct map_groups *mg)
{
	return !maps__first(&mg->maps);
}

struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = zalloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	unwind__finish_access(mg);
	free(mg);
}

void map_groups__put(struct map_groups *mg)
{
	if (mg && refcount_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       u64 addr, struct map **mapp)
{
	struct map *map = map_groups__find(mg, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr));
	}

	return NULL;
}

static bool map__contains_symbol(struct map *map, struct symbol *sym)
{
	u64 ip = map->unmap_ip(map, sym->start);

	return ip >= map->start && ip < map->end;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
					 struct map **mapp)
{
	struct symbol *sym;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name);

		if (sym == NULL)
			continue;
		if (!map__contains_symbol(pos, sym)) {
			sym = NULL;
			continue;
		}
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	up_read(&maps->lock);
	return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       const char *name,
					       struct map **mapp)
{
	return maps__find_symbol_by_name(&mg->maps, name, mapp);
}

int map_groups__find_ams(struct addr_map_symbol *ams)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr);

	return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	up_read(&maps->lock);

	return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	return maps__fprintf(&mg->maps, fp);
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps, map);
	__maps__insert_name(&mg->maps, map);
	map->groups = mg;
}

static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(&maps->lock);

	root = &maps->entries;

	/*
	 * Find first map where end > map->start.
	 * Same as find_vma() in kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		if (pos->end > map->start) {
			first = next;
			if (pos->start <= map->start)
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}

	next = first;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (pos->start >= map->end)
			break;

		if (verbose >= 2) {

			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map->dso->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
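		/*
		 * e.g. an existing map [0x1000, 0x5000) overlapped by a new
		 * map [0x2000, 0x3000) leaves a "before" clone covering
		 * [0x1000, 0x2000) and an "after" clone covering
		 * [0x3000, 0x5000), both still backed by pos->dso.
		 */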
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			after->pgoff += map->end - pos->start;
			assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	up_write(&maps->lock);
	return err;
}

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps, map, fp);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct thread *thread, struct map_groups *parent)
{
	struct map_groups *mg = thread->mg;
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);
		if (new == NULL)
			goto out_unlock;

		err = unwind__prepare_access(mg, new, NULL);
		if (err)
			goto out_unlock;

		map_groups__insert(mg, new);
		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(&maps->lock);
	return err;
}

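/*
 * maps->entries is an rb-tree ordered by map->start; maps__find() walks it
 * looking for the map whose [start, end) range contains a given ip.
 */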
static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

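/*
 * maps->names mirrors maps->entries but is keyed by the dso short name, so
 * other code can look maps up by dso name without a linear scan.
 */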
static void __maps__insert_name(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->names.rb_node;
	struct rb_node *parent = NULL;
	struct map *m;
	int rc;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node_name);
		rc = strcmp(m->dso->short_name, map->dso->short_name);
		if (rc < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&map->rb_node_name, parent, p);
	rb_insert_color(&map->rb_node_name, &maps->names);
	map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__insert(maps, map);
	__maps__insert_name(maps, map);
	up_write(&maps->lock);
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);

	rb_erase_init(&map->rb_node_name, &maps->names);
	map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__remove(maps, map);
	up_write(&maps->lock);
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node *p;
	struct map *m;

	down_read(&maps->lock);

	p = maps->entries.rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map, rb_node);
		if (ip < m->start)
			p = p->rb_left;
		else if (ip >= m->end)
			p = p->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(&maps->lock);
	return m;
}

struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

struct kmap *__map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel)
		return NULL;
	return (struct kmap *)(map + 1);
}

struct kmap *map__kmap(struct map *map)
{
	struct kmap *kmap = __map__kmap(map);

	if (!kmap)
		pr_err("Internal error: map__kmap with a non-kernel map\n");
	return kmap;
}

struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}