// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"

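/*
 * Weak defaults: these stay NULL unless the matching libunwind support
 * (local unwind, or x86_32/arm64 cross-platform unwind) is built in,
 * in which case the strong arch-specific definitions take precedence.
 */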
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;

static void unwind__register_ops(struct map_groups *mg,
				 struct unwind_libunwind_ops *ops)
{
	mg->unwind_libunwind_ops = ops;
}

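/*
 * Pick the libunwind ops that match the target's architecture and the
 * DSO's bitness (e.g. a 32-bit x86 DSO needs the x86_32 cross-unwind
 * ops), register them on the map groups and let them prepare the
 * unwind address space.
 */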
int unwind__prepare_access(struct map_groups *mg, struct map *map,
			   bool *initialized)
{
	const char *arch;
	enum dso_type dso_type;
	struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
	int err;

	if (!dwarf_callchain_users)
		return 0;

	if (mg->addr_space) {
		pr_debug("unwind: thread map already set, dso=%s\n",
			 map->dso->name);
		if (initialized)
			*initialized = true;
		return 0;
	}

	/* env->arch is NULL for live-mode (i.e. perf top) */
	if (!mg->machine->env || !mg->machine->env->arch)
		goto out_register;

	dso_type = dso__type(map->dso, mg->machine);
	if (dso_type == DSO__TYPE_UNKNOWN)
		return 0;

	arch = perf_env__arch(mg->machine->env);

	if (!strcmp(arch, "x86")) {
		if (dso_type != DSO__TYPE_64BIT)
			ops = x86_32_unwind_libunwind_ops;
	} else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
		if (dso_type == DSO__TYPE_64BIT)
			ops = arm64_unwind_libunwind_ops;
	}

	if (!ops) {
		pr_err("unwind: target platform=%s is not supported\n", arch);
		return 0;
	}
out_register:
	unwind__register_ops(mg, ops);

	err = mg->unwind_libunwind_ops->prepare_access(mg);
	if (initialized)
		*initialized = err ? false : true;
	return err;
}

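/*
 * The remaining entry points simply delegate to whichever ops
 * unwind__prepare_access() registered; they are no-ops when no
 * unwinder was set up.
 */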
void unwind__flush_access(struct map_groups *mg)
{
	if (mg->unwind_libunwind_ops)
		mg->unwind_libunwind_ops->flush_access(mg);
}

void unwind__finish_access(struct map_groups *mg)
{
	if (mg->unwind_libunwind_ops)
		mg->unwind_libunwind_ops->finish_access(mg);
}

int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data, int max_stack)
{
	if (thread->mg->unwind_libunwind_ops)
		return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
	return 0;
}