blob: d48667b04c4117ca613f5501ecd409083234ad6c [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/ftrace.h
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
8#ifndef __ASM_FTRACE_H
9#define __ASM_FTRACE_H
10
11#include <asm/insn.h>
12
David Brazdil0f672f62019-12-10 10:32:29 +000013#define HAVE_FUNCTION_GRAPH_FP_TEST
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000014#define MCOUNT_ADDR ((unsigned long)_mcount)
15#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
16
/*
 * Currently, gcc tends to save the link register after the local variables
 * on the stack. This causes the max stack tracer to report the function
 * frame sizes for the wrong functions. By defining
 * ARCH_FTRACE_SHIFT_STACK_TRACER, it will tell the stack tracer to expect
 * to find the return address on the stack after the local variables have
 * been set up.
 *
 * Note, this may change in the future, and we will need to deal with that
 * if it were to happen.
 */
28#define ARCH_FTRACE_SHIFT_STACK_TRACER 1
29
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000030#ifndef __ASSEMBLY__
31#include <linux/compat.h>
32
33extern void _mcount(unsigned long);
34extern void *return_address(unsigned int);
35
/*
 * Per-record architecture-private state for dynamic ftrace.
 * arm64 keeps no extra data per mcount record, so this is empty.
 */
struct dyn_arch_ftrace {
	/* No extra data needed for arm64 */
};
39
40extern unsigned long ftrace_graph_call;
41
42extern void return_to_handler(void);
43
/*
 * Translate the address recorded at an mcount call site into the
 * address ftrace should patch.
 *
 * On arm64 this is the identity: recordmcount already computes the
 * required offset, so the recorded address is returned unmodified.
 */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}
52
53#define ftrace_return_address(n) return_address(n)
54
/*
 * Because AArch32 mode does not share the same syscall table with AArch64,
 * tracing compat syscalls may result in reporting bogus syscalls or even
 * a hang-up, so just do not trace them.
 * See kernel/trace/trace_syscalls.c
 *
 * x86 code says:
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
65#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Tell the syscall trace machinery whether the syscall being traced was
 * issued by an AArch32 (compat) task; such syscalls are deliberately
 * skipped (ARCH_TRACE_IGNORE_COMPAT_SYSCALLS is defined just above).
 *
 * @regs: syscall-entry registers — unused here; the decision is made
 *        purely from the current task's compat state.
 */
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}
70
71#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
72
/*
 * Compare a syscall symbol name against a syscall name from the trace
 * event tables.
 *
 * All arm64 syscall entry points carry a "__arm64_" prefix (e.g.
 * __arm64_sys_read wraps sys_read), so the prefix is skipped before
 * comparing. Compat syscalls ("__arm64_compat_" prefix) are ignored
 * entirely (see ARCH_TRACE_IGNORE_COMPAT_SYSCALLS), so they need no
 * special handling here.
 *
 * @sym:  kernel symbol name, expected to start with "__arm64_"
 * @name: syscall name to match, without the prefix
 *
 * Returns true when the names match.
 */
static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/* sizeof("__arm64_") - 1 == strlen("__arm64_"); avoids a magic 8 */
	return !strcmp(sym + sizeof("__arm64_") - 1, name);
}
83#endif /* ifndef __ASSEMBLY__ */
84
85#endif /* __ASM_FTRACE_H */