Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | #ifndef _ASM_X86_FTRACE_H |
| 3 | #define _ASM_X86_FTRACE_H |
| 4 | |
| 5 | #ifdef CONFIG_FUNCTION_TRACER |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6 | #ifndef CC_USING_FENTRY |
| 7 | # error Compiler does not support fentry? |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8 | #endif |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9 | # define MCOUNT_ADDR ((unsigned long)(__fentry__)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ |
| 11 | |
| 12 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 13 | #define ARCH_SUPPORTS_FTRACE_OPS 1 |
| 14 | #endif |
| 15 | |
| 16 | #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR |
| 17 | |
| 18 | #ifndef __ASSEMBLY__ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 19 | extern atomic_t modifying_ftrace_code; |
| 20 | extern void __fentry__(void); |
| 21 | |
/*
 * Translate the address recorded at an mcount/fentry call site into
 * the address ftrace should act on.  On x86 recordmcount already
 * performs the necessary offset calculation at build time, so the
 * address is returned unmodified.
 */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}
| 30 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 31 | /* |
| 32 | * When a ftrace registered caller is tracing a function that is |
| 33 | * also set by a register_ftrace_direct() call, it needs to be |
| 34 | * differentiated in the ftrace_caller trampoline. To do this, we |
| 35 | * place the direct caller in the ORIG_AX part of pt_regs. This |
| 36 | * tells the ftrace_caller that there's a direct caller. |
| 37 | */ |
| 38 | static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) |
| 39 | { |
| 40 | /* Emulate a call */ |
| 41 | regs->orig_ax = addr; |
| 42 | } |
| 43 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 44 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 45 | |
/* Per-record architecture-private data for dynamic ftrace. */
struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};
| 49 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 50 | #define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR |
| 51 | |
| 52 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 53 | #endif /* __ASSEMBLY__ */ |
| 54 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 55 | |
| 56 | |
| 57 | #ifndef __ASSEMBLY__ |
| 58 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 59 | #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) |
| 60 | extern void set_ftrace_ops_ro(void); |
| 61 | #else |
| 62 | static inline void set_ftrace_ops_ro(void) { } |
| 63 | #endif |
| 64 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 65 | #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME |
/*
 * Match a syscall symbol against a "sys_foo" event name, skipping the
 * x86 entry-point prefix: "__x64_sys", "__ia32_sys", "__do_sys" or
 * plain "sys".  Comparison starts at the '_' preceding the syscall
 * name on both sides.
 */
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	const char *tail = name + 3;	/* step past "sys" in the event name */

	if (!strcmp(sym + 3, tail))		/* "sys_foo" */
		return true;
	if (!strncmp(sym, "__x64_", 6))		/* "__x64_sys_foo" */
		return !strcmp(sym + 9, tail);
	if (!strncmp(sym, "__ia32_", 7))	/* "__ia32_sys_foo" */
		return !strcmp(sym + 10, tail);
	/* "__do_sys_foo" */
	return !strncmp(sym, "__do_sys", 8) && !strcmp(sym + 8, tail);
}
| 77 | |
| 78 | #ifndef COMPILE_OFFSETS |
| 79 | |
| 80 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) |
| 81 | #include <linux/compat.h> |
| 82 | |
| 83 | /* |
| 84 | * Because ia32 syscalls do not map to x86_64 syscall numbers |
| 85 | * this screws up the trace output when tracing a ia32 task. |
| 86 | * Instead of reporting bogus syscalls, just do not trace them. |
| 87 | * |
| 88 | * If the user really wants these, then they should use the |
| 89 | * raw syscall tracepoints with filtering. |
| 90 | */ |
| 91 | #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1 |
/*
 * Report whether the syscall being traced was entered via the 32-bit
 * (compat) path, so such syscalls can be skipped per
 * ARCH_TRACE_IGNORE_COMPAT_SYSCALLS.  Note that @regs is unused here:
 * in_32bit_syscall() takes no arguments, so it presumably derives the
 * answer from the current task's state — confirm against its
 * definition if @regs-based inspection is ever needed.
 */
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return in_32bit_syscall();
}
| 96 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ |
| 97 | #endif /* !COMPILE_OFFSETS */ |
| 98 | #endif /* !__ASSEMBLY__ */ |
| 99 | |
| 100 | #endif /* _ASM_X86_FTRACE_H */ |