/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_FTRACE
#define _ASM_POWERPC_FTRACE

#include <asm/types.h>

#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR		((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */

#ifdef __ASSEMBLY__

/* Based on objdump output from glibc */

#define MCOUNT_SAVE_FRAME			\
	stwu	r1,-48(r1);			\
	stw	r3, 12(r1);			\
	stw	r4, 16(r1);			\
	stw	r5, 20(r1);			\
	stw	r6, 24(r1);			\
	mflr	r3;				\
	lwz	r4, 52(r1);			\
	mfcr	r5;				\
	stw	r7, 28(r1);			\
	stw	r8, 32(r1);			\
	stw	r9, 36(r1);			\
	stw	r10,40(r1);			\
	stw	r3, 44(r1);			\
	stw	r5, 8(r1)

#define MCOUNT_RESTORE_FRAME			\
	lwz	r6, 8(r1);			\
	lwz	r0, 44(r1);			\
	lwz	r3, 12(r1);			\
	mtctr	r0;				\
	lwz	r4, 16(r1);			\
	mtcr	r6;				\
	lwz	r5, 20(r1);			\
	lwz	r6, 24(r1);			\
	lwz	r0, 52(r1);			\
	lwz	r7, 28(r1);			\
	lwz	r8, 32(r1);			\
	mtlr	r0;				\
	lwz	r9, 36(r1);			\
	lwz	r10,40(r1);			\
	addi	r1, r1, 48
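
/*
 * Rough usage sketch (illustrative only, not defined in this header): a
 * 32-bit ftrace entry routine would bracket its body with the two macros
 * above, along these lines; ftrace_stub stands in for whichever handler
 * actually gets called:
 *
 *	_GLOBAL(ftrace_caller)
 *		MCOUNT_SAVE_FRAME
 *		# r3 holds the LR taken at the mcount call site
 *		subi	r3, r3, MCOUNT_INSN_SIZE
 *		bl	ftrace_stub
 *		MCOUNT_RESTORE_FRAME
 *		# MCOUNT_RESTORE_FRAME leaves the return address in CTR
 *		bctr
 */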

#else /* !__ASSEMBLY__ */
extern void _mcount(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* relocation of mcount call site is the same as the address */
	return addr;
}

struct dyn_arch_ftrace {
	struct module *mod;
};
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef __ASSEMBLY__
#ifdef CONFIG_FTRACE_SYSCALLS
/*
 * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
 * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
 * those.
 */
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
#ifdef PPC64_ELF_ABI_v1
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/* We need to skip past the initial dot, and the __se_sys alias */
	return !strcmp(sym + 1, name) ||
		(!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) ||
		(!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) ||
		(!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) ||
		(!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4));
}
#else
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	return !strcmp(sym, name) ||
		(!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
		(!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
		(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
		(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}
#endif /* PPC64_ELF_ABI_v1 */
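
/*
 * Worked examples (illustrative only): with the helpers above, symbol/name
 * pairs such as the following are treated as matching:
 *
 *	arch_syscall_match_sym_name("ppc_fork",          "sys_fork")        -> true
 *	arch_syscall_match_sym_name("__se_sys_clone",    "sys_clone")       -> true
 *	arch_syscall_match_sym_name("ppc64_personality", "sys_personality") -> true
 *
 * Under PPC64_ELF_ABI_v1 the symbols carry a leading dot (".ppc_fork",
 * ".__se_sys_clone"), which the first variant above skips over.
 */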
#endif /* CONFIG_FTRACE_SYSCALLS */

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline void this_cpu_disable_ftrace(void)
{
	get_paca()->ftrace_enabled = 0;
}

static inline void this_cpu_enable_ftrace(void)
{
	get_paca()->ftrace_enabled = 1;
}
#else /* CONFIG_PPC64 */
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
#endif /* CONFIG_PPC64 */
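
/*
 * Usage sketch (illustrative only): callers bracket regions in which the
 * local CPU must not recurse into the ftrace handlers, for example while
 * a CPU is being brought online or torn down:
 *
 *	this_cpu_disable_ftrace();
 *	... code that must not be traced on this CPU ...
 *	this_cpu_enable_ftrace();
 *
 * On PPC64 this flips the ftrace_enabled flag in the local paca, which the
 * ftrace entry code tests before invoking any handler; on 32-bit the
 * helpers are no-ops.
 */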
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_FTRACE */