1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_S390_FTRACE_H
3 #define _ASM_S390_FTRACE_H
4
5 #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
6 #define ARCH_SUPPORTS_FTRACE_OPS 1
7 #define MCOUNT_INSN_SIZE 6
8
9 #ifndef __ASSEMBLY__
10
11 #ifdef CONFIG_CC_IS_CLANG
12 /* https://bugs.llvm.org/show_bug.cgi?id=41424 */
13 #define ftrace_return_address(n) 0UL
14 #else
15 #define ftrace_return_address(n) __builtin_return_address(n)
16 #endif
17
18 void ftrace_caller(void);
19
20 extern void *ftrace_func;
21
22 struct dyn_arch_ftrace { };
23
24 #define MCOUNT_ADDR 0
25 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
26
27 #define KPROBE_ON_FTRACE_NOP 0
28 #define KPROBE_ON_FTRACE_CALL 1
29
30 struct module;
31 struct dyn_ftrace;
32
33 bool ftrace_need_init_nop(void);
34 #define ftrace_need_init_nop ftrace_need_init_nop
35
36 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
37 #define ftrace_init_nop ftrace_init_nop
38
/*
 * Adjust a recorded mcount/patch-site address before use.
 * On s390 this is the identity: the recorded address is used as-is.
 */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}
43
/*
 * Register state handed to ftrace callbacks.
 * On s390 this simply wraps a full struct pt_regs; see
 * arch_ftrace_get_regs() for the accessor.
 */
struct ftrace_regs {
	struct pt_regs regs;
};
47
arch_ftrace_get_regs(struct ftrace_regs * fregs)48 static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
49 {
50 return &fregs->regs;
51 }
52
ftrace_instruction_pointer_set(struct ftrace_regs * fregs,unsigned long ip)53 static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
54 unsigned long ip)
55 {
56 struct pt_regs *regs = arch_ftrace_get_regs(fregs);
57
58 regs->psw.addr = ip;
59 }
60
/*
 * When an ftrace registered caller is tracing a function that is
 * also set by a register_ftrace_direct() call, it needs to be
 * differentiated in the ftrace_caller trampoline. To do this,
 * place the direct caller in the ORIG_GPR2 part of pt_regs. This
 * tells the ftrace_caller that there's a direct caller.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
{
	/* Non-zero orig_gpr2 signals "direct caller present" to the trampoline. */
	regs->orig_gpr2 = addr;
}
72
/*
 * Even though the system call numbers are identical for s390/s390x a
 * different system call table is used for compat tasks. This may lead
 * to e.g. incorrect or missing trace event sysfs files.
 * Therefore simply do not trace compat system calls at all.
 * See kernel/trace/trace_syscalls.c.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/* Report whether @regs belongs to a compat (31-bit) task; regs itself is unused here. */
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}
85
86 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
/*
 * Match a syscall symbol name against a trace event name, ignoring the
 * arch-specific symbol prefix.
 *
 * NOTE(review): assumes @sym always starts with one of the prefixes
 * below (callers pass syscall symbols only) — a shorter @sym would
 * index past its terminator; confirm against the syscall metadata
 * lookup in kernel/trace/trace_syscalls.c.
 */
static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Skip __s390_ and __s390x_ prefix - due to compat wrappers
	 * and aliasing some symbols of 64 bit system call functions
	 * may get the __s390_ prefix instead of the __s390x_ prefix.
	 * Use sizeof on the literal prefixes (minus the terminator)
	 * instead of the magic offsets 7 and 8, so prefix and skip
	 * length cannot drift apart.
	 */
	return !strcmp(sym + sizeof("__s390_") - 1, name) ||
	       !strcmp(sym + sizeof("__s390x_") - 1, name);
}
97
98 #endif /* __ASSEMBLY__ */
99
100 #ifdef CONFIG_FUNCTION_TRACER
101
102 #define FTRACE_NOP_INSN .word 0xc004, 0x0000, 0x0000 /* brcl 0,0 */
103
104 #ifndef CC_USING_HOTPATCH
105
106 #define FTRACE_GEN_MCOUNT_RECORD(name) \
107 .section __mcount_loc, "a", @progbits; \
108 .quad name; \
109 .previous;
110
111 #else /* !CC_USING_HOTPATCH */
112
113 #define FTRACE_GEN_MCOUNT_RECORD(name)
114
115 #endif /* !CC_USING_HOTPATCH */
116
117 #define FTRACE_GEN_NOP_ASM(name) \
118 FTRACE_GEN_MCOUNT_RECORD(name) \
119 FTRACE_NOP_INSN
120
121 #else /* CONFIG_FUNCTION_TRACER */
122
123 #define FTRACE_GEN_NOP_ASM(name)
124
125 #endif /* CONFIG_FUNCTION_TRACER */
126
127 #endif /* _ASM_S390_FTRACE_H */
128