// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */
7
8 #include <linux/ftrace.h>
9 #include <linux/uaccess.h>
10 #include <linux/memory.h>
11 #include <asm/cacheflush.h>
12 #include <asm/patch.h>
13
14 #ifdef CONFIG_DYNAMIC_FTRACE
ftrace_arch_code_modify_prepare(void)15 int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
16 {
17 mutex_lock(&text_mutex);
18 return 0;
19 }
20
ftrace_arch_code_modify_post_process(void)21 int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
22 {
23 mutex_unlock(&text_mutex);
24 return 0;
25 }
26
ftrace_check_current_call(unsigned long hook_pos,unsigned int * expected)27 static int ftrace_check_current_call(unsigned long hook_pos,
28 unsigned int *expected)
29 {
30 unsigned int replaced[2];
31 unsigned int nops[2] = {NOP4, NOP4};
32
33 /* we expect nops at the hook position */
34 if (!expected)
35 expected = nops;
36
37 /*
38 * Read the text we want to modify;
39 * return must be -EFAULT on read error
40 */
41 if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
42 MCOUNT_INSN_SIZE))
43 return -EFAULT;
44
45 /*
46 * Make sure it is what we expect it to be;
47 * return must be -EINVAL on failed comparison
48 */
49 if (memcmp(expected, replaced, sizeof(replaced))) {
50 pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
51 (void *)hook_pos, expected[0], expected[1], replaced[0],
52 replaced[1]);
53 return -EINVAL;
54 }
55
56 return 0;
57 }
58
/*
 * Patch the call site at @hook_pos: when @enable is true install an
 * auipc/jalr pair that calls @target, otherwise restore two NOPs.
 *
 * Returns 0 on success, -EPERM if patching the text fails.
 */
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	make_call(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_text_nosync
	    ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}
74
/*
 * Put 4 instructions (16 bytes) at the front of the function, within the
 * patchable function entry nops' area:
 *
 *  0: REG_S  ra, -SZREG(sp)
 *  1: auipc  ra, 0x?
 *  2: jalr   -?(ra)
 *  3: REG_L  ra, -SZREG(sp)
 *
 * So the opcodes are:
 *  0: 0xfe113c23 (sd)/0xfe112e23 (sw)
 *  1: 0x???????? -> auipc
 *  2: 0x???????? -> jalr
 *  3: 0xff813083 (ld)/0xffc12083 (lw)
 */
#if __riscv_xlen == 64
#define INSN0	0xfe113c23
#define INSN3	0xff813083
#elif __riscv_xlen == 32
#define INSN0	0xfe112e23
#define INSN3	0xffc12083
#endif

#define FUNC_ENTRY_SIZE	16
#define FUNC_ENTRY_JMP	4
100
ftrace_make_call(struct dyn_ftrace * rec,unsigned long addr)101 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
102 {
103 unsigned int call[4] = {INSN0, 0, 0, INSN3};
104 unsigned long target = addr;
105 unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
106
107 call[1] = to_auipc_insn((unsigned int)(target - caller));
108 call[2] = to_jalr_insn((unsigned int)(target - caller));
109
110 if (patch_text_nosync((void *)rec->ip, call, FUNC_ENTRY_SIZE))
111 return -EPERM;
112
113 return 0;
114 }
115
ftrace_make_nop(struct module * mod,struct dyn_ftrace * rec,unsigned long addr)116 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
117 unsigned long addr)
118 {
119 unsigned int nops[4] = {NOP4, NOP4, NOP4, NOP4};
120
121 if (patch_text_nosync((void *)rec->ip, nops, FUNC_ENTRY_SIZE))
122 return -EPERM;
123
124 return 0;
125 }
126
127
128 /*
129 * This is called early on, and isn't wrapped by
130 * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
131 * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
132 * just directly poke the text, but it's simpler to just take the lock
133 * ourselves.
134 */
ftrace_init_nop(struct module * mod,struct dyn_ftrace * rec)135 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
136 {
137 int out;
138
139 ftrace_arch_code_modify_prepare();
140 out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
141 ftrace_arch_code_modify_post_process();
142
143 return out;
144 }
145
/*
 * Point both dispatch sites in the ftrace trampoline (ftrace_call and
 * ftrace_regs_call) at @func. The second site is only patched if the
 * first succeeded; the first error code wins.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true);
	}

	return ret;
}
157 #endif
158
159 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ftrace_modify_call(struct dyn_ftrace * rec,unsigned long old_addr,unsigned long addr)160 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
161 unsigned long addr)
162 {
163 unsigned int call[2];
164 unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
165 int ret;
166
167 make_call(caller, old_addr, call);
168 ret = ftrace_check_current_call(caller, call);
169
170 if (ret)
171 return ret;
172
173 return __ftrace_modify_call(caller, addr, true);
174 }
175 #endif
176
177 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
178 /*
179 * Most of this function is copied from arm64.
180 */
prepare_ftrace_return(unsigned long * parent,unsigned long self_addr,unsigned long frame_pointer)181 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
182 unsigned long frame_pointer)
183 {
184 unsigned long return_hooker = (unsigned long)&return_to_handler;
185 unsigned long old;
186
187 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
188 return;
189
190 /*
191 * We don't suffer access faults, so no extra fault-recovery assembly
192 * is needed here.
193 */
194 old = *parent;
195
196 if (!function_graph_enter(old, self_addr, frame_pointer, parent))
197 *parent = return_hooker;
198 }
199
200 #ifdef CONFIG_DYNAMIC_FTRACE
201 extern void ftrace_graph_call(void);
202 extern void ftrace_graph_regs_call(void);
ftrace_enable_ftrace_graph_caller(void)203 int ftrace_enable_ftrace_graph_caller(void)
204 {
205 int ret;
206
207 ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
208 (unsigned long)&prepare_ftrace_return, true);
209 if (ret)
210 return ret;
211
212 return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
213 (unsigned long)&prepare_ftrace_return, true);
214 }
215
ftrace_disable_ftrace_graph_caller(void)216 int ftrace_disable_ftrace_graph_caller(void)
217 {
218 int ret;
219
220 ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
221 (unsigned long)&prepare_ftrace_return, false);
222 if (ret)
223 return ret;
224
225 return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
226 (unsigned long)&prepare_ftrace_return, false);
227 }
228 #endif /* CONFIG_DYNAMIC_FTRACE */
229 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
230