// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <linux/irqflags.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE
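/*
 * ftrace calls these around every batch of code modifications; holding
 * text_mutex for the whole batch serializes us against other text
 * modifiers and satisfies the patching helpers' locking expectations.
 */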
void ftrace_arch_code_modify_prepare(void)
	__acquires(&text_mutex)
{
	mutex_lock(&text_mutex);
}

void ftrace_arch_code_modify_post_process(void)
	__releases(&text_mutex)
{
	mutex_unlock(&text_mutex);
}

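/*
 * Adjust the address recorded in a dyn_ftrace entry so that rec->ip
 * points at the jalr slot of the auipc/jalr call pair rather than at the
 * function symbol. With CALL_OPS, a further 8 bytes of the patchable
 * prologue are skipped: they hold the ftrace_ops pointer literal written
 * by ftrace_rec_set_ops() below.
 */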
unsigned long ftrace_call_adjust(unsigned long addr)
{
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return addr + 8 + MCOUNT_AUIPC_SIZE;

	return addr + MCOUNT_AUIPC_SIZE;
}

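/* fentry_ip points at the jalr; step back over the auipc in front of it. */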
unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
{
	return fentry_ip - MCOUNT_AUIPC_SIZE;
}

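/*
 * Patching here may sleep (we take text_mutex around it), so let the
 * core update loop know, and flush the icache once for the whole batch
 * rather than after every individual call site.
 */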
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
	flush_icache_all();
}

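/*
 * A patchable call site is an auipc/jalr pair through t0. The auipc half
 * is written once, at init time, and left alone afterwards: enabling,
 * disabling or retargeting a site only rewrites the 4-byte jalr slot.
 * When @validate is set, first check that the auipc currently in memory
 * matches the one we would generate for this source/target pair.
 */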
static int __ftrace_modify_call(unsigned long source, unsigned long target, bool validate)
{
	unsigned int call[2], offset;
	unsigned int replaced[2];

	offset = target - source;
	call[1] = to_jalr_t0(offset);

	if (validate) {
		call[0] = to_auipc_t0(offset);
		/*
		 * Read the text we want to modify;
		 * return must be -EFAULT on read error
		 */
		if (copy_from_kernel_nofault(replaced, (void *)source, 2 * MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced[0] != call[0]) {
			pr_err("%p: expected (%08x) but got (%08x)\n",
			       (void *)source, call[0], replaced[0]);
			return -EINVAL;
		}
	}

	/* Replace the jalr at once. Return -EPERM on write error. */
	if (patch_insn_write((void *)(source + MCOUNT_AUIPC_SIZE), call + 1, MCOUNT_JALR_SIZE))
		return -EPERM;

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
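/*
 * Pick the ftrace_ops the trampoline should invoke for this call site:
 * the unique ops attached to it if there is exactly one, otherwise the
 * list iterator, which walks every ops attached to the record.
 */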
static const struct ftrace_ops *riscv64_rec_get_ops(struct dyn_ftrace *rec)
{
	const struct ftrace_ops *ops = NULL;

	if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
		ops = ftrace_find_unique_ops(rec);
		WARN_ON_ONCE(!ops);
	}

	if (!ops)
		ops = &ftrace_list_ops;

	return ops;
}

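/*
 * The ops pointer lives in a naturally aligned 8-byte literal just ahead
 * of the call site, where the trampoline can load it. The literal is read
 * as data, never executed, so no cross-CPU instruction-fetch
 * synchronization is needed when it is updated.
 */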
static int ftrace_rec_set_ops(const struct dyn_ftrace *rec, const struct ftrace_ops *ops)
{
	unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8);

	return patch_text_nosync((void *)literal, &ops, sizeof(ops));
}

static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
}

static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, riscv64_rec_get_ops(rec));
}
#else
static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
#endif

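/*
 * Enable a call site. The auipc written at init time targets
 * ftrace_caller, so rewriting the jalr alone can only reach destinations
 * within JALR_RANGE of it; anything further away has to go through the
 * common ftrace_caller trampoline instead of being called directly.
 */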
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long distance, orig_addr, pc = rec->ip - MCOUNT_AUIPC_SIZE;
	int ret;

	ret = ftrace_rec_update_ops(rec);
	if (ret)
		return ret;

	orig_addr = (unsigned long)&ftrace_caller;
	distance = addr > orig_addr ? addr - orig_addr : orig_addr - addr;
	if (distance > JALR_RANGE)
		addr = FTRACE_ADDR;

	return __ftrace_modify_call(pc, addr, false);
}

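/*
 * Disable a call site by turning only the jalr slot into a nop; the
 * auipc in front of it stays put, so re-enabling is again a single
 * 4-byte write.
 */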
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	u32 nop4 = RISCV_INSN_NOP4;
	int ret;

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	if (patch_insn_write((void *)rec->ip, &nop4, MCOUNT_NOP4_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - MCOUNT_AUIPC_SIZE;
	unsigned int nops[2], offset;
	int ret;

	guard(mutex)(&text_mutex);

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	offset = (unsigned long)&ftrace_caller - pc;
	nops[0] = to_auipc_t0(offset);
	nops[1] = RISCV_INSN_NOP4;

	ret = patch_insn_write((void *)pc, nops, 2 * MCOUNT_INSN_SIZE);

	return ret;
}

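/* The tracer the ftrace_caller trampoline calls when CALL_OPS is not in use. */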
ftrace_func_t ftrace_call_dest = ftrace_stub;
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	/*
	 * When using CALL_OPS, the function to call is associated with the
	 * call site, and we don't have a global function pointer to update.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return 0;

	WRITE_ONCE(ftrace_call_dest, func);
	/*
	 * The data fence ensures that the update to ftrace_call_dest happens
	 * before the write to function_trace_op later in the generic ftrace
	 * code. If the ordering is not enforced, a stale ftrace_call_dest may
	 * race with loading a new function_trace_op set in
	 * ftrace_modify_all_code().
	 */
	smp_wmb();
	/*
	 * Updating ftrace does not take the stop_machine path, so irqs should
	 * not be disabled.
	 */
	WARN_ON(irqs_disabled());
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	return 0;
}

#else /* CONFIG_DYNAMIC_FTRACE */
unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
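/*
 * Retarget a live call site. The site keeps calling the common
 * ftrace_caller trampoline; what actually changes is the ops literal,
 * updated by ftrace_rec_update_ops(). Validate the existing auipc, since
 * the site is expected to already be patched.
 */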
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long caller = rec->ip - MCOUNT_AUIPC_SIZE;
	int ret;

	ret = ftrace_rec_update_ops(rec);
	if (ret)
		return ret;

	return __ftrace_modify_call(caller, FTRACE_ADDR, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return path: swap the return address saved at *parent for
 * return_to_handler so the graph tracer regains control when the traced
 * function returns. Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
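/*
 * With DYNAMIC_FTRACE the graph tracer is entered through its ftrace_ops
 * like any other tracer; the saved return address lives in fregs, so the
 * return hook is installed there.
 */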
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long frame_pointer = arch_ftrace_regs(fregs)->s0;
	unsigned long *parent = &arch_ftrace_regs(fregs)->ra;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter_regs(old, ip, frame_pointer, parent, fregs))
		*parent = return_hooker;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */