// SPDX-License-Identifier: GPL-2.0-only

#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

#include "decode-insn.h"

#define UPROBE_TRAP_NR	UINT_MAX

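/*
 * Check whether @insn is the uprobes breakpoint. With the C extension
 * the breakpoint is a 16-bit c.ebreak, so only the low halfword is
 * compared.
 */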
bool is_swbp_insn(uprobe_opcode_t *insn)
{
#ifdef CONFIG_RISCV_ISA_C
	return (*insn & 0xffff) == UPROBE_SWBP_INSN;
#else
	return *insn == UPROBE_SWBP_INSN;
#endif
}

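/*
 * On RISC-V the trapping pc already points at the breakpoint
 * instruction itself, so it can be returned unmodified.
 */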
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

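/*
 * Decode the instruction at the probed address and decide how it will
 * be handled: rejected (-EINVAL), simulated in the kernel
 * (INSN_GOOD_NO_SLOT), or single-stepped out of line from the XOL slot
 * (INSN_GOOD).
 */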
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	probe_opcode_t opcode;

	opcode = *(probe_opcode_t *)(&auprobe->insn[0]);

	auprobe->insn_size = GET_INSN_LENGTH(opcode);

	switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	case INSN_GOOD:
		auprobe->simulate = false;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

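/*
 * Prepare to single-step out of line: save the thread's bad_cause,
 * plant the UPROBE_TRAP_NR sentinel so a fault during the step can be
 * detected, and redirect the pc to the XOL slot.
 */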
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	utask->autask.saved_cause = current->thread.bad_cause;
	current->thread.bad_cause = UPROBE_TRAP_NR;

	instruction_pointer_set(regs, utask->xol_vaddr);

	return 0;
}

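/*
 * The out-of-line step completed without faulting; restore bad_cause
 * and resume at the instruction following the probed one.
 */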
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
	/* Restore the cause value saved in arch_uprobe_pre_xol(). */
	current->thread.bad_cause = utask->autask.saved_cause;

	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);

	return 0;
}

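/*
 * If bad_cause no longer holds the sentinel written by
 * arch_uprobe_pre_xol(), the task trapped while executing the XOL slot.
 */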
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.bad_cause != UPROBE_TRAP_NR)
		return true;

	return false;
}

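/*
 * Simulate instructions that cannot be stepped out of line. Returning
 * true tells the uprobes core that the probed instruction was handled
 * and no single step is needed.
 */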
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

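/* The task is being killed; abandon the out-of-line step. */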
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Restore the cause value saved in arch_uprobe_pre_xol(). */
	current->thread.bad_cause = utask->autask.saved_cause;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);
}

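/*
 * Test whether a hijacked return address is still on the stack: the
 * instance is alive while sp has not moved above the value recorded
 * when the return address was hijacked (equality is allowed only for
 * chained calls).
 */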
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
		struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

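/*
 * Swap the return address in ra for the trampoline and hand the
 * original back to the uprobes core so it can be restored on return.
 */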
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long ra;

	ra = regs->ra;

	regs->ra = trampoline_vaddr;

	return ra;
}

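/*
 * uprobes on RISC-V is driven directly from the trap handlers below
 * rather than from the die notifier chain, so there is nothing to do
 * here.
 */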
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

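/*
 * Called from the ebreak trap handler; returns true if uprobes claims
 * the breakpoint.
 */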
bool uprobe_breakpoint_handler(struct pt_regs *regs)
{
	if (uprobe_pre_sstep_notifier(regs))
		return true;

	return false;
}

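/*
 * Called for the trap raised by the ebreak that arch_uprobe_copy_ixol()
 * planted after the instruction in the XOL slot.
 */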
bool uprobe_single_step_handler(struct pt_regs *regs)
{
	if (uprobe_post_sstep_notifier(regs))
		return true;

	return false;
}

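/*
 * Copy the original instruction into the XOL slot. RISC-V has no
 * hardware single step for this, so a 32-bit ebreak is planted right
 * after the copied instruction to trap back into the kernel once it
 * has executed.
 */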
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	/* Initialize the slot */
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);

	/* Add an ebreak after the opcode to simulate a single step */
	if (vaddr) {
		dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
		*(uprobe_opcode_t *)dst = __BUG_INSN_32;
	}

	kunmap_atomic(kaddr);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}