// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>

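/* Arguments handed to patch_text_cb() by patch_text() through stop_machine(). */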
struct patch_insn {
	void *addr;
	u32 *insns;
	int ninsns;
	atomic_t cpu_count;
};

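/*
 * Non-zero while text is being patched from within a stop_machine() callback;
 * lets patch_insn_write() skip its lockdep assertion in that context (see the
 * comments there and in patch_text()).
 */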
int riscv_patch_in_stop_machine = false;

#ifdef CONFIG_MMU
/*
 * fix_to_virt(idx) needs a compile-time constant index (not a value that is
 * only known at run time, e.g. living in register a0), otherwise the
 * BUILD_BUG_ON fails with "idx >= __end_of_fixed_addresses". Hence the
 * '__always_inline' and the 'const unsigned int fixmap' parameter here.
 */
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
	uintptr_t uintaddr = (uintptr_t) addr;
	struct page *page;

	if (core_kernel_text(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
}
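
/*
 * Illustration (sketch, not taken from this file): because patch_map() is
 * __always_inline and 'fixmap' is const, every call site passes a
 * compile-time constant index, e.g.
 *
 *	waddr = patch_map(addr, FIX_TEXT_POKE0);	// constant index: OK
 *
 * whereas an index that is only known at run time, e.g.
 *
 *	unsigned int idx = pick_poke_slot();		// hypothetical helper
 *	waddr = patch_map(addr, idx);			// fails to build
 *
 * would trip the BUILD_BUG_ON() mentioned above.
 */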

static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	void *waddr = addr;
	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	int ret;

	/*
	 * The caller is expected to already hold text_mutex, so no additional
	 * locking is needed here to keep patching safe across cores.
	 *
	 * We're currently using stop_machine() for ftrace & kprobes, and while
	 * that ensures text_mutex is held before installing the mappings, it
	 * does not ensure text_mutex is held by the calling thread. That's
	 * safe but triggers a lockdep failure, so just elide the assertion for
	 * that specific case.
	 */
	if (!riscv_patch_in_stop_machine)
		lockdep_assert_held(&text_mutex);

	if (across_pages)
		patch_map(addr + len, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, insn, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);
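
/*
 * Worked example (numbers assumed for illustration): with 4 KiB pages, a
 * 4-byte write at page offset 0xffe gives 0xffe + 4 > PAGE_SIZE, so
 * across_pages is true. The first page is mapped at FIX_TEXT_POKE0 and the
 * page containing the overflowing bytes at FIX_TEXT_POKE1; this relies on
 * the fixmap laying the two slots out on virtually consecutive pages, so the
 * single copy_to_kernel_nofault() above can run straight across the boundary.
 */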
#else
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */

int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	u32 *tp = addr;
	int ret;

	ret = patch_insn_write(tp, insns, len);

	if (!ret)
		flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
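
/*
 * Usage sketch (illustrative; 'insn_addr' is a hypothetical address of a
 * patchable 32-bit instruction, not something defined here): callers such as
 * ftrace patch one instruction at a time while holding text_mutex, e.g.
 *
 *	u32 nop = 0x00000013;	// "addi x0, x0, 0", the canonical RISC-V NOP
 *
 *	mutex_lock(&text_mutex);
 *	ret = patch_text_nosync(insn_addr, &nop, sizeof(nop));
 *	mutex_unlock(&text_mutex);
 *
 * The "nosync" variant does not rendezvous the other harts; it is for callers
 * that already guarantee nothing else is concurrently executing the code
 * being rewritten, unlike patch_text() below which uses stop_machine().
 */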
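/*
 * stop_machine() callback, run on every online CPU. The last CPU to arrive
 * (the one whose increment brings cpu_count up to num_online_cpus()) performs
 * the patching and then bumps cpu_count once more; all other CPUs spin until
 * cpu_count exceeds num_online_cpus(), so no hart resumes normal execution
 * before the new instructions are in place. For example, with four online
 * CPUs the last arrival sees a count of 4, patches, then bumps the count to
 * 5, releasing the other three.
 */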
static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	unsigned long len;
	int i, ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < patch->ninsns; i++) {
			len = GET_INSN_LENGTH(patch->insns[i]);
			ret = patch_text_nosync(patch->addr + i * len,
						&patch->insns[i], len);
		}
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);

int patch_text(void *addr, u32 *insns, int ninsns)
{
	int ret;
	struct patch_insn patch = {
		.addr = addr,
		.insns = insns,
		.ninsns = ninsns,
		.cpu_count = ATOMIC_INIT(0),
	};

	/*
	 * kprobes takes text_mutex before calling patch_text(), but as we then
	 * call stop_machine(), the lockdep assertion in patch_insn_write()
	 * gets confused by the context in which the lock is taken.
	 * Instead, ensure the lock is held before calling stop_machine(), and
	 * set riscv_patch_in_stop_machine to skip the check in
	 * patch_insn_write().
	 */
	lockdep_assert_held(&text_mutex);
	riscv_patch_in_stop_machine = true;
	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
	riscv_patch_in_stop_machine = false;
	return ret;
}
NOKPROBE_SYMBOL(patch_text);
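
/*
 * Usage sketch (illustrative; 'probe_addr' is a hypothetical instruction
 * address, and __BUG_INSN_32 is assumed to be the 32-bit ebreak encoding from
 * asm/bug.h): a kprobes-style caller holds text_mutex and lets patch_text()
 * rendezvous every online CPU through stop_machine():
 *
 *	u32 insn = __BUG_INSN_32;
 *
 *	mutex_lock(&text_mutex);
 *	ret = patch_text(probe_addr, &insn, 1);
 *	mutex_unlock(&text_mutex);
 */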