/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014-2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/types.h>
#include <xen/kernel.h>
#include <xen/mm.h>
#include <xen/vmap.h>
#include <xen/smp.h>
#include <xen/stop_machine.h>
#include <xen/virtual_region.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/page.h>

/* Override macros from asm/page.h to make them work with mfn_t */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))

extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];

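/*
 * A range of alt_instr entries to process, typically a sub-range of the
 * [__alt_instructions, __alt_instructions_end) table emitted at build time.
 */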
struct alt_region {
    const struct alt_instr *begin;
    const struct alt_instr *end;
};

/*
 * Check if the target PC is within an alternative block.
 */
static bool branch_insn_requires_update(const struct alt_instr *alt,
                                        unsigned long pc)
{
    unsigned long replptr;

    if ( is_active_kernel_text(pc) )
        return true;

    replptr = (unsigned long)ALT_REPL_PTR(alt);
    if ( pc >= replptr && pc <= (replptr + alt->alt_len) )
        return false;

    /*
     * Branching into *another* alternate sequence is doomed, and
     * we're not even trying to fix it up.
     */
    BUG();
}

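/*
 * Compute the instruction to write at @insnptr from the replacement
 * instruction at @altinsnptr, fixing up the offset of any immediate
 * branch whose target lies outside the alternate sequence.
 */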
static u32 get_alt_insn(const struct alt_instr *alt,
                        const u32 *insnptr, const u32 *altinsnptr)
{
    u32 insn;

    insn = le32_to_cpu(*altinsnptr);

    if ( insn_is_branch_imm(insn) )
    {
        s32 offset = insn_get_branch_offset(insn);
        unsigned long target;

        target = (unsigned long)altinsnptr + offset;

        /*
         * If we're branching inside the alternate sequence,
         * do not rewrite the instruction, as it is already
         * correct. Otherwise, generate the new instruction.
         */
        if ( branch_insn_requires_update(alt, target) )
        {
            offset = target - (unsigned long)insnptr;
            insn = insn_set_branch_offset(insn, offset);
        }
    }

    return insn;
}

/*
 * The region patched should be read-write to allow __apply_alternatives
 * to replace the instructions when necessary.
 */
static int __apply_alternatives(const struct alt_region *region)
{
    const struct alt_instr *alt;
    const u32 *replptr;
    u32 *origptr;

    printk(XENLOG_INFO "alternatives: Patching with alt table %p -> %p\n",
           region->begin, region->end);

    for ( alt = region->begin; alt < region->end; alt++ )
    {
        u32 insn;
        int i, nr_inst;

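        /* Only patch entries whose CPU feature is present on this system. */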
        if ( !cpus_have_cap(alt->cpufeature) )
            continue;

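        /* Only same-size replacements are supported. */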
        BUG_ON(alt->alt_len != alt->orig_len);

        origptr = ALT_ORIG_PTR(alt);
        replptr = ALT_REPL_PTR(alt);

        nr_inst = alt->alt_len / sizeof(insn);

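        /* Patch one 32-bit instruction at a time. */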
        for ( i = 0; i < nr_inst; i++ )
        {
            insn = get_alt_insn(alt, origptr + i, replptr + i);
            *(origptr + i) = cpu_to_le32(insn);
        }

        /*
         * Ensure the new instructions reach memory and nuke the
         * stale data cache lines.
         */
        clean_and_invalidate_dcache_va_range(origptr,
                                             (sizeof (*origptr) * nr_inst));
    }

    /* Nuke the instruction cache */
    invalidate_icache();

    return 0;
}

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
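    /* Set once by CPU0 when patching is done; polled by every other CPU. */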
    static int patched = 0;

    /* We always have a CPU 0 at this point (__init) */
    if ( smp_processor_id() )
    {
        while ( !read_atomic(&patched) )
            cpu_relax();
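        /* Ensure this CPU's instruction stream sees the patched text. */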
        isb();
    }
    else
    {
        int ret;
        struct alt_region region;
        mfn_t xen_mfn = virt_to_mfn(_start);
        paddr_t xen_size = _end - _start;
        unsigned int xen_order = get_order_from_bytes(xen_size);
        void *xenmap;
        struct virtual_region patch_region = {
            .list = LIST_HEAD_INIT(patch_region.list),
        };

        BUG_ON(patched);

        /*
         * The text and inittext sections are mapped read-only, so re-map
         * Xen to be able to patch the code.
         */
        xenmap = __vmap(&xen_mfn, 1U << xen_order, 1, 1, PAGE_HYPERVISOR,
                        VMAP_DEFAULT);
        /* Re-mapping Xen is not expected to fail during boot. */
        BUG_ON(!xenmap);

        /*
         * If we generate a new branch instruction, the target will be
         * calculated in this re-mapped Xen region. So we have to register
         * this re-mapped Xen region as a virtual region temporarily.
         */
        patch_region.start = xenmap;
        patch_region.end = xenmap + xen_size;
        register_virtual_region(&patch_region);

        /*
         * Find the virtual address of the alternative region in the new
         * mapping.
         * alt_instr entries hold relative offsets, so __apply_alternatives
         * will patch the re-mapped version of Xen.
         */
        region.begin = (void *)__alt_instructions - (void *)_start + xenmap;
        region.end = (void *)__alt_instructions_end - (void *)_start + xenmap;

        ret = __apply_alternatives(&region);
        /* The patching is not expected to fail during boot. */
        BUG_ON(ret != 0);

        unregister_virtual_region(&patch_region);

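        /* Tear down the temporary writable alias of Xen. */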
        vunmap(xenmap);

        /* Barriers provided by the cache flushing */
        write_atomic(&patched, 1);
    }

    return 0;
}

/*
 * This function should only be called during boot and before CPU0 jumps
 * into the idle_loop.
 */
void __init apply_alternatives_all(void)
{
    int ret;

    ASSERT(system_state != SYS_STATE_active);

    /* better not try code patching on a live SMP system */
    ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS);

    /* stop_machine_run should never fail at this stage of the boot */
    BUG_ON(ret);
}

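/*
 * Apply the alternative entries in [start, end). The caller is expected
 * to have made the patched region writable beforehand.
 */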
int apply_alternatives(const struct alt_instr *start, const struct alt_instr *end)
{
    const struct alt_region region = {
        .begin = start,
        .end = end,
    };

    return __apply_alternatives(&region);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */