/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>
#include <services/arm_arch_svc.h>

	.globl	wa_cve_2017_5715_bpiall_vbar

#define EMIT_BPIALL		0xee070fd5
#define EMIT_SMC		0xe1600070
#define ESR_EL3_A64_SMC0	0x5e000000
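
/*
 * EMIT_BPIALL and EMIT_SMC are the AArch32 encodings of the BPIALL
 * (MCR p15, 0, r0, c7, c5, 6) and SMC #0 instructions; the S-EL1 stub
 * below emits them as literal words.  ESR_EL3_A64_SMC0 is the ESR_EL3
 * value reported for an SMC #0 executed in AArch64 state.
 */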

	.macro	apply_cve_2017_5715_wa _from_vector
	/*
	 * Save register state to enable a call to AArch32 S-EL1 and return
	 * Identify the original calling vector in w2 (==_from_vector)
	 * Use w3-w6 for additional register state preservation while in S-EL1
	 */

	/* Save GP regs */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

	/* Identify the original exception vector */
	mov	w2, \_from_vector

	/* Preserve 32-bit system registers in GP registers through the workaround */
	mrs	x3, esr_el3
	mrs	x4, spsr_el3
	mrs	x5, scr_el3
	mrs	x6, sctlr_el1

	/*
	 * Preserve LR and ELR_EL3 registers in the GP regs context.
	 * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3
	 * through the workaround. This is OK because at this point the
	 * current state for this context's SP_EL0 is in the live system
	 * register, which is unmodified by the workaround.
	 */
	mrs	x7, elr_el3
	stp	x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/*
	 * Load system registers for entry to S-EL1.
	 */

	/* Mask all interrupts and set AArch32 Supervisor mode */
	movz	w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)

	/* Switch EL3 exception vectors while the workaround is executing. */
	adr	x9, wa_cve_2017_5715_bpiall_ret_vbar

	/* Setup SCTLR_EL1 with MMU off and I$ on */
	ldr	x10, stub_sel1_sctlr

	/* Land at the S-EL1 workaround stub */
	adr	x11, aarch32_stub

	/*
	 * Setting SCR_EL3 to all zeroes means that the NS, RW
	 * and SMD bits are configured as expected.
	 */
	msr	scr_el3, xzr
	msr	spsr_el3, x8
	msr	vbar_el3, x9
	msr	sctlr_el1, x10
	msr	elr_el3, x11

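	/*
	 * The ERET below drops into the AArch32 stub at S-EL1, which issues
	 * BPIALL and then SMC #0; that SMC traps straight back to EL3 through
	 * wa_cve_2017_5715_bpiall_ret_vbar installed above.
	 */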
	eret
	.endm

	/* ---------------------------------------------------------------------
	 * This vector table is used at runtime to enter the workaround at
	 * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions.  If the workaround
	 * is not enabled, the existing runtime exception vector table is used.
	 * ---------------------------------------------------------------------
	 */
vector_base wa_cve_2017_5715_bpiall_vbar

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_sync_exception_sp_el0
	b	sync_exception_sp_el0
	nop	/* to force 8 byte alignment for the following stub */

	/*
	 * Since each vector table entry is 128 bytes, we can store the
	 * stub context in the unused space to minimize memory footprint.
	 */
stub_sel1_sctlr:
	.quad	SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT

aarch32_stub:
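	/*
	 * AArch32 code run at S-EL1: BPIALL (invalidate all branch predictor
	 * entries), then SMC #0 to return to EL3.
	 */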
	.word	EMIT_BPIALL
	.word	EMIT_SMC

end_vector_entry bpiall_sync_exception_sp_el0

vector_entry bpiall_irq_sp_el0
	b	irq_sp_el0
end_vector_entry bpiall_irq_sp_el0

vector_entry bpiall_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry bpiall_fiq_sp_el0

vector_entry bpiall_serror_sp_el0
	b	serror_sp_el0
end_vector_entry bpiall_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry bpiall_sync_exception_sp_elx

vector_entry bpiall_irq_sp_elx
	b	irq_sp_elx
end_vector_entry bpiall_irq_sp_elx

vector_entry bpiall_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry bpiall_fiq_sp_elx

vector_entry bpiall_serror_sp_elx
	b	serror_sp_elx
end_vector_entry bpiall_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_sync_exception_aarch64
	apply_cve_2017_5715_wa 1
end_vector_entry bpiall_sync_exception_aarch64

vector_entry bpiall_irq_aarch64
	apply_cve_2017_5715_wa 2
end_vector_entry bpiall_irq_aarch64

vector_entry bpiall_fiq_aarch64
	apply_cve_2017_5715_wa 4
end_vector_entry bpiall_fiq_aarch64

vector_entry bpiall_serror_aarch64
	apply_cve_2017_5715_wa 8
end_vector_entry bpiall_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_sync_exception_aarch32
	apply_cve_2017_5715_wa 1
end_vector_entry bpiall_sync_exception_aarch32

vector_entry bpiall_irq_aarch32
	apply_cve_2017_5715_wa 2
end_vector_entry bpiall_irq_aarch32

vector_entry bpiall_fiq_aarch32
	apply_cve_2017_5715_wa 4
end_vector_entry bpiall_fiq_aarch32

vector_entry bpiall_serror_aarch32
	apply_cve_2017_5715_wa 8
end_vector_entry bpiall_serror_aarch32

	/* ---------------------------------------------------------------------
	 * This vector table is used while the workaround is executing.  It
	 * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
	 * workaround stubs to enter EL3 from S-EL1.  It restores the previous
	 * EL3 state before proceeding with the normal runtime exception vector.
	 * ---------------------------------------------------------------------
	 */
vector_base wa_cve_2017_5715_bpiall_ret_vbar

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_ret_sync_exception_sp_el0
	b	report_unhandled_exception
end_vector_entry bpiall_ret_sync_exception_sp_el0

vector_entry bpiall_ret_irq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry bpiall_ret_irq_sp_el0

vector_entry bpiall_ret_fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry bpiall_ret_fiq_sp_el0

vector_entry bpiall_ret_serror_sp_el0
	b	report_unhandled_exception
end_vector_entry bpiall_ret_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_ret_sync_exception_sp_elx
	b	report_unhandled_exception
end_vector_entry bpiall_ret_sync_exception_sp_elx

vector_entry bpiall_ret_irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry bpiall_ret_irq_sp_elx

vector_entry bpiall_ret_fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry bpiall_ret_fiq_sp_elx

vector_entry bpiall_ret_serror_sp_elx
	b	report_unhandled_exception
end_vector_entry bpiall_ret_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_ret_sync_exception_aarch64
	b	report_unhandled_exception
end_vector_entry bpiall_ret_sync_exception_aarch64

vector_entry bpiall_ret_irq_aarch64
	b	report_unhandled_interrupt
end_vector_entry bpiall_ret_irq_aarch64

vector_entry bpiall_ret_fiq_aarch64
	b	report_unhandled_interrupt
end_vector_entry bpiall_ret_fiq_aarch64

vector_entry bpiall_ret_serror_aarch64
	b	report_unhandled_exception
end_vector_entry bpiall_ret_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry bpiall_ret_sync_exception_aarch32
	/*
	 * w2 indicates which S-EL1 stub was run and thus which original vector
	 * was used.
	 * w3-w6 contain the saved system register state (esr_el3 in w3).
	 * Restore LR and ELR_EL3 register state from the GP regs context.
	 */
	ldp	x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Apply the restored system register state */
	msr	esr_el3, x3
	msr	spsr_el3, x4
	msr	scr_el3, x5
	msr	sctlr_el1, x6
	msr	elr_el3, x7

	/*
	 * Workaround is complete, so swap VBAR_EL3 back to the workaround
	 * entry table in preparation for subsequent Sync/IRQ/FIQ/SError
	 * exceptions.
	 */
	adr	x0, wa_cve_2017_5715_bpiall_vbar
	msr	vbar_el3, x0

	/*
	 * Restore all GP regs except x2 and x3 (x3 still holds the saved
	 * esr_el3 value).  The value in x2 indicates the type of the original
	 * exception.
	 */
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]

	/* Fast path Sync exceptions.  Static predictor will fall through. */
	tbz	w2, #0, workaround_not_sync

	/*
	 * Check whether the SMC came from AArch64 state via SMC #0,
	 * with W0 = SMCCC_ARCH_WORKAROUND_1 or W0 = SMCCC_ARCH_WORKAROUND_3.
	 *
	 * This sequence evaluates as:
	 *    (W0==SMCCC_ARCH_WORKAROUND_1) || (W0==SMCCC_ARCH_WORKAROUND_3) ?
	 *    (ESR_EL3==SMC#0) : (NE)
	 * allowing use of a single branch operation.
	 */
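	/*
	 * The NZCV immediate #4 (Z set) on the first ccmp keeps the result EQ
	 * when w0 already matched SMCCC_ARCH_WORKAROUND_1; the #0 (all flags
	 * clear) on the second forces NE when neither function ID matched.
	 */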
	orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_1
	cmp	w0, w2
	orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_3
	ccmp	w0, w2, #4, ne
	mov_imm	w2, ESR_EL3_A64_SMC0
	ccmp	w3, w2, #0, eq
	/* Static predictor will predict a fall through */
	bne	1f
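	/*
	 * The BPIALL sequence already ran when this exception was taken, so a
	 * SMCCC_ARCH_WORKAROUND_1/3 call needs no further handling and can
	 * return to the caller immediately.
	 */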
	eret
1:
	/* restore x2 and x3 and continue sync exception handling */
	b	bpiall_ret_sync_exception_aarch32_tail
end_vector_entry bpiall_ret_sync_exception_aarch32

vector_entry bpiall_ret_irq_aarch32
	b	report_unhandled_interrupt

	/*
	 * Post-workaround fan-out for non-sync exceptions
	 */
workaround_not_sync:
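	/*
	 * w2 still holds the one-hot _from_vector value: bit 3 is set for
	 * SError, bit 2 for FIQ; otherwise the exception was an IRQ.
	 */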
	tbnz	w2, #3, bpiall_ret_serror
	tbnz	w2, #2, bpiall_ret_fiq
	/* IRQ */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	irq_aarch64

bpiall_ret_fiq:
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	fiq_aarch64

bpiall_ret_serror:
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	serror_aarch64
end_vector_entry bpiall_ret_irq_aarch32

vector_entry bpiall_ret_fiq_aarch32
	b	report_unhandled_interrupt
end_vector_entry bpiall_ret_fiq_aarch32

vector_entry bpiall_ret_serror_aarch32
	b	report_unhandled_exception
end_vector_entry bpiall_ret_serror_aarch32

	/*
	 * Part of bpiall_ret_sync_exception_aarch32 to save vector space
	 */
func bpiall_ret_sync_exception_aarch32_tail
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	b	sync_exception_aarch64
endfunc bpiall_ret_sync_exception_aarch32_tail