#include <asm/asm_defns.h>
#include <asm/regs.h>
#include <asm/alternative.h>
#include <public/xen.h>

/*
 * Register aliases.
 */
lr      .req    x30             /* link register */

/*
 * Stack pushing/popping (register pairs only). Equivalent to a store with
 * pre-decrement of sp and a load with post-increment of sp.
 */
        .macro  push, xreg1, xreg2
        stp     \xreg1, \xreg2, [sp, #-16]!
        .endm

        .macro  pop, xreg1, xreg2
        ldp     \xreg1, \xreg2, [sp], #16
        .endm
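
/*
 * For illustration: "push x0, x1" expands to "stp x0, x1, [sp, #-16]!" and
 * "pop x0, x1" to "ldp x0, x1, [sp], #16", so pairs pushed in sequence must
 * be popped in the reverse order.
 */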

/*
 * Save/restore guest-mode-specific state in the outer stack frame.
 */
        .macro  entry_guest, compat

        add     x21, sp, #UREGS_SPSR_el1
        mrs     x23, SPSR_el1
        str     x23, [x21]

        .if \compat == 0 /* AArch64 mode */

        add     x21, sp, #UREGS_SP_el0
        mrs     x22, SP_el0
        str     x22, [x21]

        add     x21, sp, #UREGS_SP_el1
        mrs     x22, SP_el1
        mrs     x23, ELR_el1
        stp     x22, x23, [x21]

        .else            /* AArch32 mode */

        add     x21, sp, #UREGS_SPSR_fiq
        mrs     x22, SPSR_fiq
        mrs     x23, SPSR_irq
        stp     w22, w23, [x21]

        add     x21, sp, #UREGS_SPSR_und
        mrs     x22, SPSR_und
        mrs     x23, SPSR_abt
        stp     w22, w23, [x21]

        .endif

        .endm
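
/*
 * Note: entry_guest only fills the banked EL1 state slots of the frame
 * (SPSR_el1 plus SP_el0/SP_el1/ELR_el1 for AArch64 guests, or the banked
 * AArch32 SPSRs); the general-purpose registers are saved by the "entry"
 * macro below.
 */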

        .macro  exit_guest, compat

        add     x21, sp, #UREGS_SPSR_el1
        ldr     x23, [x21]
        msr     SPSR_el1, x23

        .if \compat == 0 /* AArch64 mode */

        add     x21, sp, #UREGS_SP_el0
        ldr     x22, [x21]
        msr     SP_el0, x22

        add     x21, sp, #UREGS_SP_el1
        ldp     x22, x23, [x21]
        msr     SP_el1, x22
        msr     ELR_el1, x23

        .else            /* AArch32 mode */

        add     x21, sp, #UREGS_SPSR_fiq
        ldp     w22, w23, [x21]
        msr     SPSR_fiq, x22
        msr     SPSR_irq, x23

        add     x21, sp, #UREGS_SPSR_und
        ldp     w22, w23, [x21]
        msr     SPSR_und, x22
        msr     SPSR_abt, x23

        .endif

        .endm

/*
 * Save state on entry to the hypervisor, restore it on exit.
 */
        .macro  entry, hyp, compat
        sub     sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
        push    x28, x29
        push    x26, x27
        push    x24, x25
        push    x22, x23
        push    x20, x21
        push    x18, x19
        push    x16, x17
        push    x14, x15
        push    x12, x13
        push    x10, x11
        push    x8, x9
        push    x6, x7
        push    x4, x5
        push    x2, x3
        push    x0, x1

        .if \hyp == 1        /* Hypervisor mode */

        add     x21, sp, #UREGS_kernel_sizeof

        .else                /* Guest mode */

        entry_guest \compat
        mov     x21, ~0 /* sp only valid for hyp frame XXX */

        .endif

        stp     lr, x21, [sp, #UREGS_LR]

        mrs     x21, elr_el2
        str     x21, [sp, #UREGS_PC]

        add     x21, sp, #UREGS_CPSR
        mrs     x22, spsr_el2
        mrs     x23, esr_el2
        stp     w22, w23, [x21]

        .endm
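
/*
 * After "entry" runs, the frame on the stack holds, from the bottom up:
 * x0-x29 (saved by the push sequence above), LR and the saved SP at
 * UREGS_LR, the ELR_EL2 snapshot at UREGS_PC, and SPSR_EL2/ESR_EL2 stored
 * as a 32-bit pair at UREGS_CPSR.
 */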

        .macro  exit, hyp, compat

        .if \hyp == 0         /* Guest mode */

        bl      leave_hypervisor_tail /* Disables interrupts on return */

        exit_guest \compat

        .endif

        b       return_from_trap

        .endm
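
/*
 * Both variants end up in return_from_trap below, which restores the
 * general-purpose registers from the frame and returns with eret.
 */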

/*
 * Bad Abort numbers
 * -----------------
 */
#define BAD_SYNC        0
#define BAD_IRQ         1
#define BAD_FIQ         2
#define BAD_ERROR       3

        .macro  invalid, reason
        mov     x0, sp
        mov     x1, #\reason
        b       do_bad_mode
        .endm
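
/*
 * do_bad_mode() receives the current register frame in x0 and the BAD_*
 * reason code in x1.
 */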

hyp_sync_invalid:
        entry   hyp=1
        invalid BAD_SYNC

hyp_irq_invalid:
        entry   hyp=1
        invalid BAD_IRQ

hyp_fiq_invalid:
        entry   hyp=1
        invalid BAD_FIQ

hyp_error_invalid:
        entry   hyp=1
        invalid BAD_ERROR

hyp_error:
        entry   hyp=1
        msr     daifclr, #2 /* Unmask IRQ (PSTATE.I) */
        mov     x0, sp
        bl      do_trap_hyp_serror
        exit    hyp=1

/* Traps taken in the current EL with SP_ELx */
hyp_sync:
        entry   hyp=1
        msr     daifclr, #6 /* Unmask SError (PSTATE.A) and IRQ (PSTATE.I) */
        mov     x0, sp
        bl      do_trap_hyp_sync
        exit    hyp=1

hyp_irq:
        entry   hyp=1
        msr     daifclr, #4 /* Unmask SError (PSTATE.A) */
        mov     x0, sp
        bl      do_trap_irq
        exit    hyp=1

guest_sync:
        entry   hyp=0, compat=0
        /*
         * The vSError is only checked when SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
         * is not set. If a vSError was pending, skip the handling of the
         * original exception and exit ASAP.
         */
        ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                    "nop; nop",
                    SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
        msr     daifclr, #6 /* Unmask SError (PSTATE.A) and IRQ (PSTATE.I) */
        mov     x0, sp
        bl      do_trap_guest_sync
1:
        exit    hyp=0, compat=0

guest_irq:
        entry   hyp=0, compat=0
        /*
         * The vSError is only checked when SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
         * is not set. If a vSError was pending, skip the handling of the
         * original exception and exit ASAP.
         */
        ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                    "nop; nop",
                    SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
        msr     daifclr, #4 /* Unmask SError (PSTATE.A) */
        mov     x0, sp
        bl      do_trap_irq
1:
        exit    hyp=0, compat=0

guest_fiq_invalid:
        entry   hyp=0, compat=0
        invalid BAD_FIQ

guest_error:
        entry   hyp=0, compat=0
        msr     daifclr, #6 /* Unmask SError (PSTATE.A) and IRQ (PSTATE.I) */
        mov     x0, sp
        bl      do_trap_guest_serror
        exit    hyp=0, compat=0

guest_sync_compat:
        entry   hyp=0, compat=1
        /*
         * The vSError is only checked when SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
         * is not set. If a vSError was pending, skip the handling of the
         * original exception and exit ASAP.
         */
        ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                    "nop; nop",
                    SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
        msr     daifclr, #6 /* Unmask SError (PSTATE.A) and IRQ (PSTATE.I) */
        mov     x0, sp
        bl      do_trap_guest_sync
1:
        exit    hyp=0, compat=1

guest_irq_compat:
        entry   hyp=0, compat=1
        /*
         * The vSError is only checked when SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
         * is not set. If a vSError was pending, skip the handling of the
         * original exception and exit ASAP.
         */
        ALTERNATIVE("bl check_pending_vserror; cbnz x0, 1f",
                    "nop; nop",
                    SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
        msr     daifclr, #4 /* Unmask SError (PSTATE.A) */
        mov     x0, sp
        bl      do_trap_irq
1:
        exit    hyp=0, compat=1

guest_fiq_invalid_compat:
        entry   hyp=0, compat=1
        invalid BAD_FIQ

guest_error_compat:
        entry   hyp=0, compat=1
        msr     daifclr, #6 /* Unmask SError (PSTATE.A) and IRQ (PSTATE.I) */
        mov     x0, sp
        bl      do_trap_guest_serror
        exit    hyp=0, compat=1

ENTRY(return_to_new_vcpu32)
        exit    hyp=0, compat=1
ENTRY(return_to_new_vcpu64)
        exit    hyp=0, compat=0

return_from_trap:
        msr     daifset, #2 /* Mask interrupts */

        ldr     x21, [sp, #UREGS_PC]            /* load ELR */
        ldr     w22, [sp, #UREGS_CPSR]          /* load SPSR */

        pop     x0, x1
        pop     x2, x3
        pop     x4, x5
        pop     x6, x7
        pop     x8, x9

        msr     elr_el2, x21                    /* set up the return data */
        msr     spsr_el2, x22

        pop     x10, x11
        pop     x12, x13
        pop     x14, x15
        pop     x16, x17
        pop     x18, x19
        pop     x20, x21
        pop     x22, x23
        pop     x24, x25
        pop     x26, x27
        pop     x28, x29

        ldr     lr, [sp], #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */

        eret
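
/*
 * The return state (ELR_EL2/SPSR_EL2) is written from x21/x22 before the
 * pops that finally restore the guest's own x20-x23, so the scratch use of
 * x21/x22 does not leak into the guest.
 */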

/*
 * Check for a pending virtual SError during the EL1 -> EL2 world switch.
 * The result of the check is returned in x0:
 * x0 -- non-zero: a pending virtual SError was taken.
 * x0 -- zero: no pending virtual SError was taken.
 */
check_pending_vserror:
        /*
         * Save elr_el2 so we can tell whether a pending SError exception is
         * taken while we perform this check.
         */
        mrs     x0, elr_el2

        /* Synchronize against in-flight ld/st */
        dsb     sy

        /*
         * Unmask the PSTATE asynchronous abort bit. If there is a pending
         * SError, the EL2 error exception will be taken once PSTATE.A is
         * cleared.
         */
        msr     daifclr, #4

        /*
         * This is our single-instruction exception window. A pending
         * SError is guaranteed to occur at the earliest when we unmask
         * it, and at the latest just after the ISB.
         *
         * If a pending SError occurs, execution will jump to the EL2 error
         * exception handler, and elr_el2 will be set to
         * abort_guest_exit_start or abort_guest_exit_end.
         */
        .global abort_guest_exit_start
abort_guest_exit_start:

        isb

        .global abort_guest_exit_end
abort_guest_exit_end:
        /* Mask the PSTATE asynchronous abort bit, closing the checking window. */
        msr     daifset, #4

        /*
         * Compare elr_el2 with the saved value to check whether we returned
         * here via the exception caused by a pending SError.
         */
        mrs     x1, elr_el2
        cmp     x0, x1

        /*
         * If they are not equal, the pending SError exception was taken;
         * set x0 to non-zero.
         */
        cset    x0, ne

        ret
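
/*
 * Callers invoke this through the ALTERNATIVE sequences above and test x0
 * with "cbnz x0, 1f" to skip the normal trap handler and exit immediately
 * when a vSError was pending.
 */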

/*
 * Exception vectors.
 */
        .macro  ventry  label
        .align  7
        b       \label
        .endm
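
/*
 * Each ventry is aligned to 2^7 = 128 bytes, the architectural spacing of
 * AArch64 vector table entries; the table itself is aligned to 2^11 = 2KB
 * as required for the vector base address in VBAR_EL2.
 */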

        .align  11
ENTRY(hyp_traps_vector)
        ventry  hyp_sync_invalid            /* Synchronous EL2t */
        ventry  hyp_irq_invalid             /* IRQ EL2t */
        ventry  hyp_fiq_invalid             /* FIQ EL2t */
        ventry  hyp_error_invalid           /* Error EL2t */

        ventry  hyp_sync                    /* Synchronous EL2h */
        ventry  hyp_irq                     /* IRQ EL2h */
        ventry  hyp_fiq_invalid             /* FIQ EL2h */
        ventry  hyp_error                   /* Error EL2h */

        ventry  guest_sync                  /* Synchronous 64-bit EL0/EL1 */
        ventry  guest_irq                   /* IRQ 64-bit EL0/EL1 */
        ventry  guest_fiq_invalid           /* FIQ 64-bit EL0/EL1 */
        ventry  guest_error                 /* Error 64-bit EL0/EL1 */

        ventry  guest_sync_compat           /* Synchronous 32-bit EL0/EL1 */
        ventry  guest_irq_compat            /* IRQ 32-bit EL0/EL1 */
        ventry  guest_fiq_invalid_compat    /* FIQ 32-bit EL0/EL1 */
        ventry  guest_error_compat          /* Error 32-bit EL0/EL1 */

/*
 * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
 *
 * x0 - prev
 * x1 - next
 *
 * Returns prev in x0
 */
ENTRY(__context_switch)
        add     x8, x0, #VCPU_arch_saved_context
        mov     x9, sp
        stp     x19, x20, [x8], #16         /* store callee-saved registers */
        stp     x21, x22, [x8], #16
        stp     x23, x24, [x8], #16
        stp     x25, x26, [x8], #16
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]

        add     x8, x1, #VCPU_arch_saved_context
        ldp     x19, x20, [x8], #16         /* restore callee-saved registers */
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
        ldp     x25, x26, [x8], #16
        ldp     x27, x28, [x8], #16
        ldp     x29, x9, [x8], #16
        ldr     lr, [x8]
        mov     sp, x9
        ret
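
/*
 * Only the callee-saved registers, sp and lr are switched here; sp is staged
 * through x9 because stp/ldp cannot use sp as a transfer register. Guest
 * register state is saved and restored separately by the entry/exit paths
 * above.
 */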

/*
 * Local variables:
 * mode: ASM
 * indent-tabs-mode: nil
 * End:
 */