#include <asm/asm_defns.h>
#include <asm/regs.h>
#include <asm/alternative.h>
#include <public/xen.h>

#define SAVE_ONE_BANKED(reg)    mrs r11, reg; str r11, [sp, #UREGS_##reg]
#define RESTORE_ONE_BANKED(reg) ldr r11, [sp, #UREGS_##reg]; msr reg, r11

#define SAVE_BANKED(mode) \
        SAVE_ONE_BANKED(SP_##mode) ; SAVE_ONE_BANKED(LR_##mode) ; SAVE_ONE_BANKED(SPSR_##mode)

#define RESTORE_BANKED(mode) \
        RESTORE_ONE_BANKED(SP_##mode) ; RESTORE_ONE_BANKED(LR_##mode) ; RESTORE_ONE_BANKED(SPSR_##mode)

#define SAVE_ALL                                                        \
        sub sp, #(UREGS_SP_usr - UREGS_sp); /* SP, LR, SPSR, PC */      \
        push {r0-r12}; /* Save R0-R12 */                                \
                                                                        \
        mrs r11, ELR_hyp;               /* ELR_hyp is return address. */\
        str r11, [sp, #UREGS_pc];                                       \
                                                                        \
        str lr, [sp, #UREGS_lr];                                        \
                                                                        \
        add r11, sp, #UREGS_kernel_sizeof+4;                            \
        str r11, [sp, #UREGS_sp];                                       \
                                                                        \
        mrc CP32(r11, HSR);             /* Save exception syndrome */   \
        str r11, [sp, #UREGS_hsr];                                      \
                                                                        \
        mrs r11, SPSR_hyp;                                              \
        str r11, [sp, #UREGS_cpsr];                                     \
        and r11, #PSR_MODE_MASK;                                        \
        cmp r11, #PSR_MODE_HYP;                                         \
        blne save_guest_regs

save_guest_regs:
        ldr r11, =0xffffffff  /* Clobber SP which is only valid for hypervisor frames. */
        str r11, [sp, #UREGS_sp]
        SAVE_ONE_BANKED(SP_usr)
        /* LR_usr is the same physical register as lr and is saved in SAVE_ALL */
        SAVE_BANKED(svc)
        SAVE_BANKED(abt)
        SAVE_BANKED(und)
        SAVE_BANKED(irq)
        SAVE_BANKED(fiq)
        SAVE_ONE_BANKED(R8_fiq); SAVE_ONE_BANKED(R9_fiq); SAVE_ONE_BANKED(R10_fiq)
        SAVE_ONE_BANKED(R11_fiq); SAVE_ONE_BANKED(R12_fiq);

        /*
         * If the SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT cpu feature has been
         * set, the check for pending SErrors is skipped.
         */
        ALTERNATIVE("nop",
                    "b skip_check",
                    SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT)
        /*
         * Check for a virtual abort that may have become pending during the
         * Guest -> HYP world switch.
         *
         * Save ELR_hyp so we can tell whether a pending virtual abort is
         * delivered while we are handling this trap exception.
         */
        mrs r1, ELR_hyp

        /*
         * Force loads and stores to complete before unmasking asynchronous
         * aborts and forcing the delivery of the exception.
         */
        dsb sy

        /*
         * Unmask the asynchronous abort bit. If an asynchronous abort is
         * pending, the data_abort exception will be taken once the A bit is
         * cleared.
         */
        cpsie a

        /*
         * This is our single instruction exception window. A pending
         * asynchronous abort is guaranteed to occur at the earliest when we
         * unmask it, and at the latest just after the ISB.
         *
         * If a pending abort is taken, execution will jump to the data_abort
         * exception handler, and ELR_hyp will be set to
         * abort_guest_exit_start or abort_guest_exit_end.
         */
        .global abort_guest_exit_start
abort_guest_exit_start:

        isb

        .global abort_guest_exit_end
abort_guest_exit_end:
        /* Mask the CPSR asynchronous abort bit to close the checking window. */
        cpsid a

        /*
         * Compare ELR_hyp with the saved value to check whether we are
         * returning from a valid exception caused by a pending virtual
         * abort.
         */
        mrs r2, ELR_hyp
        cmp r1, r2

        /*
         * If they are not equal, the pending virtual abort exception was
         * taken and the initial exception is no longer of any significance.
         * Exit ASAP.
         */
        bne return_from_trap

skip_check:
        mov pc, lr
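
/*
 * Illustrative sketch (not part of the build): rough shape of the exception
 * frame that SAVE_ALL and save_guest_regs fill in and that the UREGS_*
 * offsets index. The struct name, field names and ordering below are
 * assumptions for illustration only; the authoritative layout is struct
 * cpu_user_regs in Xen's C headers, with the UREGS_* constants generated at
 * build time by the asm-offsets mechanism.
 *
 *   struct frame_sketch {                    // hypothetical name
 *       uint32_t r[13];                      // r0-r12, from "push {r0-r12}"
 *       uint32_t sp, lr, pc;                 // UREGS_sp / UREGS_lr / UREGS_pc
 *       uint32_t cpsr, hsr;                  // saved SPSR_hyp and syndrome
 *       uint32_t sp_usr;                     // guest frames only from here on
 *       uint32_t sp_svc, lr_svc, spsr_svc;   // one triple per SAVE_BANKED mode
 *       uint32_t sp_abt, lr_abt, spsr_abt;
 *       uint32_t sp_und, lr_und, spsr_und;
 *       uint32_t sp_irq, lr_irq, spsr_irq;
 *       uint32_t sp_fiq, lr_fiq, spsr_fiq;
 *       uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq;
 *   };
 */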

#define DEFINE_TRAP_ENTRY(trap)                                         \
        ALIGN;                                                          \
trap_##trap:                                                            \
        SAVE_ALL;                                                       \
        cpsie i;        /* local_irq_enable */                          \
        cpsie a;        /* asynchronous abort enable */                 \
        adr lr, return_from_trap;                                       \
        mov r0, sp;                                                     \
        mov r11, sp;                                                    \
        bic sp, #7; /* Align the stack pointer (noop on guest trap) */  \
        b do_trap_##trap

#define DEFINE_TRAP_ENTRY_NOIRQ(trap)                                   \
        ALIGN;                                                          \
trap_##trap:                                                            \
        SAVE_ALL;                                                       \
        cpsie a;        /* asynchronous abort enable */                 \
        adr lr, return_from_trap;                                       \
        mov r0, sp;                                                     \
        mov r11, sp;                                                    \
        bic sp, #7; /* Align the stack pointer (noop on guest trap) */  \
        b do_trap_##trap

#define DEFINE_TRAP_ENTRY_NOABORT(trap)                                 \
        ALIGN;                                                          \
trap_##trap:                                                            \
        SAVE_ALL;                                                       \
        cpsie i;        /* local_irq_enable */                          \
        adr lr, return_from_trap;                                       \
        mov r0, sp;                                                     \
        mov r11, sp;                                                    \
        bic sp, #7; /* Align the stack pointer (noop on guest trap) */  \
        b do_trap_##trap

        .align 5
GLOBAL(hyp_traps_vector)
        .word 0                         /* 0x00 - Reset */
        b trap_undefined_instruction    /* 0x04 - Undefined Instruction */
        b trap_hypervisor_call          /* 0x08 - Hypervisor Call */
        b trap_prefetch_abort           /* 0x0c - Prefetch Abort */
        b trap_data_abort               /* 0x10 - Data Abort */
        b trap_guest_sync               /* 0x14 - Hypervisor */
        b trap_irq                      /* 0x18 - IRQ */
        b trap_fiq                      /* 0x1c - FIQ */

DEFINE_TRAP_ENTRY(undefined_instruction)
DEFINE_TRAP_ENTRY(hypervisor_call)
DEFINE_TRAP_ENTRY(prefetch_abort)
DEFINE_TRAP_ENTRY(guest_sync)
DEFINE_TRAP_ENTRY_NOIRQ(irq)
DEFINE_TRAP_ENTRY_NOIRQ(fiq)
DEFINE_TRAP_ENTRY_NOABORT(data_abort)

return_from_trap:
        mov sp, r11
ENTRY(return_to_new_vcpu32)
        ldr r11, [sp, #UREGS_cpsr]
        and r11, #PSR_MODE_MASK
        cmp r11, #PSR_MODE_HYP
        beq return_to_hypervisor
        /* Fall thru */
return_to_guest:
        mov r11, sp
        bic sp, #7 /* Align the stack pointer */
        bl leave_hypervisor_tail /* Disables interrupts on return */
        mov sp, r11
        RESTORE_ONE_BANKED(SP_usr)
        /* LR_usr is the same physical register as lr and is restored below */
        RESTORE_BANKED(svc)
        RESTORE_BANKED(abt)
        RESTORE_BANKED(und)
        RESTORE_BANKED(irq)
        RESTORE_BANKED(fiq)
        RESTORE_ONE_BANKED(R8_fiq); RESTORE_ONE_BANKED(R9_fiq); RESTORE_ONE_BANKED(R10_fiq)
        RESTORE_ONE_BANKED(R11_fiq); RESTORE_ONE_BANKED(R12_fiq);
        /* Fall thru */
return_to_hypervisor:
        cpsid i
        ldr lr, [sp, #UREGS_lr]
        ldr r11, [sp, #UREGS_pc]
        msr ELR_hyp, r11
        ldr r11, [sp, #UREGS_cpsr]
        msr SPSR_hyp, r11
        pop {r0-r12}
        add sp, #(UREGS_SP_usr - UREGS_sp); /* SP, LR, SPSR, PC */
        clrex
        eret

/*
 * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
 *
 * r0 - prev
 * r1 - next
 *
 * Returns prev in r0
 */
ENTRY(__context_switch)
        add     ip, r0, #VCPU_arch_saved_context
        stmia   ip!, {r4 - sl, fp, sp, lr}      /* Save register state */

        add     r4, r1, #VCPU_arch_saved_context
        ldmia   r4, {r4 - sl, fp, sp, pc}       /* Load registers and return */

/*
 * Local variables:
 * mode: ASM
 * indent-tabs-mode: nil
 * End:
 */
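
/*
 * Illustrative sketch (not part of the build): how the C side is expected to
 * use __context_switch above. The caller name here is an assumption for
 * illustration only; the real caller lives in Xen's C domain/scheduling
 * code. Only the AAPCS callee-saved registers (r4-r11, sp, lr) are stored in
 * saved_context, because the compiler already treats r0-r3 and r12 as
 * clobbered across any function call.
 *
 *   struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next);
 *
 *   static void switch_vcpus(struct vcpu *prev, struct vcpu *next)
 *   {
 *       // Execution continues on next's saved stack; when prev is later
 *       // scheduled again it resumes here, with r0 (the return value)
 *       // holding the vcpu that ran immediately before it.
 *       prev = __context_switch(prev, next);
 *   }
 */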