| /arch/arm64/kvm/hyp/vhe/ |
| switch.c |
    330  u64 spsr, elr, mode;   [in kvm_hyp_handle_eret(), local]
    346  spsr = read_sysreg_el1(SYS_SPSR);   [in kvm_hyp_handle_eret()]
    347  mode = spsr & (PSR_MODE_MASK | PSR_MODE32_BIT);   [in kvm_hyp_handle_eret()]
    372  spsr = (spsr & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;   [in kvm_hyp_handle_eret()]
    374  write_sysreg_el2(spsr, SYS_SPSR);   [in kvm_hyp_handle_eret()]
    648  static void __noreturn __hyp_call_panic(u64 spsr, u64 elr, u64 par)   [in __hyp_call_panic(), argument]
    660  spsr, elr,   [in __hyp_call_panic()]
    668  u64 spsr = read_sysreg_el2(SYS_SPSR);   [in hyp_panic(), local]
    672  __hyp_call_panic(spsr, elr, par);   [in hyp_panic()]
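The kvm_hyp_handle_eret() hits above (lines 347 and 372) show the recurring SPSR pattern in this file: extract the mode field, decide what to do with it, then splice a possibly adjusted mode back into the value written to SPSR_EL2. Below is a minimal, host-compilable sketch of that pattern only; the PSR_* constants are redefined locally, and the mode policy (downgrading a nested guest's EL2 target to EL1) is a simplified illustration rather than the kernel's full logic.

/*
 * Sketch only: mirrors the mask/splice pattern from kvm_hyp_handle_eret()
 * above, with a simplified mode policy.  Constants are local copies of
 * the arch/arm64/include/asm/ptrace.h values.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSR_MODE_MASK	0x0000000fULL
#define PSR_MODE32_BIT	0x00000010ULL
#define PSR_MODE_EL1t	0x00000004ULL
#define PSR_MODE_EL1h	0x00000005ULL
#define PSR_MODE_EL2t	0x00000008ULL
#define PSR_MODE_EL2h	0x00000009ULL

/* Rebuild an SPSR image so only a validated/adjusted mode field survives. */
static bool fixup_eret_spsr(uint64_t *spsr)
{
	uint64_t mode = *spsr & (PSR_MODE_MASK | PSR_MODE32_BIT);

	switch (mode) {
	case PSR_MODE_EL2t:	/* a nested guest's "EL2" really runs at EL1 */
		mode = PSR_MODE_EL1t;
		break;
	case PSR_MODE_EL2h:
		mode = PSR_MODE_EL1h;
		break;
	case PSR_MODE_EL1t:
	case PSR_MODE_EL1h:
		break;
	default:
		return false;	/* anything else: leave it to the slow path */
	}

	*spsr = (*spsr & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
	return true;
}

int main(void)
{
	uint64_t spsr = 0x3c9;	/* DAIF masked, EL2h */

	printf("handled=%d spsr=%#llx\n", fixup_eret_spsr(&spsr),
	       (unsigned long long)spsr);
	return 0;
}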
|
| /arch/arm64/kvm/hyp/ |
| exception.c |
    290  unsigned long spsr = *vcpu_cpsr(vcpu);   [in enter_exception32(), local]
    291  bool is_thumb = (spsr & PSR_AA32_T_BIT);   [in enter_exception32()]
    302  __vcpu_write_spsr_abt(vcpu, host_spsr_to_spsr32(spsr));   [in enter_exception32()]
    307  __vcpu_write_spsr_und(vcpu, host_spsr_to_spsr32(spsr));   [in enter_exception32()]
|
| /arch/arm64/include/asm/ |
| kvm_emulate.h |
    289  static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)   [in host_spsr_to_spsr32(), argument]
    292  unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);   [in host_spsr_to_spsr32()]
    294  spsr &= ~overlap;   [in host_spsr_to_spsr32()]
    296  spsr |= dit << 21;   [in host_spsr_to_spsr32()]
    298  return spsr;   [in host_spsr_to_spsr32()]
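From the fragments above, host_spsr_to_spsr32() relocates the DIT flag from its AArch64 PSTATE position (bit 24) to its AArch32 SPSR position (bit 21), clearing both overlapping bits first. Below is a self-contained sketch of that shuffle; the constants are local stand-ins for the kernel's definitions, not included from the real headers.

/*
 * Self-contained sketch of the bit shuffle shown above in
 * host_spsr_to_spsr32(): DIT sits at PSTATE bit 24 in the AArch64 (host)
 * layout but at bit 21 in the AArch32 SPSR layout, so both positions are
 * cleared and DIT is re-inserted at bit 21.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define PSR_AA32_DIT_BIT	BIT(24)	/* DIT as seen in the AArch64 view */
#define AA32_SPSR_DIT_SHIFT	21	/* DIT position in the AArch32 SPSR */

static unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;
	spsr |= dit << AA32_SPSR_DIT_SHIFT;

	return spsr;
}

int main(void)
{
	unsigned long spsr = BIT(24) | 0x1d3;	/* DIT plus some mode/DAIF bits */

	printf("%#lx -> %#lx\n", spsr, host_spsr_to_spsr32(spsr));
	return 0;
}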
|
| kvm_asm.h |
    268  u64 spsr, elr; \
    280  : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
    301  void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
|
| kvm_hyp.h |
    124  void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
|
| /arch/arm64/kernel/ |
| signal32.c |
    323  compat_ulong_t spsr = regs->pstate & ~(PSR_f | PSR_AA32_E_BIT);   [in compat_setup_return(), local]
    330  spsr |= PSR_AA32_T_BIT;   [in compat_setup_return()]
    332  spsr &= ~PSR_AA32_T_BIT;   [in compat_setup_return()]
    335  spsr &= ~PSR_AA32_IT_MASK;   [in compat_setup_return()]
    338  spsr |= PSR_AA32_ENDSTATE;   [in compat_setup_return()]
    357  regs->pstate = spsr;   [in compat_setup_return()]
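The compat_setup_return() hits trace how the PSTATE for a 32-bit signal handler is assembled: condition flags and the endianness bit are cleared up front, the Thumb bit follows bit 0 of the handler address, any pending IT state is wiped, and the kernel's default AArch32 endianness is re-applied before the result lands back in regs->pstate. Below is a userspace-compilable sketch of that sequence; handler_entry_pstate() is a made-up name, and the PSR_AA32_* values are copied in here as assumptions rather than pulled from the real headers.

/*
 * Sketch of the PSTATE fix-up visible above in compat_setup_return().
 * Treat the constants as assumptions of this example.
 */
#include <stdint.h>
#include <stdio.h>

#define PSR_f			0xff000000u	/* condition flags */
#define PSR_AA32_T_BIT		0x00000020u	/* Thumb execution state */
#define PSR_AA32_E_BIT		0x00000200u	/* data endianness */
#define PSR_AA32_IT_MASK	0x0600fc00u	/* IT[7:0] state bits */
#define PSR_AA32_ENDSTATE	0		/* little-endian kernel assumed */

static uint32_t handler_entry_pstate(uint32_t pstate, uint32_t handler)
{
	uint32_t spsr = pstate & ~(PSR_f | PSR_AA32_E_BIT);

	if (handler & 1)		/* Thumb entry point */
		spsr |= PSR_AA32_T_BIT;
	else
		spsr &= ~PSR_AA32_T_BIT;

	spsr &= ~PSR_AA32_IT_MASK;	/* never enter a handler mid-IT block */
	spsr |= PSR_AA32_ENDSTATE;

	return spsr;
}

int main(void)
{
	printf("ARM handler:   %#x\n", handler_entry_pstate(0x600f0010, 0x8000));
	printf("Thumb handler: %#x\n", handler_entry_pstate(0x600f0010, 0x8001));
	return 0;
}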
|
| /arch/arm64/kvm/ |
| guest.c |
    88   case KVM_REG_ARM_CORE_REG(spsr[0]) ...   [in core_reg_size_from_offset()]
    89   KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):   [in core_reg_size_from_offset()]
    154  case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):   [in core_reg_addr()]
    157  case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):   [in core_reg_addr()]
    160  case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):   [in core_reg_addr()]
    163  case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):   [in core_reg_addr()]
    166  case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):   [in core_reg_addr()]
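core_reg_size_from_offset() and core_reg_addr() above are the kernel side of the KVM "core register" interface: each kvm_regs.spsr[] slot is addressable from userspace through KVM_GET_ONE_REG/KVM_SET_ONE_REG. Here is a sketch of the userspace end, assuming an arm64 host with the kernel UAPI headers installed; KVM_REG_ARM_CORE_REG() is redefined locally (as the KVM selftests also do) because it is not part of the exported headers.

/*
 * Sketch: read the guest's banked SPSR_EL1 from a vCPU file descriptor.
 * The register ID encodes the offset of spsr[KVM_SPSR_EL1] within
 * struct kvm_regs in 32-bit word units, which is what the case labels
 * in core_reg_addr() match on.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

#define KVM_REG_ARM_CORE_REG(name) \
	(offsetof(struct kvm_regs, name) / sizeof(__u32))

#define ARM64_CORE_REG(name) \
	(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | \
	 KVM_REG_ARM_CORE_REG(name))

static int get_spsr_el1(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = ARM64_CORE_REG(spsr[KVM_SPSR_EL1]),
		.addr = (uint64_t)(uintptr_t)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}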
|
| handle_exit.c |
    519  void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,   [in nvhe_hyp_panic_handler(), argument]
    525  u64 mode = spsr & PSR_MODE_MASK;   [in nvhe_hyp_panic_handler()]
    571  spsr, elr_virt, esr, far, hpfar, par, vcpu);   [in nvhe_hyp_panic_handler()]
|
| emulate-nested.c |
    2637  u64 mode = spsr & PSR_MODE_MASK;   [in kvm_check_illegal_exception_return()]
    2648  (spsr & PSR_MODE32_BIT) ||   [in kvm_check_illegal_exception_return()]
    2657  spsr = *vcpu_cpsr(vcpu);   [in kvm_check_illegal_exception_return()]
    2659  spsr &= (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT |   [in kvm_check_illegal_exception_return()]
    2662  spsr |= PSR_IL_BIT;   [in kvm_check_illegal_exception_return()]
    2665  return spsr;   [in kvm_check_illegal_exception_return()]
    2670  u64 spsr, elr, esr;   [in kvm_emulate_nested_eret(), local]
    2672  spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);   [in kvm_emulate_nested_eret()]
    2673  spsr = kvm_check_illegal_exception_return(vcpu, spsr);   [in kvm_emulate_nested_eret()]
    2703  trace_kvm_nested_eret(vcpu, elr, spsr);   [in kvm_emulate_nested_eret()]
    [all …]
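The kvm_check_illegal_exception_return() fragments show the fallback for a bad ERET from a nested guest hypervisor: rather than restoring the saved SPSR, the emulation keeps only the DAIF and mode bits of the current PSTATE and sets PSTATE.IL, so the guest takes an Illegal Execution state exception at the target. A minimal sketch of that fallback follows; the single legality test used here is a toy stand-in for the kernel's much longer list of conditions, and the constants are local copies.

/*
 * Sketch only: the illegal-ERET fallback pattern from
 * kvm_check_illegal_exception_return() above, with a toy legality test.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSR_MODE_MASK	0x0000000fULL
#define PSR_MODE32_BIT	0x00000010ULL
#define PSR_F_BIT	0x00000040ULL
#define PSR_I_BIT	0x00000080ULL
#define PSR_A_BIT	0x00000100ULL
#define PSR_D_BIT	0x00000200ULL
#define PSR_IL_BIT	0x00100000ULL
#define PSR_MODE_EL2h	0x00000009ULL

static uint64_t check_illegal_eret(uint64_t current_pstate, uint64_t spsr)
{
	/* Toy rule standing in for the real checks: reject AArch32 targets. */
	bool illegal = spsr & PSR_MODE32_BIT;

	if (!illegal)
		return spsr;

	/* Keep only DAIF and mode from the current PSTATE, and set IL. */
	spsr = current_pstate;
	spsr &= (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT |
		 PSR_MODE_MASK | PSR_MODE32_BIT);
	spsr |= PSR_IL_BIT;

	return spsr;
}

int main(void)
{
	uint64_t cur = 0x3c0 | PSR_MODE_EL2h;	/* DAIF masked, EL2h */

	printf("legal:   %#llx\n", (unsigned long long)check_illegal_eret(cur, 0x5));
	printf("illegal: %#llx\n", (unsigned long long)check_illegal_eret(cur, 0x10));
	return 0;
}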
|
| /arch/arm64/kvm/hyp/nvhe/ |
| switch.c |
    352  u64 spsr = read_sysreg_el2(SYS_SPSR);   [in hyp_panic(), local]
    372  __hyp_do_panic(host_ctxt, spsr, elr, par);   [in hyp_panic()]
|
| /arch/arm/kernel/ |
| entry-armv.S |
    327  mrs r2, spsr @ Save spsr_abt, abort is now safe
    856  * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
    880  mrs lr, spsr
    881  str lr, [sp, #8] @ save spsr
|
| entry-common.S |
    182  mrs saved_psr, spsr @ called from non-FIQ mode, so ok.
|
| /arch/arm64/include/uapi/asm/ |
| kvm.h |
    52   __u64 spsr[KVM_NR_SPSR];   [member]
|
| /arch/arm/boot/compressed/ |
| head.S |
    488  mrs r0, spsr
    644  mrs r0, spsr @ Get saved CPU boot mode
|