| /arch/s390/kvm/ |
| intercept.c |
      27: struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;   in kvm_s390_get_ilen()  (local)
      30: switch (vcpu->arch.sie_block->icptcode) {   in kvm_s390_get_ilen()
      39: if (sie_block->icptstatus & 1) {   in kvm_s390_get_ilen()
     110: vcpu->arch.sie_block->ipa,   in handle_instruction()
     111: vcpu->arch.sie_block->ipb);   in handle_instruction()
     146: .code = vcpu->arch.sie_block->iprcc,   in inject_prog_on_prog_intercept()
     265: if (vcpu->arch.sie_block->iprcc == 0)   in handle_prog()
     304: u16 eic = vcpu->arch.sie_block->eic;   in handle_external_interrupt()
     312: newpsw = vcpu->arch.sie_block->gpsw;   in handle_external_interrupt()
     469: vcpu->arch.sie_block->ipb);   in handle_operexc()
    [all …]
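The kvm_s390_get_ilen() hits above read the intercepted instruction's length out of the SIE block; as the handle_instruction() and handle_io_inst() hits suggest, sie_block->ipa carries the first halfword of the instruction text. A minimal standalone sketch of just the underlying length rule (the top two opcode bits: 00 = 2 bytes, 01/10 = 4 bytes, 11 = 6 bytes), not the kernel helper itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Length of an s390 instruction from its first opcode byte: the two
     * high-order bits give the length code (00 -> 2, 01/10 -> 4, 11 -> 6). */
    static unsigned int insn_len_from_opcode(uint8_t opcode)
    {
            switch (opcode >> 6) {
            case 0:
                    return 2;
            case 3:
                    return 6;
            default:
                    return 4;
            }
    }

    int main(void)
    {
            uint16_t ipa = 0xb236;   /* hypothetical ipa value, borrowed from the priv.c hits */

            printf("ilen = %u\n", insn_len_from_opcode(ipa >> 8));   /* prints "ilen = 4" */
            return 0;
    }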
|
| guestdbg.c |
      62: u64 *cr9 = &vcpu->arch.sie_block->gcr[9];   in enable_all_hw_bp()
      63: u64 *cr10 = &vcpu->arch.sie_block->gcr[10];   in enable_all_hw_bp()
      64: u64 *cr11 = &vcpu->arch.sie_block->gcr[11];   in enable_all_hw_bp()
     102: u64 *cr9 = &vcpu->arch.sie_block->gcr[9];   in enable_all_hw_wp()
     103: u64 *cr10 = &vcpu->arch.sie_block->gcr[10];   in enable_all_hw_wp()
     158: vcpu->arch.sie_block->gcr[10] = 0;   in kvm_s390_patch_guest_per_regs()
     159: vcpu->arch.sie_block->gcr[11] = -1UL;   in kvm_s390_patch_guest_per_regs()
     444: *addr = vcpu->arch.sie_block->peraddr;   in per_fetched_addr()
     541: u64 cr9 = vcpu->arch.sie_block->gcr[9];   in filter_guest_per_event()
     591: vcpu->arch.sie_block->peraddr))   in kvm_s390_handle_per_event()
    [all …]
|
| priv.c |
      38: vcpu->arch.sie_block->ecb3 |= ECB3_RI;   in handle_ri()
      47: if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)   in kvm_s390_handle_aa()
      64: vcpu->arch.sie_block->ecb |= ECB_GS;   in handle_gs()
      65: vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;   in handle_gs()
      75: int code = vcpu->arch.sie_block->ipb & 0xff;   in kvm_s390_handle_e3()
     591: if (vcpu->arch.sie_block->ipa == 0xb236)   in handle_io_inst()
     593: if (vcpu->arch.sie_block->ipa == 0xb235)   in handle_io_inst()
     794: vcpu->arch.sie_block->gpsw = new_psw;   in handle_lpswe()
     823: vcpu->arch.sie_block->gpsw = new_psw;   in handle_lpswey()
    1298: vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;   in handle_essa()
    [all …]
|
| kvm-s390.h |
      77: d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
     134: u32 base2 = vcpu->arch.sie_block->ipb >> 28;   in kvm_s390_get_base_disp_s()
     145: u32 base1 = vcpu->arch.sie_block->ipb >> 28;   in kvm_s390_get_base_disp_siy()
     165: u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;   in kvm_s390_get_base_disp_sse()
     186: u32 base2 = vcpu->arch.sie_block->ipb >> 28;   in kvm_s390_get_base_disp_rsy()
     188: ((vcpu->arch.sie_block->ipb & 0xff00) << 4);   in kvm_s390_get_base_disp_rsy()
     201: u32 base2 = vcpu->arch.sie_block->ipb >> 28;   in kvm_s390_get_base_disp_rs()
     214: vcpu->arch.sie_block->gpsw.mask |= cc << 44;   in kvm_s390_set_psw_cc()
     401: struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;   in kvm_s390_rewind_psw()  (local)
     403: sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);   in kvm_s390_rewind_psw()
    [all …]
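The kvm_s390_get_base_disp_*() hits above all decode instruction operands out of sie_block->ipb, with the base register in the top nibble (the ipb >> 28 lines). A self-contained illustration of the S-format case, assuming ipb packs the second halfword of the instruction (B2 then D2) in its upper 16 bits, which is what the hits above imply; the sample ipb value is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the base register and 12-bit displacement of an S-format
     * operand from a 32-bit ipb image: B2 in the top nibble, D2 in the
     * next 12 bits. */
    static void decode_s_operand(uint32_t ipb, unsigned int *base2, unsigned int *disp2)
    {
            *base2 = ipb >> 28;
            *disp2 = (ipb >> 16) & 0x0fff;
    }

    int main(void)
    {
            unsigned int base, disp;

            decode_s_operand(0x5f380000, &base, &disp);   /* hypothetical ipb */
            printf("B2=r%u D2=0x%03x\n", base, disp);     /* B2=r5 D2=0xf38 */
            return 0;
    }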
|
| kvm-s390.c |
    3861: vcpu->arch.sie_block->cbrlo = 0;   in kvm_s390_vcpu_unsetup_cmma()
    3933: if (vcpu->arch.sie_block->gd) {   in kvm_s390_vcpu_setup()
    3988: vcpu->arch.sie_block = &sie_page->sie_block;   in kvm_arch_vcpu_create()
    3992: vcpu->arch.sie_block->mso = 0;   in kvm_arch_vcpu_create()
    4281: vcpu->arch.sie_block->ckc = 0;   in kvm_arch_vcpu_ioctl_initial_reset()
    4282: memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));   in kvm_arch_vcpu_ioctl_initial_reset()
    4305: vcpu->arch.sie_block->gbea = 1;   in kvm_arch_vcpu_ioctl_initial_reset()
    4306: vcpu->arch.sie_block->pp = 0;   in kvm_arch_vcpu_ioctl_initial_reset()
    4308: vcpu->arch.sie_block->todpr = 0;   in kvm_arch_vcpu_ioctl_initial_reset()
    4810: vcpu->arch.sie_block->icptcode = 0;   in vcpu_pre_run()
    [all …]
|
| interrupt.c |
     394: if (!(vcpu->arch.sie_block->gcr[14] &   in deliverable_irqs()
     424: vcpu->arch.sie_block->lctl = 0x0000;   in __reset_intercept_indicators()
     441: vcpu->arch.sie_block->lctl |= LCTL_CR6;   in set_intercept_indicators_io()
     451: vcpu->arch.sie_block->lctl |= LCTL_CR0;   in set_intercept_indicators_ext()
    1069: &vcpu->arch.sie_block->gpsw,   in __deliver_pfault_done()
    1072: &vcpu->arch.sie_block->gpsw,   in __deliver_pfault_done()
    1113: &vcpu->arch.sie_block->gpsw,   in __deliver_virtio()
    1116: &vcpu->arch.sie_block->gpsw,   in __deliver_virtio()
    1145: &vcpu->arch.sie_block->gpsw,   in __do_deliver_io()
    1148: &vcpu->arch.sie_block->gpsw,   in __do_deliver_io()
    [all …]
|
| diag.c |
      43: start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];   in diag_release_pages()
      44: end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;   in diag_release_pages()
      92: u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;   in __diag_page_ref_service()
      93: u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);   in __diag_page_ref_service()
     192: tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];   in __diag_time_slice_end_directed()
     234: unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;   in __diag_ipl_functions()
     306: if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in kvm_s390_handle_diag()
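All of the diag.c hits pull register numbers out of sie_block->ipa: the low byte of the intercepted DIAGNOSE names two general registers (Rx in its high nibble, Ry in its low nibble), which then index vcpu->run->s.regs.gprs[]. A small sketch of just that decode, with a made-up ipa value:

    #include <stdint.h>
    #include <stdio.h>

    /* Split the low byte of ipa into the two 4-bit register fields used
     * by the DIAGNOSE handlers above. */
    static void diag_regs(uint16_t ipa, unsigned int *rx, unsigned int *ry)
    {
            *rx = (ipa & 0xf0) >> 4;
            *ry = ipa & 0x0f;
    }

    int main(void)
    {
            unsigned int rx, ry;

            diag_regs(0x8324, &rx, &ry);         /* hypothetical ipa */
            printf("rx=r%u ry=r%u\n", rx, ry);   /* rx=r2 ry=r4 */
            return 0;
    }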
|
| trace-s390.h |
      44: struct kvm_s390_sie_block *sie_block),
      45: TP_ARGS(id, vcpu, sie_block),
      50: __field(struct kvm_s390_sie_block *, sie_block)
      56: __entry->sie_block = sie_block;
      60: __entry->id, __entry->vcpu, __entry->sie_block)
|
| gaccess.h |
      89: return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);   in kvm_s390_logical_to_effective()
     281: u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;   in write_guest()
     325: u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;   in read_guest()
     348: u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;   in read_guest_instr()
|
| sigp.c |
      76: psw = &dst_vcpu->arch.sie_block->gpsw;   in __sigp_conditional_emergency()
      77: p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */   in __sigp_conditional_emergency()
      78: s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */   in __sigp_conditional_emergency()
     427: int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;   in kvm_s390_handle_sigp()
     428: int r3 = vcpu->arch.sie_block->ipa & 0x000f;   in kvm_s390_handle_sigp()
     435: if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)   in kvm_s390_handle_sigp()
     478: int r3 = vcpu->arch.sie_block->ipa & 0x000f;   in kvm_s390_handle_sigp_pei()
|
| gaccess.c |
     244: asce->val = vcpu->arch.sie_block->gcr[1];   in ar_translation()
     247: asce->val = vcpu->arch.sie_block->gcr[7];   in ar_translation()
     255: ald_addr = vcpu->arch.sie_block->gcr[5];   in ar_translation()
     257: ald_addr = vcpu->arch.sie_block->gcr[2];   in ar_translation()
     418: asce->val = vcpu->arch.sie_block->gcr[1];   in get_vcpu_asce()
     421: asce->val = vcpu->arch.sie_block->gcr[7];   in get_vcpu_asce()
     635: psw_t *psw = &vcpu->arch.sie_block->gpsw;   in low_address_protection_enabled()
     676: psw_t *psw = &vcpu->arch.sie_block->gpsw;   in fetch_prot_override_applicable()
     681: override = vcpu->arch.sie_block->gcr[0];   in fetch_prot_override_applicable()
     786: psw_t *psw = &vcpu->arch.sie_block->gpsw;   in guest_range_to_gpas()
    [all …]
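The ar_translation() and get_vcpu_asce() hits show the same pattern twice: the ASCE used for guest address translation comes from a guest control register chosen by the current address-space mode, with gcr[1] supplying the primary ASCE and gcr[7] the secondary one (the home-space ASCE lives in gcr[13]; that case is not shown above). A condensed sketch of the selection; the enum and function name are made up for illustration:

    #include <stdint.h>

    /* Pick the ASCE for the current address space.  Only the register
     * assignments (CR1 primary, CR7 secondary, CR13 home) mirror the
     * hits above; everything else is demo scaffolding. */
    enum demo_as_mode { DEMO_AS_PRIMARY, DEMO_AS_SECONDARY, DEMO_AS_HOME };

    uint64_t demo_pick_asce(const uint64_t gcr[16], enum demo_as_mode mode)
    {
            switch (mode) {
            case DEMO_AS_SECONDARY:
                    return gcr[7];
            case DEMO_AS_HOME:
                    return gcr[13];
            case DEMO_AS_PRIMARY:
            default:
                    return gcr[1];
            }
    }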
|
| pv.c |
     144: free_page((unsigned long)sida_addr(vcpu->arch.sie_block));   in kvm_s390_pv_destroy_cpu()
     145: vcpu->arch.sie_block->pv_handle_cpu = 0;   in kvm_s390_pv_destroy_cpu()
     146: vcpu->arch.sie_block->pv_handle_config = 0;   in kvm_s390_pv_destroy_cpu()
     148: vcpu->arch.sie_block->sdf = 0;   in kvm_s390_pv_destroy_cpu()
     154: vcpu->arch.sie_block->gbea = 1;   in kvm_s390_pv_destroy_cpu()
     179: uvcb.num = vcpu->arch.sie_block->icpua;   in kvm_s390_pv_create_cpu()
     180: uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);   in kvm_s390_pv_create_cpu()
     190: vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);   in kvm_s390_pv_create_cpu()
     209: vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;   in kvm_s390_pv_create_cpu()
     210: vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);   in kvm_s390_pv_create_cpu()
    [all …]
|
| vsie.c |
     342: int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;   in shadow_crycb()
     347: apie_h = vcpu->arch.sie_block->eca & ECA_APIE;   in shadow_crycb()
     370: ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &   in shadow_crycb()
     372: ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd &   in shadow_crycb()
    1076: cr0.val = vcpu->arch.sie_block->gcr[0];   in vsie_handle_mvpg()
    1170: vcpu->arch.sie_block->fpf & FPF_BPBC)   in do_vsie_run()
    1179: vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;   in do_vsie_run()
    1190: vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;   in do_vsie_run()
    1247: asce = vcpu->arch.sie_block->gcr[1];   in acquire_gmap_shadow()
    1248: cr0.val = vcpu->arch.sie_block->gcr[0];   in acquire_gmap_shadow()
    [all …]
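The hits at lines 370/372 keep only the ECB3/ECD bits that both the guest-2 request (scb_o) and the real SIE block have set, i.e. a plain bitwise intersection against a whitelist of nestable features. A toy restatement; the mask value below is a placeholder, not the kernel's:

    #include <stdint.h>

    /* Only bits requested by guest 2 *and* enabled on the real SIE block
     * *and* whitelisted for nesting survive into the shadow block. */
    #define DEMO_ALLOWED_ECB3 0x03u   /* placeholder whitelist */

    uint8_t demo_shadow_ecb3(uint8_t guest2_ecb3, uint8_t host_ecb3)
    {
            return guest2_ecb3 & host_ecb3 & DEMO_ALLOWED_ECB3;
    }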
|
| trace.h |
      27: __entry->pswmask = vcpu->arch.sie_block->gpsw.mask; \
      28: __entry->pswaddr = vcpu->arch.sie_block->gpsw.addr; \
|
| /arch/s390/include/asm/ |
| kvm_host_types.h |
     109: #define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK)   (argument)
     110: #define sida_size(sie_block) \   (argument)
     111: ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
     339: struct kvm_s390_sie_block sie_block;   (member)
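The two macros at lines 109-111 show how a single field, sidad, packs both pieces of SIDA information: the page-aligned origin in its upper bits and the size, stored as number-of-pages minus one, in the bits covered by SIDAD_SIZE_MASK. Restated as plain functions with stand-in constants (the kernel's PAGE_MASK and SIDAD_SIZE_MASK are the authoritative values, and sida_addr() additionally runs the result through phys_to_virt()):

    #include <stdio.h>

    /* Stand-in constants for the demo only. */
    #define DEMO_PAGE_SIZE       4096UL
    #define DEMO_PAGE_MASK       (~(DEMO_PAGE_SIZE - 1))
    #define DEMO_SIDAD_SIZE_MASK 0xffUL

    static unsigned long demo_sida_origin(unsigned long sidad)
    {
            return sidad & DEMO_PAGE_MASK;       /* page-aligned SIDA address */
    }

    static unsigned long demo_sida_size(unsigned long sidad)
    {
            return ((sidad & DEMO_SIDAD_SIZE_MASK) + 1) * DEMO_PAGE_SIZE;
    }

    int main(void)
    {
            unsigned long sidad = 0x12345000UL | 0x3;   /* hypothetical value */

            printf("origin=0x%lx size=%lu bytes\n",
                   demo_sida_origin(sidad), demo_sida_size(sidad));   /* 4 pages */
            return 0;
    }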
|
| kvm_host.h |
     415: struct kvm_s390_sie_block *sie_block;   (member)
     706: int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa,
     709: static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa, unsigned long gasce)   in sie64a()  (argument)
     711: return __sie64a(virt_to_phys(sie_block), sie_block, rsa, gasce);   in sie64a()
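The sie64a() inline at lines 709-711 is a thin convenience wrapper: it derives the physical address of the SIE block with virt_to_phys() and forwards both forms to __sie64a(), so a caller can never pass a mismatched pair. The same shape in a self-contained, non-kernel form, with the address translation and the callee stubbed out for the demo:

    #include <stdint.h>

    typedef uint64_t demo_phys_addr_t;

    struct demo_sie_block { int dummy; };

    /* Stub standing in for virt_to_phys(); identity-mapped for the demo. */
    static demo_phys_addr_t demo_virt_to_phys(void *p)
    {
            return (demo_phys_addr_t)(uintptr_t)p;
    }

    /* Stub standing in for the low-level entry routine. */
    static int demo_sie_entry(demo_phys_addr_t phys, struct demo_sie_block *virt,
                              uint64_t *rsa, unsigned long gasce)
    {
            (void)phys; (void)virt; (void)rsa; (void)gasce;
            return 0;                            /* the real code enters SIE here */
    }

    /* Mirror of the wrapper: one argument is computed from the other. */
    static inline int demo_sie64a(struct demo_sie_block *sb, uint64_t *rsa,
                                  unsigned long gasce)
    {
            return demo_sie_entry(demo_virt_to_phys(sb), sb, rsa, gasce);
    }

    int main(void)
    {
            struct demo_sie_block sb = { 0 };
            uint64_t rsa = 0;

            return demo_sie64a(&sb, &rsa, 0);
    }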
|
| /arch/s390/kernel/ |
| perf_event.c |
      28: static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)   in sie_block()  (function)
      51: return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;   in guest_is_user_mode()
      56: return sie_block(regs)->gpsw.addr;   in instruction_pointer_guest()
|
| nmi.c |
     339: struct kvm_s390_sie_block *sie_block = phys_to_virt(regs->gprs[14]);   in s390_backup_mcck_info()  (local)
     341: if (sie_block == NULL)   in s390_backup_mcck_info()
     345: sie_page = container_of(sie_block, struct sie_page, sie_block);   in s390_backup_mcck_info()
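s390_backup_mcck_info() gets a bare sie_block pointer out of the machine-check register state and recovers the enclosing sie_page with container_of(). The same pointer arithmetic spelled out with offsetof(), using a made-up layout rather than the kernel's struct sie_page:

    #include <stddef.h>
    #include <stdio.h>

    /* Demo stand-ins; only the container_of()-style recovery matters here. */
    struct demo_sie_block { unsigned long sidad; };

    struct demo_sie_page {
            struct demo_sie_block sie_block;   /* embedded member */
            unsigned char rest[128];           /* placeholder for the remainder */
    };

    static struct demo_sie_page *demo_page_from_block(struct demo_sie_block *sb)
    {
            return (struct demo_sie_page *)((char *)sb -
                            offsetof(struct demo_sie_page, sie_block));
    }

    int main(void)
    {
            struct demo_sie_page page;

            /* Recover the enclosing page from a pointer to the embedded member. */
            printf("%d\n", demo_page_from_block(&page.sie_block) == &page);   /* 1 */
            return 0;
    }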
|