Lines matching refs: vcpu. The hits below appear to come from the s390 KVM privileged-instruction handlers (arch/s390/kvm/priv.c in the Linux kernel); each entry gives the source line number, the matching line, and the enclosing function.

33 static int handle_ri(struct kvm_vcpu *vcpu)  in handle_ri()  argument
35 vcpu->stat.instruction_ri++; in handle_ri()
37 if (test_kvm_facility(vcpu->kvm, 64)) { in handle_ri()
38 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)"); in handle_ri()
39 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in handle_ri()
40 kvm_s390_retry_instr(vcpu); in handle_ri()
43 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_ri()
46 int kvm_s390_handle_aa(struct kvm_vcpu *vcpu) in kvm_s390_handle_aa() argument
48 if ((vcpu->arch.sie_block->ipa & 0xf) <= 4) in kvm_s390_handle_aa()
49 return handle_ri(vcpu); in kvm_s390_handle_aa()
54 static int handle_gs(struct kvm_vcpu *vcpu) in handle_gs() argument
56 vcpu->stat.instruction_gs++; in handle_gs()
58 if (test_kvm_facility(vcpu->kvm, 133)) { in handle_gs()
59 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)"); in handle_gs()
62 current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb; in handle_gs()
65 vcpu->arch.sie_block->ecb |= ECB_GS; in handle_gs()
66 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in handle_gs()
67 vcpu->arch.gs_enabled = 1; in handle_gs()
68 kvm_s390_retry_instr(vcpu); in handle_gs()
71 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_gs()
74 int kvm_s390_handle_e3(struct kvm_vcpu *vcpu) in kvm_s390_handle_e3() argument
76 int code = vcpu->arch.sie_block->ipb & 0xff; in kvm_s390_handle_e3()
79 return handle_gs(vcpu); in kvm_s390_handle_e3()
84 static int handle_set_clock(struct kvm_vcpu *vcpu) in handle_set_clock() argument
91 vcpu->stat.instruction_sck++; in handle_set_clock()
93 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_clock()
94 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_set_clock()
96 op2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_set_clock()
98 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_set_clock()
99 rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod)); in handle_set_clock()
101 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_set_clock()
103 VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); in handle_set_clock()
114 if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) { in handle_set_clock()
115 kvm_s390_retry_instr(vcpu); in handle_set_clock()
119 kvm_s390_set_psw_cc(vcpu, 0); in handle_set_clock()
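
As an aside for readers without the full source at hand, here is a minimal standalone sketch of how the S-format second-operand address used by handlers like handle_set_clock() is formed. The helper name s_format_operand2() and the exact IPB field layout are assumptions for illustration; this is not kernel code, though the kvm_s390_get_base_disp_s() calls in the hits resolve the operand in this fashion from the SIE block.

        #include <stdint.h>

        /*
         * Illustrative only: second-operand address of an S-format instruction,
         * assuming the B2 base-register field in the top nibble of IPB and the
         * 12-bit D2 displacement in the following bits.
         */
        uint64_t s_format_operand2(uint32_t ipb, const uint64_t gprs[16])
        {
                uint32_t base2 = ipb >> 28;                 /* B2: base register number */
                uint32_t disp2 = (ipb & 0x0fff0000) >> 16;  /* D2: 12-bit displacement  */

                /* Base register 0 means "no base" in s390 address generation. */
                return (base2 ? gprs[base2] : 0) + disp2;
        }
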
123 static int handle_set_prefix(struct kvm_vcpu *vcpu) in handle_set_prefix() argument
130 vcpu->stat.instruction_spx++; in handle_set_prefix()
132 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_prefix()
133 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_set_prefix()
135 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_set_prefix()
139 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_set_prefix()
142 rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); in handle_set_prefix()
144 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_set_prefix()
153 if (kvm_is_error_gpa(vcpu->kvm, address)) in handle_set_prefix()
154 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_set_prefix()
156 kvm_s390_set_prefix(vcpu, address); in handle_set_prefix()
157 trace_kvm_s390_handle_prefix(vcpu, 1, address); in handle_set_prefix()
161 static int handle_store_prefix(struct kvm_vcpu *vcpu) in handle_store_prefix() argument
168 vcpu->stat.instruction_stpx++; in handle_store_prefix()
170 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_store_prefix()
171 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_store_prefix()
173 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_store_prefix()
177 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_store_prefix()
179 address = kvm_s390_get_prefix(vcpu); in handle_store_prefix()
182 rc = write_guest(vcpu, operand2, ar, &address, sizeof(address)); in handle_store_prefix()
184 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_store_prefix()
186 VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2); in handle_store_prefix()
187 trace_kvm_s390_handle_prefix(vcpu, 0, address); in handle_store_prefix()
191 static int handle_store_cpu_address(struct kvm_vcpu *vcpu) in handle_store_cpu_address() argument
193 u16 vcpu_id = vcpu->vcpu_id; in handle_store_cpu_address()
198 vcpu->stat.instruction_stap++; in handle_store_cpu_address()
200 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_store_cpu_address()
201 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_store_cpu_address()
203 ga = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_store_cpu_address()
206 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_store_cpu_address()
208 rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); in handle_store_cpu_address()
210 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_store_cpu_address()
212 VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga); in handle_store_cpu_address()
213 trace_kvm_s390_handle_stap(vcpu, ga); in handle_store_cpu_address()
217 int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu) in kvm_s390_skey_check_enable() argument
221 trace_kvm_s390_skey_related_inst(vcpu); in kvm_s390_skey_check_enable()
223 if (vcpu->arch.skey_enabled) in kvm_s390_skey_check_enable()
227 VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc); in kvm_s390_skey_check_enable()
231 if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) in kvm_s390_skey_check_enable()
232 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS); in kvm_s390_skey_check_enable()
233 if (!vcpu->kvm->arch.use_skf) in kvm_s390_skey_check_enable()
234 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_skey_check_enable()
236 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); in kvm_s390_skey_check_enable()
237 vcpu->arch.skey_enabled = true; in kvm_s390_skey_check_enable()
241 static int try_handle_skey(struct kvm_vcpu *vcpu) in try_handle_skey() argument
245 rc = kvm_s390_skey_check_enable(vcpu); in try_handle_skey()
248 if (vcpu->kvm->arch.use_skf) { in try_handle_skey()
250 kvm_s390_retry_instr(vcpu); in try_handle_skey()
251 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); in try_handle_skey()
257 static int handle_iske(struct kvm_vcpu *vcpu) in handle_iske() argument
265 vcpu->stat.instruction_iske++; in handle_iske()
267 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_iske()
268 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_iske()
270 rc = try_handle_skey(vcpu); in handle_iske()
274 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_iske()
276 gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_iske()
277 gaddr = kvm_s390_logical_to_effective(vcpu, gaddr); in handle_iske()
278 gaddr = kvm_s390_real_to_abs(vcpu, gaddr); in handle_iske()
279 vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr)); in handle_iske()
281 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_iske()
297 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_iske()
300 vcpu->run->s.regs.gprs[reg1] &= ~0xff; in handle_iske()
301 vcpu->run->s.regs.gprs[reg1] |= key; in handle_iske()
305 static int handle_rrbe(struct kvm_vcpu *vcpu) in handle_rrbe() argument
312 vcpu->stat.instruction_rrbe++; in handle_rrbe()
314 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_rrbe()
315 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_rrbe()
317 rc = try_handle_skey(vcpu); in handle_rrbe()
321 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_rrbe()
323 gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_rrbe()
324 gaddr = kvm_s390_logical_to_effective(vcpu, gaddr); in handle_rrbe()
325 gaddr = kvm_s390_real_to_abs(vcpu, gaddr); in handle_rrbe()
326 vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr)); in handle_rrbe()
328 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_rrbe()
343 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_rrbe()
346 kvm_s390_set_psw_cc(vcpu, rc); in handle_rrbe()
354 static int handle_sske(struct kvm_vcpu *vcpu) in handle_sske() argument
356 unsigned char m3 = vcpu->arch.sie_block->ipb >> 28; in handle_sske()
363 vcpu->stat.instruction_sske++; in handle_sske()
365 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_sske()
366 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_sske()
368 rc = try_handle_skey(vcpu); in handle_sske()
372 if (!test_kvm_facility(vcpu->kvm, 8)) in handle_sske()
374 if (!test_kvm_facility(vcpu->kvm, 10)) in handle_sske()
376 if (!test_kvm_facility(vcpu->kvm, 14)) in handle_sske()
379 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_sske()
381 key = vcpu->run->s.regs.gprs[reg1] & 0xfe; in handle_sske()
382 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_sske()
383 start = kvm_s390_logical_to_effective(vcpu, start); in handle_sske()
388 start = kvm_s390_real_to_abs(vcpu, start); in handle_sske()
393 unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start)); in handle_sske()
397 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_sske()
411 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_sske()
422 kvm_s390_set_psw_cc(vcpu, 3); in handle_sske()
424 kvm_s390_set_psw_cc(vcpu, rc); in handle_sske()
425 vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL; in handle_sske()
426 vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8; in handle_sske()
430 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) in handle_sske()
431 vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK; in handle_sske()
433 vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL; in handle_sske()
434 end = kvm_s390_logical_to_effective(vcpu, end); in handle_sske()
435 vcpu->run->s.regs.gprs[reg2] |= end; in handle_sske()
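
The addressing-mode-dependent masking in the last SSKE lines can be summarised in a small hypothetical helper; this is an illustrative sketch of the write-back of the next block address into r2, not the kernel's code.

        #include <stdint.h>

        #define PAGE_SIZE 4096UL
        #define PAGE_MASK (~(PAGE_SIZE - 1))

        /*
         * Sketch: only the address bits significant in the current addressing
         * mode are replaced by the next block address "end"; the remaining
         * bits of the register are preserved.
         */
        uint64_t sske_writeback_r2(uint64_t r2, uint64_t end, int amode_64)
        {
                if (amode_64)
                        r2 &= ~PAGE_MASK;       /* keep only the byte offset */
                else
                        r2 &= ~0xfffff000UL;    /* keep byte offset and upper register half */
                return r2 | end;
        }
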
440 static int handle_ipte_interlock(struct kvm_vcpu *vcpu) in handle_ipte_interlock() argument
442 vcpu->stat.instruction_ipte_interlock++; in handle_ipte_interlock()
443 if (psw_bits(vcpu->arch.sie_block->gpsw).pstate) in handle_ipte_interlock()
444 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_ipte_interlock()
445 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu->kvm)); in handle_ipte_interlock()
446 kvm_s390_retry_instr(vcpu); in handle_ipte_interlock()
447 VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); in handle_ipte_interlock()
451 static int handle_test_block(struct kvm_vcpu *vcpu) in handle_test_block() argument
456 vcpu->stat.instruction_tb++; in handle_test_block()
458 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_test_block()
459 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_test_block()
461 kvm_s390_get_regs_rre(vcpu, NULL, &reg2); in handle_test_block()
462 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_test_block()
463 addr = kvm_s390_logical_to_effective(vcpu, addr); in handle_test_block()
464 if (kvm_s390_check_low_addr_prot_real(vcpu, addr)) in handle_test_block()
465 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in handle_test_block()
466 addr = kvm_s390_real_to_abs(vcpu, addr); in handle_test_block()
468 if (kvm_is_error_gpa(vcpu->kvm, addr)) in handle_test_block()
469 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_test_block()
474 if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE)) in handle_test_block()
476 kvm_s390_set_psw_cc(vcpu, 0); in handle_test_block()
477 vcpu->run->s.regs.gprs[0] = 0; in handle_test_block()
481 static int handle_tpi(struct kvm_vcpu *vcpu) in handle_tpi() argument
490 vcpu->stat.instruction_tpi++; in handle_tpi()
492 addr = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_tpi()
494 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_tpi()
496 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); in handle_tpi()
498 kvm_s390_set_psw_cc(vcpu, 0); in handle_tpi()
511 rc = write_guest(vcpu, addr, ar, &tpi_data, len); in handle_tpi()
513 rc = kvm_s390_inject_prog_cond(vcpu, rc); in handle_tpi()
522 if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { in handle_tpi()
531 kvm_s390_set_psw_cc(vcpu, 1); in handle_tpi()
539 if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { in handle_tpi()
547 static int handle_tsch(struct kvm_vcpu *vcpu) in handle_tsch() argument
552 vcpu->stat.instruction_tsch++; in handle_tsch()
555 if (vcpu->run->s.regs.gprs[1]) in handle_tsch()
556 inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, in handle_tsch()
557 vcpu->run->s.regs.gprs[1]); in handle_tsch()
567 vcpu->run->exit_reason = KVM_EXIT_S390_TSCH; in handle_tsch()
568 vcpu->run->s390_tsch.dequeued = !!inti; in handle_tsch()
570 vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id; in handle_tsch()
571 vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr; in handle_tsch()
572 vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm; in handle_tsch()
573 vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word; in handle_tsch()
575 vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; in handle_tsch()
580 static int handle_io_inst(struct kvm_vcpu *vcpu) in handle_io_inst() argument
582 VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); in handle_io_inst()
584 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_io_inst()
585 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_io_inst()
587 if (vcpu->kvm->arch.css_support) { in handle_io_inst()
592 if (vcpu->arch.sie_block->ipa == 0xb236) in handle_io_inst()
593 return handle_tpi(vcpu); in handle_io_inst()
594 if (vcpu->arch.sie_block->ipa == 0xb235) in handle_io_inst()
595 return handle_tsch(vcpu); in handle_io_inst()
597 vcpu->stat.instruction_io_other++; in handle_io_inst()
604 kvm_s390_set_psw_cc(vcpu, 3); in handle_io_inst()
624 static int handle_pqap(struct kvm_vcpu *vcpu) in handle_pqap() argument
636 if (!(vcpu->arch.sie_block->eca & ECA_APIE)) in handle_pqap()
646 reg0 = vcpu->run->s.regs.gprs[0]; in handle_pqap()
652 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_pqap()
653 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_pqap()
658 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pqap()
660 if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL)) in handle_pqap()
661 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pqap()
663 if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL)) in handle_pqap()
664 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pqap()
668 if (!test_kvm_facility(vcpu->kvm, 65)) in handle_pqap()
669 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pqap()
676 down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem); in handle_pqap()
677 if (vcpu->kvm->arch.crypto.pqap_hook) { in handle_pqap()
678 pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook; in handle_pqap()
679 ret = pqap_hook(vcpu); in handle_pqap()
680 if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000) in handle_pqap()
681 kvm_s390_set_psw_cc(vcpu, 3); in handle_pqap()
682 up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem); in handle_pqap()
685 up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem); in handle_pqap()
692 memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status)); in handle_pqap()
693 kvm_s390_set_psw_cc(vcpu, 3); in handle_pqap()
697 static int handle_stfl(struct kvm_vcpu *vcpu) in handle_stfl() argument
702 vcpu->stat.instruction_stfl++; in handle_stfl()
704 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stfl()
705 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stfl()
711 fac = *vcpu->kvm->arch.model.fac_list >> 32; in handle_stfl()
712 rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list), in handle_stfl()
716 VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac); in handle_stfl()
717 trace_kvm_s390_handle_stfl(vcpu, fac); in handle_stfl()
743 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) in kvm_s390_handle_lpsw() argument
745 psw_t *gpsw = &vcpu->arch.sie_block->gpsw; in kvm_s390_handle_lpsw()
751 vcpu->stat.instruction_lpsw++; in kvm_s390_handle_lpsw()
754 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_lpsw()
756 addr = kvm_s390_get_base_disp_s(vcpu, &ar); in kvm_s390_handle_lpsw()
758 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lpsw()
760 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); in kvm_s390_handle_lpsw()
762 return kvm_s390_inject_prog_cond(vcpu, rc); in kvm_s390_handle_lpsw()
764 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lpsw()
769 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lpsw()
773 static int handle_lpswe(struct kvm_vcpu *vcpu) in handle_lpswe() argument
780 vcpu->stat.instruction_lpswe++; in handle_lpswe()
782 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_lpswe()
783 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_lpswe()
785 addr = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_lpswe()
787 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_lpswe()
788 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); in handle_lpswe()
790 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_lpswe()
791 vcpu->arch.sie_block->gpsw = new_psw; in handle_lpswe()
792 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) in handle_lpswe()
793 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_lpswe()
797 static int handle_stidp(struct kvm_vcpu *vcpu) in handle_stidp() argument
799 u64 stidp_data = vcpu->kvm->arch.model.cpuid; in handle_stidp()
804 vcpu->stat.instruction_stidp++; in handle_stidp()
806 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stidp()
807 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stidp()
809 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_stidp()
812 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stidp()
814 rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); in handle_stidp()
816 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_stidp()
818 VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data); in handle_stidp()
822 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) in handle_stsi_3_2_2() argument
827 cpus = atomic_read(&vcpu->kvm->online_vcpus); in handle_stsi_3_2_2()
849 static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar, in insert_stsi_usr_data() argument
852 vcpu->run->exit_reason = KVM_EXIT_S390_STSI; in insert_stsi_usr_data()
853 vcpu->run->s390_stsi.addr = addr; in insert_stsi_usr_data()
854 vcpu->run->s390_stsi.ar = ar; in insert_stsi_usr_data()
855 vcpu->run->s390_stsi.fc = fc; in insert_stsi_usr_data()
856 vcpu->run->s390_stsi.sel1 = sel1; in insert_stsi_usr_data()
857 vcpu->run->s390_stsi.sel2 = sel2; in insert_stsi_usr_data()
860 static int handle_stsi(struct kvm_vcpu *vcpu) in handle_stsi() argument
862 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; in handle_stsi()
863 int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; in handle_stsi()
864 int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; in handle_stsi()
870 vcpu->stat.instruction_stsi++; in handle_stsi()
871 VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2); in handle_stsi()
873 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stsi()
874 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stsi()
885 if (fc == 15 && (!test_kvm_facility(vcpu->kvm, 11) || in handle_stsi()
886 !vcpu->kvm->arch.user_stsi)) in handle_stsi()
889 if (vcpu->run->s.regs.gprs[0] & 0x0fffff00 in handle_stsi()
890 || vcpu->run->s.regs.gprs[1] & 0xffff0000) in handle_stsi()
891 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stsi()
894 vcpu->run->s.regs.gprs[0] = 3 << 28; in handle_stsi()
895 kvm_s390_set_psw_cc(vcpu, 0); in handle_stsi()
899 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_stsi()
901 if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff)) in handle_stsi()
902 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stsi()
919 handle_stsi_3_2_2(vcpu, (void *) mem); in handle_stsi()
922 insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); in handle_stsi()
923 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); in handle_stsi()
926 if (kvm_s390_pv_cpu_is_protected(vcpu)) { in handle_stsi()
927 memcpy(sida_addr(vcpu->arch.sie_block), (void *)mem, PAGE_SIZE); in handle_stsi()
930 rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); in handle_stsi()
933 rc = kvm_s390_inject_prog_cond(vcpu, rc); in handle_stsi()
936 if (vcpu->kvm->arch.user_stsi) { in handle_stsi()
937 insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); in handle_stsi()
940 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); in handle_stsi()
942 kvm_s390_set_psw_cc(vcpu, 0); in handle_stsi()
943 vcpu->run->s.regs.gprs[0] = 0; in handle_stsi()
946 kvm_s390_set_psw_cc(vcpu, 3); in handle_stsi()
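
A self-contained sketch of the STSI control decoding visible above (hypothetical names, only the masks are taken from the hits): the function code and selectors come from GPR 0 and GPR 1, and the reserved bits tested by the handler must be zero.

        #include <stdint.h>

        struct stsi_req {
                int fc, sel1, sel2;
                int valid;
        };

        /* Illustrative decode of the STSI controls; not kernel code. */
        struct stsi_req stsi_decode(uint64_t gpr0, uint64_t gpr1)
        {
                struct stsi_req r;

                r.fc   = (gpr0 & 0xf0000000) >> 28;
                r.sel1 = gpr0 & 0xff;
                r.sel2 = gpr1 & 0xffff;
                /* Same reserved-bit test as the handler performs before emulation. */
                r.valid = !(gpr0 & 0x0fffff00) && !(gpr1 & 0xffff0000);
                return r;
        }
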
952 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) in kvm_s390_handle_b2() argument
954 switch (vcpu->arch.sie_block->ipa & 0x00ff) { in kvm_s390_handle_b2()
956 return handle_stidp(vcpu); in kvm_s390_handle_b2()
958 return handle_set_clock(vcpu); in kvm_s390_handle_b2()
960 return handle_set_prefix(vcpu); in kvm_s390_handle_b2()
962 return handle_store_prefix(vcpu); in kvm_s390_handle_b2()
964 return handle_store_cpu_address(vcpu); in kvm_s390_handle_b2()
966 return kvm_s390_handle_vsie(vcpu); in kvm_s390_handle_b2()
969 return handle_ipte_interlock(vcpu); in kvm_s390_handle_b2()
971 return handle_iske(vcpu); in kvm_s390_handle_b2()
973 return handle_rrbe(vcpu); in kvm_s390_handle_b2()
975 return handle_sske(vcpu); in kvm_s390_handle_b2()
977 return handle_test_block(vcpu); in kvm_s390_handle_b2()
994 return handle_io_inst(vcpu); in kvm_s390_handle_b2()
996 return handle_sthyi(vcpu); in kvm_s390_handle_b2()
998 return handle_stsi(vcpu); in kvm_s390_handle_b2()
1000 return handle_pqap(vcpu); in kvm_s390_handle_b2()
1002 return handle_stfl(vcpu); in kvm_s390_handle_b2()
1004 return handle_lpswe(vcpu); in kvm_s390_handle_b2()
1010 static int handle_epsw(struct kvm_vcpu *vcpu) in handle_epsw() argument
1014 vcpu->stat.instruction_epsw++; in handle_epsw()
1016 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_epsw()
1019 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL; in handle_epsw()
1020 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; in handle_epsw()
1022 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL; in handle_epsw()
1023 vcpu->run->s.regs.gprs[reg2] |= in handle_epsw()
1024 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; in handle_epsw()
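
The two EPSW stores above amount to the following standalone sketch (hypothetical helper name): each half of the guest PSW mask lands in the low 32 bits of a target GPR while the GPR's upper half is preserved.

        #include <stdint.h>

        /* Illustrative sketch of the EPSW emulation's register updates. */
        void epsw_store(uint64_t psw_mask, uint64_t *gpr1, uint64_t *gpr2)
        {
                *gpr1 = (*gpr1 & 0xffffffff00000000UL) | (psw_mask >> 32);
                *gpr2 = (*gpr2 & 0xffffffff00000000UL) | (psw_mask & 0xffffffffUL);
        }
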
1039 static int handle_pfmf(struct kvm_vcpu *vcpu) in handle_pfmf() argument
1046 vcpu->stat.instruction_pfmf++; in handle_pfmf()
1048 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_pfmf()
1050 if (!test_kvm_facility(vcpu->kvm, 8)) in handle_pfmf()
1051 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_pfmf()
1053 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_pfmf()
1054 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_pfmf()
1056 if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED) in handle_pfmf()
1057 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
1060 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && in handle_pfmf()
1061 !test_kvm_facility(vcpu->kvm, 14)) in handle_pfmf()
1062 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
1065 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK && in handle_pfmf()
1066 test_kvm_facility(vcpu->kvm, 10)) { in handle_pfmf()
1067 mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR; in handle_pfmf()
1068 mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC; in handle_pfmf()
1071 nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ; in handle_pfmf()
1072 key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY; in handle_pfmf()
1073 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_pfmf()
1074 start = kvm_s390_logical_to_effective(vcpu, start); in handle_pfmf()
1076 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { in handle_pfmf()
1077 if (kvm_s390_check_low_addr_prot_real(vcpu, start)) in handle_pfmf()
1078 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in handle_pfmf()
1081 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { in handle_pfmf()
1084 start = kvm_s390_real_to_abs(vcpu, start); in handle_pfmf()
1093 if (!test_kvm_facility(vcpu->kvm, 78) || in handle_pfmf()
1094 psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT) in handle_pfmf()
1095 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
1099 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
1107 vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start)); in handle_pfmf()
1109 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_pfmf()
1111 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { in handle_pfmf()
1112 if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE)) in handle_pfmf()
1113 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_pfmf()
1116 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { in handle_pfmf()
1117 int rc = kvm_s390_skey_check_enable(vcpu); in handle_pfmf()
1131 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_pfmf()
1139 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { in handle_pfmf()
1140 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { in handle_pfmf()
1141 vcpu->run->s.regs.gprs[reg2] = end; in handle_pfmf()
1143 vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL; in handle_pfmf()
1144 end = kvm_s390_logical_to_effective(vcpu, end); in handle_pfmf()
1145 vcpu->run->s.regs.gprs[reg2] |= end; in handle_pfmf()
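
The PFMF end-address write-back mirrors the SSKE case; a hypothetical helper capturing the two branches above might look like this sketch.

        #include <stdint.h>

        /*
         * Sketch: in the 64-bit addressing mode the whole register receives the
         * next frame address, otherwise only the low 32 bits are replaced and
         * the upper half of the register is left untouched.
         */
        uint64_t pfmf_writeback_r2(uint64_t r2, uint64_t end, int amode_64)
        {
                if (amode_64)
                        return end;
                return (r2 & ~0xffffffffUL) | (end & 0xffffffffUL);
        }
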
1154 static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc) in __do_essa() argument
1165 kvm_s390_get_regs_rre(vcpu, &r1, &r2); in __do_essa()
1166 gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT; in __do_essa()
1167 hva = gfn_to_hva(vcpu->kvm, gfn); in __do_essa()
1168 entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; in __do_essa()
1171 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in __do_essa()
1173 nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev); in __do_essa()
1176 vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */ in __do_essa()
1193 vcpu->run->s.regs.gprs[r1] = res; in __do_essa()
1201 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK); in __do_essa()
1206 struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn); in __do_essa()
1210 atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages); in __do_essa()
1216 static int handle_essa(struct kvm_vcpu *vcpu) in handle_essa() argument
1219 int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; in handle_essa()
1224 VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries); in handle_essa()
1225 gmap = vcpu->arch.gmap; in handle_essa()
1226 vcpu->stat.instruction_essa++; in handle_essa()
1227 if (!vcpu->kvm->arch.use_cmma) in handle_essa()
1228 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_essa()
1230 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_essa()
1231 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_essa()
1233 orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; in handle_essa()
1235 if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT in handle_essa()
1237 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_essa()
1239 if (!vcpu->kvm->arch.migration_mode) { in handle_essa()
1249 if (vcpu->kvm->mm->context.uses_cmm == 0) { in handle_essa()
1250 mmap_write_lock(vcpu->kvm->mm); in handle_essa()
1251 vcpu->kvm->mm->context.uses_cmm = 1; in handle_essa()
1252 mmap_write_unlock(vcpu->kvm->mm); in handle_essa()
1263 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in handle_essa()
1265 kvm_s390_retry_instr(vcpu); in handle_essa()
1269 mmap_read_lock(vcpu->kvm->mm); in handle_essa()
1270 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in handle_essa()
1271 i = __do_essa(vcpu, orc); in handle_essa()
1272 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in handle_essa()
1273 mmap_read_unlock(vcpu->kvm->mm); in handle_essa()
1279 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ in handle_essa()
1280 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); in handle_essa()
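
Two small field extractions recur in the ESSA hits. As an illustration (hypothetical helper names, standard 4 KB page assumed), the operation request code comes from the top nibble of IPB and the number of 8-byte CBRL entries from the page offset stored in cbrlo.

        #include <stdint.h>

        #define PAGE_SIZE 4096UL
        #define PAGE_MASK (~(PAGE_SIZE - 1))

        /* ESSA operation request code from the intercepted IPB word. */
        unsigned int essa_orc(uint32_t ipb)
        {
                return (ipb & 0xf0000000) >> 28;
        }

        /* Number of 8-byte entries already collected in the CBRL buffer. */
        unsigned int essa_cbrl_entries(uint64_t cbrlo)
        {
                return (cbrlo & ~PAGE_MASK) >> 3;
        }
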
1288 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) in kvm_s390_handle_b9() argument
1290 switch (vcpu->arch.sie_block->ipa & 0x00ff) { in kvm_s390_handle_b9()
1294 return handle_ipte_interlock(vcpu); in kvm_s390_handle_b9()
1296 return handle_epsw(vcpu); in kvm_s390_handle_b9()
1298 return handle_essa(vcpu); in kvm_s390_handle_b9()
1300 return handle_pfmf(vcpu); in kvm_s390_handle_b9()
1306 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) in kvm_s390_handle_lctl() argument
1308 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in kvm_s390_handle_lctl()
1309 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in kvm_s390_handle_lctl()
1315 vcpu->stat.instruction_lctl++; in kvm_s390_handle_lctl()
1317 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_lctl()
1318 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_lctl()
1320 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); in kvm_s390_handle_lctl()
1323 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lctl()
1325 VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); in kvm_s390_handle_lctl()
1326 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); in kvm_s390_handle_lctl()
1329 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); in kvm_s390_handle_lctl()
1331 return kvm_s390_inject_prog_cond(vcpu, rc); in kvm_s390_handle_lctl()
1335 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; in kvm_s390_handle_lctl()
1336 vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++]; in kvm_s390_handle_lctl()
1341 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_s390_handle_lctl()
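
The loop around the two gcr masking lines above is not itself among the hits; assuming the architected LCTL behaviour (control registers r1 through r3, wrapping from 15 back to 0, with only the low 32 bits replaced), a standalone sketch could look like this.

        #include <stdint.h>

        /* Illustrative register walk for LCTL; hypothetical helper, not kernel code. */
        void lctl_apply(uint64_t gcr[16], int reg1, int reg3, const uint32_t *ctl_array)
        {
                int reg = reg1;
                int nr_regs = 0;

                do {
                        gcr[reg] &= 0xffffffff00000000UL;
                        gcr[reg] |= ctl_array[nr_regs++];
                        if (reg == reg3)
                                break;
                        reg = (reg + 1) % 16;
                } while (1);
        }
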
1345 int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) in kvm_s390_handle_stctl() argument
1347 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in kvm_s390_handle_stctl()
1348 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in kvm_s390_handle_stctl()
1354 vcpu->stat.instruction_stctl++; in kvm_s390_handle_stctl()
1356 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_stctl()
1357 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_stctl()
1359 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); in kvm_s390_handle_stctl()
1362 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_stctl()
1364 VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); in kvm_s390_handle_stctl()
1365 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); in kvm_s390_handle_stctl()
1370 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; in kvm_s390_handle_stctl()
1375 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); in kvm_s390_handle_stctl()
1376 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; in kvm_s390_handle_stctl()
1379 static int handle_lctlg(struct kvm_vcpu *vcpu) in handle_lctlg() argument
1381 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in handle_lctlg()
1382 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in handle_lctlg()
1388 vcpu->stat.instruction_lctlg++; in handle_lctlg()
1390 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_lctlg()
1391 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_lctlg()
1393 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); in handle_lctlg()
1396 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_lctlg()
1398 VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); in handle_lctlg()
1399 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); in handle_lctlg()
1402 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); in handle_lctlg()
1404 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_lctlg()
1408 vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++]; in handle_lctlg()
1413 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in handle_lctlg()
1417 static int handle_stctg(struct kvm_vcpu *vcpu) in handle_stctg() argument
1419 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in handle_stctg()
1420 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in handle_stctg()
1426 vcpu->stat.instruction_stctg++; in handle_stctg()
1428 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stctg()
1429 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stctg()
1431 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); in handle_stctg()
1434 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stctg()
1436 VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); in handle_stctg()
1437 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); in handle_stctg()
1442 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; in handle_stctg()
1447 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); in handle_stctg()
1448 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; in handle_stctg()
1451 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) in kvm_s390_handle_eb() argument
1453 switch (vcpu->arch.sie_block->ipb & 0x000000ff) { in kvm_s390_handle_eb()
1455 return handle_stctg(vcpu); in kvm_s390_handle_eb()
1457 return handle_lctlg(vcpu); in kvm_s390_handle_eb()
1461 return handle_ri(vcpu); in kvm_s390_handle_eb()
1467 static int handle_tprot(struct kvm_vcpu *vcpu) in handle_tprot() argument
1476 vcpu->stat.instruction_tprot++; in handle_tprot()
1478 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_tprot()
1479 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_tprot()
1481 kvm_s390_get_base_disp_sse(vcpu, &address, &operand2, &ar, NULL); in handle_tprot()
1484 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) in handle_tprot()
1485 ipte_lock(vcpu->kvm); in handle_tprot()
1487 ret = guest_translate_address_with_key(vcpu, address, ar, &gpa, in handle_tprot()
1490 gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable); in handle_tprot()
1494 ret = guest_translate_address_with_key(vcpu, address, ar, &gpa, in handle_tprot()
1514 kvm_s390_set_psw_cc(vcpu, cc); in handle_tprot()
1517 ret = kvm_s390_inject_program_int(vcpu, ret); in handle_tprot()
1521 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) in handle_tprot()
1522 ipte_unlock(vcpu->kvm); in handle_tprot()
1526 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) in kvm_s390_handle_e5() argument
1528 switch (vcpu->arch.sie_block->ipa & 0x00ff) { in kvm_s390_handle_e5()
1530 return handle_tprot(vcpu); in kvm_s390_handle_e5()
1536 static int handle_sckpf(struct kvm_vcpu *vcpu) in handle_sckpf() argument
1540 vcpu->stat.instruction_sckpf++; in handle_sckpf()
1542 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_sckpf()
1543 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_sckpf()
1545 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) in handle_sckpf()
1546 return kvm_s390_inject_program_int(vcpu, in handle_sckpf()
1549 value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff; in handle_sckpf()
1550 vcpu->arch.sie_block->todpr = value; in handle_sckpf()
1555 static int handle_ptff(struct kvm_vcpu *vcpu) in handle_ptff() argument
1557 vcpu->stat.instruction_ptff++; in handle_ptff()
1560 kvm_s390_set_psw_cc(vcpu, 3); in handle_ptff()
1564 int kvm_s390_handle_01(struct kvm_vcpu *vcpu) in kvm_s390_handle_01() argument
1566 switch (vcpu->arch.sie_block->ipa & 0x00ff) { in kvm_s390_handle_01()
1568 return handle_ptff(vcpu); in kvm_s390_handle_01()
1570 return handle_sckpf(vcpu); in kvm_s390_handle_01()