/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definitions for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	(((vcpu)->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu) \
	((*(char *)phys_to_virt((vcpu)->arch.sie_block->itdba) == TDB_FORMAT1))
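
/*
 * Illustrative guard (a sketch, not copied from the interception code) for
 * accessing the interception TDB; both checks are expected before use:
 *
 *	if (IS_TE_ENABLED(vcpu) && IS_ITDB_VALID(vcpu))
 *		tdb = phys_to_virt(vcpu->arch.sie_block->itdba);
 */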

extern debug_info_t *kvm_s390_dbf;
extern debug_info_t *kvm_s390_dbf_uv;

#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
	debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
	  "%d: " d_string "\n", (d_kvm)->userspace_pid, \
	  d_args); \
} while (0)

#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event((d_vcpu)->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", (d_vcpu)->vcpu_id, \
	  (d_vcpu)->arch.sie_block->gpsw.mask, \
	  (d_vcpu)->arch.sie_block->gpsw.addr, d_args); \
} while (0)
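
/*
 * Example (illustrative) calls; in the s390 debug feature, lower loglevels
 * are more important:
 *
 *	VM_EVENT(kvm, 3, "%s", "vm created");
 *	VCPU_EVENT(vcpu, 4, "intercepted diag 0x%x", code);
 */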

static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
	return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (kvm->arch.gmap)
		return 0;
	return 1;
#else
	return 0;
#endif
}

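/* the SIE block stores the guest prefix address in 8k (1 << 13) units */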
#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
		   prefix);
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}

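/*
 * Worked example for the decode below (derived from the masks, not from
 * external documentation): for B2 = 5 and D2 = 0x123 the ipb field is
 * 0x51230000, so base2 = 5, disp2 = 0x123 and the effective address is
 * gprs[5] + 0x123.
 */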
static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      u8 *ar_b1, u8 *ar_b2)
{
	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

	if (ar_b1)
		*ar_b1 = base1;
	if (ar_b2)
		*ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	/* The displacement is a 20bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;
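	/*
	 * Example: a raw disp2 of 0x80000 (sign bit set) becomes 0xfff80000
	 * here and is turned into -524288 by the (long)(int) cast below.
	 */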

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code (PSW bits 18-19) in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, kvm->arch.model.fac_mask) &&
		__test_facility(nr, kvm->arch.model.fac_list);
}

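/*
 * Facility bits are numbered from the most significant bit of each byte,
 * hence the 0x80 >> (nr & 7) mask below: e.g. nr = 8 sets the top bit of
 * byte 1 of the facility list.
 */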
static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return -EINVAL;
	ptr = (unsigned char *) fac_list + (nr >> 3);
	*ptr |= (0x80UL >> (nr & 7));
	return 0;
}

static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
	WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
	return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm)
{
	if (kvm->arch.user_cpu_state_ctrl)
		return;

	VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control");
	kvm->arch.user_cpu_state_ctrl = 1;
}

/* get the end gfn of the last (highest gfn) memslot */
static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
{
	struct rb_node *node;
	struct kvm_memory_slot *ms;

	if (WARN_ON(kvm_memslots_empty(slots)))
		return 0;

	node = rb_last(&slots->gfn_tree);
	ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
	return ms->base_gfn + ms->npages;
}

static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
{
	u32 gd = virt_to_phys(kvm->arch.gisa_int.origin);

	if (gd && sclp.has_gisaf)
		gd |= GISA_FORMAT1;
	return gd;
}

/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc);
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc,
				u16 *rrc);
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc);

static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
	return kvm->arch.pv.handle;
}

static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv.handle;
}

static inline bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->lock);
	return !!kvm_s390_pv_get_handle(kvm);
}

static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
	lockdep_assert_held(&vcpu->mutex);
	return !!kvm_s390_pv_cpu_get_handle(vcpu);
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_irq *irq);

static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
					   struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm = *pgm_info,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}

static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm.code = code,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
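
/*
 * Typical caller pattern for the helper above (illustrative; the exact call
 * sites live in the instruction handlers), e.g. after detecting an invalid
 * operand:
 *
 *	return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 */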

struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);

static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}

static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
	kvm_s390_rewind_psw(vcpu, -ilen);
}

static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
	/* don't inject PER events if we re-execute the instruction */
	vcpu->arch.sie_block->icptstatus &= ~0x02;
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}

int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}

static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
	u64 rc;

	preempt_disable();
	rc = get_tod_clock_fast() + kvm->arch.epoch;
	preempt_enable();
	return rc;
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc <= 0)
		return rc;
	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
			   void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
			   __u8 __user *buf, int len);
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
void kvm_s390_gisa_disable(struct kvm *kvm);
void kvm_s390_gisa_enable(struct kvm *kvm);
int __init kvm_s390_gib_init(u8 nisc);
void kvm_s390_gib_destroy(void);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

	return &sca->ipte_control;
}

static inline int kvm_s390_use_sca_entries(void)
{
	/*
	 * Without SIGP interpretation, only SRS interpretation (if available)
	 * might use the entries. By not setting the entries and keeping them
	 * invalid, hardware will not access them but intercept.
	 */
	return sclp.has_sigpif;
}

void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info);

/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * Reset the crypto attributes for each vcpu. This can be done while the vcpus
 * are running as each vcpu will be removed from SIE before resetting the
 * crypto attributes and restored to SIE afterward.
 *
 * Note: The kvm->lock must be held while calling this function
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);

/**
 * kvm_s390_vcpu_pci_enable_interp
 *
 * Set the associated PCI attributes for each vcpu to allow for zPCI Load/Store
 * interpretation as well as adapter interruption forwarding.
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm);

/**
 * diag9c_forwarding_hz
 *
 * Set the maximum number of diag 0x9c forwardings per second
 */
extern unsigned int diag9c_forwarding_hz;

#endif /* ARCH_S390_KVM_S390_H */