/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif
/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to implement
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong. go again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};
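
/*
 * Illustrative sketch only (nothing here is defined by this header): an
 * exit handler typically dispatches on the result along these lines,
 * with RESUME_GUEST/RESUME_HOST as the usual resume codes:
 *
 *	switch (kvmppc_emulate_instruction(vcpu)) {
 *	case EMULATE_DONE:	r = RESUME_GUEST; break;
 *	case EMULATE_DO_MMIO:	r = RESUME_HOST; break;	(complete via kvm_run)
 *	case EMULATE_FAIL:	inject a program interrupt, then resume
 *	}
 */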

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
#define kvmppc_ioba_validate(stt, ioba, npages)				\
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset,	\
				(stt)->size, (ioba), (npages)) ?	\
				H_PARAMETER : H_SUCCESS)
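/*
 * That is, kvmppc_ioba_validate() yields H_SUCCESS when the
 * (ioba, npages) window fits within the TCE table described by @stt,
 * and H_PARAMETER otherwise; the range check itself is done by
 * iommu_tce_check_ioba().
 */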
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				      unsigned long liobn, unsigned long ioba,
				      unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			       unsigned long liobn, unsigned long ioba,
			       unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};
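
/*
 * The union above carries one register image for the
 * KVM_{GET,SET}_ONE_REG ABI: wval/dval for 32/64-bit scalars, the
 * v*val arrays for vector and VSX state, vpaval for a VPA registration
 * (address + length), and xive_timaval for saved XIVE state.
 */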

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
	int (*create_vm_debugfs)(struct kvm *kvm);
	int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
			       swab32(vcpu->arch.last_inst) :
			       vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts "inst" bits using the Power ISA bit ordering: bit 0 is the
 * most-significant bit of the 64-bit container, and both the msb and
 * lsb bits are included in the extracted field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
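
/*
 * Example (illustrative values only): kvmppc_get_field(inst, 60, 63)
 * extracts the low nibble (inst & 0xf), while
 * kvmppc_get_field(inst, 0, 0) extracts the most-significant bit
 * (inst >> 63).
 */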

/*
 * Replaces "inst" bits using the same Power ISA bit ordering (bit 0 is
 * the most-significant bit; msb and lsb are both included).
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
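
/*
 * Example: kvmppc_set_field(inst, 60, 63, 0x5) yields
 * (inst & ~0xfULL) | 0x5, truncated to the u32 return type.
 */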

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
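
/*
 * Example: for an id whose size field encodes KVM_REG_SIZE_U64,
 * one_reg_size(id) is 8, so get_reg_val(id, regval) returns a union
 * with .dval == regval and set_reg_val(id, val) evaluates to val.dval;
 * a 4-byte id uses .wval instead.
 */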

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid unnecessary full exits to the host kernel, an IPI to a CPU
 * thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)	{ return false; }

#endif

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * In the function names below, the first "xive" refers to the
 * "eXternal Interrupt Virtualization Engine", i.e. the new P9 interrupt
 * controller, while the second "xive" is the legacy "eXternal Interrupt
 * Vector Entry", the configuration of an interrupt on the "xics"
 * interrupt controller of P8 and earlier. These two functions consume
 * or produce a legacy "XIVE" state on the new "XIVE" interrupt
 * controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want available while running in real mode in
 * the guest, operating on the XICS. Currently only vcpu wakeup is
 * supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core *rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so expose helpers that handle both
 * cases.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
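
/*
 * The instantiations below generate the accessor pairs used throughout
 * KVM, e.g. kvmppc_get_sprg0()/kvmppc_set_sprg0(): on booke HV they
 * access the guest SPR (here SPRN_GSPRG0) directly, elsewhere they
 * access the endianness-corrected field in the shared page.
 */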

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after kvmppc_prepare_to_enter(). It puts the lazy-EE and
 * irq-disabled tracking state back to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

static inline void kvmppc_fix_ee_after_exit(void)
{
#ifdef CONFIG_PPC64
	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ALL_DISABLED);
#endif

	trace_hardirqs_off();
}


static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
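
/*
 * Worked example: for an indexed-form access such as "lwzx rt, ra, rb"
 * this computes EA = (ra ? GPR[ra] : 0) + GPR[rb], truncated to 32 bits
 * when the guest is not in 64-bit mode (MSR[SF] clear on Book3S,
 * MSR[CM] clear on Book3E).
 */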

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */