// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>

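/*
 * Global VMID allocator state: vmid_version is a generation counter
 * covering all VMIDs handed out so far, vmid_next is the next VMID to
 * allocate, and vmid_lock protects both. A VM's cached VMID is valid
 * only while its recorded vmid_version matches the global vmid_version.
 */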
static unsigned long vmid_version = 1;
static unsigned long vmid_next;
static unsigned long vmid_bits __ro_after_init;
static DEFINE_SPINLOCK(vmid_lock);

void __init kvm_riscv_gstage_vmid_detect(void)
{
	unsigned long old;

	/* Figure out the number of VMID bits in HW */
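	/*
	 * The VMID field of hgatp is WARL: write all-ones to it and read
	 * it back to discover how many bits the hardware implements. For
	 * example, an implementation with 7 VMID bits reads back 0x7f in
	 * the field, and fls_long(0x7f) == 7.
	 */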
	old = csr_read(CSR_HGATP);
	csr_write(CSR_HGATP, old | HGATP_VMID_MASK);
	vmid_bits = csr_read(CSR_HGATP);
	vmid_bits = (vmid_bits & HGATP_VMID_MASK) >> HGATP_VMID_SHIFT;
	vmid_bits = fls_long(vmid_bits);
	csr_write(CSR_HGATP, old);

	/* We polluted the local TLB so flush all guest TLB entries */
	kvm_riscv_local_hfence_gvma_all();

	/*
	 * Don't use VMIDs if the hardware provides fewer of them than
	 * there are possible host CPUs.
	 */
	if ((1UL << vmid_bits) < num_possible_cpus())
		vmid_bits = 0;
}

unsigned long kvm_riscv_gstage_vmid_bits(void)
{
	return vmid_bits;
}

int kvm_riscv_gstage_vmid_init(struct kvm *kvm)
{
	/*
	 * Mark the initial VMID and VMID version invalid: vmid_version
	 * starts at 1 and only ever increments, so version 0 never
	 * matches and the first VCPU to run allocates a fresh VMID.
	 */
	kvm->arch.vmid.vmid_version = 0;
	kvm->arch.vmid.vmid = 0;

	return 0;
}

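/*
 * With vmid_bits == 0 (VMIDs unused), a VMID can never go stale, so
 * report "unchanged" unconditionally.
 */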
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid)
{
	if (!vmid_bits)
		return false;

	return unlikely(READ_ONCE(vmid->vmid_version) !=
			READ_ONCE(vmid_version));
}

static void __local_hfence_gvma_all(void *info)
{
	kvm_riscv_local_hfence_gvma_all();
}

void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
{
	unsigned long i;
	struct kvm_vcpu *v;
	struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid;

	if (!kvm_riscv_gstage_vmid_ver_changed(vmid))
		return;

	spin_lock(&vmid_lock);

	/*
	 * Re-check vmid_version under the lock: another VCPU may have
	 * already allocated a valid VMID for this VM while we were
	 * waiting for it.
	 */
	if (!kvm_riscv_gstage_vmid_ver_changed(vmid)) {
		spin_unlock(&vmid_lock);
		return;
	}

	/* First user of a new VMID version? */
	if (unlikely(vmid_next == 0)) {
		WRITE_ONCE(vmid_version, READ_ONCE(vmid_version) + 1);
		vmid_next = 1;

		/*
		 * We ran out of VMIDs, so we increment vmid_version and
		 * start assigning VMIDs from 1.
		 *
		 * This also means the existing VMID assignments of all
		 * Guest instances are invalid and we have to force VMID
		 * re-assignment for all Guest instances. Guest instances
		 * that were not running will automatically pick up new
		 * VMIDs because they call kvm_riscv_gstage_vmid_update()
		 * whenever they enter the in-kernel run loop. For Guest
		 * instances that are already running, we force VM exits
		 * on all host CPUs using IPI and flush all Guest TLBs.
		 */
		on_each_cpu_mask(cpu_online_mask, __local_hfence_gvma_all,
				 NULL, 1);
	}

	vmid->vmid = vmid_next;
	vmid_next++;
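	/*
	 * Keep vmid_next in [0, 2^vmid_bits): once every VMID has been
	 * handed out it wraps to 0, which the "First user of a new VMID
	 * version?" check above turns into a version bump on the next
	 * allocation.
	 */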
	vmid_next &= (1 << vmid_bits) - 1;

	WRITE_ONCE(vmid->vmid_version, READ_ONCE(vmid_version));

	spin_unlock(&vmid_lock);

	/* Request G-stage page table update for all VCPUs */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvm_make_request(KVM_REQ_UPDATE_HGATP, v);
}
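
/*
 * Usage sketch (hypothetical caller, not part of this file): the VCPU
 * run loop is expected to call kvm_riscv_gstage_vmid_update() before
 * each guest entry and to retry the entry if the VMID version changed
 * again in the meantime, e.g.:
 *
 *	kvm_riscv_gstage_vmid_update(vcpu);
 *	...
 *	local_irq_disable();
 *	if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid)) {
 *		local_irq_enable();
 *		continue;	// VMID went stale, go around again
 *	}
 *	... enter guest ...
 */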