// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM L1 hypervisor optimizations on Hyper-V.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mshyperv.h>

#include "hyperv.h"
#include "kvm_onhyperv.h"

/* Callback to fill a Hyper-V flush list with the GFN range to be flushed. */
static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
				       void *data)
{
	struct kvm_tlb_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
						    range->pages);
}

/*
 * Flush the guest physical mappings of a single root: use the ranged
 * hypercall (HvFlushGuestPhysicalAddressList) when a range is given,
 * otherwise flush the entire address space
 * (HvFlushGuestPhysicalAddressSpace).
 */
static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
					   struct kvm_tlb_range *range)
{
	if (range)
		return hyperv_flush_guest_mapping_range(root_tdp,
				kvm_fill_hv_flush_list_func, (void *)range);
	else
		return hyperv_flush_guest_mapping(root_tdp);
}

/*
 * Flush the remote TLBs, i.e. Hyper-V's mappings for this guest, for all
 * TDP roots in use by the VM.  If all vCPUs share a single root, that root
 * is cached in kvm_arch->hv_root_tdp so future flushes can skip the loop.
 */
int hv_remote_flush_tlb_with_range(struct kvm *kvm,
				   struct kvm_tlb_range *range)
{
	struct kvm_arch *kvm_arch = &kvm->arch;
	struct kvm_vcpu *vcpu;
	int ret = 0, nr_unique_valid_roots;
	unsigned long i;
	hpa_t root;

	spin_lock(&kvm_arch->hv_root_tdp_lock);

	if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
		nr_unique_valid_roots = 0;

		/*
		 * Flush all valid roots, and see if all vCPUs have converged
		 * on a common root, in which case future flushes can skip the
		 * loop and flush the common root.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			root = vcpu->arch.hv_root_tdp;
			if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
				continue;

			/*
			 * Set the tracked root to the first valid root.  Keep
			 * this root for the entirety of the loop even if more
			 * roots are encountered as a low-effort optimization
			 * to avoid flushing the same (first) root again.
			 */
			if (++nr_unique_valid_roots == 1)
				kvm_arch->hv_root_tdp = root;

			if (!ret)
				ret = hv_remote_flush_root_tdp(root, range);

			/*
			 * Stop processing roots if a failure occurred and
			 * multiple valid roots have already been detected.
			 */
			if (ret && nr_unique_valid_roots > 1)
				break;
		}

		/*
		 * The optimized flush of a single root can't be used if there
		 * are multiple valid roots (obviously).
		 */
		if (nr_unique_valid_roots > 1)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
	} else {
		ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
	}

	spin_unlock(&kvm_arch->hv_root_tdp_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);

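/*
 * For context, a sketch of how common KVM x86 code consumes the remote-flush
 * hook: prefer the hypervisor-assisted flush, fall back to IPI-based
 * flushing when the hook is absent or fails.  This mirrors the
 * kvm_arch_flush_remote_tlb() wrapper in arch/x86/include/asm/kvm_host.h;
 * treat it as an illustration of the contract, not a copy of that code.
 */
#if 0	/* illustrative only; lives in arch header, not in this file */
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	if (kvm_x86_ops.tlb_remote_flush &&
	    !kvm_x86_ops.tlb_remote_flush(kvm))
		return 0;		/* hypervisor flushed on our behalf */
	else
		return -ENOTSUPP;	/* caller falls back to IPI flushes */
}
#endif
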
/* Flush the entire guest physical address space for every root in use. */
int hv_remote_flush_tlb(struct kvm *kvm)
{
	return hv_remote_flush_tlb_with_range(kvm, NULL);
}
EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);

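/*
 * A sketch of how a vendor module is expected to install these hooks when
 * KVM runs on Hyper-V with the nested GPA-flush enlightenment advertised.
 * This follows the pattern used by vmx.c, condensed for illustration; the
 * exact guard and ops structure belong to the vendor module.
 */
#if 0	/* illustrative only; lives in the vendor module's init code */
	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH &&
	    enable_ept) {
		vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
		vmx_x86_ops.tlb_remote_flush_with_range =
				hv_remote_flush_tlb_with_range;
	}
#endif
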
/*
 * Track the vCPU's current TDP root so remote flushes can target it.  If
 * the new root differs from the VM-wide common root, the vCPUs no longer
 * share a single root and the cached common root must be invalidated.
 */
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
	struct kvm_arch *kvm_arch = &vcpu->kvm->arch;

	if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
		spin_lock(&kvm_arch->hv_root_tdp_lock);
		vcpu->arch.hv_root_tdp = root_tdp;
		if (root_tdp != kvm_arch->hv_root_tdp)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
		spin_unlock(&kvm_arch->hv_root_tdp_lock);
	}
}
EXPORT_SYMBOL_GPL(hv_track_root_tdp);
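
/*
 * A sketch of the expected call site: the vendor's load_mmu_pgd hook calls
 * hv_track_root_tdp() with the new TDP root after programming the hardware
 * paging structures.  Modeled on vmx_load_mmu_pgd(); condensed and
 * illustrative only, the shadow-paging path is elided.
 */
#if 0	/* illustrative only; lives in the vendor module */
static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level)
{
	if (enable_ept) {
		vmcs_write64(EPT_POINTER,
			     construct_eptp(vcpu, root_hpa, root_level));
		hv_track_root_tdp(vcpu, root_hpa);
	}
	/* ... legacy shadow-paging path elided ... */
}
#endif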