/*
 * Nested HVM
 * Copyright (c) 2011, Advanced Micro Devices, Inc.
 * Author: Christoph Egger <Christoph.Egger@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/msr.h>
#include <asm/hvm/support.h>
#include <asm/hvm/hvm.h>
#include <asm/p2m.h>    /* for struct p2m_domain */
#include <asm/hvm/nestedhvm.h>
#include <asm/event.h>  /* for local_event_delivery_(en|dis)able */
#include <asm/paging.h> /* for paging_mode_hap() */

static unsigned long *shadow_io_bitmap[3];
/* Nested HVM on/off per domain */
bool nestedhvm_enabled(const struct domain *d)
{
    return is_hvm_domain(d) && d->arch.hvm_domain.params &&
           d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM];
}
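
/*
 * Illustrative sketch (not part of this file): the parameter checked
 * above is set from the toolstack before the guest runs, e.g. via
 * libxc; "xch" and "domid" are hypothetical caller state:
 *
 *     xc_hvm_param_set(xch, domid, HVM_PARAM_NESTEDHVM, 1);
 */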

/* Nested VCPU */
bool_t
nestedhvm_vcpu_in_guestmode(struct vcpu *v)
{
    return vcpu_nestedhvm(v).nv_guestmode;
}

void
nestedhvm_vcpu_reset(struct vcpu *v)
{
    struct nestedvcpu *nv = &vcpu_nestedhvm(v);

    nv->nv_vmentry_pending = 0;
    nv->nv_vmexit_pending = 0;
    nv->nv_vmswitch_in_progress = 0;
    nv->nv_ioport80 = 0;
    nv->nv_ioportED = 0;

    hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
    nv->nv_vvmcx = NULL;
    nv->nv_vvmcxaddr = INVALID_PADDR;
    nv->nv_flushp2m = 0;
    nv->nv_p2m = NULL;
    nv->stale_np2m = false;
    nv->np2m_generation = 0;

    hvm_asid_flush_vcpu_asid(&nv->nv_n2asid);

    if ( hvm_funcs.nhvm_vcpu_reset )
        hvm_funcs.nhvm_vcpu_reset(v);

    /* vcpu is in host mode */
    nestedhvm_vcpu_exit_guestmode(v);
}

int
nestedhvm_vcpu_initialise(struct vcpu *v)
{
    int rc = -EOPNOTSUPP;

    if ( !shadow_io_bitmap[0] )
        return -ENOMEM;

    if ( !hvm_funcs.nhvm_vcpu_initialise ||
         ((rc = hvm_funcs.nhvm_vcpu_initialise(v)) != 0) )
        return rc;

    nestedhvm_vcpu_reset(v);
    return 0;
}

void
nestedhvm_vcpu_destroy(struct vcpu *v)
{
    if ( hvm_funcs.nhvm_vcpu_destroy )
        hvm_funcs.nhvm_vcpu_destroy(v);
}

static void
nestedhvm_flushtlb_ipi(void *info)
{
    struct vcpu *v = current;
    struct domain *d = info;

    ASSERT(d != NULL);
    if ( v->domain != d )
    {
        /* This cpu doesn't belong to the domain */
        return;
    }

    /*
     * Just flush the ASID (or request a new one).
     * This is cheaper than flush_tlb_local() and has
     * the same desired effect.
     */
    hvm_asid_flush_core();
    vcpu_nestedhvm(v).nv_p2m = NULL;
    vcpu_nestedhvm(v).stale_np2m = true;
}

void
nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m)
{
    on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi,
                     p2m->domain, 1);
    cpumask_clear(p2m->dirty_cpumask);
}
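
/*
 * The IPI above runs synchronously (last argument is 1), so once
 * dirty_cpumask is cleared no pCPU still caches translations for this
 * nested p2m. Sketch of a hypothetical call site in the nested-p2m
 * code, after a stale np2m has been torn down ("np2m" and the locking
 * shown are assumptions for illustration):
 *
 *     p2m_lock(np2m);
 *     ... invalidate the np2m's entries ...
 *     nestedhvm_vmcx_flushtlb(np2m);
 *     p2m_unlock(np2m);
 */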

bool_t
nestedhvm_is_n2(struct vcpu *v)
{
    if ( !nestedhvm_enabled(v->domain) ||
         nestedhvm_vmswitch_in_progress(v) ||
         !nestedhvm_paging_mode_hap(v) )
        return 0;

    if ( nestedhvm_vcpu_in_guestmode(v) )
        return 1;

    return 0;
}

/* Common shadow IO Permission bitmap */

/*
 * There are four global patterns of I/O bitmap each guest can
 * choose from, depending on interception of I/O ports 0x80 and/or
 * 0xED (shown in the table below).
 * The users of the bitmap patterns are in SVM/VMX specific code.
 *
 * bitmap        port 0x80  port 0xED
 * hvm_io_bitmap cleared    cleared
 * iomap[0]      cleared    set
 * iomap[1]      set        cleared
 * iomap[2]      set        set
 */

static int __init
nestedhvm_setup(void)
{
    /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
    unsigned nr = cpu_has_vmx ? 2 : 3;
    unsigned int i, order = get_order_from_pages(nr);

    if ( !hvm_funcs.name )
        return 0;

    /*
     * The shadow I/O bitmaps themselves can't be static arrays because
     * they must fulfill hardware requirements (page alignment), and
     * doing so triggers the ASSERT(va >= XEN_VIRT_START)
     * in __virt_to_maddr().
     *
     * So as a compromise, pre-allocate them when Xen boots.
     * This function must be called from within start_xen(), once
     * xenheap allocations are valid.
     */

    for ( i = 0; i < ARRAY_SIZE(shadow_io_bitmap); i++ )
    {
        shadow_io_bitmap[i] = alloc_xenheap_pages(order, 0);
        if ( !shadow_io_bitmap[i] )
        {
            while ( i-- )
            {
                free_xenheap_pages(shadow_io_bitmap[i], order);
                shadow_io_bitmap[i] = NULL;
            }
            return -ENOMEM;
        }
        memset(shadow_io_bitmap[i], ~0U, nr << PAGE_SHIFT);
    }

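    /*
     * Every bitmap starts fully set, i.e. with every port intercepted
     * (memset above). Clear exactly the bits the table further up marks
     * as "cleared"; shadow_io_bitmap[2] intercepts both ports and so
     * needs no clearing.
     */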
    __clear_bit(0x80, shadow_io_bitmap[0]);
    __clear_bit(0xed, shadow_io_bitmap[1]);

    return 0;
}
__initcall(nestedhvm_setup);

unsigned long *
nestedhvm_vcpu_iomap_get(bool_t port_80, bool_t port_ed)
{
    int i;

    if ( !hvm_port80_allowed )
        port_80 = 1;

    if ( port_80 == 0 )
    {
        if ( port_ed == 0 )
            return hvm_io_bitmap;
        i = 0;
    }
    else
    {
        if ( port_ed == 0 )
            i = 1;
        else
            i = 2;
    }

    return shadow_io_bitmap[i];
}
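
/*
 * Illustrative sketch (names are hypothetical): vendor code merging the
 * L1 guest's I/O intercepts would derive the two flags from the L1
 * guest's bitmap and pick the matching pattern for the L2 guest:
 *
 *     bool_t port_80 = test_bit(0x80, l1_iomap);
 *     bool_t port_ed = test_bit(0xed, l1_iomap);
 *     unsigned long *iomap = nestedhvm_vcpu_iomap_get(port_80, port_ed);
 */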