/* SPDX-License-Identifier: GPL-2.0-or-later */
/******************************************************************************
 * arch/x86/mm/p2m-basic.c
 *
 * Basic P2M management largely applicable to all domain types.
 *
 * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
 * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 */

#include <xen/event.h>
#include <xen/types.h>
#include <asm/altp2m.h>
#include <asm/hvm/nestedhvm.h>
#include <asm/p2m.h>
#include "mm-locks.h"
#include "p2m.h"

/* Initialise the data structures for later use by the p2m code */
static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
{
    int ret = 0;

#ifdef CONFIG_HVM
    mm_rwlock_init(&p2m->lock);
    INIT_PAGE_LIST_HEAD(&p2m->pages);
    spin_lock_init(&p2m->ioreq.lock);
#endif

    p2m->domain = d;
    p2m->default_access = p2m_access_rwx;
    p2m->p2m_class = p2m_host;

    if ( !is_hvm_domain(d) )
        return 0;

    p2m_pod_init(p2m);
    p2m_nestedp2m_init(p2m);

    if ( hap_enabled(d) && cpu_has_vmx )
        ret = ept_p2m_init(p2m);
    else
        p2m_pt_init(p2m);

    return ret;
}

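/*
 * Allocate and initialise a struct p2m_domain for @d.  Returns NULL on
 * failure; on success the caller owns the structure and releases it with
 * p2m_free_one().
 */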
struct p2m_domain *p2m_init_one(struct domain *d)
{
    struct p2m_domain *p2m = xzalloc(struct p2m_domain);

    if ( !p2m )
        return NULL;

    if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
        goto free_p2m;

    if ( p2m_initialise(d, p2m) )
        goto free_cpumask;
    return p2m;

 free_cpumask:
    free_cpumask_var(p2m->dirty_cpumask);
 free_p2m:
    xfree(p2m);
    return NULL;
}

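/*
 * Undo p2m_init_one(): free all state owned by @p2m (log-dirty ranges,
 * EPT data when in use, the dirty cpumask) and the structure itself.
 */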
void p2m_free_one(struct p2m_domain *p2m)
{
    p2m_free_logdirty(p2m);
    if ( hap_enabled(p2m->domain) && cpu_has_vmx )
        ept_p2m_uninit(p2m);
    free_cpumask_var(p2m->dirty_cpumask);
    xfree(p2m);
}

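/*
 * Allocate the host p2m for @d and install it as d->arch.p2m.  Returns
 * 0 on success, -ENOMEM or a log-dirty initialisation error otherwise.
 */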
static int p2m_init_hostp2m(struct domain *d)
{
    struct p2m_domain *p2m = p2m_init_one(d);
    int rc;

    if ( !p2m )
        return -ENOMEM;

    rc = p2m_init_logdirty(p2m);

    if ( !rc )
        d->arch.p2m = p2m;
    else
        p2m_free_one(p2m);

    return rc;
}

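/* Free the host p2m, if still present, and clear d->arch.p2m. */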
static void p2m_teardown_hostp2m(struct domain *d)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

    if ( p2m )
    {
        p2m_free_one(p2m);
        d->arch.p2m = NULL;
    }
}

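/*
 * Set up all p2m state for @d: the host p2m first, then (for HVM domains)
 * the nested and alternate p2ms.  Any failure unwinds what was already
 * initialised before returning the error.
 */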
int p2m_init(struct domain *d)
{
    int rc;

    rc = p2m_init_hostp2m(d);
    if ( rc || !is_hvm_domain(d) )
        return rc;

    /*
     * nestedp2m must be initialised unconditionally: p2m_init() runs before
     * the HVM_PARAM_* options are set, so nestedhvm_enabled(d) still
     * returns false at this point.
     */
    rc = p2m_init_nestedp2m(d);
    if ( rc )
    {
        p2m_teardown_hostp2m(d);
        return rc;
    }

    rc = p2m_init_altp2m(d);
    if ( rc )
    {
        p2m_teardown_hostp2m(d);
        p2m_teardown_nestedp2m(d);
    }

    return rc;
}

/*
 * Return all the p2m pages to Xen.
 * We know we don't have any extra mappings to these pages.
 *
 * If @remove_root is true the root page table is freed along with all other
 * pages; otherwise the root is merely cleared and kept allocated.  When
 * @preempted is non-NULL it is set to true if the operation was interrupted
 * by a pending preemption and needs to be retried.
 *
 * hvm fixme: when adding support for pvh non-hardware domains, this path
 * must clean up any foreign p2m types (release refcnts on them).
 */
void p2m_teardown(struct p2m_domain *p2m, bool remove_root, bool *preempted)
{
#ifdef CONFIG_HVM
    struct page_info *pg, *root_pg = NULL;
    struct domain *d;
    unsigned int i = 0;

    if ( !p2m )
        return;

    d = p2m->domain;

    p2m_lock(p2m);

#ifdef CONFIG_MEM_SHARING
    ASSERT(atomic_read(&d->shr_pages) == 0);
#endif

    if ( remove_root )
        p2m->phys_table = pagetable_null();
    else if ( !pagetable_is_null(p2m->phys_table) )
    {
        root_pg = pagetable_get_page(p2m->phys_table);
        clear_domain_page(pagetable_get_mfn(p2m->phys_table));
    }

    while ( (pg = page_list_remove_head(&p2m->pages)) )
    {
        if ( pg == root_pg )
            continue;

        d->arch.paging.free_page(d, pg);

        /* Arbitrarily check preemption every 1024 iterations */
        if ( preempted && !(++i % 1024) && general_preempt_check() )
        {
            *preempted = true;
            break;
        }
    }

    if ( root_pg )
        page_list_add(root_pg, &p2m->pages);

    p2m_unlock(p2m);
#endif
}
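
/*
 * Hypothetical caller sketch (not a real call site in this file), showing
 * how the @preempted out-parameter is typically consumed: a restartable
 * context retries until the page list has been fully drained.
 *
 *     bool preempted = false;
 *
 *     p2m_teardown(p2m, true, &preempted);
 *     if ( preempted )
 *         return -ERESTART;    // reschedule the operation and retry
 */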

void p2m_final_teardown(struct domain *d)
{
    if ( is_hvm_domain(d) )
    {
        /*
         * We must tear down both of them unconditionally because
         * we initialise them unconditionally.
         */
        p2m_teardown_altp2m(d);
        p2m_teardown_nestedp2m(d);
    }

    /* Finally free the host p2m itself. */
    p2m_teardown_hostp2m(d);
}

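/*
 * May foreign pages be mapped into @d as part of resource acquisition?
 * See the comment below for why altp2m and nested p2m setups rule this
 * out.
 */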
bool arch_acquire_resource_check(const struct domain *d)
{
    /*
     * altp2m is not supported as we would otherwise also need to walk the
     * altp2m tables and drop any foreign map entries in order to drop the
     * page reference.
     *
     * The same applies to nestedhvm nested p2m tables, as the type from
     * the L0 p2m is replicated into the L1 p2m, and there's no filtering
     * that prevents foreign mappings from being created in nestedp2m.
     */
    return is_pv_domain(d) ||
           (d->arch.hvm.params[HVM_PARAM_ALTP2M] == XEN_ALTP2M_disabled &&
            !nestedhvm_enabled(d));
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */