/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Alternate p2m HVM
 * Copyright (c) 2014, Intel Corporation.
 */

#include <asm/hvm/hvm.h>
#include <asm/p2m.h>
#include <asm/altp2m.h>
#include <public/hvm/hvm_op.h>
#include <xen/event.h>
#include "mm-locks.h"
#include "p2m.h"

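/*
 * Bind a vCPU to the default altp2m view (index 0) and propagate the
 * change to the hardware state (on VMX, the EPTP in the VMCS) via
 * altp2m_vcpu_update_p2m().  The vCPU is paused around the update
 * unless it is the current one.
 */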
void
altp2m_vcpu_initialise(struct vcpu *v)
{
    if ( v != current )
        vcpu_pause(v);

    vcpu_altp2m(v).p2midx = 0;
    atomic_inc(&p2m_get_altp2m(v)->active_vcpus);

    altp2m_vcpu_update_p2m(v);

    if ( v != current )
        vcpu_unpause(v);
}

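/*
 * Detach a vCPU from altp2m: drop its reference on the active view,
 * tear down #VE, and invalidate the vCPU's view index.
 */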
void
altp2m_vcpu_destroy(struct vcpu *v)
{
    struct p2m_domain *p2m;

    if ( v != current )
        vcpu_pause(v);

    if ( (p2m = p2m_get_altp2m(v)) )
        atomic_dec(&p2m->active_vcpus);

    altp2m_vcpu_disable_ve(v);

    vcpu_altp2m(v).p2midx = INVALID_ALTP2M;
    altp2m_vcpu_update_p2m(v);

    if ( v != current )
        vcpu_unpause(v);
}

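/*
 * Enable #VE (virtualization exception) reporting for a vCPU, using the
 * guest frame at @gfn as the #VE information page.  Fails with -EEXIST
 * if #VE is already configured, and -EINVAL if the gfn is not plain
 * writeable guest RAM.
 */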
int altp2m_vcpu_enable_ve(struct vcpu *v, gfn_t gfn)
{
    struct domain *d = v->domain;
    struct altp2mvcpu *a = &vcpu_altp2m(v);
    p2m_type_t p2mt;
    struct page_info *pg;
    int rc;

    /* Early exit path if #VE is already configured. */
    if ( a->veinfo_pg )
        return -EEXIST;

    rc = check_get_page_from_gfn(d, gfn, false, &p2mt, &pg);
    if ( rc )
        return rc;

    /*
     * Looking for a plain piece of guest writeable RAM which isn't a magic
     * frame such as a grant/ioreq/shared_info/etc mapping.  We (ab)use the
     * pageable() predicate for this, due to it having the same properties
     * that we want.
     */
    if ( !p2m_is_pageable(p2mt) || is_special_page(pg) )
    {
        rc = -EINVAL;
        goto err;
    }

    /*
     * Update veinfo_pg, making sure to be safe with concurrent hypercalls.
     * The first caller to make veinfo_pg become non-NULL will program its MFN
     * into the VMCS, so must not be clobbered.  Callers which lose the race
     * back off with -EEXIST.
     */
    if ( cmpxchg(&a->veinfo_pg, NULL, pg) != NULL )
    {
        rc = -EEXIST;
        goto err;
    }

    altp2m_vcpu_update_vmfunc_ve(v);

    return 0;

 err:
    put_page(pg);

    return rc;
}

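/* Disable #VE for a vCPU and drop the reference on its info page, if any. */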
void altp2m_vcpu_disable_ve(struct vcpu *v)
{
    struct altp2mvcpu *a = &vcpu_altp2m(v);
    struct page_info *pg;

    /*
     * Update veinfo_pg, making sure to be safe with concurrent hypercalls.
     * The winner of this race is responsible for updating the VMCS to no
     * longer point at the page, then dropping the associated ref.
     */
    if ( (pg = xchg(&a->veinfo_pg, NULL)) )
    {
        altp2m_vcpu_update_vmfunc_ve(v);

        put_page(pg);
    }
}

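/*
 * Allocate all MAX_ALTP2M alternate p2m structures for a domain.  On
 * allocation failure, everything set up so far is torn down again and
 * -ENOMEM is returned.
 */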
int p2m_init_altp2m(struct domain *d)
{
    unsigned int i;
    struct p2m_domain *p2m;
    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);

    mm_lock_init(&d->arch.altp2m_list_lock);
    for ( i = 0; i < MAX_ALTP2M; i++ )
    {
        d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
        if ( p2m == NULL )
        {
            p2m_teardown_altp2m(d);
            return -ENOMEM;
        }
        p2m->p2m_class = p2m_alternate;
        p2m->access_required = hostp2m->access_required;
        _atomic_set(&p2m->active_vcpus, 0);
    }

    return 0;
}

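/* Free every alternate p2m allocated by p2m_init_altp2m(). */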
void p2m_teardown_altp2m(struct domain *d)
{
    unsigned int i;
    struct p2m_domain *p2m;

    for ( i = 0; i < MAX_ALTP2M; i++ )
    {
        if ( !d->arch.altp2m_p2m[i] )
            continue;
        p2m = d->arch.altp2m_p2m[i];
        d->arch.altp2m_p2m[i] = NULL;
        p2m_free_one(p2m);
    }
}

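/*
 * Look up @gfn in @ap2m, falling back to the host p2m when the altp2m
 * holds no valid entry.  With @prepopulate, a superpage mapping found
 * in the host p2m is first copied into the altp2m, so that the caller
 * can subsequently operate on the entry.
 */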
int altp2m_get_effective_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn,
                               p2m_type_t *t, p2m_access_t *a,
                               bool prepopulate)
{
    *mfn = ap2m->get_entry(ap2m, gfn, t, a, 0, NULL, NULL);

    /* Check host p2m if no valid entry in alternate. */
    if ( !mfn_valid(*mfn) && !p2m_is_hostp2m(ap2m) )
    {
        struct p2m_domain *hp2m = p2m_get_hostp2m(ap2m->domain);
        unsigned int page_order;
        int rc;

        *mfn = p2m_get_gfn_type_access(hp2m, gfn, t, a, P2M_ALLOC | P2M_UNSHARE,
                                       &page_order, 0);

        rc = -ESRCH;
        if ( !mfn_valid(*mfn) || *t != p2m_ram_rw )
            return rc;

        /* If this is a superpage, copy that first. */
        if ( prepopulate && page_order != PAGE_ORDER_4K )
        {
            unsigned long mask = ~((1UL << page_order) - 1);
            gfn_t gfn_aligned = _gfn(gfn_x(gfn) & mask);
            mfn_t mfn_aligned = _mfn(mfn_x(*mfn) & mask);

            rc = ap2m->set_entry(ap2m, gfn_aligned, mfn_aligned, page_order,
                                 *t, *a, 1);
            if ( rc )
                return rc;
        }
    }

    return 0;
}

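/* Switch @v to the altp2m view @idx, if altp2m is active for the domain. */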
void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
{
    if ( altp2m_active(v->domain) )
        p2m_switch_vcpu_altp2m_by_id(v, idx);
}

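/*
 * Switch a single vCPU to altp2m view @idx.  Returns true if the view
 * is initialised (whether or not the vCPU actually changed views),
 * false if @idx is out of range or unused.
 */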
bool p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
{
    struct domain *d = v->domain;
    bool rc = false;

    if ( idx >= MAX_ALTP2M )
        return rc;

    altp2m_list_lock(d);

    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
    {
        if ( p2m_set_altp2m(v, idx) )
            altp2m_vcpu_update_p2m(v);
        rc = true;
    }

    altp2m_list_unlock(d);
    return rc;
}

/*
 * Read info about the gfn in an altp2m, locking the gfn.
 *
 * If the entry is valid, pass the results back to the caller.
 *
 * If the entry was invalid, and the host's entry is also invalid,
 * return to the caller without any changes.
 *
 * If the entry is invalid, and the host entry was valid, propagate
 * the host's entry to the altp2m (retaining page order), and indicate
 * that the caller should re-try the faulting instruction.
 */
bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
                                 mfn_t *mfn, p2m_type_t *p2mt,
                                 p2m_access_t *p2ma, unsigned int *page_order)
{
    p2m_type_t ap2mt;
    p2m_access_t ap2ma;
    unsigned int cur_order;
    unsigned long mask;
    gfn_t gfn;
    mfn_t amfn;
    int rc;

    /*
     * NB we must get the full lock on the altp2m here, in addition to
     * the lock on the individual gfn, since we may change a range of
     * gfns below.
     */
    p2m_lock(ap2m);

    amfn = get_gfn_type_access(ap2m, gfn_l, &ap2mt, &ap2ma, 0, &cur_order);

    if ( cur_order > *page_order )
        cur_order = *page_order;

    if ( !mfn_eq(amfn, INVALID_MFN) )
    {
        p2m_unlock(ap2m);
        *mfn  = amfn;
        *p2mt = ap2mt;
        *p2ma = ap2ma;
        *page_order = cur_order;
        return false;
    }

    /* Host entry is also invalid; don't bother setting the altp2m entry. */
    if ( mfn_eq(*mfn, INVALID_MFN) )
    {
        p2m_unlock(ap2m);
        *page_order = cur_order;
        return false;
    }

    /*
     * If this is a superpage mapping, round down both frame numbers
     * to the start of the superpage.  NB that we repurpose `amfn`
     * here.
     */
    mask = ~((1UL << cur_order) - 1);
    amfn = _mfn(mfn_x(*mfn) & mask);
    gfn = _gfn(gfn_l & mask);

    /* Override the altp2m entry with its default access. */
    *p2ma = ap2m->default_access;

    rc = p2m_set_entry(ap2m, gfn, amfn, cur_order, *p2mt, *p2ma);
    p2m_unlock(ap2m);

    if ( rc )
    {
        gprintk(XENLOG_ERR,
                "failed to set entry for %"PRI_gfn" -> %"PRI_mfn" altp2m %u, rc %d\n",
                gfn_l, mfn_x(amfn), vcpu_altp2m(current).p2midx, rc);
        domain_crash(ap2m->domain);
    }

    return true;
}

enum altp2m_reset_type {
    ALTP2M_RESET,
    ALTP2M_DEACTIVATE
};

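/*
 * Flush an altp2m back to a pristine state.  ALTP2M_DEACTIVATE
 * additionally frees the logdirty rangeset, for views which are being
 * destroyed rather than merely reset.
 */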
static void p2m_reset_altp2m(struct domain *d, unsigned int idx,
                             enum altp2m_reset_type reset_type)
{
    struct p2m_domain *p2m;

    ASSERT(idx < MAX_ALTP2M);
    p2m = array_access_nospec(d->arch.altp2m_p2m, idx);

    p2m_lock(p2m);

    p2m_flush_table_locked(p2m);

    if ( reset_type == ALTP2M_DEACTIVATE )
        p2m_free_logdirty(p2m);

    /* Uninit and reinit ept to force TLB shootdown. */
    ept_p2m_uninit(p2m);
    ept_p2m_init(p2m);

    p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
    p2m->max_remapped_gfn = 0;

    p2m_unlock(p2m);
}

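/* Deactivate and invalidate every altp2m view of a domain. */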
void p2m_flush_altp2m(struct domain *d)
{
    unsigned int i;

    altp2m_list_lock(d);

    for ( i = 0; i < MAX_ALTP2M; i++ )
    {
        p2m_reset_altp2m(d, i, ALTP2M_DEACTIVATE);
        d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
        d->arch.altp2m_visible_eptp[i] = mfn_x(INVALID_MFN);
    }

    altp2m_list_unlock(d);
}

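/*
 * Bring the altp2m at @idx into service: set up logdirty tracking
 * mirroring the host p2m, copy the relevant host p2m settings, and
 * initialise the view's EPT structures.
 */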
static int p2m_activate_altp2m(struct domain *d, unsigned int idx,
                               p2m_access_t hvmmem_default_access)
{
    struct p2m_domain *hostp2m, *p2m;
    int rc;

    ASSERT(idx < MAX_ALTP2M);

    p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
    hostp2m = p2m_get_hostp2m(d);

    p2m_lock(p2m);

    rc = p2m_init_logdirty(p2m);

    if ( rc )
        goto out;

    /* The following is really just a rangeset copy. */
    rc = rangeset_merge(p2m->logdirty_ranges, hostp2m->logdirty_ranges);

    if ( rc )
    {
        p2m_free_logdirty(p2m);
        goto out;
    }

    p2m->default_access = hvmmem_default_access;
    p2m->domain = hostp2m->domain;
    p2m->global_logdirty = hostp2m->global_logdirty;
    p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
    p2m->max_mapped_pfn = p2m->max_remapped_gfn = 0;

    p2m_init_altp2m_ept(d, idx);

 out:
    p2m_unlock(p2m);

    return rc;
}

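/* Activate the specific altp2m view @idx, if it is not already in use. */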
int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
{
    int rc = -EINVAL;
    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);

    if ( idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) )
        return rc;

    altp2m_list_lock(d);

    if ( d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] ==
         mfn_x(INVALID_MFN) )
        rc = p2m_activate_altp2m(d, idx, hostp2m->default_access);

    altp2m_list_unlock(d);
    return rc;
}

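/*
 * Activate the first unused altp2m view with the given default access,
 * returning its index in @idx.
 */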
int p2m_init_next_altp2m(struct domain *d, uint16_t *idx,
                         xenmem_access_t hvmmem_default_access)
{
    int rc = -EINVAL;
    unsigned int i;
    p2m_access_t a;
    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);

    if ( hvmmem_default_access > XENMEM_access_default ||
         !xenmem_access_to_p2m_access(hostp2m, hvmmem_default_access, &a) )
        return rc;

    altp2m_list_lock(d);

    for ( i = 0; i < MAX_ALTP2M; i++ )
    {
        if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
            continue;

        rc = p2m_activate_altp2m(d, i, a);

        if ( !rc )
            *idx = i;

        break;
    }

    altp2m_list_unlock(d);
    return rc;
}

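/*
 * Destroy the altp2m view @idx.  View 0 can never be destroyed, and a
 * view with active vCPUs on it is left alone (-EBUSY).
 */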
int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
{
    struct p2m_domain *p2m;
    int rc = -EBUSY;

    if ( !idx || idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) )
        return rc;

    rc = domain_pause_except_self(d);
    if ( rc )
        return rc;

    rc = -EBUSY;
    altp2m_list_lock(d);

    if ( d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] !=
         mfn_x(INVALID_MFN) )
    {
        p2m = array_access_nospec(d->arch.altp2m_p2m, idx);

        if ( !_atomic_read(p2m->active_vcpus) )
        {
            p2m_reset_altp2m(d, idx, ALTP2M_DEACTIVATE);
            d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] =
                mfn_x(INVALID_MFN);
            d->arch.altp2m_visible_eptp[array_index_nospec(idx, MAX_EPTP)] =
                mfn_x(INVALID_MFN);
            rc = 0;
        }
    }

    altp2m_list_unlock(d);

    domain_unpause_except_self(d);

    return rc;
}

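/* Switch every vCPU of @d to the altp2m view @idx, if it is visible. */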
int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
{
    struct vcpu *v;
    int rc = -EINVAL;

    if ( idx >= MAX_ALTP2M )
        return rc;

    rc = domain_pause_except_self(d);
    if ( rc )
        return rc;

    rc = -EINVAL;
    altp2m_list_lock(d);

    if ( d->arch.altp2m_visible_eptp[idx] != mfn_x(INVALID_MFN) )
    {
        for_each_vcpu( d, v )
            if ( p2m_set_altp2m(v, idx) )
                altp2m_vcpu_update_p2m(v);

        rc = 0;
    }

    altp2m_list_unlock(d);

    domain_unpause_except_self(d);

    return rc;
}

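/*
 * Remap @old_gfn in view @idx to the mfn currently backing @new_gfn, or
 * remove the mapping of @old_gfn entirely when @new_gfn is INVALID_GFN.
 * Successful remappings widen the view's min/max_remapped_gfn range,
 * which p2m_altp2m_propagate_change() uses to decide when a full view
 * reset is required.
 */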
int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
                          gfn_t old_gfn, gfn_t new_gfn)
{
    struct p2m_domain *hp2m, *ap2m;
    p2m_access_t a;
    p2m_type_t t;
    mfn_t mfn;
    int rc = -EINVAL;

    if ( idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
         d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] ==
         mfn_x(INVALID_MFN) )
        return rc;

    hp2m = p2m_get_hostp2m(d);
    ap2m = array_access_nospec(d->arch.altp2m_p2m, idx);

    p2m_lock(hp2m);
    p2m_lock(ap2m);

    if ( gfn_eq(new_gfn, INVALID_GFN) )
    {
        mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
        rc = mfn_valid(mfn)
             ? p2m_remove_entry(ap2m, old_gfn, mfn, PAGE_ORDER_4K)
             : 0;
        goto out;
    }

    rc = altp2m_get_effective_entry(ap2m, old_gfn, &mfn, &t, &a,
                                    AP2MGET_prepopulate);
    if ( rc )
        goto out;

    rc = altp2m_get_effective_entry(ap2m, new_gfn, &mfn, &t, &a,
                                    AP2MGET_query);
    if ( rc )
        goto out;

    if ( !ap2m->set_entry(ap2m, old_gfn, mfn, PAGE_ORDER_4K, t, a,
                          (current->domain != d)) )
    {
        rc = 0;

        if ( gfn_x(new_gfn) < ap2m->min_remapped_gfn )
            ap2m->min_remapped_gfn = gfn_x(new_gfn);
        if ( gfn_x(new_gfn) > ap2m->max_remapped_gfn )
            ap2m->max_remapped_gfn = gfn_x(new_gfn);
    }

 out:
    p2m_unlock(ap2m);
    p2m_unlock(hp2m);
    return rc;
}

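/*
 * Propagate a host p2m change to every active altp2m.  Entries which a
 * view holds individually are updated in place.  If a page is dropped
 * from a range that a view has remapped, the view is reset wholesale;
 * with two or more views affected, all views are reset.
 */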
int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
                                mfn_t mfn, unsigned int page_order,
                                p2m_type_t p2mt, p2m_access_t p2ma)
{
    struct p2m_domain *p2m;
    unsigned int i;
    unsigned int reset_count = 0;
    unsigned int last_reset_idx = ~0;
    int ret = 0;

    if ( !altp2m_active(d) )
        return 0;

    altp2m_list_lock(d);

    for ( i = 0; i < MAX_ALTP2M; i++ )
    {
        p2m_type_t t;
        p2m_access_t a;

        if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
            continue;

        p2m = d->arch.altp2m_p2m[i];

        /* Check for a dropped page that may impact this altp2m. */
        if ( mfn_eq(mfn, INVALID_MFN) &&
             gfn_x(gfn) + (1UL << page_order) > p2m->min_remapped_gfn &&
             gfn_x(gfn) <= p2m->max_remapped_gfn )
        {
            if ( !reset_count++ )
            {
                p2m_reset_altp2m(d, i, ALTP2M_RESET);
                last_reset_idx = i;
            }
            else
            {
                /* At least 2 altp2m's impacted, so reset everything. */
                for ( i = 0; i < MAX_ALTP2M; i++ )
                {
                    if ( i == last_reset_idx ||
                         d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
                        continue;

                    p2m_reset_altp2m(d, i, ALTP2M_RESET);
                }

                ret = 0;
                break;
            }
        }
        else if ( !mfn_eq(get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0,
                                              NULL), INVALID_MFN) )
        {
            int rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);

            /* Best effort: Don't bail on error. */
            if ( !ret )
                ret = rc;

            p2m_put_gfn(p2m, gfn);
        }
        else
            p2m_put_gfn(p2m, gfn);
    }

    altp2m_list_unlock(d);

    return ret;
}

/*
 * Set/clear the #VE suppress bit for a page.  Only available on VMX.
 */
int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
                        unsigned int altp2m_idx)
{
    int rc;
    struct xen_hvm_altp2m_suppress_ve_multi sve = {
        altp2m_idx, suppress_ve, 0, 0, gfn_x(gfn), gfn_x(gfn), 0
    };

    if ( !(rc = p2m_set_suppress_ve_multi(d, &sve)) )
        rc = sve.first_error;

    return rc;
}

/*
 * Set/clear the #VE suppress bit for multiple pages.  Only available on VMX.
 */
int p2m_set_suppress_ve_multi(struct domain *d,
                              struct xen_hvm_altp2m_suppress_ve_multi *sve)
{
    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
    struct p2m_domain *ap2m = NULL;
    struct p2m_domain *p2m = host_p2m;
    uint64_t start = sve->first_gfn;
    int rc = 0;

    if ( sve->view > 0 )
    {
        if ( sve->view >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
             d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
             mfn_x(INVALID_MFN) )
            return -EINVAL;

        p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, sve->view);
    }

    p2m_lock(host_p2m);

    if ( ap2m )
        p2m_lock(ap2m);

    while ( sve->last_gfn >= start )
    {
        p2m_access_t a;
        p2m_type_t t;
        mfn_t mfn;
        int err = 0;

        if ( (err = altp2m_get_effective_entry(p2m, _gfn(start), &mfn, &t, &a,
                                               AP2MGET_query)) &&
             !sve->first_error )
        {
            sve->first_error_gfn = start; /* Save the gfn of the first error. */
            sve->first_error = err; /* Save the first error code. */
        }

        if ( !err && (err = p2m->set_entry(p2m, _gfn(start), mfn,
                                           PAGE_ORDER_4K, t, a,
                                           sve->suppress_ve)) &&
             !sve->first_error )
        {
            sve->first_error_gfn = start; /* Save the gfn of the first error. */
            sve->first_error = err; /* Save the first error code. */
        }

        /* Check for continuation if it's not the last iteration. */
        if ( sve->last_gfn >= ++start && hypercall_preempt_check() )
        {
            rc = -ERESTART;
            break;
        }
    }

    sve->first_gfn = start;

    if ( ap2m )
        p2m_unlock(ap2m);

    p2m_unlock(host_p2m);

    return rc;
}

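/* Read back the #VE suppress bit for a single gfn in the given view. */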
int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
    struct p2m_domain *ap2m = NULL;
    struct p2m_domain *p2m;
    mfn_t mfn;
    p2m_access_t a;
    p2m_type_t t;
    int rc = 0;

    if ( altp2m_idx > 0 )
    {
        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
             mfn_x(INVALID_MFN) )
            return -EINVAL;

        p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
    }
    else
        p2m = host_p2m;

    gfn_lock(host_p2m, gfn, 0);

    if ( ap2m )
        p2m_lock(ap2m);

    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, suppress_ve);
    if ( !mfn_valid(mfn) )
        rc = -ESRCH;

    if ( ap2m )
        p2m_unlock(ap2m);

    gfn_unlock(host_p2m, gfn, 0);

    return rc;
}

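/*
 * Publish (or hide) a view's EPTP in the visible-EPTP array, which
 * controls whether the guest may switch to the view itself (e.g. via
 * the VMFUNC EPTP-switching leaf on VMX).
 */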
int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int altp2m_idx,
                                   uint8_t visible)
{
    int rc = 0;

    altp2m_list_lock(d);

    /*
     * Eptp index is correlated with altp2m index and should not exceed
     * min(MAX_ALTP2M, MAX_EPTP).
     */
    if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
         d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
         mfn_x(INVALID_MFN) )
        rc = -EINVAL;
    else if ( visible )
        d->arch.altp2m_visible_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] =
            d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)];
    else
        d->arch.altp2m_visible_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] =
            mfn_x(INVALID_MFN);

    altp2m_list_unlock(d);

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */