/* SPDX-License-Identifier: GPL-2.0-or-later */
/******************************************************************************
 * arch/x86/mm/mem_access.c
 *
 * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
 * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 */

#include <xen/guest_access.h> /* copy_from_guest() */
#include <xen/mem_access.h>
#include <xen/nospec.h>
#include <xen/vm_event.h>
#include <xen/event.h>
#include <public/vm_event.h>
#include <asm/p2m.h>
#include <asm/altp2m.h>
#include <asm/hvm/emulate.h>
#include <asm/vm_event.h>

#include "mm-locks.h"

/*
 * Get access type for a gfn.
 * If gfn == INVALID_GFN, gets the default access type.
 */
static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn,
                               xenmem_access_t *access)
{
    p2m_type_t t;
    p2m_access_t a;
    mfn_t mfn;

    static const xenmem_access_t memaccess[] = {
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
            ACCESS(n),
            ACCESS(r),
            ACCESS(w),
            ACCESS(rw),
            ACCESS(x),
            ACCESS(rx),
            ACCESS(wx),
            ACCESS(rwx),
            ACCESS(rx2rw),
            ACCESS(n2rwx),
            ACCESS(r_pw),
#undef ACCESS
    };

    /* If request to get default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        *access = memaccess[p2m->default_access];
        return 0;
    }

    gfn_lock(p2m, gfn, 0);
    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
    gfn_unlock(p2m, gfn, 0);

    if ( mfn_eq(mfn, INVALID_MFN) )
        return -ESRCH;

    if ( (unsigned int)a >= ARRAY_SIZE(memaccess) )
        return -ERANGE;

    *access = memaccess[a];
    return 0;
}

bool p2m_mem_access_emulate_check(struct vcpu *v,
                                  const struct vm_event_st *rsp)
{
    xenmem_access_t access;
    bool violation = true;
    const struct vm_event_mem_access *data = &rsp->u.mem_access;
    struct domain *d = v->domain;
    struct p2m_domain *p2m = NULL;

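    /*
     * Check permissions against the p2m view the vCPU is actually running
     * on: the active altp2m view if there is one, the host p2m otherwise.
     */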
    if ( altp2m_active(d) )
        p2m = p2m_get_altp2m(v);
    if ( !p2m )
        p2m = p2m_get_hostp2m(d);

    if ( _p2m_get_mem_access(p2m, _gfn(data->gfn), &access) == 0 )
    {
        switch ( access )
        {
        case XENMEM_access_n:
        case XENMEM_access_n2rwx:
        default:
            violation = data->flags & MEM_ACCESS_RWX;
            break;

        case XENMEM_access_r:
        case XENMEM_access_r_pw:
            violation = data->flags & MEM_ACCESS_WX;
            break;

        case XENMEM_access_w:
            violation = data->flags & MEM_ACCESS_RX;
            break;

        case XENMEM_access_x:
            violation = data->flags & MEM_ACCESS_RW;
            break;

        case XENMEM_access_rx:
        case XENMEM_access_rx2rw:
            violation = data->flags & MEM_ACCESS_W;
            break;

        case XENMEM_access_wx:
            violation = data->flags & MEM_ACCESS_R;
            break;

        case XENMEM_access_rw:
            violation = data->flags & MEM_ACCESS_X;
            break;

        case XENMEM_access_rwx:
            violation = false;
            break;
        }
    }

    return violation;
}

bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
                          struct npfec npfec,
                          struct vm_event_st **req_ptr)
{
    struct vcpu *v = current;
    gfn_t gfn = gaddr_to_gfn(gpa);
    struct domain *d = v->domain;
    struct p2m_domain *p2m = NULL;
    mfn_t mfn;
    p2m_type_t p2mt;
    p2m_access_t p2ma;
    vm_event_request_t *req;
    int rc;

    if ( altp2m_active(d) )
        p2m = p2m_get_altp2m(v);
    if ( !p2m )
        p2m = p2m_get_hostp2m(d);

    /*
     * First, handle rx2rw conversion automatically: such pages silently
     * become rw on the first write fault, without notifying a listener.
     * n2rwx pages are likewise relaxed to rwx below, but the fault is
     * still reported.  These calls to p2m->set_entry() must succeed: we
     * have the gfn locked and just did a successful get_entry().
     */
    gfn_lock(p2m, gfn, 0);
    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);

    if ( npfec.write_access && p2ma == p2m_access_rx2rw )
    {
        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw,
                            -1);
        ASSERT(rc == 0);
        gfn_unlock(p2m, gfn, 0);
        return true;
    }
    else if ( p2ma == p2m_access_n2rwx )
    {
        ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch);
        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                            p2mt, p2m_access_rwx, -1);
        ASSERT(rc == 0);
    }
    gfn_unlock(p2m, gfn, 0);

    /*
     * Otherwise, check if there is a memory event listener, and send the
     * message along.
     */
    if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr )
    {
        /* No listener */
        if ( p2m->access_required )
        {
            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
                                  "no vm_event listener VCPU %d, dom %d\n",
                                  v->vcpu_id, d->domain_id);
            domain_crash(v->domain);
            return false;
        }
        else
        {
            gfn_lock(p2m, gfn, 0);
            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
            if ( p2ma != p2m_access_n2rwx )
            {
                /*
                 * A listener is not required, so clear the access
                 * restrictions.  This set must succeed: we have the
                 * gfn locked and just did a successful get_entry().
                 */
                rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                                    p2mt, p2m_access_rwx, -1);
                ASSERT(rc == 0);
            }
            gfn_unlock(p2m, gfn, 0);
            return true;
        }
    }

    /*
     * Try to avoid sending a mem event. Suppress events caused by page-walks
     * by emulating but still checking mem_access violations.
     */
    if ( vm_event_check_ring(d->vm_event_monitor) &&
         d->arch.monitor.inguest_pagefault_disabled &&
         npfec.kind == npfec_kind_in_gpt )
    {
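        /*
         * send_event makes the emulation path itself check the emulated
         * access against the current mem_access permissions, so a genuine
         * violation is still forwarded to the listener even though the
         * page-walk fault is absorbed here.
         */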
        v->arch.vm_event->send_event = true;
        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, X86_EXC_UD, X86_EVENT_NO_EC);
        v->arch.vm_event->send_event = false;

        return true;
    }

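    /*
     * Build a request for the caller to put on the vm_event ring; the
     * allocation is handed off via *req_ptr and freed by the caller.
     */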
    *req_ptr = NULL;
    req = xzalloc(vm_event_request_t);
    if ( req )
    {
        *req_ptr = req;

        req->reason = VM_EVENT_REASON_MEM_ACCESS;
        req->u.mem_access.gfn = gfn_x(gfn);
        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);

        if ( npfec.gla_valid )
        {
            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
            req->u.mem_access.gla = gla;
        }

        switch ( npfec.kind )
        {
        case npfec_kind_with_gla:
            req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
            break;

        case npfec_kind_in_gpt:
            req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
            break;
        }

        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
    }

    /* Return whether vCPU pause is required (aka. sync event) */
    return (p2ma != p2m_access_n2rwx);
}

static int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
                                     struct p2m_domain *ap2m, p2m_access_t a,
                                     gfn_t gfn)
{
    mfn_t mfn;
    p2m_type_t t;
    p2m_access_t old_a;
    int rc;

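    /*
     * Look the entry up in the altp2m view; AP2MGET_prepopulate copies it
     * in from the host p2m if the view doesn't hold it yet.
     */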
    rc = altp2m_get_effective_entry(ap2m, gfn, &mfn, &t, &old_a,
                                    AP2MGET_prepopulate);
    if ( rc )
        return rc;

    /*
     * Inherit the old suppress #VE bit value if it is already set, or set it
     * to 1 otherwise
     */
    return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
}

static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
                          struct p2m_domain *ap2m, p2m_access_t a,
                          gfn_t gfn)
{
    int rc = 0;

    if ( ap2m )
    {
        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
        /* If the corresponding mfn is invalid we will want to just skip it */
        if ( rc == -ESRCH )
            rc = 0;
    }
    else
    {
        p2m_access_t _a;
        p2m_type_t t;
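        /*
         * P2M_ALLOC makes sure a populate-on-demand entry is backed by a
         * real page before its access is changed.
         */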
        mfn_t mfn = p2m_get_gfn_type_access(p2m, gfn, &t, &_a,
                                            P2M_ALLOC, NULL, false);

        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
    }

    return rc;
}

bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
                                 xenmem_access_t xaccess,
                                 p2m_access_t *paccess)
{
    static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
        ACCESS(n),
        ACCESS(r),
        ACCESS(w),
        ACCESS(rw),
        ACCESS(x),
        ACCESS(rx),
        ACCESS(wx),
        ACCESS(rwx),
        ACCESS(rx2rw),
        ACCESS(n2rwx),
        ACCESS(r_pw),
#undef ACCESS
    };

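    /*
     * xaccess may be guest-controlled: clamp it under speculation before
     * using it to index the table above.
     */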
    switch ( xaccess )
    {
    case 0 ... ARRAY_SIZE(memaccess) - 1:
        xaccess = array_index_nospec(xaccess, ARRAY_SIZE(memaccess));
        *paccess = memaccess[xaccess];
        break;
    case XENMEM_access_default:
        *paccess = p2m->default_access;
        break;
    default:
        return false;
    }

    return true;
}

/*
 * Set access type for a region of gfns.
 * If gfn == INVALID_GFN, sets the default access type.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
    p2m_access_t a;
    unsigned long gfn_l;
    long rc = 0;

    /* altp2m view 0 is treated as the hostp2m */
    if ( altp2m_idx )
    {
        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
             mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
    }

    if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
        return -EINVAL;

    /* If request to set default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

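    /* Lock order: the host p2m is always locked before an altp2m view. */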
    p2m_lock(p2m);
    if ( ap2m )
        p2m_lock(ap2m);

    for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
    {
        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));

        if ( rc )
            break;

        /*
         * Check for continuation if it's not the last iteration: a
         * positive return value is the index at which to resume.
         */
        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    if ( ap2m )
        p2m_unlock(ap2m);
    p2m_unlock(p2m);

    return rc;
}

long p2m_set_mem_access_multi(struct domain *d,
                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
                              const XEN_GUEST_HANDLE(const_uint8) access_list,
                              uint32_t nr, uint32_t start, uint32_t mask,
                              unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
    long rc = 0;

    /* altp2m view 0 is treated as the hostp2m */
    if ( altp2m_idx )
    {
        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
             mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
    }

    p2m_lock(p2m);
    if ( ap2m )
        p2m_lock(ap2m);

    while ( start < nr )
    {
        p2m_access_t a;
        uint8_t access;
        uint64_t gfn_l;

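        /*
         * Fetch the next gfn and the access type requested for it from
         * the guest-supplied arrays.
         */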
        if ( copy_from_guest_offset(&gfn_l, pfn_list, start, 1) ||
             copy_from_guest_offset(&access, access_list, start, 1) )
        {
            rc = -EFAULT;
            break;
        }

        if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
        {
            rc = -EINVAL;
            break;
        }

        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));

        if ( rc )
            break;

        /*
         * Check for continuation if it's not the last iteration: a
         * positive return value is the index at which to resume.
         */
        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    if ( ap2m )
        p2m_unlock(ap2m);
    p2m_unlock(p2m);

    return rc;
}

int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
                       unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

    if ( !altp2m_active(d) )
    {
        if ( altp2m_idx )
            return -EINVAL;
    }
    else if ( altp2m_idx ) /* altp2m view 0 is treated as the hostp2m */
    {
        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
             mfn_x(INVALID_MFN) )
            return -EINVAL;

        p2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
    }

    return _p2m_get_mem_access(p2m, gfn, access);
}

void arch_p2m_set_access_required(struct domain *d, bool access_required)
{
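    /* The domain must remain paused while this setting is flipped. */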
    ASSERT(atomic_read(&d->pause_count));

    p2m_get_hostp2m(d)->access_required = access_required;

    if ( altp2m_active(d) )
    {
        unsigned int i;
        for ( i = 0; i < MAX_ALTP2M; i++ )
        {
            struct p2m_domain *p2m = d->arch.altp2m_p2m[i];

            if ( p2m )
                p2m->access_required = access_required;
        }
    }
}

bool p2m_mem_access_sanity_check(const struct domain *d)
{
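    /*
     * mem_access relies on EPT permission handling, so it is only
     * available to HVM guests running with HAP on VMX-capable hardware.
     */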
    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */