/******************************************************************************
 * arch/x86/mm/mem_access.c
 *
 * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
 * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/guest_access.h> /* copy_from_guest() */
#include <xen/mem_access.h>
#include <xen/vm_event.h>
#include <xen/event.h>
#include <public/vm_event.h>
#include <asm/p2m.h>
#include <asm/altp2m.h>
#include <asm/vm_event.h>

#include "mm-locks.h"

/*
 * Get the access type for a gfn.
 * If gfn == INVALID_GFN, get the default access type.
 */
static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn,
                               xenmem_access_t *access)
{
    p2m_type_t t;
    p2m_access_t a;
    mfn_t mfn;

    /* Map p2m_access_t values to their XENMEM_access_* equivalents. */
    static const xenmem_access_t memaccess[] = {
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
            ACCESS(n),
            ACCESS(r),
            ACCESS(w),
            ACCESS(rw),
            ACCESS(x),
            ACCESS(rx),
            ACCESS(wx),
            ACCESS(rwx),
            ACCESS(rx2rw),
            ACCESS(n2rwx),
#undef ACCESS
    };

    /* If request to get default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        *access = memaccess[p2m->default_access];
        return 0;
    }

    gfn_lock(p2m, gfn, 0);
    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
    gfn_unlock(p2m, gfn, 0);

    if ( mfn_eq(mfn, INVALID_MFN) )
        return -ESRCH;

    if ( (unsigned int)a >= ARRAY_SIZE(memaccess) )
        return -ERANGE;

    *access = memaccess[a];
    return 0;
}

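/*
 * Check whether the memory access recorded in a vm_event response would
 * still violate the access permissions currently set on the gfn (the
 * listener may have relaxed them in the meantime).  Returns true if it
 * still would; callers use this to decide whether to go ahead with
 * emulating the faulting instruction.
 */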
bool p2m_mem_access_emulate_check(struct vcpu *v,
                                  const vm_event_response_t *rsp)
{
    xenmem_access_t access;
    bool violation = true;
    const struct vm_event_mem_access *data = &rsp->u.mem_access;
    struct domain *d = v->domain;
    struct p2m_domain *p2m = NULL;

    /* Use the vCPU's active altp2m view if there is one. */
    if ( altp2m_active(d) )
        p2m = p2m_get_altp2m(v);
    if ( !p2m )
        p2m = p2m_get_hostp2m(d);

    if ( _p2m_get_mem_access(p2m, _gfn(data->gfn), &access) == 0 )
    {
        switch ( access )
        {
        case XENMEM_access_n:
        case XENMEM_access_n2rwx:
        default:
            violation = data->flags & MEM_ACCESS_RWX;
            break;

        case XENMEM_access_r:
            violation = data->flags & MEM_ACCESS_WX;
            break;

        case XENMEM_access_w:
            violation = data->flags & MEM_ACCESS_RX;
            break;

        case XENMEM_access_x:
            violation = data->flags & MEM_ACCESS_RW;
            break;

        case XENMEM_access_rx:
        case XENMEM_access_rx2rw:
            violation = data->flags & MEM_ACCESS_W;
            break;

        case XENMEM_access_wx:
            violation = data->flags & MEM_ACCESS_R;
            break;

        case XENMEM_access_rw:
            violation = data->flags & MEM_ACCESS_X;
            break;

        case XENMEM_access_rwx:
            violation = false;
            break;
        }
    }

    return violation;
}

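/*
 * Handle a p2m access violation at gpa (and gla, if npfec.gla_valid is
 * set).  Applies the automatic rx2rw/n2rwx conversions, and when a
 * vm_event listener is present fills in *req_ptr with a request for it.
 * Returns whether the vCPU needs to be paused, i.e. whether the event
 * must be delivered synchronously.
 */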
bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
                          struct npfec npfec,
                          vm_event_request_t **req_ptr)
{
    struct vcpu *v = current;
    gfn_t gfn = gaddr_to_gfn(gpa);
    struct domain *d = v->domain;
    struct p2m_domain *p2m = NULL;
    mfn_t mfn;
    p2m_type_t p2mt;
    p2m_access_t p2ma;
    vm_event_request_t *req;
    int rc;

    if ( altp2m_active(d) )
        p2m = p2m_get_altp2m(v);
    if ( !p2m )
        p2m = p2m_get_hostp2m(d);

    /*
     * First, handle the rx2rw and n2rwx conversions automatically.
     * These calls to p2m->set_entry() must succeed: we have the gfn
     * locked and just did a successful get_entry().
     */
    gfn_lock(p2m, gfn, 0);
    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);

    if ( npfec.write_access && p2ma == p2m_access_rx2rw )
    {
        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt,
                            p2m_access_rw, -1);
        ASSERT(rc == 0);
        gfn_unlock(p2m, gfn, 0);
        return true;
    }
    else if ( p2ma == p2m_access_n2rwx )
    {
        ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch);
        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                            p2mt, p2m_access_rwx, -1);
        ASSERT(rc == 0);
    }
    gfn_unlock(p2m, gfn, 0);

    /*
     * Otherwise, check if there is a vm_event listener, and send the
     * message along.
     */
    if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr )
    {
        /* No listener */
        if ( p2m->access_required )
        {
            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
                                  "no vm_event listener VCPU %d, dom %d\n",
                                  v->vcpu_id, d->domain_id);
            domain_crash(v->domain);
            return false;
        }
        else
        {
            gfn_lock(p2m, gfn, 0);
            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
            if ( p2ma != p2m_access_n2rwx )
            {
                /*
                 * A listener is not required, so clear the access
                 * restrictions.  This set must succeed: we have the
                 * gfn locked and just did a successful get_entry().
                 */
                rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                                    p2mt, p2m_access_rwx, -1);
                ASSERT(rc == 0);
            }
            gfn_unlock(p2m, gfn, 0);
            return true;
        }
    }

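    /*
     * A listener is present: build a mem_access request to forward to
     * it.  Allocation failure simply leaves *req_ptr NULL.
     */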
    *req_ptr = NULL;
    req = xzalloc(vm_event_request_t);
    if ( req )
    {
        *req_ptr = req;

        req->reason = VM_EVENT_REASON_MEM_ACCESS;
        req->u.mem_access.gfn = gfn_x(gfn);
        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
        if ( npfec.gla_valid )
        {
            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
            req->u.mem_access.gla = gla;

            if ( npfec.kind == npfec_kind_with_gla )
                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
            else if ( npfec.kind == npfec_kind_in_gpt )
                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
        }
        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
    }

    /* Return whether vCPU pause is required (i.e. a sync event). */
    return (p2ma != p2m_access_n2rwx);
}

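/*
 * Set the access type of a single gfn in the altp2m view ap2m.  If the
 * view holds no valid entry for the gfn yet, the mapping (including a
 * covering superpage, if any) is first copied in from the host p2m.
 */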
int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
                              struct p2m_domain *ap2m, p2m_access_t a,
                              gfn_t gfn)
{
    mfn_t mfn;
    p2m_type_t t;
    p2m_access_t old_a;
    unsigned int page_order;
    unsigned long gfn_l = gfn_x(gfn);
    int rc;

    mfn = ap2m->get_entry(ap2m, gfn, &t, &old_a, 0, NULL, NULL);

    /* Check host p2m if no valid entry in alternate */
    if ( !mfn_valid(mfn) )
    {
        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);

        rc = -ESRCH;
        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
            return rc;

        /* If this is a superpage, copy that first */
        if ( page_order != PAGE_ORDER_4K )
        {
            unsigned long mask = ~((1UL << page_order) - 1);
            gfn_t gfn2 = _gfn(gfn_l & mask);
            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);

            rc = ap2m->set_entry(ap2m, gfn2, mfn2, page_order, t, old_a, 1);
            if ( rc )
                return rc;
        }
    }

    return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a,
                           current->domain != d);
}

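/*
 * Apply an access type to one gfn, either in the given altp2m view
 * (if ap2m is non-NULL) or directly in the host p2m.
 */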
static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
                          struct p2m_domain *ap2m, p2m_access_t a,
                          gfn_t gfn)
{
    int rc = 0;

    if ( ap2m )
    {
        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
        /* If the corresponding mfn is invalid, just skip it. */
        if ( rc == -ESRCH )
            rc = 0;
    }
    else
    {
        mfn_t mfn;
        p2m_access_t _a;
        p2m_type_t t;

        mfn = p2m->get_entry(p2m, gfn, &t, &_a, 0, NULL, NULL);
        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
    }

    return rc;
}

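/*
 * Translate a XENMEM_access_* value to its p2m_access_t equivalent,
 * resolving XENMEM_access_default via the p2m's default_access.
 * Returns false for values that have no p2m representation.
 */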
static bool xenmem_access_to_p2m_access(struct p2m_domain *p2m,
                                        xenmem_access_t xaccess,
                                        p2m_access_t *paccess)
{
    static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
        ACCESS(n),
        ACCESS(r),
        ACCESS(w),
        ACCESS(rw),
        ACCESS(x),
        ACCESS(rx),
        ACCESS(wx),
        ACCESS(rwx),
        ACCESS(rx2rw),
        ACCESS(n2rwx),
#undef ACCESS
    };

    switch ( xaccess )
    {
    case 0 ... ARRAY_SIZE(memaccess) - 1:
        *paccess = memaccess[xaccess];
        break;
    case XENMEM_access_default:
        *paccess = p2m->default_access;
        break;
    default:
        return false;
    }

    return true;
}

/*
 * Set the access type for a region of gfns.
 * If gfn == INVALID_GFN, set the default access type.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
    p2m_access_t a;
    unsigned long gfn_l;
    long rc = 0;

    /* altp2m view 0 is treated as the hostp2m */
    if ( altp2m_idx )
    {
        if ( altp2m_idx >= MAX_ALTP2M ||
             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = d->arch.altp2m_p2m[altp2m_idx];
    }

    if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
        return -EINVAL;

    /* If request to set default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

    p2m_lock(p2m);
    if ( ap2m )
        p2m_lock(ap2m);

    for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
    {
        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));

        if ( rc )
            break;

        /* Check for continuation if it's not the last iteration. */
        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    if ( ap2m )
        p2m_unlock(ap2m);
    p2m_unlock(p2m);

    return rc;
}

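/*
 * Illustrative only (a sketch, not part of this file): a privileged
 * monitor domain normally reaches p2m_set_mem_access() and the batch
 * variant below via the XENMEM_access_op hypercall, e.g. through the
 * libxc wrapper:
 *
 *     // Restrict gfns [gfn, gfn + nr) to read-only; subsequent writes
 *     // or instruction fetches fault into p2m_mem_access_check().
 *     rc = xc_set_mem_access(xch, domid, XENMEM_access_r, gfn, nr);
 */
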
/*
 * Set the access types of a batch of gfns, taking (gfn, access) pairs
 * from the given guest arrays.
 */
long p2m_set_mem_access_multi(struct domain *d,
                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
                              const XEN_GUEST_HANDLE(const_uint8) access_list,
                              uint32_t nr, uint32_t start, uint32_t mask,
                              unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
    long rc = 0;

    /* altp2m view 0 is treated as the hostp2m */
    if ( altp2m_idx )
    {
        if ( altp2m_idx >= MAX_ALTP2M ||
             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = d->arch.altp2m_p2m[altp2m_idx];
    }

    p2m_lock(p2m);
    if ( ap2m )
        p2m_lock(ap2m);

    while ( start < nr )
    {
        p2m_access_t a;
        uint8_t access;
        uint64_t gfn_l;

        if ( copy_from_guest_offset(&gfn_l, pfn_list, start, 1) ||
             copy_from_guest_offset(&access, access_list, start, 1) )
        {
            rc = -EFAULT;
            break;
        }

        if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
        {
            rc = -EINVAL;
            break;
        }

        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));

        if ( rc )
            break;

        /* Check for continuation if it's not the last iteration. */
        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    if ( ap2m )
        p2m_unlock(ap2m);
    p2m_unlock(p2m);

    return rc;
}

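/* Get the access type of a gfn in d's host p2m. */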
int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

    return _p2m_get_mem_access(p2m, gfn, access);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */