/*
 * arch/arm/mem_access.c
 *
 * Architecture-specific mem_access handling routines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/mem_access.h>
#include <xen/monitor.h>
#include <xen/sched.h>
#include <xen/vm_event.h>
#include <public/vm_event.h>
#include <asm/event.h>
#include <asm/guest_walk.h>

static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
                                xenmem_access_t *access)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    void *i;
    unsigned int index;

    static const xenmem_access_t memaccess[] = {
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
            ACCESS(n),
            ACCESS(r),
            ACCESS(w),
            ACCESS(rw),
            ACCESS(x),
            ACCESS(rx),
            ACCESS(wx),
            ACCESS(rwx),
            ACCESS(rx2rw),
            ACCESS(n2rwx),
#undef ACCESS
    };
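
    /*
     * Illustrative note: ACCESS() above uses designated initializers, so
     * e.g. ACCESS(rx2rw) expands to
     *
     *     [p2m_access_rx2rw] = XENMEM_access_rx2rw,
     *
     * mapping each p2m_access_t index to its XENMEM_access_t counterpart.
     */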

    ASSERT(p2m_is_locked(p2m));

    /* If no setting was ever set, just return rwx. */
    if ( !p2m->mem_access_enabled )
    {
        *access = XENMEM_access_rwx;
        return 0;
    }

    /* If request to get default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        *access = memaccess[p2m->default_access];
        return 0;
    }

    i = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));

    if ( !i )
    {
        /*
         * No setting was found in the Radix tree. Check if the
         * entry exists in the page-tables.
         */
        mfn_t mfn = p2m_get_entry(p2m, gfn, NULL, NULL, NULL);

        if ( mfn_eq(mfn, INVALID_MFN) )
            return -ESRCH;

        /* If the entry exists then it's rwx. */
        *access = XENMEM_access_rwx;
    }
    else
    {
        /* Setting was found in the Radix tree. */
        index = radix_tree_ptr_to_int(i);
        if ( index >= ARRAY_SIZE(memaccess) )
            return -ERANGE;

        *access = memaccess[index];
    }

    return 0;
}
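
/*
 * Illustrative sketch (not part of this file): the p2m code elsewhere is
 * expected to store a setting by encoding the p2m_access_t value as a
 * pointer, roughly
 *
 *     radix_tree_insert(&p2m->mem_access_settings, gfn_x(gfn),
 *                       radix_tree_int_to_ptr(a));
 *
 * which is why radix_tree_ptr_to_int() recovers the table index above.
 */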

/*
 * If mem_access is in use it might have been the reason why get_page_from_gva
 * failed to fetch the page, as it uses the MMU for the permission checking.
 * Only in these cases do we perform a software-based type check and fetch the
 * page if we indeed found a conflicting mem_access setting.
 */
struct page_info*
p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
                                  const struct vcpu *v)
{
    long rc;
    unsigned int perms;
    paddr_t ipa;
    gfn_t gfn;
    mfn_t mfn;
    xenmem_access_t xma;
    p2m_type_t t;
    struct page_info *page = NULL;
    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);

    rc = gva_to_ipa(gva, &ipa, flag);

    /*
     * In case mem_access is active, hardware-based gva_to_ipa translation
     * might fail. Since gva_to_ipa uses the guest's translation tables, access
     * to which might be restricted by the active VTTBR, we perform a gva to
     * ipa translation in software.
     */
    if ( rc < 0 )
    {
        /*
         * The software gva to ipa translation can still fail, e.g., if the gva
         * is not mapped.
         */
        if ( guest_walk_tables(v, gva, &ipa, &perms) < 0 )
            goto err;

        /*
         * Check the permissions that are assumed by the caller. For instance,
         * in the guestcopy case the caller assumes that the translated page
         * can be accessed with the requested permissions. If this is not the
         * case, we should fail.
         *
         * Note that we do not check for the GV2M_EXEC permission: since the
         * hardware-based translation through gva_to_ipa does not test for
         * execute permissions either, this check can be left out.
         */
        if ( (flag & GV2M_WRITE) && !(perms & GV2M_WRITE) )
            goto err;
    }

    gfn = gaddr_to_gfn(ipa);

    /*
     * We do this first as this is faster in the default case when no
     * permission is set on the page.
     */
    rc = __p2m_get_mem_access(v->domain, gfn, &xma);
    if ( rc < 0 )
        goto err;

    /* Let's check if mem_access limited the access. */
    switch ( xma )
    {
    default:
    case XENMEM_access_rwx:
    case XENMEM_access_rw:
        /*
         * If mem_access contains no rw perm restrictions at all then the
         * original fault was correct.
         */
        goto err;
    case XENMEM_access_n2rwx:
    case XENMEM_access_n:
    case XENMEM_access_x:
        /*
         * If no r/w is permitted by mem_access, this was a fault caused by
         * mem_access.
         */
        break;
    case XENMEM_access_wx:
    case XENMEM_access_w:
        /*
         * If this was a read then it was because of mem_access, but if it was
         * a write then the original get_page_from_gva fault was correct.
         */
        if ( flag == GV2M_READ )
            break;
        else
            goto err;
    case XENMEM_access_rx2rw:
    case XENMEM_access_rx:
    case XENMEM_access_r:
        /*
         * If this was a write then it was because of mem_access, but if it was
         * a read then the original get_page_from_gva fault was correct.
         */
        if ( flag == GV2M_WRITE )
            break;
        else
            goto err;
    }
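
    /*
     * Worked example: a guest read (flag == GV2M_READ) of a page whose
     * mem_access setting is XENMEM_access_w faults in hardware; the switch
     * above classifies that as a mem_access-induced fault, so we go on to
     * fetch the page by hand below.
     */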

    /*
     * We had a mem_access permission limiting the access, but the page type
     * could also be limiting, so we need to check that as well.
     */
    mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL);
    if ( mfn_eq(mfn, INVALID_MFN) )
        goto err;

    if ( !mfn_valid(mfn) )
        goto err;

    /* The base type doesn't allow r/w. */
    if ( t != p2m_ram_rw )
        goto err;

    page = mfn_to_page(mfn_x(mfn));

    if ( unlikely(!get_page(page, v->domain)) )
        page = NULL;

err:
    return page;
}
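
/*
 * Illustrative caller sketch (hypothetical, simplified): get_page_from_gva
 * is expected to fall back on the helper above when its MMU-based lookup
 * fails while mem_access is active, along the lines of
 *
 *     if ( !page && p2m->mem_access_enabled )
 *         page = p2m_mem_access_check_and_get_page(va, flags, v);
 *
 * where page, va and flags are the caller's own lookup state.
 */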

bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
{
    int rc;
    bool violation;
    xenmem_access_t xma;
    vm_event_request_t *req;
    struct vcpu *v = current;
    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);

    /* Mem_access is not in use. */
    if ( !p2m->mem_access_enabled )
        return true;

    rc = p2m_get_mem_access(v->domain, gaddr_to_gfn(gpa), &xma);
    if ( rc )
        return true;

    /* Now check for mem_access violation. */
    switch ( xma )
    {
    case XENMEM_access_rwx:
        violation = false;
        break;
    case XENMEM_access_rw:
        violation = npfec.insn_fetch;
        break;
    case XENMEM_access_wx:
        violation = npfec.read_access;
        break;
    case XENMEM_access_rx:
    case XENMEM_access_rx2rw:
        violation = npfec.write_access;
        break;
    case XENMEM_access_x:
        violation = npfec.read_access || npfec.write_access;
        break;
    case XENMEM_access_w:
        violation = npfec.read_access || npfec.insn_fetch;
        break;
    case XENMEM_access_r:
        violation = npfec.write_access || npfec.insn_fetch;
        break;
    default:
    case XENMEM_access_n:
    case XENMEM_access_n2rwx:
        violation = true;
        break;
    }
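
    /*
     * Example: an instruction fetch from a page restricted to
     * XENMEM_access_rw arrives with npfec.insn_fetch set, so the switch
     * above flags a violation; a plain read of the same page would not.
     */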

    if ( !violation )
        return true;

    /* First, handle rx2rw and n2rwx conversion automatically. */
    if ( npfec.write_access && xma == XENMEM_access_rx2rw )
    {
        rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1,
                                0, ~0, XENMEM_access_rw, 0);
        return false;
    }
    else if ( xma == XENMEM_access_n2rwx )
    {
        rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1,
                                0, ~0, XENMEM_access_rwx, 0);
    }

    /* Otherwise, check if there is a vm_event monitor subscriber. */
    if ( !vm_event_check_ring(v->domain->vm_event_monitor) )
    {
        /* No listener */
        if ( p2m->access_required )
        {
            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
                                  "no vm_event listener VCPU %d, dom %d\n",
                                  v->vcpu_id, v->domain->domain_id);
            domain_crash(v->domain);
        }
        else
        {
            /* n2rwx was already handled. */
            if ( xma != XENMEM_access_n2rwx )
            {
                /*
                 * A listener is not required, so clear the access
                 * restrictions.
                 */
                rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1,
                                        0, ~0, XENMEM_access_rwx, 0);
            }
        }

        /* No need to reinject. */
        return false;
    }

    req = xzalloc(vm_event_request_t);
    if ( req )
    {
        req->reason = VM_EVENT_REASON_MEM_ACCESS;

        /* Send request to mem_access subscriber. */
        req->u.mem_access.gfn = gpa >> PAGE_SHIFT;
        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
        if ( npfec.gla_valid )
        {
            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
            req->u.mem_access.gla = gla;

            if ( npfec.kind == npfec_kind_with_gla )
                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
            else if ( npfec.kind == npfec_kind_in_gpt )
                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
        }
        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;

        if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 )
            domain_crash(v->domain);

        xfree(req);
    }

    return false;
}
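
/*
 * Illustrative caller sketch (hypothetical, simplified): the stage-2 abort
 * handling in arch/arm/traps.c is expected to invoke the check above on a
 * permission fault, along the lines of
 *
 *     if ( !p2m_mem_access_check(gpa, gva, npfec) )
 *         return;
 *
 * i.e. a false return means the fault was consumed (auto-converted or
 * forwarded to the vm_event monitor) and must not be re-injected.
 */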

/*
 * Set access type for a region of pfns.
 * If gfn == INVALID_GFN, sets the default access type.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    p2m_access_t a;
    unsigned int order;
    long rc = 0;

    static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
        ACCESS(n),
        ACCESS(r),
        ACCESS(w),
        ACCESS(rw),
        ACCESS(x),
        ACCESS(rx),
        ACCESS(wx),
        ACCESS(rwx),
        ACCESS(rx2rw),
        ACCESS(n2rwx),
#undef ACCESS
    };

    switch ( access )
    {
    case 0 ... ARRAY_SIZE(memaccess) - 1:
        a = memaccess[access];
        break;
    case XENMEM_access_default:
        a = p2m->default_access;
        break;
    default:
        return -EINVAL;
    }

    /*
     * Flip mem_access_enabled to true when a permission is set, so as to
     * prevent allocating or inserting super-pages.
     */
    p2m->mem_access_enabled = true;

    /* If request to set default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

    p2m_write_lock(p2m);

    for ( gfn = gfn_add(gfn, start); nr > start;
          gfn = gfn_next_boundary(gfn, order) )
    {
        p2m_type_t t;
        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order);

        if ( !mfn_eq(mfn, INVALID_MFN) )
        {
            order = 0;
            rc = p2m_set_entry(p2m, gfn, 1, mfn, t, a);
            if ( rc )
                break;
        }

        start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);
        /* Check for continuation if it is not the last iteration. */
        if ( nr > start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    p2m_write_unlock(p2m);

    return rc;
}
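
/*
 * Continuation example: if the loop above is preempted after processing,
 * say, 512 of 1024 pages, rc is set to the new start index (512) and
 * returned, and the hypercall continuation re-enters with start = 512 so
 * the remaining pages are handled on the next pass.
 */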

long p2m_set_mem_access_multi(struct domain *d,
                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
                              const XEN_GUEST_HANDLE(const_uint8) access_list,
                              uint32_t nr, uint32_t start, uint32_t mask,
                              unsigned int altp2m_idx)
{
    /* Not yet implemented on ARM. */
    return -EOPNOTSUPP;
}

int p2m_get_mem_access(struct domain *d, gfn_t gfn,
                       xenmem_access_t *access)
{
    int ret;
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

    p2m_read_lock(p2m);
    ret = __p2m_get_mem_access(d, gfn, access);
    p2m_read_unlock(p2m);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */