1 /*
2  * pv/mm.c
3  *
4  * Memory management code for PV guests
5  *
6  * Copyright (c) 2002-2005 K A Fraser
7  * Copyright (c) 2004 Christian Limpach
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms and conditions of the GNU General Public
11  * License, version 2, as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public
19  * License along with this program; If not, see <http://www.gnu.org/licenses/>.
20  */
21 
22 #include <xen/guest_access.h>
23 
24 #include <asm/current.h>
25 #include <asm/p2m.h>
26 
27 #include "mm.h"
28 
29 /* Override macros from asm/page.h to make them work with mfn_t */
30 #undef mfn_to_page
31 #define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
32 #undef page_to_mfn
33 #define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
34 
35 /*
36  * Get a mapping of a PV guest's l1e for this linear address.  The return
37  * pointer should be unmapped using unmap_domain_page().
38  */
map_guest_l1e(unsigned long linear,mfn_t * gl1mfn)39 l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn)
40 {
41     l2_pgentry_t l2e;
42 
43     ASSERT(!paging_mode_translate(current->domain));
44     ASSERT(!paging_mode_external(current->domain));
45 
46     if ( unlikely(!__addr_ok(linear)) )
47         return NULL;
48 
49     /* Find this l1e and its enclosing l1mfn in the linear map. */
50     if ( __copy_from_user(&l2e,
51                           &__linear_l2_table[l2_linear_offset(linear)],
52                           sizeof(l2_pgentry_t)) )
53         return NULL;
54 
55     /* Check flags that it will be safe to read the l1e. */
56     if ( (l2e_get_flags(l2e) & (_PAGE_PRESENT | _PAGE_PSE)) != _PAGE_PRESENT )
57         return NULL;
58 
59     *gl1mfn = l2e_get_mfn(l2e);
60 
61     return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(linear);
62 }
63 
64 /*
65  * Read the guest's l1e that maps this address, from the kernel-mode
66  * page tables.
67  */
guest_get_eff_kern_l1e(unsigned long linear)68 static l1_pgentry_t guest_get_eff_kern_l1e(unsigned long linear)
69 {
70     struct vcpu *curr = current;
71     const bool user_mode = !(curr->arch.flags & TF_kernel_mode);
72     l1_pgentry_t l1e;
73 
74     if ( user_mode )
75         toggle_guest_pt(curr);
76 
77     l1e = guest_get_eff_l1e(linear);
78 
79     if ( user_mode )
80         toggle_guest_pt(curr);
81 
82     return l1e;
83 }
84 
85 /*
86  * Map a guest's LDT page (covering the byte at @offset from start of the LDT)
87  * into Xen's virtual range.  Returns true if the mapping changed, false
88  * otherwise.
89  */
pv_map_ldt_shadow_page(unsigned int offset)90 bool pv_map_ldt_shadow_page(unsigned int offset)
91 {
92     struct vcpu *curr = current;
93     struct domain *currd = curr->domain;
94     struct page_info *page;
95     l1_pgentry_t gl1e, *pl1e;
96     unsigned long linear = curr->arch.pv_vcpu.ldt_base + offset;
97 
98     BUG_ON(unlikely(in_irq()));
99 
100     /*
101      * Hardware limit checking should guarantee this property.  NB. This is
102      * safe as updates to the LDT can only be made by MMUEXT_SET_LDT to the
103      * current vcpu, and vcpu_reset() will block until this vcpu has been
104      * descheduled before continuing.
105      */
106     ASSERT((offset >> 3) <= curr->arch.pv_vcpu.ldt_ents);
107 
108     if ( is_pv_32bit_domain(currd) )
109         linear = (uint32_t)linear;
110 
111     gl1e = guest_get_eff_kern_l1e(linear);
112     if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
113         return false;
114 
115     page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
116     if ( unlikely(!page) )
117         return false;
118 
119     if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) )
120     {
121         put_page(page);
122         return false;
123     }
124 
125     pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];
126     l1e_add_flags(gl1e, _PAGE_RW);
127 
128     spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock);
129     l1e_write(pl1e, gl1e);
130     curr->arch.pv_vcpu.shadow_ldt_mapcnt++;
131     spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock);
132 
133     return true;
134 }
135 
136 /*
137  * Local variables:
138  * mode: C
139  * c-file-style: "BSD"
140  * c-basic-offset: 4
141  * tab-width: 4
142  * indent-tabs-mode: nil
143  * End:
144  */
145