#include <xen/lib.h>
#include <xen/domain_page.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/guest_access.h>

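/*
 * Copy 'len' bytes from the hypervisor buffer 'from' to the guest virtual
 * address 'to' of the current vCPU, one guest page at a time.  If
 * 'flush_dcache' is non-zero, clean the written range from the data cache.
 * Returns 0 on success, or the number of bytes that could not be copied.
 */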
static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
                                              unsigned len, int flush_dcache)
{
    /* XXX needs to handle faults */
    unsigned offset = (vaddr_t)to & ~PAGE_MASK;

    while ( len )
    {
        void *p;
        unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
        struct page_info *page;

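        /*
         * Translate the guest virtual address and take a reference on the
         * underlying page; a failed translation aborts the copy.
         */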
        page = get_page_from_gva(current, (vaddr_t) to, GV2M_WRITE);
        if ( page == NULL )
            return len;

        p = __map_domain_page(page);
        p += offset;
        memcpy(p, from, size);
        if ( flush_dcache )
            clean_dcache_va_range(p, size);

        unmap_domain_page(p - offset);
        put_page(page);
        len -= size;
        from += size;
        to += size;
        /*
         * After the first iteration, guest virtual address is correctly
         * aligned to PAGE_SIZE.
         */
        offset = 0;
    }

    return 0;
}

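/* Copy 'len' bytes from 'from' to the guest virtual address 'to'. */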
unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len)
{
    return raw_copy_to_guest_helper(to, from, len, 0);
}

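/*
 * As raw_copy_to_guest(), but additionally clean the written range from
 * the data cache.
 */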
unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from,
                                             unsigned len)
{
    return raw_copy_to_guest_helper(to, from, len, 1);
}

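/*
 * Zero 'len' bytes at the guest virtual address 'to' of the current vCPU.
 * Returns 0 on success, or the number of bytes that could not be cleared.
 */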
unsigned long raw_clear_guest(void *to, unsigned len)
{
    /* XXX needs to handle faults */
    unsigned offset = (vaddr_t)to & ~PAGE_MASK;

    while ( len )
    {
        void *p;
        unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
        struct page_info *page;

        page = get_page_from_gva(current, (vaddr_t) to, GV2M_WRITE);
        if ( page == NULL )
            return len;

        p = __map_domain_page(page);
        p += offset;
        memset(p, 0x00, size);

        unmap_domain_page(p - offset);
        put_page(page);
        len -= size;
        to += size;
        /*
         * After the first iteration, guest virtual address is correctly
         * aligned to PAGE_SIZE.
         */
        offset = 0;
    }

    return 0;
}

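/*
 * Copy 'len' bytes from the guest virtual address 'from' of the current
 * vCPU into the hypervisor buffer 'to'.  Returns 0 on success, or the
 * number of bytes that could not be copied.
 */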
unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len)
{
    unsigned offset = (vaddr_t)from & ~PAGE_MASK;

    while ( len )
    {
        void *p;
        unsigned size = min(len, (unsigned)(PAGE_SIZE - offset));
        struct page_info *page;

        page = get_page_from_gva(current, (vaddr_t) from, GV2M_READ);
        if ( page == NULL )
            return len;

        p = __map_domain_page(page);
        p += ((vaddr_t)from & (~PAGE_MASK));

        memcpy(to, p, size);

        unmap_domain_page(p);
        put_page(page);
        len -= size;
        from += size;
        to += size;
        /*
         * After the first iteration, guest virtual address is correctly
         * aligned to PAGE_SIZE.
         */
        offset = 0;
    }
    return 0;
}

/*
 * Temporarily map one physical guest page and copy data to or from it.
 * The data to be copied cannot cross a page boundary.
 */
int access_guest_memory_by_ipa(struct domain *d, paddr_t gpa, void *buf,
                               uint32_t size, bool is_write)
{
    struct page_info *page;
    uint64_t offset = gpa & ~PAGE_MASK;  /* Offset within the mapped page */
    p2m_type_t p2mt;
    void *p;

    /* Do not cross a page boundary. */
    if ( size > (PAGE_SIZE - offset) )
    {
        printk(XENLOG_G_ERR "d%d: guestcopy: memory access crosses page boundary.\n",
               d->domain_id);
        return -EINVAL;
    }

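    /* Translate the guest frame and take a reference on the underlying page. */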
    page = get_page_from_gfn(d, paddr_to_pfn(gpa), &p2mt, P2M_ALLOC);
    if ( !page )
    {
        printk(XENLOG_G_ERR "d%d: guestcopy: failed to get table entry.\n",
               d->domain_id);
        return -EINVAL;
    }

    if ( !p2m_is_ram(p2mt) )
    {
        put_page(page);
        printk(XENLOG_G_ERR "d%d: guestcopy: guest memory should be RAM.\n",
               d->domain_id);
        return -EINVAL;
    }

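    /* Map the page and copy in the requested direction. */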
    p = __map_domain_page(page);

    if ( is_write )
        memcpy(p + offset, buf, size);
    else
        memcpy(buf, p + offset, size);

    unmap_domain_page(p);
    put_page(page);

    return 0;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */