/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/uaccess.h>

#ifndef GUARD
# define GUARD UA_KEEP
#endif

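/*
 * The _ll helpers below share one strategy: requests smaller than
 * 2*BYTES_PER_LONG bytes go straight to the byte copy at label 1.  Larger
 * requests first copy single bytes until 'to' is word-aligned (label 4),
 * then move whole words (label 0), then the sub-word tail (label 1).  On a
 * fault, the .fixup code rebuilds the number of bytes still to go from
 * [aux] and [cnt], so the functions always return the count NOT copied.
 * In the guarded (UA_KEEP) build, guest_access_mask_ptr first masks the
 * guest pointer so that it cannot speculatively reference hypervisor
 * addresses.
 */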
unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
{
    unsigned dummy;

    stac();
    asm volatile (
        GUARD(
        "    guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"
        )
        "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
        "    jbe  1f\n"
        "    mov  %k[to], %[cnt]\n"
        "    neg  %[cnt]\n"
        "    and  $"STR(BYTES_PER_LONG-1)", %[cnt]\n"
        "    sub  %[cnt], %[aux]\n"
        "4:  rep movsb\n" /* make 'to' address aligned */
        "    mov  %[aux], %[cnt]\n"
        "    shr  $"STR(LONG_BYTEORDER)", %[cnt]\n"
        "    and  $"STR(BYTES_PER_LONG-1)", %[aux]\n"
        "    .align 2,0x90\n"
        "0:  rep movs"__OS"\n" /* as many words as possible... */
        "    mov  %[aux], %[cnt]\n"
        "1:  rep movsb\n" /* ...remainder copied as bytes */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "5:  add  %[aux], %[cnt]\n"
        "    jmp  2b\n"
        "3:  lea  (%q[aux], %q[cnt], "STR(BYTES_PER_LONG)"), %[cnt]\n"
        "    jmp  2b\n"
        ".previous\n"
        _ASM_EXTABLE(4b, 5b)
        _ASM_EXTABLE(0b, 3b)
        _ASM_EXTABLE(1b, 2b)
        : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
          [aux] "=&r" (dummy)
          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
        : "[aux]" (n)
        : "memory" );
    clac();

    return n;
}

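/*
 * Identical structure to copy_to_guest_ll(), copying in the opposite
 * direction.  The extra fixup path at label 6 zeroes whatever part of the
 * hypervisor-side buffer was not filled (preserving %eax, which is not in
 * the clobber list, around the "rep stosb" via the xchg pair), so a
 * partial copy never leaves stale data in the destination.
 */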
unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n)
{
    unsigned dummy;

    stac();
    asm volatile (
        GUARD(
        "    guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
        )
        "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
        "    jbe  1f\n"
        "    mov  %k[to], %[cnt]\n"
        "    neg  %[cnt]\n"
        "    and  $"STR(BYTES_PER_LONG-1)", %[cnt]\n"
        "    sub  %[cnt], %[aux]\n"
        "4:  rep movsb\n" /* make 'to' address aligned */
        "    mov  %[aux], %[cnt]\n"
        "    shr  $"STR(LONG_BYTEORDER)", %[cnt]\n"
        "    and  $"STR(BYTES_PER_LONG-1)", %[aux]\n"
        "    .align 2,0x90\n"
        "0:  rep movs"__OS"\n" /* as many words as possible... */
        "    mov  %[aux], %[cnt]\n"
        "1:  rep movsb\n" /* ...remainder copied as bytes */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "5:  add  %[aux], %[cnt]\n"
        "    jmp  6f\n"
        "3:  lea  (%q[aux], %q[cnt], "STR(BYTES_PER_LONG)"), %[cnt]\n"
        "6:  mov  %[cnt], %k[from]\n"
        "    xchg %%eax, %[aux]\n"
        "    xor  %%eax, %%eax\n"
        "    rep stosb\n"
        "    xchg %[aux], %%eax\n"
        "    mov  %k[from], %[cnt]\n"
        "    jmp  2b\n"
        ".previous\n"
        _ASM_EXTABLE(4b, 5b)
        _ASM_EXTABLE(0b, 3b)
        _ASM_EXTABLE(1b, 6b)
        : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
          [aux] "=&r" (dummy)
          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
        : "[aux]" (n)
        : "memory" );
    clac();

    return n;
}

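/*
 * GUARD(1) expands to 1 only while GUARD is UA_KEEP, i.e. during the
 * first, guarded pass over this file, so the PV wrappers below are
 * compiled only once.
 */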
#if GUARD(1) + 0

/**
 * copy_to_guest_pv: - Copy a block of data into PV guest space.
 * @to:   Destination address, in PV guest space.
 * @from: Source address, in hypervisor space.
 * @n:    Number of bytes to copy.
 *
 * Copy data from hypervisor space to PV guest space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned int copy_to_guest_pv(void __user *to, const void *from, unsigned int n)
{
    if ( access_ok(to, n) )
        n = __copy_to_guest_pv(to, from, n);
    return n;
}

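/*
 * A minimal usage sketch for copy_to_guest_pv() above (the handler shape
 * and names are illustrative, not part of this file):
 *
 *     struct some_info info = { .field = 42 };   // hypothetical type
 *
 *     if ( copy_to_guest_pv(guest_ptr, &info, sizeof(info)) )
 *         return -EFAULT;   // some trailing bytes were not written
 */
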
/**
 * clear_guest_pv: - Zero a block of memory in PV guest space.
 * @to: Destination address, in PV guest space.
 * @n:  Number of bytes to zero.
 *
 * Zero a block of memory in PV guest space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned int clear_guest_pv(void __user *to, unsigned int n)
{
    if ( access_ok(to, n) )
    {
        long dummy;

        stac();
        asm volatile (
            "    guest_access_mask_ptr %[to], %[scratch1], %[scratch2]\n"
            "0:  rep stos"__OS"\n"
            "    mov  %[bytes], %[cnt]\n"
            "1:  rep stosb\n"
            "2:\n"
            ".section .fixup,\"ax\"\n"
            "3:  lea  (%q[bytes], %q[longs], "STR(BYTES_PER_LONG)"), %[cnt]\n"
            "    jmp  2b\n"
            ".previous\n"
            _ASM_EXTABLE(0b, 3b)
            _ASM_EXTABLE(1b, 2b)
            : [cnt] "=&c" (n), [to] "+D" (to), [scratch1] "=&r" (dummy),
              [scratch2] "=&r" (dummy)
            : [bytes] "r" (n & (BYTES_PER_LONG - 1)),
              [longs] "0" (n / BYTES_PER_LONG), "a" (0) );
        clac();
    }

    return n;
}

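/*
 * Note on the constraints in clear_guest_pv() above: [longs] is tied to
 * operand 0 ([cnt]), so %ecx enters the asm holding n / BYTES_PER_LONG for
 * the word-sized "rep stos", while [bytes] separately carries the
 * n % BYTES_PER_LONG tail for the trailing "rep stosb".  "a" (0) supplies
 * the zero pattern in %eax.  On a fault, the fixup at label 3 rebuilds the
 * number of bytes left to clear from the two counts.
 */
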
/**
 * copy_from_guest_pv: - Copy a block of data from PV guest space.
 * @to:   Destination address, in hypervisor space.
 * @from: Source address, in PV guest space.
 * @n:    Number of bytes to copy.
 *
 * Copy data from PV guest space to hypervisor space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned int copy_from_guest_pv(void *to, const void __user *from,
                                unsigned int n)
{
    if ( access_ok(from, n) )
        n = __copy_from_guest_pv(to, from, n);
    else
        memset(to, 0, n);
    return n;
}

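/*
 * A minimal usage sketch for copy_from_guest_pv() (names are illustrative):
 *
 *     some_args_t args;   // hypothetical guest-supplied argument block
 *
 *     if ( copy_from_guest_pv(&args, guest_arg, sizeof(args)) )
 *         return -EFAULT;   // 'args' is zero-padded past the fault point
 */
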
/*
 * Generate the unguarded variants: re-include this file with GUARD
 * expanding to nothing, which turns copy_to_guest_ll/copy_from_guest_ll
 * into copy_to_unsafe_ll/copy_from_unsafe_ll without the
 * guest_access_mask_ptr masking.
 */
# undef GUARD
# define GUARD UA_DROP
# define copy_to_guest_ll copy_to_unsafe_ll
# define copy_from_guest_ll copy_from_unsafe_ll
# undef __user
# define __user
# include __FILE__

#endif /* GUARD(1) */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */