#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * Per-vCPU virtual address range used to translate hypercall arguments
 * for compat (32-bit) guests.
 */
#define COMPAT_ARG_XLAT_VIRT_BASE ((void *)ARG_XLAT_START(current))
#define COMPAT_ARG_XLAT_SIZE      (2*PAGE_SIZE)
struct vcpu;
int setup_compat_arg_xlat(struct vcpu *v);
void free_compat_arg_xlat(struct vcpu *v);
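
/*
 * Lifecycle sketch (hedged; the call sites below are illustrative only,
 * the real ones live in the vCPU construction/teardown paths, not here):
 *
 *     if ( (rc = setup_compat_arg_xlat(v)) != 0 )
 *         goto fail;
 *     ...
 *     free_compat_arg_xlat(v);
 */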

/* True iff the range [addr, addr + size) lies entirely within the area. */
#define is_compat_arg_xlat_range(addr, size) ({                               \
    unsigned long __off;                                                      \
    __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
    (__off < COMPAT_ARG_XLAT_SIZE) &&                                         \
    ((__off + (unsigned long)(size)) <= COMPAT_ARG_XLAT_SIZE);                \
})

#define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE)
#define xlat_page_size  COMPAT_ARG_XLAT_SIZE
#define xlat_page_left_size(xlat_page_current) \
    (xlat_page_start + xlat_page_size - xlat_page_current)

#define xlat_malloc_init(xlat_page_current)    do { \
    xlat_page_current = xlat_page_start; \
} while (0)

extern void *xlat_malloc(unsigned long *xlat_page_current, size_t size);

#define xlat_malloc_array(_p, _t, _c) ((_t *) xlat_malloc(&_p, sizeof(_t) * _c))

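/*
 * Usage sketch for the xlat allocator (hedged; the variable and count
 * names below are hypothetical, and the NULL return on exhaustion is the
 * assumed failure indication of xlat_malloc()):
 *
 *     unsigned long xlat_page_current;
 *     unsigned long *frames;
 *
 *     xlat_malloc_init(xlat_page_current);
 *     frames = xlat_malloc_array(xlat_page_current, unsigned long, nr_frames);
 *     if ( frames == NULL )
 *         return -ENOMEM;
 */
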
/*
 * Valid if in the positive half of the 48-bit address space, or above the
 * Xen-reserved area.  This is also valid for range checks (addr, addr+size):
 * as long as the start address is outside the Xen-reserved area, sequential
 * accesses (starting at addr) will hit a non-canonical address (and thus
 * fault) before ever reaching VIRT_START.
 */
#define __addr_ok(addr) \
    (((unsigned long)(addr) < (1UL<<47)) || \
     ((unsigned long)(addr) >= HYPERVISOR_VIRT_END))

#define access_ok(addr, size) \
    (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))

/* The "?: 0UL" forces the overflow check to be done in unsigned long. */
#define array_access_ok(addr, count, size) \
    (likely(((count) ?: 0UL) < (~0UL / (size))) && \
     access_ok(addr, (count) * (size)))
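
/*
 * Illustrative use in a hypercall handler (sketch only; the buffer and
 * count names are hypothetical, not part of this header):
 *
 *     if ( !array_access_ok(guest_buf, nr_entries, sizeof(*guest_buf)) )
 *         return -EFAULT;
 */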

/*
 * A compat guest may only access addresses below
 * HYPERVISOR_COMPAT_VIRT_START(d).
 */
#define __compat_addr_ok(d, addr) \
    ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(d))

#define __compat_access_ok(d, addr, size) \
    __compat_addr_ok(d, (unsigned long)(addr) + ((size) ? (size) - 1 : 0))

#define compat_access_ok(addr, size) \
    __compat_access_ok(current->domain, addr, size)

#define compat_array_access_ok(addr,count,size) \
    (likely((count) < (~0U / (size))) && \
     compat_access_ok(addr, 0 + (count) * (size)))

/*
 * Store x of the given size at the (guest) pointer ptr, setting retval to
 * errret if the access faults.  The string arguments select the instruction
 * suffix, register modifier, and asm constraint for the source operand.
 */
#define __put_user_size(x,ptr,size,retval,errret)			\
do {									\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break;	\
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \
	case 4: __put_user_asm(x,ptr,retval,"l","k","ir",errret);break;	\
	case 8: __put_user_asm(x,ptr,retval,"q","","ir",errret);break;	\
	default: __put_user_bad();					\
	}								\
} while (0)

/*
 * Load a value of the given size from the (guest) pointer ptr into x,
 * setting retval to errret if the access faults.  The "=q"/"=r" strings are
 * the asm output constraints for the destination register.
 */
#define __get_user_size(x,ptr,size,retval,errret)			\
do {									\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break;	\
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break;	\
	case 4: __get_user_asm(x,ptr,retval,"l","k","=r",errret);break;	\
	case 8: __get_user_asm(x,ptr,retval,"q","","=r",errret); break;	\
	default: __get_user_bad();					\
	}								\
} while (0)

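/*
 * These size-dispatch helpers are normally used via typed wrappers rather
 * than directly; a minimal sketch (the wrapper name below is illustrative,
 * not part of this header):
 *
 *     #define example_get_guest(x, ptr) ({                               \
 *         long err_;                                                     \
 *         __get_user_size((x), (ptr), sizeof(*(ptr)), err_, -EFAULT);    \
 *         err_;                                                          \
 *     })
 */
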
#endif /* __X86_64_UACCESS_H */