1 
2 #ifndef __X86_64_PAGE_H__
3 #define __X86_64_PAGE_H__
4 
5 #define L1_PAGETABLE_SHIFT      12
6 #define L2_PAGETABLE_SHIFT      21
7 #define L3_PAGETABLE_SHIFT      30
8 #define L4_PAGETABLE_SHIFT      39
9 #define PAGE_SHIFT              L1_PAGETABLE_SHIFT
10 #define SUPERPAGE_SHIFT         L2_PAGETABLE_SHIFT
11 #define ROOT_PAGETABLE_SHIFT    L4_PAGETABLE_SHIFT
12 
13 #define PAGETABLE_ORDER         9
14 #define L1_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
15 #define L2_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
16 #define L3_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
17 #define L4_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
18 #define ROOT_PAGETABLE_ENTRIES  L4_PAGETABLE_ENTRIES
19 #define SUPERPAGE_ORDER         PAGETABLE_ORDER
20 #define SUPERPAGE_PAGES         (1<<SUPERPAGE_ORDER)
21 
22 #define __XEN_VIRT_START        XEN_VIRT_START
23 
24 /* These are architectural limits. Current CPUs support only 40-bit phys. */
25 #define PADDR_BITS              52
26 #define VADDR_BITS              48
27 #define PADDR_MASK              ((1UL << PADDR_BITS)-1)
28 #define VADDR_MASK              ((1UL << VADDR_BITS)-1)
29 
30 #define VADDR_TOP_BIT           (1UL << (VADDR_BITS - 1))
31 #define CANONICAL_MASK          (~0UL & ~VADDR_MASK)
32 
33 #define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))
34 
35 #ifndef __ASSEMBLY__
36 
37 #include <asm/types.h>
38 
39 #include <xen/pdx.h>
40 
41 extern unsigned long xen_virt_end;
42 
43 /*
44  * Note: These are solely for the use by page_{get,set}_owner(), and
45  *       therefore don't need to handle the XEN_VIRT_{START,END} range.
46  */
47 #define virt_to_pdx(va)  (((unsigned long)(va) - DIRECTMAP_VIRT_START) >> \
48                           PAGE_SHIFT)
49 #define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \
50                                    ((unsigned long)(pdx) << PAGE_SHIFT)))
51 
/*
 * Translate a Xen virtual address into a machine (physical) address.
 *
 * Two virtual ranges are handled:
 *  - the direct map: the offset from DIRECTMAP_VIRT_START is the
 *    (PDX-compressed) page index plus page offset;
 *  - Xen's own image mapping below the direct map: rebased onto
 *    xen_phys_start.
 * The final mask-and-shift re-inserts the PDX compression hole (see
 * xen/pdx.h) to turn the compressed index back into a machine address.
 */
static inline unsigned long __virt_to_maddr(unsigned long va)
{
    ASSERT(va < DIRECTMAP_VIRT_END);
    if ( va >= DIRECTMAP_VIRT_START )
        va -= DIRECTMAP_VIRT_START;
    else
    {
        /* Only the single 1GB Xen image range is valid below the direct map. */
        BUILD_BUG_ON(XEN_VIRT_END - XEN_VIRT_START != GB(1));
        /* Signed, so ((long)XEN_VIRT_START >> 30) fits in an imm32. */
        ASSERT(((long)va >> (PAGE_ORDER_1G + PAGE_SHIFT)) ==
               ((long)XEN_VIRT_START >> (PAGE_ORDER_1G + PAGE_SHIFT)));

        va += xen_phys_start - XEN_VIRT_START;
    }
    return (va & ma_va_bottom_mask) |
           ((va << pfn_pdx_hole_shift) & ma_top_mask);
}
69 
__maddr_to_virt(unsigned long ma)70 static inline void *__maddr_to_virt(unsigned long ma)
71 {
72     ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
73     return (void *)(DIRECTMAP_VIRT_START +
74                     ((ma & ma_va_bottom_mask) |
75                      ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
76 }
77 
78 /* read access (should only be used for debug printk's) */
79 typedef u64 intpte_t;
80 #define PRIpte "016lx"
81 
82 typedef struct { intpte_t l1; } l1_pgentry_t;
83 typedef struct { intpte_t l2; } l2_pgentry_t;
84 typedef struct { intpte_t l3; } l3_pgentry_t;
85 typedef struct { intpte_t l4; } l4_pgentry_t;
86 typedef l4_pgentry_t root_pgentry_t;
87 
88 #endif /* !__ASSEMBLY__ */
89 
90 #define pte_read_atomic(ptep)       read_atomic(ptep)
91 #define pte_write_atomic(ptep, pte) write_atomic(ptep, pte)
92 #define pte_write(ptep, pte)        write_atomic(ptep, pte)
93 
94 /* Given a virtual address, get an entry offset into a linear page table. */
95 #define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)
96 #define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT)
97 #define l3_linear_offset(_a) (((_a) & VADDR_MASK) >> L3_PAGETABLE_SHIFT)
98 #define l4_linear_offset(_a) (((_a) & VADDR_MASK) >> L4_PAGETABLE_SHIFT)
99 
100 #define is_guest_l2_slot(_d, _t, _s)                   \
101     ( !is_pv_32bit_domain(_d) ||                       \
102       !((_t) & PGT_pae_xen_l2) ||                      \
103       ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
104 #define is_guest_l4_slot(_d, _s)                    \
105     ( is_pv_32bit_domain(_d)                        \
106       ? ((_s) == 0)                                 \
107       : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||  \
108          ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
109 
110 #define root_get_pfn              l4e_get_pfn
111 #define root_get_flags            l4e_get_flags
112 #define root_get_intpte           l4e_get_intpte
113 #define root_empty                l4e_empty
114 #define root_from_paddr           l4e_from_paddr
115 #define PGT_root_page_table       PGT_l4_page_table
116 
117 /*
118  * PTE pfn and flags:
119  *  40-bit pfn   = (pte[51:12])
120  *  24-bit flags = (pte[63:52],pte[11:0])
121  */
122 
123 /* Extract flags into 24-bit integer, or turn 24-bit flags into a pte mask. */
124 #ifndef __ASSEMBLY__
/*
 * Fold a PTE's flag bits into the 24-bit software representation:
 * pte[63:52] become flags[23:12], pte[11:0] become flags[11:0].
 */
static inline unsigned int get_pte_flags(intpte_t x)
{
    unsigned int lo = x & 0xfff;
    unsigned int hi = (x >> 40) & 0xfff000;

    return hi | lo;
}
129 
put_pte_flags(unsigned int x)130 static inline intpte_t put_pte_flags(unsigned int x)
131 {
132     return (((intpte_t)x & ~0xfff) << 40) | (x & 0xfff);
133 }
134 #endif
135 
136 /*
137  * Protection keys define a new 4-bit protection key field
138  * (PKEY) in bits 62:59 of leaf entries of the page tables.
 * This corresponds to bits 22:19 of the 24-bit flags value.
140  *
141  * Notice: Bit 22 is used by _PAGE_GNTTAB which is visible to PV guests,
142  * so Protection keys must be disabled on PV guests.
143  */
144 #define _PAGE_PKEY_BITS  (0x780000)	 /* Protection Keys, 22:19 */
145 
146 #define get_pte_pkey(x) (MASK_EXTR(get_pte_flags(x), _PAGE_PKEY_BITS))
147 
148 /* Bit 23 of a 24-bit flag mask. This corresponds to bit 63 of a pte.*/
149 #define _PAGE_NX_BIT (1U<<23)
150 
151 /* Bit 22 of a 24-bit flag mask. This corresponds to bit 62 of a pte.*/
152 #define _PAGE_GNTTAB (1U<<22)
153 
154 /*
155  * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
156  * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
157  * is asserted for both.
158  */
159 #define _PAGE_GUEST_KERNEL (1U<<12)
160 
161 #define PAGE_HYPERVISOR_RO      (__PAGE_HYPERVISOR_RO      | _PAGE_GLOBAL)
162 #define PAGE_HYPERVISOR_RW      (__PAGE_HYPERVISOR_RW      | _PAGE_GLOBAL)
163 #define PAGE_HYPERVISOR_RX      (__PAGE_HYPERVISOR_RX      | _PAGE_GLOBAL)
164 #define PAGE_HYPERVISOR_RWX     (__PAGE_HYPERVISOR         | _PAGE_GLOBAL)
165 
166 #ifdef __ASSEMBLY__
167 /* Dependency on NX being available can't be expressed. */
168 # define PAGE_HYPERVISOR         PAGE_HYPERVISOR_RWX
169 # define PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR_UCMINUS | _PAGE_GLOBAL)
170 # define PAGE_HYPERVISOR_UC      (__PAGE_HYPERVISOR_UC      | _PAGE_GLOBAL)
171 #else
172 # define PAGE_HYPERVISOR         PAGE_HYPERVISOR_RW
173 # define PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR_UCMINUS | \
174                                   _PAGE_GLOBAL | _PAGE_NX)
175 # define PAGE_HYPERVISOR_UC      (__PAGE_HYPERVISOR_UC | \
176                                   _PAGE_GLOBAL | _PAGE_NX)
177 #endif
178 
179 #endif /* __X86_64_PAGE_H__ */
180 
181 /*
182  * Local variables:
183  * mode: C
184  * c-file-style: "BSD"
185  * c-basic-offset: 4
186  * tab-width: 4
187  * indent-tabs-mode: nil
188  * End:
189  */
190