#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

#include <xen/const.h>

/*
 * It is important that the masks are signed quantities. This ensures that
 * the compiler sign-extends a 32-bit mask to 64 bits if that is required.
 */
#define PAGE_SIZE           (_AC(1,L) << PAGE_SHIFT)
#define PAGE_MASK           (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK      (~0)
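
/*
 * Illustrative sketch only (assuming the usual PAGE_SHIFT of 12): because
 * the mask is a signed quantity, a conversion to a wider type sign-extends,
 * so an expression such as
 *
 *     paddr_t base = addr & PAGE_MASK;
 *
 * masks with 0xfffffffffffff000 rather than 0x00000000fffff000, keeping the
 * high address bits intact.
 */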

#define PAGE_ORDER_4K       0
#define PAGE_ORDER_2M       9
#define PAGE_ORDER_1G       18
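
/*
 * Illustrative sketch only: an order is the log2 number of 4K pages covered
 * by a mapping, so (again assuming PAGE_SHIFT == 12)
 *
 *     1UL << (PAGE_ORDER_2M + PAGE_SHIFT) == 0x200000    (2 MiB)
 *     1UL << (PAGE_ORDER_1G + PAGE_SHIFT) == 0x40000000  (1 GiB)
 */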

#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
#endif

#include <asm/x86_64/page.h>

/* Read a pte atomically from memory. */
#define l1e_read_atomic(l1ep) \
    l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep))))
#define l2e_read_atomic(l2ep) \
    l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep))))
#define l3e_read_atomic(l3ep) \
    l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep))))
#define l4e_read_atomic(l4ep) \
    l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep))))

/* Write a pte atomically to memory. */
#define l1e_write_atomic(l1ep, l1e) \
    pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write_atomic(l2ep, l2e) \
    pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write_atomic(l3ep, l3e) \
    pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write_atomic(l4ep, l4e) \
    pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))

/*
 * Write a pte safely but non-atomically to memory.
 * The PTE may become temporarily not-present during the update.
 */
#define l1e_write(l1ep, l1e) \
    pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write(l2ep, l2e) \
    pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write(l3ep, l3e) \
    pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write(l4ep, l4e) \
    pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
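
/*
 * Illustrative sketch only (pl1e is a hypothetical l1_pgentry_t pointer):
 * a read-modify-write of a live entry would pair the atomic accessors,
 *
 *     l1_pgentry_t pte = l1e_read_atomic(pl1e);
 *     l1e_add_flags(pte, _PAGE_ACCESSED);
 *     l1e_write_atomic(pl1e, pte);
 *
 * while l1e_write() is only suitable where a transiently inconsistent
 * entry cannot be observed by a concurrent walker.
 */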

/* Get direct integer representation of a pte's contents (intpte_t). */
#define l1e_get_intpte(x)          ((x).l1)
#define l2e_get_intpte(x)          ((x).l2)
#define l3e_get_intpte(x)          ((x).l3)
#define l4e_get_intpte(x)          ((x).l4)

/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x)             \
    ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l2e_get_pfn(x)             \
    ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l3e_get_pfn(x)             \
    ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l4e_get_pfn(x)             \
    ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))

/* Get mfn mapped by pte (mfn_t). */
#define l1e_get_mfn(x) _mfn(l1e_get_pfn(x))
#define l2e_get_mfn(x) _mfn(l2e_get_pfn(x))
#define l3e_get_mfn(x) _mfn(l3e_get_pfn(x))
#define l4e_get_mfn(x) _mfn(l4e_get_pfn(x))

/* Get physical address of page mapped by pte (paddr_t). */
#define l1e_get_paddr(x)           \
    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
#define l2e_get_paddr(x)           \
    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
#define l3e_get_paddr(x)           \
    ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
#define l4e_get_paddr(x)           \
    ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))

/* Get pointer to info structure of page mapped by pte (struct page_info *). */
#define l1e_get_page(x)           (__mfn_to_page(l1e_get_pfn(x)))
#define l2e_get_page(x)           (__mfn_to_page(l2e_get_pfn(x)))
#define l3e_get_page(x)           (__mfn_to_page(l3e_get_pfn(x)))
#define l4e_get_page(x)           (__mfn_to_page(l4e_get_pfn(x)))

/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x)           (get_pte_flags((x).l1))
#define l2e_get_flags(x)           (get_pte_flags((x).l2))
#define l3e_get_flags(x)           (get_pte_flags((x).l3))
#define l4e_get_flags(x)           (get_pte_flags((x).l4))

/* Get pte pkeys (unsigned int). */
#define l1e_get_pkey(x)           get_pte_pkey((x).l1)
#define l2e_get_pkey(x)           get_pte_pkey((x).l2)
#define l3e_get_pkey(x)           get_pte_pkey((x).l3)

/* Construct an empty pte. */
#define l1e_empty()                ((l1_pgentry_t) { 0 })
#define l2e_empty()                ((l2_pgentry_t) { 0 })
#define l3e_empty()                ((l3_pgentry_t) { 0 })
#define l4e_empty()                ((l4_pgentry_t) { 0 })

/* Construct a pte from a pfn and access flags. */
#define l1e_from_pfn(pfn, flags)   \
    ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l2e_from_pfn(pfn, flags)   \
    ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l3e_from_pfn(pfn, flags)   \
    ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l4e_from_pfn(pfn, flags)   \
    ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
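
/*
 * Illustrative sketch only (hypothetical frame number): a read-only Xen
 * mapping of frame 0x1234 could be built as
 *
 *     l1_pgentry_t pte = l1e_from_pfn(0x1234, __PAGE_HYPERVISOR_RO);
 *
 * with put_pte_flags() (defined elsewhere) placing any high flag bits.
 */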

/* Construct a pte from an mfn and access flags. */
#define l1e_from_mfn(m, f) l1e_from_pfn(mfn_x(m), f)
#define l2e_from_mfn(m, f) l2e_from_pfn(mfn_x(m), f)
#define l3e_from_mfn(m, f) l3e_from_pfn(mfn_x(m), f)
#define l4e_from_mfn(m, f) l4e_from_pfn(mfn_x(m), f)

/* Construct a pte from a physical address and access flags. */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l1_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l4_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif /* !__ASSEMBLY__ */

/* Construct a pte from its direct integer representation. */
#define l1e_from_intpte(intpte)    ((l1_pgentry_t) { (intpte_t)(intpte) })
#define l2e_from_intpte(intpte)    ((l2_pgentry_t) { (intpte_t)(intpte) })
#define l3e_from_intpte(intpte)    ((l3_pgentry_t) { (intpte_t)(intpte) })
#define l4e_from_intpte(intpte)    ((l4_pgentry_t) { (intpte_t)(intpte) })

/* Construct a pte from a page pointer and access flags. */
#define l1e_from_page(page, flags) l1e_from_pfn(__page_to_mfn(page), (flags))
#define l2e_from_page(page, flags) l2e_from_pfn(__page_to_mfn(page), (flags))
#define l3e_from_page(page, flags) l3e_from_pfn(__page_to_mfn(page), (flags))
#define l4e_from_page(page, flags) l4e_from_pfn(__page_to_mfn(page), (flags))

/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
#define l2e_add_flags(x, flags)    ((x).l2 |= put_pte_flags(flags))
#define l3e_add_flags(x, flags)    ((x).l3 |= put_pte_flags(flags))
#define l4e_add_flags(x, flags)    ((x).l4 |= put_pte_flags(flags))

/* Remove flags from an existing pte. */
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))

/* Flip flags in an existing L1 PTE. */
#define l1e_flip_flags(x, flags)    ((x).l1 ^= put_pte_flags(flags))

/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
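
/*
 * Illustrative sketch only: l1e_has_changed(x, y, _PAGE_PRESENT) is true
 * when the two entries map different frames or differ in the present bit;
 * differences confined to unlisted flags (say _PAGE_ACCESSED) do not count.
 */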

/* Pagetable walking. */
#define l2e_to_l1e(x)              ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
#define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_paddr(x)))

#define map_l1t_from_l2e(x)        (l1_pgentry_t *)map_domain_page(l2e_get_mfn(x))
#define map_l2t_from_l3e(x)        (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x))
#define map_l3t_from_l4e(x)        (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a)         \
    (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(a)         \
    (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(a)         \
    (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(a)         \
    (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
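
/*
 * Illustrative sketch only: with the usual 4-level layout (512 entries per
 * table, 12-bit page offset) these reduce to
 *
 *     l1_table_offset(va) == (va >> 12) & 511
 *     l2_table_offset(va) == (va >> 21) & 511
 *     l3_table_offset(va) == (va >> 30) & 511
 *     l4_table_offset(va) == (va >> 39) & 511
 *
 * i.e. each level consumes 9 further bits of the virtual address.
 */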

/* Convert a pointer to a page-table entry into a pagetable slot index. */
#define pgentry_ptr_to_slot(_p)    \
    (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))

#ifndef __ASSEMBLY__

/* Page-table type. */
typedef struct { u64 pfn; } pagetable_t;
#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_page(x)   __mfn_to_page((x).pfn)
#define pagetable_get_pfn(x)    ((x).pfn)
#define pagetable_get_mfn(x)    _mfn(((x).pfn))
#define pagetable_is_null(x)    ((x).pfn == 0)
#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
#define pagetable_from_page(pg) pagetable_from_pfn(__page_to_mfn(pg))
#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
#define pagetable_null()        pagetable_from_pfn(0)
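
/*
 * Illustrative sketch only (hypothetical frame number): a top-level table
 * can be wrapped and unwrapped without touching the raw representation,
 *
 *     pagetable_t pt = pagetable_from_mfn(_mfn(0x1234));
 *     paddr_t pa = pagetable_get_paddr(pt);   // 0x1234000
 *
 * and pagetable_is_null() treats frame 0 as the sentinel "no table" value.
 */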

void clear_page_sse2(void *);
void copy_page_sse2(void *, const void *);

#define clear_page(_p)      clear_page_sse2(_p)
#define copy_page(_t, _f)   copy_page_sse2(_t, _f)

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define __pa(x)             (virt_to_maddr(x))
#define __va(x)             (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define __virt_to_mfn(va)   (virt_to_maddr(va) >> PAGE_SHIFT)
#define __mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))

/* Convert between machine frame numbers and page-info structures. */
#define __mfn_to_page(mfn)  (frame_table + pfn_to_pdx(mfn))
#define __page_to_mfn(pg)   pdx_to_pfn((unsigned long)((pg) - frame_table))

/* Convert between machine addresses and page-info structures. */
#define __maddr_to_page(ma) __mfn_to_page((ma) >> PAGE_SHIFT)
#define __page_to_maddr(pg) ((paddr_t)__page_to_mfn(pg) << PAGE_SHIFT)

/* Convert between frame number and address formats. */
#define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define __paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
#define gfn_to_gaddr(gfn)   __pfn_to_paddr(gfn_x(gfn))
#define gaddr_to_gfn(ga)    _gfn(__paddr_to_pfn(ga))
#define mfn_to_maddr(mfn)   __pfn_to_paddr(mfn_x(mfn))
#define maddr_to_mfn(ma)    _mfn(__paddr_to_pfn(ma))
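
/*
 * Illustrative sketch only: for a hypothetical ma = 0x1234567 the round
 * trip through a frame number drops the in-page offset:
 *
 *     maddr_to_mfn(ma)               == _mfn(0x1234)
 *     mfn_to_maddr(maddr_to_mfn(ma)) == 0x1234000
 */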

/*
 * Non-underscored wrappers for the conversion functions above. These are
 * overridden in various source files while the underscored versions remain
 * intact.
 */
#define mfn_valid(mfn)      __mfn_valid(mfn_x(mfn))
#define virt_to_mfn(va)     __virt_to_mfn(va)
#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
#define virt_to_maddr(va)   __virt_to_maddr((unsigned long)(va))
#define maddr_to_virt(ma)   __maddr_to_virt((unsigned long)(ma))
#define mfn_to_page(mfn)    __mfn_to_page(mfn)
#define page_to_mfn(pg)     __page_to_mfn(pg)
#define maddr_to_page(ma)   __maddr_to_page(ma)
#define page_to_maddr(pg)   __page_to_maddr(pg)
#define virt_to_page(va)    __virt_to_page(va)
#define page_to_virt(pg)    __page_to_virt(pg)
#define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
#define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
#define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
#define vmap_to_mfn(va)     l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va)))
#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))

#endif /* !defined(__ASSEMBLY__) */

/* Where to find each level of the linear mapping. */
#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#define __linear_l2_table \
 ((l2_pgentry_t *)(__linear_l1_table + l1_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l3_table \
 ((l3_pgentry_t *)(__linear_l2_table + l2_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l4_table \
 ((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START)))

#ifndef __ASSEMBLY__
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
extern l2_pgentry_t  *compat_idle_pg_table_l2;
extern unsigned int   m2p_compat_vstart;
extern l2_pgentry_t l2_xenmap[L2_PAGETABLE_ENTRIES],
    l2_bootmap[4*L2_PAGETABLE_ENTRIES];
extern l3_pgentry_t l3_bootmap[L3_PAGETABLE_ENTRIES];
extern l2_pgentry_t l2_identmap[4*L2_PAGETABLE_ENTRIES];
extern l1_pgentry_t l1_fixmap[L1_PAGETABLE_ENTRIES];
void paging_init(void);
void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
#endif /* !defined(__ASSEMBLY__) */

#define _PAGE_NONE     _AC(0x000,U)
#define _PAGE_PRESENT  _AC(0x001,U)
#define _PAGE_RW       _AC(0x002,U)
#define _PAGE_USER     _AC(0x004,U)
#define _PAGE_PWT      _AC(0x008,U)
#define _PAGE_PCD      _AC(0x010,U)
#define _PAGE_ACCESSED _AC(0x020,U)
#define _PAGE_DIRTY    _AC(0x040,U)
#define _PAGE_PAT      _AC(0x080,U)
#define _PAGE_PSE      _AC(0x080,U)
#define _PAGE_GLOBAL   _AC(0x100,U)
#define _PAGE_AVAIL0   _AC(0x200,U)
#define _PAGE_AVAIL1   _AC(0x400,U)
#define _PAGE_AVAIL2   _AC(0x800,U)
#define _PAGE_AVAIL    _AC(0xE00,U)
#define _PAGE_PSE_PAT  _AC(0x1000,U)
#define _PAGE_AVAIL_HIGH (_AC(0x7ff, U) << 12)
#define _PAGE_NX       (cpu_has_nx ? _PAGE_NX_BIT : 0)

#define PAGE_CACHE_ATTRS (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)

/*
 * Debug option: Ensure that granted mappings are not implicitly unmapped.
 * WARNING: This will need to be disabled to run OSes that use the spare PTE
 * bits themselves (e.g., *BSD).
 */
#ifdef NDEBUG
#undef _PAGE_GNTTAB
#endif
#ifndef _PAGE_GNTTAB
#define _PAGE_GNTTAB   0
#endif

#define __PAGE_HYPERVISOR_RO      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_HYPERVISOR_RW      (__PAGE_HYPERVISOR_RO | \
                                   _PAGE_DIRTY | _PAGE_RW)
#define __PAGE_HYPERVISOR_RX      (_PAGE_PRESENT | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR         (__PAGE_HYPERVISOR_RX | \
                                   _PAGE_DIRTY | _PAGE_RW)
#define __PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR | _PAGE_PCD)
#define __PAGE_HYPERVISOR_UC      (__PAGE_HYPERVISOR | _PAGE_PCD | _PAGE_PWT)

#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpage mappings */

#ifndef __ASSEMBLY__

/* Allocator functions for Xen pagetables. */
void *alloc_xen_pagetable(void);
void free_xen_pagetable(void *v);
l1_pgentry_t *virt_to_xen_l1e(unsigned long v);

/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
static inline unsigned int pte_flags_to_cacheattr(unsigned int flags)
{
    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
}
static inline unsigned int cacheattr_to_pte_flags(unsigned int cacheattr)
{
    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
}
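
/*
 * Illustrative sketch only: the 3-bit cacheattr is PAT:PCD:PWT, drawn from
 * PTE flag bits 7, 4 and 3 respectively. For example:
 *
 *     pte_flags_to_cacheattr(_PAGE_PCD)  == 2
 *     cacheattr_to_pte_flags(2)          == _PAGE_PCD
 *
 * so the two functions are inverses over the three cache-control bits.
 */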

/*
 * Given the flags of two entries, return true if the new flags are a
 * strict increase in rights over the old ones.
 */
static inline bool_t
perms_strictly_increased(uint32_t old_flags, uint32_t new_flags)
{
    uint32_t of = old_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX_BIT);
    uint32_t nf = new_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX_BIT);
    /*
     * Flip the NX bit, since it's the only one that decreases rights;
     * we calculate as if it were an "X" bit.
     */
    of ^= _PAGE_NX_BIT;
    nf ^= _PAGE_NX_BIT;
    /*
     * If the changed bits are all set in the new flags, then rights strictly
     * increased between old and new.
     */
    return ((of | (of ^ nf)) == nf);
}
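
/*
 * Illustrative sketch only: moving from a not-present entry to a present
 * read-only one strictly adds rights, so
 *
 *     perms_strictly_increased(0, _PAGE_PRESENT)
 *
 * evaluates to true, whereas trading _PAGE_RW for _PAGE_USER mixes a gain
 * with a loss and yields false.
 */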

static inline void invalidate_icache(void)
{
    /*
     * There is nothing to be done here as icaches are sufficiently
     * coherent on x86.
     */
}

#endif /* !__ASSEMBLY__ */

#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

#endif /* __X86_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */