1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_IA64_PAGE_H
3 #define _ASM_IA64_PAGE_H
4 /*
5  * Pagetable related stuff.
6  *
7  * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
8  *	David Mosberger-Tang <davidm@hpl.hp.com>
9  */
10 
11 #include <asm/intrinsics.h>
12 #include <asm/types.h>
13 
/*
 * The top three bits of an IA64 address are its Region Number.
 * Different regions are assigned to different purposes.
 */
#define RGN_SHIFT	(61)
#define RGN_BASE(r)	(__IA64_UL_CONST(r)<<RGN_SHIFT)
#define RGN_BITS	(RGN_BASE(-1))	/* mask selecting the region-number (top 3) bits */

#define RGN_KERNEL	7	/* Identity mapped region */
#define RGN_UNCACHED    6	/* Identity mapped I/O region */
#define RGN_GATE	5	/* Gate page, Kernel text, etc */
#define RGN_HPAGE	4	/* For Huge TLB pages */
26 
/*
 * PAGE_SHIFT determines the actual kernel page size.
 * Exactly one CONFIG_IA64_PAGE_SIZE_* option must be selected.
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error Unsupported page size!
#endif

#define PAGE_SIZE		(__IA64_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))	/* clears the intra-page offset bits */

#define PERCPU_PAGE_SHIFT	18	/* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
47 
48 
#ifdef CONFIG_HUGETLB_PAGE
# define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)	/* huge pages live in their own region (4) */
# define HPAGE_SHIFT		hpage_shift	/* a runtime variable, not a constant (declared below) */
# define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
# define HPAGE_SIZE		(__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK		(~(HPAGE_SIZE - 1))

# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* CONFIG_HUGETLB_PAGE */
58 
#ifdef __ASSEMBLY__
  /*
   * Assembly variants: the kernel region is identity mapped, so plain
   * add/subtract of PAGE_OFFSET converts between physical and virtual.
   * (The C variants further down use the ia64_va union instead.)
   */
# define __pa(x)		((x) - PAGE_OFFSET)
# define __va(x)		((x) + PAGE_OFFSET)
#else /* !__ASSEMBLY */
#  define STRICT_MM_TYPECHECKS

extern void clear_page (void *page);
extern void copy_page (void *to, void *from);

/*
 * clear_user_page() and copy_user_page() can't be inline functions because
 * flush_dcache_page() can't be defined until later...
 */
#define clear_user_page(addr, vaddr, page)	\
do {						\
	clear_page(addr);			\
	flush_dcache_page(page);		\
} while (0)

#define copy_user_page(to, from, vaddr, page)	\
do {						\
	copy_page((to), (from));		\
	flush_dcache_page(page);		\
} while (0)
83 
84 
/*
 * Allocate a zeroed, movable user folio for @vaddr in @vma; on success the
 * folio is flushed with flush_dcache_folio() before being returned.
 * Evaluates to the folio pointer, or NULL on allocation failure.
 */
#define vma_alloc_zeroed_movable_folio(vma, vaddr)			\
({									\
	struct folio *folio = vma_alloc_folio(				\
		GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false); \
	if (folio)							\
		flush_dcache_folio(folio);				\
	folio;								\
})

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#include <asm-generic/memory_model.h>	/* supplies page_to_pfn()/pfn_to_page() */

#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
101 
/*
 * Union for picking apart an IA-64 virtual address: the low 61 bits are
 * the intra-region offset and the top 3 bits the region number.  The same
 * 64 bits can also be viewed as a plain unsigned long or as a pointer.
 */
typedef union ia64_va {
	struct {
		unsigned long off : 61;		/* intra-region offset */
		unsigned long reg :  3;		/* region number */
	} f;
	unsigned long l;
	void *p;
} ia64_va;
110 
/*
 * Note: These macros depend on the fact that PAGE_OFFSET has all
 * region bits set to 1 and all other bits set to zero.  They are
 * expressed in this way to ensure they result in a single "dep"
 * instruction.
 */
/* virt -> phys: clear the region bits (kernel region is identity mapped) */
#define __pa(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
/* phys -> kernel virt: force the region bits to all-ones (RGN_KERNEL == 7) */
#define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})

#define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})

#ifdef CONFIG_HUGETLB_PAGE
  /*
   * Keep the region number of @x but scale its offset down from
   * HPAGE_SIZE units to PAGE_SIZE units.
   */
# define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)			\
				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
extern unsigned int hpage_shift;	/* runtime value backing HPAGE_SHIFT */
#endif
129 
130 static __inline__ int
get_order(unsigned long size)131 get_order (unsigned long size)
132 {
133 	long double d = size - 1;
134 	long order;
135 
136 	order = ia64_getf_exp(d);
137 	order = order - PAGE_SHIFT - 0xffff + 1;
138 	if (order < 0)
139 		order = 0;
140 	return order;
141 }
142 
143 #endif /* !__ASSEMBLY__ */
144 
#ifdef STRICT_MM_TYPECHECKS
  /*
   * These are used to make use of C type-checking..
   * (STRICT_MM_TYPECHECKS is defined unconditionally for C code above,
   * so this is the branch normally in effect; the plain-unsigned-long
   * branch below is the non-typechecked fallback.)
   */
  typedef struct { unsigned long pte; } pte_t;
  typedef struct { unsigned long pmd; } pmd_t;
#if CONFIG_PGTABLE_LEVELS == 4
  typedef struct { unsigned long pud; } pud_t;	/* only with 4-level page tables */
#endif
  typedef struct { unsigned long pgd; } pgd_t;
  typedef struct { unsigned long pgprot; } pgprot_t;
  typedef struct page *pgtable_t;

# define pte_val(x)	((x).pte)
# define pmd_val(x)	((x).pmd)
#if CONFIG_PGTABLE_LEVELS == 4
# define pud_val(x)	((x).pud)
#endif
# define pgd_val(x)	((x).pgd)
# define pgprot_val(x)	((x).pgprot)

# define __pte(x)	((pte_t) { (x) } )
# define __pmd(x)	((pmd_t) { (x) } )
# define __pgprot(x)	((pgprot_t) { (x) } )
  /*
   * NOTE(review): no __pud()/__pgd() constructors are defined here, while
   * the non-strict branch defines __pgd() but not __pmd() — presumably no
   * caller needs the missing ones; confirm before relying on them.
   */

#else /* !STRICT_MM_TYPECHECKS */
  /*
   * .. while these make it easier on the compiler
   */
# ifndef __ASSEMBLY__
    typedef unsigned long pte_t;
    typedef unsigned long pmd_t;
    typedef unsigned long pgd_t;
    typedef unsigned long pgprot_t;
    typedef struct page *pgtable_t;
# endif

# define pte_val(x)	(x)
# define pmd_val(x)	(x)
# define pgd_val(x)	(x)
# define pgprot_val(x)	(x)

# define __pte(x)	(x)
# define __pgd(x)	(x)
# define __pgprot(x)	(x)
#endif /* !STRICT_MM_TYPECHECKS */
191 
#define PAGE_OFFSET			RGN_BASE(RGN_KERNEL)	/* base of identity-mapped kernel region (7) */

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

#define GATE_ADDR		RGN_BASE(RGN_GATE)	/* base of the gate-page region (5) */

/*
 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
 */
#define KERNEL_START		 (GATE_ADDR+__IA64_UL_CONST(0x100000000))
#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)	/* per-CPU area at the very top of the address space */
#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)

#define __HAVE_ARCH_GATE_AREA	1
207 
208 #endif /* _ASM_IA64_PAGE_H */
209