/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages, however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#define PAGE_SHIFT CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
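/*
 * Worked example (config-dependent values, not extra definitions): with the
 * common CONFIG_PPC_PAGE_SHIFT=12 this gives PAGE_SIZE = 1 << 12 = 0x1000
 * (4K); with CONFIG_PPC_PAGE_SHIFT=16 it gives 0x10000 (64K).
 */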

#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT 19 /* 512k pages */
#elif defined(CONFIG_PPC_E500)
#define HPAGE_SHIFT 22 /* 4M pages */
#endif
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
#endif
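/*
 * Worked example (arithmetic only, no additional defines): on e500 with 4K
 * base pages, HPAGE_SHIFT = 22 and PAGE_SHIFT = 12, so HPAGE_SIZE = 4M and
 * HUGETLB_PAGE_ORDER = 22 - 12 = 10, i.e. one huge page covers 2^10 = 1024
 * base pages.
 */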

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
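/*
 * Worked example of that sign extension (assuming PAGE_SHIFT = 12): the int
 * (1 << 12) - 1 is 0x00000fff, so PAGE_MASK is the int 0xfffff000, i.e. the
 * negative value -4096. Assigning it to a 64-bit unsigned long therefore
 * sign-extends to 0xfffffffffffff000, with 1s in the high bits as intended.
 */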

/*
 * KERNELBASE is the virtual address of the start of the kernel; it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two equivalent ways to map a physical address to a virtual one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */
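/*
 * Worked example (hypothetical kdump-style values, not defaults): with
 * KERNELBASE = 0xc4000000, PAGE_OFFSET = 0xc0000000, PHYSICAL_START =
 * 0x04000000 and MEMORY_START = 0, the linear-mapping equation holds:
 * 0xc4000000 - 0xc0000000 = 0x04000000 - 0. Both va formulas then agree,
 * e.g. for pa = 0x05000000:
 *	0x05000000 + 0xc0000000 - 0 = 0xc5000000
 *	0x05000000 + 0xc4000000 - 0x04000000 = 0xc5000000
 */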

#define KERNELBASE ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START kernstart_addr

#else /* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See the description of VIRT_PHYS_OFFSET below */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START 0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START memstart_addr
#else
#define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#endif
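/*
 * For instance (worked values, assuming CONFIG_FLATMEM and 4K pages): if
 * lowmem starts at MEMORY_START = 0x10000000 (256M), then ARCH_PFN_OFFSET =
 * 0x10000000 >> 12 = 0x10000, so mem_map[0] describes the page frame at
 * physical 256M rather than physical 0.
 */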

#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(vaddr) ({					\
	unsigned long _addr = (unsigned long)vaddr;			\
	_addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&	\
	pfn_valid(virt_to_pfn(_addr));					\
})
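/*
 * Typical use (a sketch; my_buf is a hypothetical lowmem pointer): guard a
 * linear-map translation with virt_addr_valid() first, e.g.
 *
 *	if (virt_addr_valid(my_buf))
 *		page = virt_to_page(my_buf);
 *
 * vmalloc/ioremap addresses fail the check, since they sit outside the
 * linear mapping.
 */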

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then. However, we can determine
 * PHYSICAL_START from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 * With RELOCATABLE && PPC32, we support loading the kernel at any physical
 * address without any restriction on page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *			MODULO(_stext.run, 256M)
 *
 * and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 * When we process relocations, we cannot depend on the
 * existing equation for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * where:
 *	PHYSICAL_START = kernstart_addr = physical address of _stext
 *	KERNELBASE = compiled virtual address of _stext
 *
 * This formula holds true iff the kernel load address is TLB page aligned.
 *
 * In our case, we also need to account for the shift in the kernel virtual
 * address.
 *
 * E.g., let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000
 * (same as PAGE_OFFSET). In this case, we map physical 0 to 0xc0000000,
 * and kernstart_addr = 64M. Then:
 *
 *	__va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		  = 0xbc100000, which is wrong.
 *
 * Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * where:
 *	PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base
 *			     = ALIGN_DOWN(KERNELBASE, 256M) +
 *			       MODULO(PHYSICAL_START, 256M)
 *
 * To make __va()/__pa() cheaper, we introduce a new variable,
 * virt_phys_offset, which holds:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE, 256M) -
 *			   ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 * Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 * and
 *
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64, PAGE_OFFSET and MEMORY_START are constants, so we
 * use the other definitions for __va() and __pa().
 */
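/*
 * Continuing the 64MB example above (worked values):
 *
 *	virt_phys_offset = ALIGN_DOWN(0xc0000000, 256M) -
 *			   ALIGN_DOWN(0x4000000, 256M)
 *			 = 0xc0000000 - 0 = 0xc0000000
 *
 * so __va(1MB) = 0x100000 + 0xc0000000 = 0xc0100000, matching the mapping
 * of physical 0 to virtual 0xc0000000 described above.
 */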
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64

#define VIRTUAL_WARN_ON(x) WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))

/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 * This also results in better code generation.
 */
#define __va(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET);		\
	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);	\
})

#define __pa(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET);		\
	(unsigned long)(x) & 0x0fffffffffffffffUL;			\
})
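/*
 * Worked example of the |/& trick (assuming the usual Book3S 64 PAGE_OFFSET
 * of 0xc000000000000000): __pa(0xc000000000123000) masks with
 * 0x0fffffffffffffff to give 0x123000, and __va(0x123000) ORs PAGE_OFFSET
 * back in to give 0xc000000000123000. This works because the linear map
 * lives entirely in the top 0xc quadrant, so subtraction and addition
 * reduce to clearing and setting the top nibble.
 */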

#else /* 32-bit, non Book-E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 VM_DATA_FLAGS_TSK_EXEC
#define VM_DATA_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness"; use is_kernel_addr(), it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x) ((x) >= 0x8000000000000000ul)
#elif defined(CONFIG_PPC_BOOK3S_64)
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#else
#define is_kernel_addr(x) ((x) >= TASK_SIZE)
#endif

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages. This works because we know that
 * the page tables live in kernel space. If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else /* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK (0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size. This masks those bits.
 * On 8xx, HW assistance requires 4k alignment for the hugepte.
 */
#ifdef CONFIG_PPC_8xx
#define HUGEPD_SHIFT_MASK 0xfff
#else
#define HUGEPD_SHIFT_MASK 0x3f
#endif

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
			   struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

extern unsigned long kernstart_virt_addr;

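/*
 * kaslr_offset() returns the distance KASLR moved the kernel's virtual base
 * from the compile-time KERNELBASE; it is zero when the kernel was not
 * relocated.
 */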
static inline unsigned long kaslr_offset(void)
{
	return kernstart_virt_addr - KERNELBASE;
}

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */