/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H

#include <asm/asm-const.h>

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/radix-64k.h>
#else
#include <asm/book3s/64/radix-4k.h>
#endif

#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
#endif
/*
 * An empty PTE can still have its R (reference) or C (change) bit set by
 * a hardware writeback.
 */
#define RADIX_PTE_NONE_MASK		(_PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * Bits to set in a next-level RPMD/RPUD/RPGD entry: bit 63 is the
 * hardware valid bit, and the low bits carry the index size of the
 * next level (the NLS field).
 */
#define RADIX_PMD_VAL_BITS		(0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_VAL_BITS		(0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_VAL_BITS		(0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)
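
/*
 * A minimal sketch of how these are consumed when a lower-level table is
 * linked in (the real helpers live in the pgalloc headers; "pte_table"
 * is a hypothetical name used only for illustration):
 *
 *	*pmdp = __pmd(__pa(pte_table) | RADIX_PMD_VAL_BITS);
 *
 * i.e. the physical address of the PTE table, marked valid, with the
 * PTE level's index size in the low bits.
 */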

/* A valid table pointer must have nothing set in the reserved bits or the leaf bit */
#define RADIX_PMD_BAD_BITS		0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS		0x60000000000000e0UL
#define RADIX_P4D_BAD_BITS		0x60000000000000e0UL

#define RADIX_PMD_SHIFT		(PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_SHIFT		(RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_SHIFT		(RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)

#define R_PTRS_PER_PTE		(1 << RADIX_PTE_INDEX_SIZE)
#define R_PTRS_PER_PMD		(1 << RADIX_PMD_INDEX_SIZE)
#define R_PTRS_PER_PUD		(1 << RADIX_PUD_INDEX_SIZE)

/*
 * Size of EA range mapped by our pagetables.
 */
#define RADIX_PGTABLE_EADDR_SIZE (RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE +	\
			      RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
#define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)
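
/*
 * Worked example, assuming the index sizes in asm/book3s/64/radix-4k.h
 * (PTE 9, PMD 9, PUD 9, PGD 13) with PAGE_SHIFT = 12:
 *
 *	9 + 9 + 9 + 13 + 12 = 52
 *
 * so RADIX_PGTABLE_RANGE is 1UL << 52 = 4PB. The 64K layout reaches the
 * same 52 bits (5 + 9 + 9 + 13 + 16).
 */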

/*
 * We support a 52-bit address space. The top bit is used for the kernel
 * virtual mapping, and the kernel must fit within the top quadrant.
 *
 *            +------------------+
 *            +------------------+  Kernel virtual map (0xc008000000000000)
 *            |                  |
 *            |                  |
 *            |                  |
 * 0b11......+------------------+  Kernel linear map (0xc....)
 *            |                  |
 *            |   2nd quadrant   |
 *            |                  |
 * 0b10......+------------------+
 *            |                  |
 *            |   1st quadrant   |
 *            |                  |
 * 0b01......+------------------+
 *            |                  |
 *            |   0th quadrant   |
 *            |                  |
 * 0b00......+------------------+
 *
 *
 * 3rd quadrant expanded:
 * +------------------------------+  Highest address (0xc010000000000000)
 * +------------------------------+  KASAN shadow end (0xc00fc00000000000)
 * |                              |
 * |                              |
 * +------------------------------+  Kernel vmemmap end/shadow start (0xc00e000000000000)
 * |                              |
 * |            512TB             |
 * |                              |
 * +------------------------------+  Kernel IO map end/vmemmap start
 * |                              |
 * |            512TB             |
 * |                              |
 * +------------------------------+  Kernel vmap end/IO map start
 * |                              |
 * |            512TB             |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

/* For the sizes of the shadow area, see kasan.h */

/*
 * If we store section details in page->flags, we cannot increase
 * MAX_PHYSMEM_BITS: increasing SECTIONS_WIDTH would leave no room for
 * the node details in page->flags, and page_to_nid() would then need a
 * page->section->node lookup. Hence only raise the limit for VMEMMAP,
 * and further rely on SPARSEMEM_EXTREME to reduce the memory
 * requirements of a large number of sections.
 * 51 bits is the max physical real address on POWER9.
 */

#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME)
#define R_MAX_PHYSMEM_BITS	51
#else
#define R_MAX_PHYSMEM_BITS	46
#endif
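
/*
 * For scale: R_MAX_PHYSMEM_BITS = 51 covers 1UL << 51 = 2PB of real
 * address space, versus 64TB with the default of 46.
 */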

#define RADIX_KERN_VIRT_START	ASM_CONST(0xc008000000000000)
/*
 * 49 = MAX_EA_BITS_PER_CONTEXT (hash specific), used here so that radix
 * picks the same value as hash. Each region below is therefore 512TB.
 */
#define RADIX_KERN_MAP_SIZE	(1UL << 49)

#define RADIX_VMALLOC_START	RADIX_KERN_VIRT_START
#define RADIX_VMALLOC_SIZE	RADIX_KERN_MAP_SIZE
#define RADIX_VMALLOC_END	(RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)

#define RADIX_KERN_IO_START	RADIX_VMALLOC_END
#define RADIX_KERN_IO_SIZE	RADIX_KERN_MAP_SIZE
#define RADIX_KERN_IO_END	(RADIX_KERN_IO_START + RADIX_KERN_IO_SIZE)

#define RADIX_VMEMMAP_START	RADIX_KERN_IO_END
#define RADIX_VMEMMAP_SIZE	RADIX_KERN_MAP_SIZE
#define RADIX_VMEMMAP_END	(RADIX_VMEMMAP_START + RADIX_VMEMMAP_SIZE)
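
/*
 * Worked out, these definitions give the layout in the diagram above:
 *
 *	vmalloc: 0xc008000000000000 - 0xc009ffffffffffff
 *	IO:      0xc00a000000000000 - 0xc00bffffffffffff
 *	vmemmap: 0xc00c000000000000 - 0xc00dffffffffffff
 */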

#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE	(sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
#define RADIX_PMD_TABLE_SIZE	(sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
#define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)

#ifdef CONFIG_STRICT_KERNEL_RWX
extern void radix__mark_rodata_ro(void);
extern void radix__mark_initmem_nx(void);
#endif

extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
					 pte_t entry, unsigned long address,
					 int psize);

extern void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   pte_t old_pte, pte_t pte);

static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
					       unsigned long set)
{
	__be64 old_be, tmp_be;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n"
	"	andc	%1,%0,%5	\n"
	"	or	%1,%1,%4	\n"
	"	stdcx.	%1,0,%3		\n"
	"	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(set)), "r" (cpu_to_be64(clr))
	: "cc" );

	return be64_to_cpu(old_be);
}
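
/*
 * A minimal usage sketch (hypothetical, not a caller in this file):
 * atomically clearing _PAGE_WRITE while leaving all other bits intact:
 *
 *	old = __radix_pte_update(ptep, _PAGE_WRITE, 0);
 *
 * The ldarx/stdcx. loop retries until the clear/set pair is applied
 * without a competing update, and the pre-update PTE value is returned
 * in CPU byte order.
 */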

static inline unsigned long radix__pte_update(struct mm_struct *mm,
					      unsigned long addr,
					      pte_t *ptep, unsigned long clr,
					      unsigned long set,
					      int huge)
{
	unsigned long old_pte;

	old_pte = __radix_pte_update(ptep, clr, set);
	if (!huge)
		assert_pte_locked(mm, addr);

	return old_pte;
}
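
/*
 * Sketch of a typical use (mirroring what the generic "test and clear
 * young" path boils down to on this sub-architecture; hypothetical code,
 * for illustration only):
 *
 *	old = radix__pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 *	young = !!(old & _PAGE_ACCESSED);
 */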

static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
						   unsigned long addr,
						   pte_t *ptep, int full)
{
	unsigned long old_pte;

	if (full) {
		/*
		 * "full" means the whole address space is being torn down,
		 * so a racing hardware R/C update can safely be lost and a
		 * plain non-atomic clear is sufficient.
		 */
		old_pte = pte_val(*ptep);
		*ptep = __pte(0);
	} else
		old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);

	return __pte(old_pte);
}

static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
{
	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
}

static inline int radix__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
}

static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte, int percpu)
{
	*ptep = pte;

	/*
	 * The architecture suggests a ptesync after setting the pte, which
	 * orders the store that updates the pte with subsequent page table
	 * walk accesses which may load the pte. Without this it may be
	 * possible for a subsequent access to result in a spurious fault.
	 *
	 * This is not necessary for correctness, because a spurious fault
	 * is tolerated by the page fault handler, and this store will
	 * eventually be seen. In testing, there was no noticeable increase
	 * in user faults on POWER9. Avoiding ptesync here is a significant
	 * win for things like fork. If a future microarchitecture benefits
	 * from ptesync, it should probably go into update_mmu_cache, rather
	 * than set_pte_at (which is used to set ptes unrelated to faults).
	 *
	 * Spurious faults from kernel memory are not tolerated, so there
	 * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
	 * the pte update sequence from ISA Book III 6.10 Translation Table
	 * Update Synchronization Requirements.
	 */
}

static inline int radix__pmd_bad(pmd_t pmd)
{
	return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
}

static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
}

static inline int radix__pud_bad(pud_t pud)
{
	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
}

static inline int radix__p4d_bad(p4d_t p4d)
{
	return !!(p4d_val(p4d) & RADIX_P4D_BAD_BITS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * A huge (leaf) PMD has _PAGE_PTE set; devmap PMDs also carry _PAGE_PTE
 * but are not THP, so they are excluded here.
 */
static inline int radix__pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
}

static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | _PAGE_PTE);
}

extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
						pmd_t *pmdp, unsigned long clr,
						unsigned long set);
extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp);
extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					      pgtable_t pgtable);
extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp);

static inline int radix__has_transparent_hugepage(void)
{
	/* For radix, a 2M page size at the PMD level means THP is available */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif

static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
}

extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys);
extern void radix__vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size);

extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
				  pgprot_t flags, unsigned int psz);

static inline unsigned long radix__get_tree_size(void)
{
	unsigned long rts_field;
	/*
	 * We support 52 bits, hence RTS = 52 - 31 = 21 = 0b10101.
	 * The 5-bit RTS field is split in two in the table entry:
	 * bits 0 - 2 of RTS -> bits 5 - 7 of the unsigned long
	 * bits 3 - 4 of RTS -> bits 61 - 62 of the unsigned long
	 */
	rts_field = (0x5UL << 5);	/* low 3 bits, 0b101, at bits 5 - 7 */
	rts_field |= (0x2UL << 61);	/* high 2 bits, 0b10, at bits 61 - 62 */

	return rts_field;
}

#ifdef CONFIG_MEMORY_HOTPLUG
int radix__create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot);
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */

void radix__kernel_map_pages(struct page *page, int numpages, int enable);

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_RADIX_H */