1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
3 #define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
4
5 #include <asm/firmware.h>
6
7 /*
8 * For radix we want generic code to handle hugetlb. But then if we want
9 * both hash and radix to be enabled together we need to workaround the
10 * limitations.
11 */
12 void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
13 void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
14
15 extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
16 unsigned long addr, pte_t *ptep,
17 pte_t old_pte, pte_t pte);
18
hstate_get_psize(struct hstate * hstate)19 static inline int hstate_get_psize(struct hstate *hstate)
20 {
21 unsigned long shift;
22
23 shift = huge_page_shift(hstate);
24 if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
25 return MMU_PAGE_2M;
26 else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
27 return MMU_PAGE_1G;
28 else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
29 return MMU_PAGE_16M;
30 else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
31 return MMU_PAGE_16G;
32 else {
33 WARN(1, "Wrong huge page shift\n");
34 return mmu_virtual_psize;
35 }
36 }
37
38 #define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
gigantic_page_runtime_supported(void)39 static inline bool gigantic_page_runtime_supported(void)
40 {
41 /*
42 * We used gigantic page reservation with hypervisor assist in some case.
43 * We cannot use runtime allocation of gigantic pages in those platforms
44 * This is hash translation mode LPARs.
45 */
46 if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
47 return false;
48
49 return true;
50 }
51
52 /* hugepd entry valid bit */
53 #define HUGEPD_VAL_BITS (0x8000000000000000UL)
54
55 #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
56 extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
57 unsigned long addr, pte_t *ptep);
58
59 #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
60 extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
61 unsigned long addr, pte_t *ptep,
62 pte_t old_pte, pte_t new_pte);
63 /*
64 * This should work for other subarchs too. But right now we use the
65 * new format only for 64bit book3s
66 */
hugepd_page(hugepd_t hpd)67 static inline pte_t *hugepd_page(hugepd_t hpd)
68 {
69 BUG_ON(!hugepd_ok(hpd));
70 /*
71 * We have only four bits to encode, MMU page size
72 */
73 BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
74 return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
75 }
76
/* Extract the MMU page-size index encoded in bits [5:2] of the hugepd. */
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	const unsigned long val = hpd_val(hpd);

	return (val & HUGEPD_SHIFT_MASK) >> 2;
}
81
/* Page shift corresponding to the hugepd's encoded MMU page size. */
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	const unsigned int psize = hugepd_mmu_psize(hpd);

	return mmu_psize_to_shift(psize);
}
/*
 * Flush the TLB entry for a hugetlb page at @vmaddr in @vma.
 *
 * Radix delegates to radix__flush_hugetlb_page(); for hash this function
 * does nothing.
 *
 * NOTE(review): a non-static prototype for flush_hugetlb_page() also
 * appears further down in this header. In C the later declaration
 * inherits internal linkage from this static definition, so it compiles,
 * but one of the two is almost certainly a leftover — confirm which
 * interface (inline vs. out-of-line) is intended and drop the other.
 */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}
92
hugepte_offset(hugepd_t hpd,unsigned long addr,unsigned int pdshift)93 static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
94 unsigned int pdshift)
95 {
96 unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
97
98 return hugepd_page(hpd) + idx;
99 }
100
/*
 * Install a hugepd entry: physical address of the hugepte page, the
 * valid bit, and the MMU page-size index in bits [5:2].
 */
static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
	unsigned long val = __pa(new) | HUGEPD_VAL_BITS;

	val |= (unsigned long)shift_to_mmu_psize(pshift) << 2;
	*hpdp = __hugepd(val);
}
105
106 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
107
check_and_get_huge_psize(int shift)108 static inline int check_and_get_huge_psize(int shift)
109 {
110 int mmu_psize;
111
112 if (shift > SLICE_HIGH_SHIFT)
113 return -EINVAL;
114
115 mmu_psize = shift_to_mmu_psize(shift);
116
117 /*
118 * We need to make sure that for different page sizes reported by
119 * firmware we only add hugetlb support for page sizes that can be
120 * supported by linux page table layout.
121 * For now we have
122 * Radix: 2M and 1G
123 * Hash: 16M and 16G
124 */
125 if (radix_enabled()) {
126 if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
127 return -EINVAL;
128 } else {
129 if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
130 return -EINVAL;
131 }
132 return mmu_psize;
133 }
134
135 #endif
136