/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};

static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled() (and the implementations use
	 * early_cpu_has_feature() etc.) because those work early in boot,
	 * and this is the machine check path, which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}

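/*
 * Most of the flush routines below only need to do work when the radix
 * MMU is active. With the hash MMU, stale translations are invalidated
 * as the Linux PTEs themselves are updated (via the batching machinery
 * in tlbflush-hash.h), so those paths have nothing left to do here.
 */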
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_pmd_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		radix__flush_hugetlb_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		radix__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_tlb_page_psize(struct mm_struct *mm,
					      unsigned long vmaddr, int psize)
{
	if (radix_enabled())
		radix__local_flush_tlb_page_psize(mm, vmaddr, psize);
}

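/*
 * tlb_flush() is called at the end of an mmu_gather operation. Unlike the
 * flushes above it is not radix-only: for hash, hash__tlb_flush()
 * completes any batched hash-PTE invalidations that were queued while the
 * PTEs were being torn down (see tlbflush-hash.h).
 */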
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);
	else
		hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}

static inline bool __pte_flags_need_flush(unsigned long oldval,
					  unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/*
	 * The return value of this function doesn't matter for hash:
	 * ptep_modify_prot_start() does a pte_update(), which performs or
	 * schedules any necessary hash table update and flush.
	 */
	if (!radix_enabled())
		return true;

	/*
	 * We do not expect kernel mappings, non-PTEs, or non-present PTEs.
	 */
	VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));

	/*
	 * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
	 *
	 * In theory, some changed software bits could be tolerated; in
	 * practice they should rarely if ever matter.
	 */

	if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
		return true;

	/*
	 * If any of the above bits was set in the old PTE but is cleared in
	 * the new one, flush. _PAGE_ACCESSED is the exception: don't worry
	 * about flushing when it is cleared (see the comment in
	 * ptep_clear_flush_young()).
	 */
	if ((delta & ~_PAGE_ACCESSED) & oldval)
		return true;

	return false;
}

static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
}
#define pte_needs_flush pte_needs_flush

static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

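/*
 * Illustrative sketch (not the exact generic-mm code) of how a permission
 * change might use pte_needs_flush() to decide whether a TLB flush can be
 * skipped; mm/mprotect.c does something along these lines:
 *
 *	pte_t old = ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new = pte_modify(old, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, old, new);
 *	if (pte_needs_flush(old, new))
 *		flush_tlb_page(vma, addr);
 */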
extern bool tlbie_capable;
extern bool tlbie_enabled;

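/*
 * tlbie_capable says whether the platform can use the broadcast tlbie
 * instruction at all; tlbie_enabled says whether it is actually in use
 * (broadcast tlbie can be disabled at boot, in which case global
 * invalidations fall back to IPIs plus local tlbiel). cputlb_use_tlbie()
 * reports the latter.
 */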
static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}

#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */