/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif
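
/*
 * For illustration only: an NMI-time profiler that reads user memory should
 * bail out when this check fails rather than touch a half-switched mm
 * (generic helpers such as copy_from_user_nofault() perform the check
 * internally). A minimal sketch; read_user_word() is hypothetical:
 *
 *	static bool read_user_word(const void __user *uaddr, unsigned long *val)
 *	{
 *		if (!nmi_uaccess_okay())
 *			return false;
 *		return copy_from_user_nofault(val, uaddr, sizeof(*val)) == 0;
 *	}
 */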

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of the following (a usage sketch follows this
 * comment):
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_free_vmas()
 *
 *    tlb_free_vmas() marks the start of unlinking of one or more vmas
 *    and freeing page-tables.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / tlb_remove_page_size()
 *  - __tlb_remove_folio_pages() / __tlb_remove_page_size()
 *  - __tlb_remove_folio_pages_size()
 *
 *    __tlb_remove_folio_pages_size() is the basic primitive that queues pages
 *    for freeing. It will return a boolean indicating if the queue is (now)
 *    full and a call to tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *    __tlb_remove_folio_pages() is similar to __tlb_remove_page_size();
 *    however, instead of removing a single page, it assumes PAGE_SIZE and
 *    removes the given number of consecutive pages that are all part of the
 *    same (large) folio.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * In addition, the architecture may provide and implement its own tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provide the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table() or
 *  use the generic __tlb_remove_table(), which does the actual freeing of these
 *  pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_FLUSH_CACHE
 *
 *  Indicates the architecture has flush_cache_range() but it need *NOT* be called
 *  before unmapping a VMA.
 *
 *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
 *	  flush_cache_range() being a NOP, except Sparc64 seems to be
 *	  different here.
 *
 *  MMU_GATHER_MERGE_VMAS
 *
 *  Indicates the architecture wants to merge ranges over VMAs; typical when
 *  multiple range invalidates are more expensive than a full invalidate.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range(). This
 *  option implies MMU_GATHER_MERGE_VMAS above.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
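
/*
 * A usage sketch, for illustration only (the helpers zap_one_vma() and
 * for_each_vma_in_range() are hypothetical; the real work lives in
 * mm/memory.c and mm/mmap.c):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	for_each_vma_in_range(vma) {
 *		tlb_start_vma(&tlb, vma);
 *		zap_one_vma(&tlb, vma);		clears PTEs, queues pages
 *		tlb_end_vma(&tlb, vma);		flushes the gathered range
 *	}
 *	tlb_finish_mmu(&tlb);			final flush, frees queued pages
 */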

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE
static inline void __tlb_remove_table(void *table)
{
	struct ptdesc *ptdesc = (struct ptdesc *)table;

	pagetable_dtor_free(ptdesc);
}
#endif

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page);
/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct ptdesc *ptdesc = (struct ptdesc *)table;

	pagetable_dtor(ptdesc);
	tlb_remove_page(tlb, ptdesc_page(ptdesc));
}
#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct encoded_page	*encoded_pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
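
/*
 * Back-of-the-envelope, assuming 4 KiB pages and 8-byte pointers: the batch
 * header is roughly 16 bytes, so MAX_GATHER_BATCH is about
 * (4096 - 16) / 8 = 510 encoded page pointers per batch, and
 * MAX_GATHER_BATCH_COUNT works out to 10000 / 510 = 19 batches, i.e. just
 * under 10K pages queued per flush cycle.
 */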

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
		bool delay_rmap, int page_size);
bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
		unsigned int nr_pages, bool delay_rmap);

#ifdef CONFIG_SMP
/*
 * This both sets 'delayed_rmap', and returns true. It would be an inline
 * function, except we define it before the 'struct mmu_gather'.
 */
#define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
#endif

#endif

/*
 * We have a no-op version of the rmap removal that doesn't
 * delay anything. That is used on S390, which flushes remote
 * TLBs synchronously, and on UP, which doesn't have any
 * remote TLBs to flush and is not preemptible due to this
 * all happening under the page table lock.
 */
#ifndef tlb_delay_rmap
#define tlb_delay_rmap(tlb) (false)
static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * Do we have pending delayed rmap removals?
	 */
	unsigned int		delayed_rmap : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
	unsigned int		vma_pfn  : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
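
/*
 * For illustration only: successive calls simply grow the flush window to
 * the union of the ranges seen so far, e.g. (assuming 4 KiB pages and a
 * freshly reset mmu_gather):
 *
 *	__tlb_adjust_range(tlb, 0x2000, PAGE_SIZE);	start = 0x2000, end = 0x3000
 *	__tlb_adjust_range(tlb, 0x8000, PAGE_SIZE);	start = 0x2000, end = 0x9000
 *
 * which is why tlb_end_vma() resets the range: otherwise the holes between
 * VMAs would be swept into one ever-growing flush.
 */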

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have an efficient means of flushing TLBs by
 * range there is no point in doing intermediate flushes on tlb_end_vma() to
 * keep the range small. We equally don't have to worry about page granularity
 * or other things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */
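
/*
 * For illustration only, not modelled on a real architecture: an arch that
 * supplies its own tlb_flush() defines the macro in its asm/tlb.h before
 * including this header, and typically keys the invalidation off the
 * gathered state (range, cleared_* via tlb_get_unmap_size(), freed_tables).
 * The my_arch_flush_*() helpers below are hypothetical.
 *
 *	#define tlb_flush tlb_flush
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all) {
 *			my_arch_flush_mm(tlb->mm);
 *		} else if (tlb->end) {
 *			my_arch_flush_range(tlb->mm, tlb->start, tlb->end,
 *					    tlb_get_unmap_size(tlb),
 *					    tlb->freed_tables);
 *		}
 *	}
 */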

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);

	/*
	 * Track if there's at least one VM_PFNMAP/VM_MIXEDMAP vma
	 * in the tracked range, see tlb_free_vmas().
	 */
	tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, false, page_size))
		tlb_flush_mmu(tlb);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
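
/*
 * For illustration only: callers that batch many pages themselves can use the
 * __ variant and react to a full queue, which is exactly what
 * tlb_remove_page_size() does internally; a minimal sketch:
 *
 *	if (__tlb_remove_page_size(tlb, page, false, PAGE_SIZE))
 *		tlb_flush_mmu(tlb);		queue full: flush and free now
 */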

static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
	tlb_remove_table(tlb, pt);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
						     unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
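
/*
 * For illustration only: a caller that is about to queue pages of a given
 * size announces it first, so a pending batch of a different size can be
 * flushed out (sketch of a hugetlb-style unmap step):
 *
 *	tlb_change_page_size(tlb, huge_page_size(h));	may flush a prior batch
 *	... clear the huge PTE for @addr ...
 *	tlb_remove_huge_tlb_entry(h, tlb, ptep, addr);	record the flush range
 *	tlb_remove_page_size(tlb, page, huge_page_size(h));
 */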

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}

static inline void tlb_free_vmas(struct mmu_gather *tlb)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs
	 * after all.
	 *
	 * Specifically, there is a race between munmap() and
	 * unmap_mapping_range(), where munmap() will unlink the VMA, such
	 * that unmap_mapping_range() will no longer observe the VMA and
	 * no-op, without observing the TLBI, returning prematurely.
	 *
	 * So if we're about to unlink such a VMA, and we have pending
	 * TLBI for such a VMA, flush things now.
	 */
	if (tlb->vma_pfn)
		tlb_flush_mmu_tlbonly(tlb);
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
}
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.   This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
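
/*
 * For illustration only: a condensed sketch of how the zap path in
 * mm/memory.c typically pairs these calls while holding the PTE lock
 * (error handling, rmap accounting and the delay_rmap logic are omitted).
 *
 *	pte_t pteval = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
 *	struct page *page = vm_normal_page(vma, addr, pteval);
 *
 *	tlb_remove_tlb_entry(tlb, ptep, addr);		record the flush range
 *	if (page)
 *		tlb_remove_page(tlb, page);		queue for delayed free
 */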

/**
 * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for
 *			    later tlb invalidation.
 *
 * Similar to tlb_remove_tlb_entry(), but remember unmapping of multiple
 * consecutive ptes instead of only a single one.
 */
static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb,
		pte_t *ptep, unsigned int nr, unsigned long address)
{
	tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
	for (;;) {
		__tlb_remove_tlb_entry(tlb, ptep, address);
		if (--nr == 0)
			break;
		ptep++;
		address += PAGE_SIZE;
	}
}

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t. the page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
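
/*
 * For illustration only, not modelled on any particular architecture: an
 * arch whose PTE pages are ordinary pages can typically route its
 * __pte_free_tlb() through tlb_remove_ptdesc()/tlb_remove_table(), e.g.
 * (assuming pgtable_t is a struct page pointer):
 *
 *	#define __pte_free_tlb(tlb, pte, address)		\
 *		tlb_remove_ptdesc((tlb), page_ptdesc(pte))
 */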

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif
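
/*
 * For illustration only: permission-tightening paths (mprotect-style) can use
 * these helpers to skip the TLB flush when a PTE change does not require one;
 * the generic fallbacks above conservatively always flush. Sketch:
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	newpte = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
 *	if (pte_needs_flush(oldpte, newpte))
 *		tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
 */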

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */