// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hexagon Virtual Machine TLB functions
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * The Hexagon Virtual Machine conceals the real workings of
 * the TLB, but there are one or two functions that need to
 * be instantiated for it, differently from a native build.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>

/*
 * Initial VM implementation has only one map active at a time, with
 * the TLB purged on changes.  So either we're nuking the current map,
 * or it's a no-op.  This operation is messy on true SMPs where other
 * processors must be induced to flush the copies in their local TLBs,
 * but Hexagon thread-based virtual processors share the same MMU.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

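	/*
	 * Translations are cached for the active map only, so a flush
	 * aimed at any other mm has nothing to clear and is a no-op.
	 */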
	if (mm->context.ptbase == current->active_mm->context.ptbase)
		__vmclrmap((void *)start, end - start);
}

/*
 * Flush a page from the kernel virtual map - used by highmem
 */
void flush_tlb_one(unsigned long vaddr)
{
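	/*
	 * vaddr is a kernel virtual address (e.g. a highmem kmap slot);
	 * clear just that single page's mapping in the VM.
	 */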
	__vmclrmap((void *)vaddr, PAGE_SIZE);
}

/*
 * Flush all TLBs across all CPUs, virtual or real.
 * A single Hexagon core has 6 thread contexts but
 * only one TLB.
 */
void tlb_flush_all(void)
{
	/*  should probably use the fixaddr end label, not this constant  */
	__vmclrmap(0, 0xffff0000);
}

/*
 * Flush TLB entries associated with a given mm_struct mapping.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	/* Current Virtual Machine has only one map active at a time */
	if (current->active_mm->context.ptbase == mm->context.ptbase)
		tlb_flush_all();
}

/*
 * Flush TLB state associated with a page of a vma.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context.ptbase == current->active_mm->context.ptbase)
		__vmclrmap((void *)vaddr, PAGE_SIZE);
}

/*
 * Flush TLB entries associated with a kernel address range.
 * Like flush_tlb_range(), but without the vma->vm_mm check.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
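	/* Kernel addresses are valid in every map, so no mm check is needed. */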
	__vmclrmap((void *)start, end - start);
}