// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Common implementation of switch_mm_irqs_off
 *
 *  Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
#ifdef CONFIG_PPC_BOOK3S_32
	tsk->thread.sr0 = mm->context.sr0;
#endif
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
	tsk->thread.pid = mm->context.id;
#endif
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
#ifdef CONFIG_PPC_KUAP
	tsk->thread.pid = mm->context.id;
#endif
}
#else
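/* Other sub-architectures (64-bit Book3S) do not need to track the PGD here */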
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif

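/*
 * Switch the MMU context from @prev to @next on the current CPU.
 * Interrupts must be disabled by the caller; this is reached from the
 * scheduler's context switch path and from switch_mm(), which disables
 * interrupts around the call.
 */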
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark this context as having been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent load which allows this CPU/MMU to begin loading
		 * translations for 'next' from the page table PTEs into the TLB.
		 *
		 * When using the radix MMU, that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb() to preload the SLBs, or the load of
		 * get_user_context which loads the context for the VSID hash
		 * to insert a new SLB entry, in the SLB fault handler.
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix which orders earlier stores to clear the PTEs before
		 * the load of mm_cpumask to check which CPU TLBs should be
		 * flushed. For hash, pte_xchg to clear the PTE includes the
		 * barrier.
		 *
		 * This full barrier is also needed by membarrier when
		 * switching between processes after the store to rq->curr,
		 * before user-space memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile (PPC_DSSALL);

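	/*
	 * If the mm was already active on this CPU, the smp_mb() above was
	 * skipped, so membarrier must provide its own full barrier after
	 * the store to rq->curr, before returning to user-space.
	 */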
	if (!new_on_cpu)
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now.
	 */
	switch_mmu_context(prev, next, tsk);
}

#ifndef CONFIG_PPC_BOOK3S_64
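/*
 * Free the PTE fragment page cached in the mm context, if any, when the
 * address space is torn down.  Book3S 64-bit provides its own variant of
 * arch_exit_mmap() elsewhere.
 */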
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (frag)
		pte_frag_destroy(frag);
}
#endif