// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <bitstring.h>
#include <config.h>
#include <kernel/cache_helpers.h>
#include <kernel/spinlock.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/tee_misc.h>
#include <kernel/tlb_helpers.h>
#include <kernel/tz_ssvce_pl310.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <trace.h>
#include <util.h>

/*
 * Two ASIDs per context, one for kernel mode and one for user mode. ASID 0
 * and 1 are reserved and not used. This means a maximum of 126 loaded user
 * mode contexts. This value can be increased, but not beyond the maximum
 * ASID, which is architecture dependent (max 255 for ARMv7-A and ARMv8-A
 * AArch32). This constant defines the number of ASID pairs.
 */
#define MMU_NUM_ASID_PAIRS 64
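/*
 * Pair index i in the bitmap below maps to ASIDs 2 * (i + 1) and
 * 2 * (i + 1) + 1, keeping ASIDs 0 and 1 unused, see asid_alloc() and
 * asid_free(). For illustration only, an assumed caller pattern (not part
 * of this file) would be:
 *
 *	asid = asid_alloc();
 *	if (!asid)
 *		return error;	// all ASID pairs in use
 *	...
 *	asid_free(asid);
 */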

static bitstr_t bit_decl(g_asid, MMU_NUM_ASID_PAIRS) __nex_bss;
static unsigned int g_asid_spinlock __nex_bss = SPINLOCK_UNLOCK;

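/*
 * Invalidate TLB entries for the virtual address range [va, va + len),
 * matching any ASID, walking the range in steps of 'granule'. The barriers
 * before and after the loop make prior table updates visible to the walker
 * and ensure the invalidation has completed before returning.
 */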
void tlbi_mva_range(vaddr_t va, size_t len, size_t granule)
{
	assert(granule == CORE_MMU_PGDIR_SIZE || granule == SMALL_PAGE_SIZE);
	assert(!(va & (granule - 1)) && !(len & (granule - 1)));

	dsb_ishst();
	while (len) {
		tlbi_mva_allasid_nosync(va);
		len -= granule;
		va += granule;
	}
	dsb_ish();
	isb();
}

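/*
 * Same as tlbi_mva_range(), but only invalidates entries tagged with the
 * given ASID instead of entries for all ASIDs.
 */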
void tlbi_mva_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid)
{
	assert(granule == CORE_MMU_PGDIR_SIZE || granule == SMALL_PAGE_SIZE);
	assert(!(va & (granule - 1)) && !(len & (granule - 1)));

	dsb_ishst();
	while (len) {
		tlbi_mva_asid_nosync(va, asid);
		len -= granule;
		va += granule;
	}
	dsb_ish();
	isb();
}

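/*
 * Perform the requested maintenance operation on the inner (architected)
 * caches: clean, invalidate or clean+invalidate the data cache, either for
 * the whole cache or for the range [va, va + len), or invalidate the
 * instruction cache. Returns TEE_ERROR_NOT_IMPLEMENTED for unknown ops.
 */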
TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len)
{
	switch (op) {
	case DCACHE_CLEAN:
		dcache_op_all(DCACHE_OP_CLEAN);
		break;
	case DCACHE_AREA_CLEAN:
		dcache_clean_range(va, len);
		break;
	case DCACHE_INVALIDATE:
		dcache_op_all(DCACHE_OP_INV);
		break;
	case DCACHE_AREA_INVALIDATE:
		dcache_inv_range(va, len);
		break;
	case ICACHE_INVALIDATE:
		icache_inv_all();
		break;
	case ICACHE_AREA_INVALIDATE:
		icache_inv_range(va, len);
		break;
	case DCACHE_CLEAN_INV:
		dcache_op_all(DCACHE_OP_CLEAN_INV);
		break;
	case DCACHE_AREA_CLEAN_INV:
		dcache_cleaninv_range(va, len);
		break;
	default:
		return TEE_ERROR_NOT_IMPLEMENTED;
	}
	return TEE_SUCCESS;
}

#ifdef CFG_PL310
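/*
 * Perform the requested maintenance operation on the outer PL310 (L2)
 * cache, either by way or by physical address range. Foreign interrupts
 * are masked and the L2CC mutex is held for the duration, serializing the
 * operation against other users of the PL310 maintenance registers.
 */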
TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len)
{
	TEE_Result ret = TEE_SUCCESS;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	tee_l2cc_mutex_lock();
	switch (op) {
	case DCACHE_INVALIDATE:
		arm_cl2_invbyway(pl310_base());
		break;
	case DCACHE_AREA_INVALIDATE:
		if (len)
			arm_cl2_invbypa(pl310_base(), pa, pa + len - 1);
		break;
	case DCACHE_CLEAN:
		arm_cl2_cleanbyway(pl310_base());
		break;
	case DCACHE_AREA_CLEAN:
		if (len)
			arm_cl2_cleanbypa(pl310_base(), pa, pa + len - 1);
		break;
	case DCACHE_CLEAN_INV:
		arm_cl2_cleaninvbyway(pl310_base());
		break;
	case DCACHE_AREA_CLEAN_INV:
		if (len)
			arm_cl2_cleaninvbypa(pl310_base(), pa, pa + len - 1);
		break;
	default:
		ret = TEE_ERROR_NOT_IMPLEMENTED;
	}

	tee_l2cc_mutex_unlock();
	thread_set_exceptions(exceptions);
	return ret;
}
#endif /*CFG_PL310*/

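/*
 * Allocate an ASID pair and return its even (base) ASID, or 0 if all
 * pairs are in use. The bitmap is protected by g_asid_spinlock, taken
 * with exceptions masked.
 */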
unsigned int asid_alloc(void)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&g_asid_spinlock);
	unsigned int r;
	int i;

	bit_ffc(g_asid, MMU_NUM_ASID_PAIRS, &i);
	if (i == -1) {
		r = 0;
	} else {
		bit_set(g_asid, i);
		r = (i + 1) * 2;
	}

	cpu_spin_unlock_xrestore(&g_asid_spinlock, exceptions);
	return r;
}

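/*
 * Release an ASID pair previously returned by asid_alloc(). Passing 0
 * (the "no ASID" value) is a no-op.
 */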
void asid_free(unsigned int asid)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&g_asid_spinlock);

	/* Only even ASIDs are supposed to be allocated */
	assert(!(asid & 1));

	if (asid) {
		int i = (asid - 1) / 2;

		assert(i < MMU_NUM_ASID_PAIRS && bit_test(g_asid, i));
		bit_clear(g_asid, i);
	}

	cpu_spin_unlock_xrestore(&g_asid_spinlock, exceptions);
}

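/*
 * Translate a virtual address to a physical address using the hardware
 * address translation instructions (ATS1CPR on AArch32, AT S1E1R on
 * AArch64) and read the result back from PAR. Returns false if the
 * translation aborted (PAR.F set), otherwise true with *pa updated.
 */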
bool arch_va2pa_helper(void *va, paddr_t *pa)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	paddr_t par = 0;
	paddr_t par_pa_mask = 0;
	bool ret = false;

#ifdef ARM32
	write_ats1cpr((vaddr_t)va);
	isb();
#ifdef CFG_WITH_LPAE
	par = read_par64();
	par_pa_mask = PAR64_PA_MASK;
#else
	par = read_par32();
	par_pa_mask = PAR32_PA_MASK;
#endif
#endif /*ARM32*/

#ifdef ARM64
	write_at_s1e1r((vaddr_t)va);
	isb();
	par = read_par_el1();
	par_pa_mask = PAR_PA_MASK;
#endif
	if (par & PAR_F)
		goto out;
	*pa = (par & (par_pa_mask << PAR_PA_SHIFT)) |
	      ((vaddr_t)va & (BIT64(PAR_PA_SHIFT) - 1));

	ret = true;
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}

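/* Return true if the MMU is enabled on the current core (SCTLR.M set) */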
bool cpu_mmu_enabled(void)
{
	uint32_t sctlr;

#ifdef ARM32
	sctlr = read_sctlr();
#else
	sctlr = read_sctlr_el1();
#endif

	return sctlr & SCTLR_M ? true : false;
}