1 /*
2 * Copyright (c) 2024 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <zephyr/kernel.h>
7 #include <zephyr/drivers/cache.h>
8 #include <zephyr/sys/barrier.h>
9 #include <hal/nrf_cache.h>
10 #include <zephyr/logging/log.h>
11
12 LOG_MODULE_REGISTER(cache_nrfx, CONFIG_CACHE_LOG_LEVEL);
13
14 #if !defined(NRF_ICACHE) && defined(NRF_CACHE)
15 #define NRF_ICACHE NRF_CACHE
16 #endif
17
18 #define CACHE_BUSY_RETRY_INTERVAL_US 10
19
20
/*
 * Cache maintenance operations, named after the nRF hardware tasks.
 * Note the naming mismatch with the Zephyr cache API: each member's
 * comment gives the Zephyr equivalent.
 */
enum k_nrf_cache_op {
	/*
	 * Sequentially loop through all dirty lines and write those data units to
	 * memory.
	 *
	 * This is FLUSH in Zephyr nomenclature.
	 */
	K_NRF_CACHE_CLEAN,

	/*
	 * Mark all lines as invalid, ignoring any dirty data.
	 *
	 * This is INVALIDATE in Zephyr nomenclature.
	 */
	K_NRF_CACHE_INVD,

	/*
	 * Clean followed by invalidate.
	 *
	 * This is FLUSH_AND_INVALIDATE in Zephyr nomenclature.
	 */
	K_NRF_CACHE_FLUSH,
};
44
/*
 * Return true while the cache controller is processing an operation.
 *
 * Controllers without a STATUS register (no NRF_CACHE_HAS_STATUS) cannot
 * report busy, so they are always treated as idle.
 */
static inline bool is_cache_busy(NRF_CACHE_Type *cache)
{
#if NRF_CACHE_HAS_STATUS
	return nrf_cache_busy_check(cache);
#else
	return false;
#endif
}
53
/*
 * Busy-wait until the cache controller reports idle.
 *
 * Poll the busy status at CACHE_BUSY_RETRY_INTERVAL_US intervals instead
 * of spinning tightly on the status register; the interval constant was
 * previously defined but never used.
 */
static inline void wait_for_cache(NRF_CACHE_Type *cache)
{
	while (is_cache_busy(cache)) {
		k_busy_wait(CACHE_BUSY_RETRY_INTERVAL_US);
	}
}
59
/*
 * Perform a whole-cache maintenance operation.
 *
 * @param cache Cache controller instance.
 * @param op    Operation to perform.
 *
 * @retval 0 on success.
 * @retval -ENOTSUP for K_NRF_CACHE_INVD: invalidating the entire cache
 *         would discard dirty data and is deliberately rejected.
 */
static inline int _cache_all(NRF_CACHE_Type *cache, enum k_nrf_cache_op op)
{
	/*
	 * We really do not want to invalidate the whole cache.
	 */
	if (op == K_NRF_CACHE_INVD) {
		return -ENOTSUP;
	}

	wait_for_cache(cache);

	/* Complete outstanding memory accesses before triggering the task. */
	barrier_dsync_fence_full();

	switch (op) {

#if NRF_CACHE_HAS_TASK_CLEAN
	case K_NRF_CACHE_CLEAN:
		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANCACHE);
		break;
#endif

#if NRF_CACHE_HAS_TASK_FLUSH
	case K_NRF_CACHE_FLUSH:
		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHCACHE);
		break;
#endif

	/*
	 * K_NRF_CACHE_INVD was rejected above, so it is folded into the
	 * default no-op branch (a dedicated case here would be unreachable).
	 */
	default:
		break;
	}

	wait_for_cache(cache);

	return 0;
}
99
100 #if NRF_CACHE_HAS_LINEADDR
/*
 * Perform a maintenance operation on the single cache line containing
 * @p line_addr.
 *
 * The LINEADDR write and task trigger are retried until LINEADDR still
 * reads back the requested address afterwards.
 * NOTE(review): the retry rationale (a concurrent line operation may
 * overwrite LINEADDR before the task latches it) is inferred from the
 * read-back loop — confirm against the product specification.
 */
static inline void _cache_line(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, uintptr_t line_addr)
{
	do {
		/* Let any in-flight operation finish before touching LINEADDR. */
		wait_for_cache(cache);

		nrf_cache_lineaddr_set(cache, line_addr);

		/* Complete outstanding memory accesses before the task fires. */
		barrier_dsync_fence_full();

		switch (op) {

#if NRF_CACHE_HAS_TASK_CLEAN
		case K_NRF_CACHE_CLEAN:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANLINE);
			break;
#endif

		case K_NRF_CACHE_INVD:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_INVALIDATELINE);
			break;

#if NRF_CACHE_HAS_TASK_FLUSH
		case K_NRF_CACHE_FLUSH:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHLINE);
			break;
#endif

		default:
			break;
		}
		/* Retry if LINEADDR no longer holds the requested address. */
	} while (nrf_cache_lineaddr_get(cache) != line_addr);
}
133
/*
 * Apply @p op line-by-line to every cache line overlapping the buffer
 * [addr, addr + size). Always operates on at least one line.
 *
 * @return 0 (the per-line helper has no failure path).
 */
static inline int _cache_range(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, void *addr,
			       size_t size)
{
	uintptr_t cur = (uintptr_t)addr;

	/* Some SOCs has a bug that requires to set 28th bit in the address on
	 * Trustzone secure builds.
	 */
	if (IS_ENABLED(CONFIG_CACHE_NRF_PATCH_LINEADDR) &&
	    !IS_ENABLED(CONFIG_TRUSTED_EXECUTION_NONSECURE)) {
		cur |= BIT(28);
	}

	const uintptr_t end = cur + size;

	/* Round the start address down to a cache-line boundary. */
	cur &= ~((uintptr_t)CONFIG_DCACHE_LINE_SIZE - 1);

	do {
		_cache_line(cache, op, cur);
		cur += CONFIG_DCACHE_LINE_SIZE;
	} while (cur < end);

	wait_for_cache(cache);

	return 0;
}
164
/*
 * Validate arguments and dispatch to the whole-cache or per-range helper.
 *
 * @retval -EAGAIN if the cache is currently disabled.
 * @retval -EINVAL for a range request with NULL address or zero size.
 * @return Otherwise, the result of the selected helper.
 */
static inline int _cache_checks(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, void *addr,
				size_t size, bool is_range)
{
	/* Maintenance operations require the cache to be enabled. */
	if (!nrf_cache_enable_check(cache)) {
		return -EAGAIN;
	}

	if (is_range) {
		/* A range request needs a non-NULL address and non-zero size. */
		if ((addr == NULL) || (size == 0)) {
			return -EINVAL;
		}
		return _cache_range(cache, op, addr, size);
	}

	return _cache_all(cache, op);
}
184 #else
/*
 * Whole-cache variant of the argument checks, used when the controller
 * has no LINEADDR support and range operations do not exist.
 *
 * @retval -EAGAIN if the cache is currently disabled.
 * @return Otherwise, the result of _cache_all().
 */
static inline int _cache_all_checks(NRF_CACHE_Type *cache, enum k_nrf_cache_op op)
{
	return nrf_cache_enable_check(cache) ? _cache_all(cache, op) : -EAGAIN;
}
193 #endif /* NRF_CACHE_HAS_LINEADDR */
194
195 #if defined(NRF_DCACHE) && NRF_CACHE_HAS_TASKS
196
/* Enable the data cache controller. */
void cache_data_enable(void)
{
	nrf_cache_enable(NRF_DCACHE);
}
201
/*
 * Write back all dirty data cache lines to memory (Zephyr FLUSH ==
 * nRF CLEAN). Returns -ENOTSUP when the controller has no CLEAN task.
 */
int cache_data_flush_all(void)
{
#if NRF_CACHE_HAS_TASK_CLEAN
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_CLEAN, NULL, 0, false);
#else
	return -ENOTSUP;
#endif
}
210
/*
 * Disable the data cache controller.
 *
 * If the cache is currently enabled, dirty lines are flushed first so no
 * data is lost; the flush result is deliberately ignored (best effort).
 */
void cache_data_disable(void)
{
	if (nrf_cache_enable_check(NRF_DCACHE)) {
		(void)cache_data_flush_all();
	}
	nrf_cache_disable(NRF_DCACHE);
}
218
/*
 * Invalidate the whole data cache. Always returns -ENOTSUP: _cache_all()
 * rejects whole-cache invalidation to avoid discarding dirty data.
 */
int cache_data_invd_all(void)
{
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_INVD, NULL, 0, false);
}
223
/*
 * Clean then invalidate the whole data cache (Zephyr FLUSH_AND_INVALIDATE
 * == nRF FLUSH). Returns -ENOTSUP when the controller has no FLUSH task.
 */
int cache_data_flush_and_invd_all(void)
{
#if NRF_CACHE_HAS_TASK_FLUSH
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_FLUSH, NULL, 0, false);
#else
	return -ENOTSUP;
#endif
}
232
/*
 * Write back dirty data cache lines covering [addr, addr + size).
 * Returns -ENOTSUP when the controller has no CLEAN task.
 */
int cache_data_flush_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_CLEAN
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_CLEAN, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
241
/* Invalidate data cache lines covering [addr, addr + size). */
int cache_data_invd_range(void *addr, size_t size)
{
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_INVD, addr, size, true);
}
246
/*
 * Clean then invalidate data cache lines covering [addr, addr + size).
 * Returns -ENOTSUP when the controller has no FLUSH task.
 */
int cache_data_flush_and_invd_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_FLUSH
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_FLUSH, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
255
256 #else
257
/*
 * Stub implementations for builds without a task-based data cache
 * controller: enable/disable are no-ops and every maintenance operation
 * reports -ENOTSUP.
 */
void cache_data_enable(void)
{
	/* Nothing */
}

void cache_data_disable(void)
{
	/* Nothing */
}

int cache_data_flush_all(void)
{
	return -ENOTSUP;
}

int cache_data_invd_all(void)
{
	return -ENOTSUP;
}

int cache_data_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

int cache_data_flush_range(void *addr, size_t size)
{
	return -ENOTSUP;
}

int cache_data_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}

int cache_data_flush_and_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}
297
298 #endif /* NRF_DCACHE */
299
300 #if defined(NRF_ICACHE) && NRF_CACHE_HAS_TASKS
301
/* Enable the instruction cache controller. */
void cache_instr_enable(void)
{
	nrf_cache_enable(NRF_ICACHE);
}
306
/*
 * Disable the instruction cache controller. Unlike the data cache path,
 * no flush is performed first.
 */
void cache_instr_disable(void)
{
	nrf_cache_disable(NRF_ICACHE);
}
311
/*
 * Clean the whole instruction cache, choosing the check helper that
 * matches the controller's capabilities (with or without LINEADDR).
 * Returns -ENOTSUP when the controller has no CLEAN task.
 */
int cache_instr_flush_all(void)
{
#if NRF_CACHE_HAS_TASK_CLEAN
#if NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN, NULL, 0, false);
#else
	return _cache_all_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN);
#endif
#else
	return -ENOTSUP;
#endif
}
324
/*
 * Invalidate the whole instruction cache. Always yields -ENOTSUP because
 * _cache_all() rejects whole-cache invalidation.
 */
int cache_instr_invd_all(void)
{
#if NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_INVD, NULL, 0, false);
#else
	return _cache_all_checks(NRF_ICACHE, K_NRF_CACHE_INVD);
#endif
}
333
/*
 * Clean then invalidate the whole instruction cache, choosing the check
 * helper that matches the controller's capabilities.
 * Returns -ENOTSUP when the controller has no FLUSH task.
 */
int cache_instr_flush_and_invd_all(void)
{
#if NRF_CACHE_HAS_TASK_FLUSH
#if NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH, NULL, 0, false);
#else
	return _cache_all_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH);
#endif
#else
	return -ENOTSUP;
#endif
}
346
/*
 * Clean instruction cache lines covering [addr, addr + size).
 * Range operations need both a CLEAN task and LINEADDR support;
 * otherwise returns -ENOTSUP.
 */
int cache_instr_flush_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_CLEAN && NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
355
/*
 * Invalidate instruction cache lines covering [addr, addr + size).
 * Returns -ENOTSUP without LINEADDR support.
 */
int cache_instr_invd_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_INVD, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
364
/*
 * Clean then invalidate instruction cache lines covering
 * [addr, addr + size). Requires both the FLUSH task and LINEADDR
 * support; otherwise returns -ENOTSUP.
 */
int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_FLUSH && NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
373
374 #else
375
/*
 * Stub implementations for builds without a task-based instruction cache
 * controller: enable/disable are no-ops and every maintenance operation
 * reports -ENOTSUP.
 */
void cache_instr_enable(void)
{
	/* Nothing */
}

void cache_instr_disable(void)
{
	/* Nothing */
}

int cache_instr_flush_all(void)
{
	return -ENOTSUP;
}

int cache_instr_invd_all(void)
{
	return -ENOTSUP;
}

int cache_instr_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

int cache_instr_flush_range(void *addr, size_t size)
{
	return -ENOTSUP;
}

int cache_instr_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}

int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}
415
416 #endif /* NRF_ICACHE */
417