1 /* cache.c - d-cache support for ARC CPUs */
2
3 /*
4 * Copyright (c) 2016 Synopsys, Inc. All rights reserved.
5 * Copyright (c) 2025 GSI Technology, All rights reserved.
6 *
7 * SPDX-License-Identifier: Apache-2.0
8 */
9
10 /**
11 * @file
12 * @brief d-cache manipulation
13 *
14 * This module contains functions for manipulation of the d-cache.
15 */
16
17 #include <zephyr/kernel.h>
18 #include <zephyr/arch/cpu.h>
19 #include <zephyr/sys/util.h>
20 #include <zephyr/toolchain.h>
21 #include <zephyr/cache.h>
22 #include <zephyr/linker/linker-defs.h>
23 #include <zephyr/arch/arc/v2/aux_regs.h>
24 #include <kernel_internal.h>
25 #include <zephyr/sys/__assert.h>
26 #include <zephyr/init.h>
27 #include <stdbool.h>
28
29 #if defined(CONFIG_DCACHE_LINE_SIZE_DETECT)
30 size_t sys_cache_line_size;
31 #endif
32
33 #define DC_CTRL_DC_ENABLE 0x0 /* enable d-cache */
34 #define DC_CTRL_DC_DISABLE 0x1 /* disable d-cache */
35 #define DC_CTRL_INVALID_ONLY 0x0 /* invalid d-cache only */
36 #define DC_CTRL_INVALID_FLUSH 0x40 /* invalid and flush d-cache */
37 #define DC_CTRL_ENABLE_FLUSH_LOCKED 0x80 /* locked d-cache can be flushed */
38 #define DC_CTRL_DISABLE_FLUSH_LOCKED 0x0 /* locked d-cache cannot be flushed */
39 #define DC_CTRL_FLUSH_STATUS 0x100 /* flush status */
40 #define DC_CTRL_DIRECT_ACCESS 0x0 /* direct access mode */
41 #define DC_CTRL_INDIRECT_ACCESS 0x20 /* indirect access mode */
42 #define DC_CTRL_OP_SUCCEEDED 0x4 /* d-cache operation succeeded */
43 #define DC_CTRL_INVALIDATE_MODE 0x40 /* d-cache invalidate mode bit */
44 #define DC_CTRL_REGION_OP 0xe00 /* d-cache region operation */
45
46 #define MMU_BUILD_PHYSICAL_ADDR_EXTENSION 0x1000 /* physical address extension enable mask */
47
48 #define SLC_CTRL_DISABLE 0x1 /* SLC disable */
49 #define SLC_CTRL_INVALIDATE_MODE 0x40 /* SLC invalidate mode */
50 #define SLC_CTRL_BUSY_STATUS 0x100 /* SLC busy status */
51 #define SLC_CTRL_REGION_OP 0xe00 /* SLC region operation */
52
53 #if defined(CONFIG_ARC_SLC)
54 /*
55 * spinlock is used for SLC access because depending on HW configuration, the SLC might be shared
56 * between the cores, and in this case, only one core is allowed to access the SLC register
57 * interface at a time.
58 */
59 static struct k_spinlock slc_lock;
60 #endif
61
dcache_available(void)62 static bool dcache_available(void)
63 {
64 unsigned long val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
65
66 val &= 0xff; /* extract version */
67 return (val == 0) ? false : true;
68 }
69
/*
 * Write the given mask into the d-cache control register, but only when a
 * d-cache is actually present on this CPU.
 */
static void dcache_dc_ctrl(uint32_t dcache_en_mask)
{
	if (!dcache_available()) {
		return;
	}

	z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, dcache_en_mask);
}
76
pae_exists(void)77 static bool pae_exists(void)
78 {
79 uint32_t bcr = z_arc_v2_aux_reg_read(_ARC_V2_MMU_BUILD);
80
81 return 1 == FIELD_GET(MMU_BUILD_PHYSICAL_ADDR_EXTENSION, bcr);
82 }
83
84 #if defined(CONFIG_ARC_SLC)
slc_enable(void)85 static void slc_enable(void)
86 {
87 uint32_t val = z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
88
89 val &= ~SLC_CTRL_DISABLE;
90 z_arc_v2_aux_reg_write(_ARC_V2_SLC_CTRL, val);
91 }
92
/*
 * Zero the SLC high-address region registers so that region operations use
 * 32-bit addresses only; cache operations for 40-bit (PAE) addresses are not
 * implemented (see init_dcache).
 */
static void slc_high_addr_init(void)
{
	z_arc_v2_aux_reg_write(_ARC_V2_SLC_RGN_END1, 0);
	z_arc_v2_aux_reg_write(_ARC_V2_SLC_RGN_START1, 0);
}
98
/*
 * Flush (write back) the SLC lines covering [start_addr_ptr,
 * start_addr_ptr + size). Serialized via slc_lock because the SLC register
 * interface may be shared between cores.
 */
static void slc_flush_region(void *start_addr_ptr, size_t size)
{
	uintptr_t region_start = (uintptr_t)start_addr_ptr;
	uintptr_t region_end = region_start + size + CONFIG_ARC_SLC_LINE_SIZE - 1;
	uint32_t regval;

	K_SPINLOCK(&slc_lock) {
		/* Clear the region-op field to select the flush operation */
		regval = z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
		regval &= ~SLC_CTRL_REGION_OP;
		z_arc_v2_aux_reg_write(_ARC_V2_SLC_CTRL, regval);

		/*
		 * END must be programmed before START (the START write kicks
		 * off the operation). END can't equal START, hence the
		 * (line size - 1) added to the size above.
		 */
		z_arc_v2_aux_reg_write(_ARC_V2_SLC_RGN_END, region_end);
		/* Align start address to cache line size, see STAR 5103816 */
		z_arc_v2_aux_reg_write(_ARC_V2_SLC_RGN_START,
				       region_start & ~(CONFIG_ARC_SLC_LINE_SIZE - 1));

		/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
		z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
		while ((z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL) & SLC_CTRL_BUSY_STATUS) != 0) {
			/* Busy-wait until the operation completes */
		}
	}
}
130
/*
 * Invalidate (without write-back) the SLC lines covering [start_addr_ptr,
 * start_addr_ptr + size). Serialized via slc_lock because the SLC register
 * interface may be shared between cores.
 */
static void slc_invalidate_region(void *start_addr_ptr, size_t size)
{
	uintptr_t region_start = (uintptr_t)start_addr_ptr;
	uintptr_t region_end = region_start + size + CONFIG_ARC_SLC_LINE_SIZE - 1;
	uint32_t regval;

	K_SPINLOCK(&slc_lock) {
		regval = z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);

		/* Invalidate mode cleared: discard lines without flushing */
		regval &= ~SLC_CTRL_INVALIDATE_MODE;

		/* Region-op field = 1 selects the invalidate operation */
		regval = (regval & ~SLC_CTRL_REGION_OP) |
			 FIELD_PREP(SLC_CTRL_REGION_OP, 0x1);

		z_arc_v2_aux_reg_write(_ARC_V2_SLC_CTRL, regval);

		/*
		 * END must be programmed before START (the START write kicks
		 * off the operation). END can't equal START, hence the
		 * (line size - 1) added to the size above.
		 */
		z_arc_v2_aux_reg_write(_ARC_V2_SLC_RGN_END, region_end);
		/* Align start address to cache line size, see STAR 5103816 */
		z_arc_v2_aux_reg_write(_ARC_V2_SLC_RGN_START,
				       region_start & ~(CONFIG_ARC_SLC_LINE_SIZE - 1));

		/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
		z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
		while ((z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL) & SLC_CTRL_BUSY_STATUS) != 0) {
			/* Busy-wait until the operation completes */
		}
	}
}
165
/*
 * Flush then invalidate the SLC lines covering [start_addr_ptr,
 * start_addr_ptr + size). Serialized via slc_lock because the SLC register
 * interface may be shared between cores.
 */
static void slc_flush_and_invalidate_region(void *start_addr_ptr, size_t size)
{
	uintptr_t start_addr = (uintptr_t)start_addr_ptr;
	uintptr_t end_addr;
	uint32_t ctrl;

	K_SPINLOCK(&slc_lock) {
		ctrl = z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);

		/* Invalidate mode set: lines are flushed before invalidation */
		ctrl |= SLC_CTRL_INVALIDATE_MODE;

		/* Region-op field = 1 selects the invalidate operation */
		ctrl &= ~SLC_CTRL_REGION_OP;
		ctrl |= FIELD_PREP(SLC_CTRL_REGION_OP, 0x1);

		z_arc_v2_aux_reg_write(_ARC_V2_SLC_CTRL, ctrl);

		/*
		 * END needs to be setup before START (latter triggers the operation)
		 * END can't be same as START, so add (l2_line_sz - 1) to sz
		 */
		end_addr = start_addr + size + CONFIG_ARC_SLC_LINE_SIZE - 1;

		z_arc_v2_aux_reg_write(_ARC_V2_SLC_RGN_END, end_addr);
		/* Align start address to cache line size, see STAR 5103816 */
		z_arc_v2_aux_reg_write(_ARC_V2_SLC_RGN_START,
				       start_addr & ~(CONFIG_ARC_SLC_LINE_SIZE - 1));

		/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
		z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
		while (z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL) & SLC_CTRL_BUSY_STATUS) {
			/* Do Nothing */
		}
	}
}
200
slc_flush_all(void)201 static void slc_flush_all(void)
202 {
203 K_SPINLOCK(&slc_lock) {
204 z_arc_v2_aux_reg_write(_ARC_V2_SLC_FLUSH, 0x1);
205
206 /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
207 z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
208 while (z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL) & SLC_CTRL_BUSY_STATUS) {
209 /* Do Nothing */
210 }
211 }
212 }
213
slc_invalidate_all(void)214 static void slc_invalidate_all(void)
215 {
216 uint32_t ctrl;
217
218 K_SPINLOCK(&slc_lock) {
219 ctrl = z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
220 ctrl &= ~SLC_CTRL_INVALIDATE_MODE;
221 z_arc_v2_aux_reg_write(_ARC_V2_SLC_CTRL, ctrl);
222
223 z_arc_v2_aux_reg_write(_ARC_V2_SLC_INVALIDATE, 0x1);
224
225 /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
226 z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
227 while (z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL) & SLC_CTRL_BUSY_STATUS) {
228 /* Do Nothing */
229 }
230 }
231 }
232
slc_flush_and_invalidate_all(void)233 static void slc_flush_and_invalidate_all(void)
234 {
235 uint32_t ctrl;
236
237 K_SPINLOCK(&slc_lock) {
238 ctrl = z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
239 ctrl |= SLC_CTRL_INVALIDATE_MODE;
240 z_arc_v2_aux_reg_write(_ARC_V2_SLC_CTRL, ctrl);
241
242 z_arc_v2_aux_reg_write(_ARC_V2_SLC_INVALIDATE, 0x1);
243
244 /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
245 z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL);
246 while (z_arc_v2_aux_reg_read(_ARC_V2_SLC_CTRL) & SLC_CTRL_BUSY_STATUS) {
247 /* Do Nothing */
248 }
249 }
250 }
251 #endif /* CONFIG_ARC_SLC */
252
/* Enable the d-cache and, when configured, the system level cache. */
void arch_dcache_enable(void)
{
	dcache_dc_ctrl(DC_CTRL_DC_ENABLE);

#if defined(CONFIG_ARC_SLC)
	slc_enable();
#endif
}
261
/* Disabling the d-cache is not implemented; no-op stub for the cache API. */
void arch_dcache_disable(void)
{
	/* nothing */
}
266
267 #if defined(CONFIG_ARC_DCACHE_REGION_OPERATIONS)
/*
 * Zero the d-cache high physical-tag register so that region operations use
 * 32-bit addresses only; cache operations for 40-bit (PAE) addresses are not
 * implemented (see init_dcache).
 */
static void dcache_high_addr_init(void)
{
	z_arc_v2_aux_reg_write(_ARC_V2_DC_PTAG_HI, 0);
}
272
/*
 * Flush (write back) the d-cache lines covering [start_addr_ptr,
 * start_addr_ptr + size) using the region operation registers.
 * Runs with interrupts locked so the register sequence is not interleaved.
 */
static void dcache_flush_region(void *start_addr_ptr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t region_start = (uintptr_t)start_addr_ptr;
	uintptr_t region_end = region_start + size + line_size - 1;
	uint32_t regval;
	unsigned int key = arch_irq_lock(); /* --enter critical section-- */

	/* Clear the region-op field to select the flush operation */
	regval = z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL);
	regval &= ~DC_CTRL_REGION_OP;
	z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, regval);

	/* END is programmed first; the START write triggers the operation */
	z_arc_v2_aux_reg_write(_ARC_V2_DC_ENDR, region_end);
	z_arc_v2_aux_reg_write(_ARC_V2_DC_STARTR, region_start);

	while ((z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) & DC_CTRL_FLUSH_STATUS) != 0) {
		/* Busy-wait until the flush completes */
	}

	arch_irq_unlock(key); /* --exit critical section-- */
}
300
/*
 * Invalidate (without write-back) the d-cache lines covering
 * [start_addr_ptr, start_addr_ptr + size) using the region operation
 * registers. Runs with interrupts locked.
 */
static void dcache_invalidate_region(void *start_addr_ptr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t region_start = (uintptr_t)start_addr_ptr;
	uintptr_t region_end = region_start + size + line_size - 1;
	uint32_t regval;
	unsigned int key = arch_irq_lock(); /* --enter critical section-- */

	regval = z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL);

	/* Invalidate mode cleared: discard lines without flushing */
	regval &= ~DC_CTRL_INVALIDATE_MODE;

	/* Region-op field = 1 selects the invalidate operation */
	regval = (regval & ~DC_CTRL_REGION_OP) | FIELD_PREP(DC_CTRL_REGION_OP, 0x1);

	z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, regval);

	/* END is programmed first; the START write triggers the operation */
	z_arc_v2_aux_reg_write(_ARC_V2_DC_ENDR, region_end);
	z_arc_v2_aux_reg_write(_ARC_V2_DC_STARTR, region_start);

	arch_irq_unlock(key); /* --exit critical section-- */
}
327
/*
 * Flush then invalidate the d-cache lines covering [start_addr_ptr,
 * start_addr_ptr + size) using the region operation registers.
 * Runs with interrupts locked so the register sequence is not interleaved.
 */
static void dcache_flush_and_invalidate_region(void *start_addr_ptr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t start_addr = (uintptr_t)start_addr_ptr;
	uintptr_t end_addr;
	uint32_t ctrl;
	unsigned int key;

	key = arch_irq_lock(); /* --enter critical section-- */

	ctrl = z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL);

	/* Invalidate mode set: lines are flushed before invalidation */
	ctrl |= DC_CTRL_INVALIDATE_MODE;

	/* Region-op field = 1 selects the invalidate operation */
	ctrl &= ~DC_CTRL_REGION_OP;
	ctrl |= FIELD_PREP(DC_CTRL_REGION_OP, 0x1);

	z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, ctrl);

	/* END can't be same as START, so add (line_size - 1) to size */
	end_addr = start_addr + size + line_size - 1;

	/* END is programmed first; the START write triggers the operation */
	z_arc_v2_aux_reg_write(_ARC_V2_DC_ENDR, end_addr);
	z_arc_v2_aux_reg_write(_ARC_V2_DC_STARTR, start_addr);

	while (z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) & DC_CTRL_FLUSH_STATUS) {
		/* Do nothing */
	}

	arch_irq_unlock(key); /* --exit critical section-- */
}
358
359 #else /* CONFIG_ARC_DCACHE_REGION_OPERATIONS */
360
/*
 * Flush (write back) the d-cache lines covering [start_addr_ptr,
 * start_addr_ptr + size), one line at a time via the FLDL register.
 * Runs with interrupts locked.
 */
static void dcache_flush_lines(void *start_addr_ptr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t end = (uintptr_t)start_addr_ptr + size;
	uintptr_t line = ROUND_DOWN((uintptr_t)start_addr_ptr, line_size);
	unsigned int key = arch_irq_lock(); /* --enter critical section-- */

	do {
		z_arc_v2_aux_reg_write(_ARC_V2_DC_FLDL, line);
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();
		/* wait for flush completion */
		while ((z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) &
			DC_CTRL_FLUSH_STATUS) != 0) {
			/* Busy-wait */
		}
		line += line_size;
	} while (line < end);

	arch_irq_unlock(key); /* --exit critical section-- */
}
391
/*
 * Invalidate (without write-back) the d-cache lines covering
 * [start_addr_ptr, start_addr_ptr + size), one line at a time via the IVDL
 * register. Runs with interrupts locked.
 *
 * Fix: the critical section was entered with arch_irq_lock() but exited with
 * the kernel-layer irq_unlock(); pair it with arch_irq_unlock() for
 * consistency with dcache_flush_lines().
 */
static void dcache_invalidate_lines(void *start_addr_ptr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t start_addr = (uintptr_t)start_addr_ptr;
	uintptr_t end_addr;
	unsigned int key;
	uint32_t ctrl;

	end_addr = start_addr + size;
	start_addr = ROUND_DOWN(start_addr, line_size);

	key = arch_irq_lock(); /* -enter critical section- */

	/* Invalidate mode cleared: discard lines without flushing */
	ctrl = z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL);
	ctrl &= ~DC_CTRL_INVALIDATE_MODE;
	z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, ctrl);

	do {
		z_arc_v2_aux_reg_write(_ARC_V2_DC_IVDL, start_addr);
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();
		start_addr += line_size;
	} while (start_addr < end_addr);

	arch_irq_unlock(key); /* -exit critical section- */
}
418
/*
 * Flush then invalidate the d-cache lines covering [start_addr_ptr,
 * start_addr_ptr + size), one line at a time via the IVDL register with
 * invalidate mode set (flush-before-invalidate). Runs with interrupts locked.
 *
 * Fix: the critical section was entered with arch_irq_lock() but exited with
 * the kernel-layer irq_unlock(); pair it with arch_irq_unlock() for
 * consistency with dcache_flush_lines().
 *
 * NOTE(review): unlike dcache_flush_lines(), flush completion is not polled
 * after each IVDL write here — confirm IVDL with IM=1 stalls until the
 * write-back finishes on the target hardware.
 */
static void dcache_flush_and_invalidate_lines(void *start_addr_ptr, size_t size)
{
	size_t line_size = sys_cache_data_line_size_get();
	uintptr_t start_addr = (uintptr_t)start_addr_ptr;
	uintptr_t end_addr;
	unsigned int key;
	uint32_t ctrl;

	end_addr = start_addr + size;
	start_addr = ROUND_DOWN(start_addr, line_size);

	key = arch_irq_lock(); /* -enter critical section- */

	/* Invalidate mode set: lines are flushed before invalidation */
	ctrl = z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL);
	ctrl |= DC_CTRL_INVALIDATE_MODE;
	z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, ctrl);

	do {
		z_arc_v2_aux_reg_write(_ARC_V2_DC_IVDL, start_addr);
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();
		start_addr += line_size;
	} while (start_addr < end_addr);

	arch_irq_unlock(key); /* -exit critical section- */
}
445
446 #endif /* CONFIG_ARC_DCACHE_REGION_OPERATIONS */
447
/*
 * Flush (write back) the given address range from the d-cache, and from the
 * SLC when configured. Returns -ENOTSUP when no d-cache is present, the line
 * size is unknown, or the range is empty; 0 on success.
 */
int arch_dcache_flush_range(void *start_addr_ptr, size_t size)
{
	if (!dcache_available() || (size == 0U) ||
	    (sys_cache_data_line_size_get() == 0U)) {
		return -ENOTSUP;
	}

#if defined(CONFIG_ARC_DCACHE_REGION_OPERATIONS)
	dcache_flush_region(start_addr_ptr, size);
#else
	dcache_flush_lines(start_addr_ptr, size);
#endif

#if defined(CONFIG_ARC_SLC)
	slc_flush_region(start_addr_ptr, size);
#endif

	return 0;
}
468
/*
 * Invalidate the given address range in the d-cache, and in the SLC when
 * configured. Returns -ENOTSUP when no d-cache is present, the line size is
 * unknown, or the range is empty; 0 on success.
 */
int arch_dcache_invd_range(void *start_addr_ptr, size_t size)
{
	if (!dcache_available() || (size == 0U) ||
	    (sys_cache_data_line_size_get() == 0U)) {
		return -ENOTSUP;
	}

#if defined(CONFIG_ARC_DCACHE_REGION_OPERATIONS)
	dcache_invalidate_region(start_addr_ptr, size);
#else
	dcache_invalidate_lines(start_addr_ptr, size);
#endif

#if defined(CONFIG_ARC_SLC)
	slc_invalidate_region(start_addr_ptr, size);
#endif

	return 0;
}
489
/*
 * Flush then invalidate the given address range in the d-cache, and in the
 * SLC when configured. Returns -ENOTSUP when no d-cache is present, the line
 * size is unknown, or the range is empty; 0 on success.
 */
int arch_dcache_flush_and_invd_range(void *start_addr_ptr, size_t size)
{
	if (!dcache_available() || (size == 0U) ||
	    (sys_cache_data_line_size_get() == 0U)) {
		return -ENOTSUP;
	}

#if defined(CONFIG_ARC_DCACHE_REGION_OPERATIONS)
	dcache_flush_and_invalidate_region(start_addr_ptr, size);
#else
	dcache_flush_and_invalidate_lines(start_addr_ptr, size);
#endif

#if defined(CONFIG_ARC_SLC)
	slc_flush_and_invalidate_region(start_addr_ptr, size);
#endif

	return 0;
}
510
arch_dcache_flush_all(void)511 int arch_dcache_flush_all(void)
512 {
513 size_t line_size = sys_cache_data_line_size_get();
514 unsigned int key;
515
516 if (!dcache_available() || line_size == 0U) {
517 return -ENOTSUP;
518 }
519
520 key = irq_lock();
521
522 z_arc_v2_aux_reg_write(_ARC_V2_DC_FLSH, 0x1);
523
524 while (z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) & DC_CTRL_FLUSH_STATUS) {
525 /* Do nothing */
526 }
527
528 irq_unlock(key);
529
530 #if defined(CONFIG_ARC_SLC)
531 slc_flush_all();
532 #endif
533
534 return 0;
535 }
536
arch_dcache_invd_all(void)537 int arch_dcache_invd_all(void)
538 {
539 size_t line_size = sys_cache_data_line_size_get();
540 unsigned int key;
541 uint32_t ctrl;
542
543 if (!dcache_available() || line_size == 0U) {
544 return -ENOTSUP;
545 }
546
547 key = irq_lock();
548
549 ctrl = z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL);
550 ctrl &= ~DC_CTRL_INVALIDATE_MODE;
551 z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, ctrl);
552
553 z_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 0x1);
554
555 irq_unlock(key);
556
557 #if defined(CONFIG_ARC_SLC)
558 slc_invalidate_all();
559 #endif
560
561 return 0;
562 }
563
arch_dcache_flush_and_invd_all(void)564 int arch_dcache_flush_and_invd_all(void)
565 {
566 size_t line_size = sys_cache_data_line_size_get();
567 unsigned int key;
568 uint32_t ctrl;
569
570 if (!dcache_available() || line_size == 0U) {
571 return -ENOTSUP;
572 }
573
574 key = irq_lock();
575
576 ctrl = z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL);
577 ctrl |= DC_CTRL_INVALIDATE_MODE;
578 z_arc_v2_aux_reg_write(_ARC_V2_DC_CTRL, ctrl);
579
580 z_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 0x1);
581
582 while (z_arc_v2_aux_reg_read(_ARC_V2_DC_CTRL) & DC_CTRL_FLUSH_STATUS) {
583 /* Do nothing */
584 }
585
586 irq_unlock(key);
587
588 #if defined(CONFIG_ARC_SLC)
589 slc_flush_and_invalidate_all();
590 #endif
591
592 return 0;
593 }
594
595 #if defined(CONFIG_DCACHE_LINE_SIZE_DETECT)
init_dcache_line_size(void)596 static void init_dcache_line_size(void)
597 {
598 uint32_t val;
599
600 val = z_arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD);
601 __ASSERT((val&0xff) != 0U, "d-cache is not present");
602 val = ((val>>16) & 0xf) + 1;
603 val *= 16U;
604 sys_cache_line_size = (size_t) val;
605 }
606
/* Return the d-cache line size detected at init by init_dcache_line_size(). */
size_t arch_dcache_line_size_get(void)
{
	return sys_cache_line_size;
}
611 #endif
612
/* i-cache enable is not implemented; no-op stub for the cache API. */
void arch_icache_enable(void)
{
	/* nothing */
}
617
/* i-cache disable is not implemented; no-op stub for the cache API. */
void arch_icache_disable(void)
{
	/* nothing */
}
622
/* i-cache flush is not implemented; always reports -ENOTSUP. */
int arch_icache_flush_all(void)
{
	return -ENOTSUP;
}
627
/* i-cache invalidate is not implemented; always reports -ENOTSUP. */
int arch_icache_invd_all(void)
{
	return -ENOTSUP;
}
632
/* i-cache flush-and-invalidate is not implemented; always reports -ENOTSUP. */
int arch_icache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}
637
/* i-cache range flush is not implemented; always reports -ENOTSUP. */
int arch_icache_flush_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}
645
/* i-cache range invalidate is not implemented; always reports -ENOTSUP. */
int arch_icache_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}
653
/* i-cache range flush-and-invalidate is not implemented; always reports -ENOTSUP. */
int arch_icache_flush_and_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}
661
/*
 * One-time d-cache initialization:
 * - enable the data cache(s)
 * - detect the cache line size when CONFIG_DCACHE_LINE_SIZE_DETECT is set
 * - zero the high-address registers when the MMU supports physical address
 *   extension, since cache ops for 40-bit addresses are not implemented
 * Always returns 0.
 */
static int init_dcache(void)
{
	sys_cache_data_enable();

#if defined(CONFIG_DCACHE_LINE_SIZE_DETECT)
	init_dcache_line_size();
#endif

	/*
	 * Init high address registers to 0 if PAE exists, cache operations for 40 bit addresses
	 * not implemented
	 */
	if (pae_exists()) {
#if defined(CONFIG_ARC_DCACHE_REGION_OPERATIONS)
		dcache_high_addr_init();
#endif
#if defined(CONFIG_ARC_SLC)
		slc_high_addr_init();
#endif
	}

	return 0;
}
685
686
/* Arch hook: initialize the d-cache (no i-cache setup is performed). */
void arch_cache_init(void)
{
	init_dcache();
}
691