/*
 * Copyright (c) 2025 Henrik Lindblom <henrik.lindblom@vaisala.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <zephyr/drivers/cache.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/math_extras.h>
#include <stm32_ll_dcache.h>
#include <stm32_ll_icache.h>

LOG_MODULE_REGISTER(cache_stm32, CONFIG_CACHE_LOG_LEVEL);

#ifdef CONFIG_DCACHE

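/* Enable the data cache(s). DCACHE2 is only present on some parts. */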
void cache_data_enable(void)
{
	LL_DCACHE_Enable(DCACHE1);
#if defined(DCACHE2)
	LL_DCACHE_Enable(DCACHE2);
#endif
}

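/*
 * Disable the data cache(s). The cache is cleaned first so no dirty lines
 * are lost, and any ongoing maintenance command must finish before the
 * cache is turned off.
 */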
void cache_data_disable(void)
{
	cache_data_flush_all();

	while (LL_DCACHE_IsActiveFlag_BUSYCMD(DCACHE1)) {
	}

	LL_DCACHE_Disable(DCACHE1);
	LL_DCACHE_ClearFlag_BSYEND(DCACHE1);

#if defined(DCACHE2)
	while (LL_DCACHE_IsActiveFlag_BUSYCMD(DCACHE2)) {
	}

	LL_DCACHE_Disable(DCACHE2);
	LL_DCACHE_ClearFlag_BSYEND(DCACHE2);
#endif
}

static int cache_data_manage_range(void *addr, size_t size, uint32_t command)
{
	/*
	 * This is a simple approach to managing the range. The address might be in either
	 * DCACHE1 or DCACHE2 (if present). The cache maintenance algorithm checks the TAG
	 * memory for the specified address range, so there is little harm in just checking
	 * both caches.
	 */
	uint32_t start = (uint32_t)addr;
	uint32_t end;

	if (u32_add_overflow(start, size, &end)) {
		return -EOVERFLOW;
	}

	LL_DCACHE_SetStartAddress(DCACHE1, start);
	LL_DCACHE_SetEndAddress(DCACHE1, end);
	LL_DCACHE_SetCommand(DCACHE1, command);
	LL_DCACHE_StartCommand(DCACHE1);
#if defined(DCACHE2)
	LL_DCACHE_SetStartAddress(DCACHE2, start);
	LL_DCACHE_SetEndAddress(DCACHE2, end);
	LL_DCACHE_SetCommand(DCACHE2, command);
	LL_DCACHE_StartCommand(DCACHE2);
#endif
	return 0;
}

int cache_data_flush_range(void *addr, size_t size)
{
	return cache_data_manage_range(addr, size, LL_DCACHE_COMMAND_CLEAN_BY_ADDR);
}

int cache_data_invd_range(void *addr, size_t size)
{
	return cache_data_manage_range(addr, size, LL_DCACHE_COMMAND_INVALIDATE_BY_ADDR);
}

int cache_data_flush_and_invd_range(void *addr, size_t size)
{
	return cache_data_manage_range(addr, size, LL_DCACHE_COMMAND_CLEAN_INVALIDATE_BY_ADDR);
}

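/*
 * The whole cache is flushed by running a clean-by-address command over the
 * full 32-bit address range; the command only acts on lines actually held
 * in the cache, so the oversized range is harmless.
 */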
int cache_data_flush_all(void)
{
	return cache_data_flush_range(0, UINT32_MAX);
}

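/* Invalidation has a dedicated whole-cache command, so no range is needed. */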
int cache_data_invd_all(void)
{
	LL_DCACHE_Invalidate(DCACHE1);
#if defined(DCACHE2)
	LL_DCACHE_Invalidate(DCACHE2);
#endif
	return 0;
}

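/* As with cache_data_flush_all(), cover the full address range. */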
int cache_data_flush_and_invd_all(void)
{
	return cache_data_flush_and_invd_range(0, UINT32_MAX);
}

#endif /* CONFIG_DCACHE */

static inline void wait_for_icache(void)
{
	while (LL_ICACHE_IsActiveFlag_BUSY()) {
	}

	/* Clear BSYEND to avoid an extra interrupt if somebody enables them. */
	LL_ICACHE_ClearFlag_BSYEND();
}

void cache_instr_enable(void)
{
	if (IS_ENABLED(CONFIG_CACHE_STM32_ICACHE_DIRECT_MAPPING)) {
		LL_ICACHE_SetMode(LL_ICACHE_1WAY);
	}

	/*
	 * We need to wait until any pending cache invalidation operation has
	 * finished. The reference manual recommends this to ensure execution
	 * timing determinism.
	 */
	wait_for_icache();
	LL_ICACHE_Enable();
}

void cache_instr_disable(void)
{
	LL_ICACHE_Disable();

	while (LL_ICACHE_IsEnabled()) {
		/*
		 * Wait until the ICACHE is disabled (CR.EN=0), at which point
		 * all requests bypass the cache and are forwarded directly
		 * from the ICACHE slave port to the ICACHE master port(s).
		 *
		 * The cache invalidation will start once disabled, but we allow
		 * it to proceed in the background since it doesn't need to be
		 * complete for requests to bypass the ICACHE.
		 */
	}
}

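/*
 * The ICACHE is never written to, so it holds no dirty data and there is
 * nothing to flush.
 */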
int cache_instr_flush_all(void)
{
	return -ENOTSUP;
}

int cache_instr_invd_all(void)
{
	LL_ICACHE_Invalidate();
	return 0;
}

int cache_instr_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

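/*
 * The ICACHE supports only full invalidation, so maintenance by address
 * range is not available.
 */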
int cache_instr_flush_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}

int cache_instr_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}

int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}