1 /*
2  * Copyright (c) 2006-2021, RT-Thread Development Team
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Change Logs:
7  * Date           Author       Notes
8  * 2016-09-07     Urey         the first version
9  */
10 
11 #ifndef _MIPS_CACHE_H_
12 #define _MIPS_CACHE_H_
13 
14 #ifndef __ASSEMBLER__
15 #include <rtdef.h>
16 #include <mips_cfg.h>
17 
/*
 * Cache Operations available on all MIPS processors with R4000-style caches.
 * Values are the 5-bit op field of the CACHE instruction -- NOTE(review):
 * confirm the exact encoding against the target core's manual before
 * adding new ops.
 */
#define INDEX_INVALIDATE_I      0x00
#define INDEX_WRITEBACK_INV_D   0x01
#define INDEX_LOAD_TAG_I        0x04
#define INDEX_LOAD_TAG_D        0x05
#define INDEX_STORE_TAG_I       0x08
#define INDEX_STORE_TAG_D       0x09
#define HIT_INVALIDATE_I        0x10
#define HIT_INVALIDATE_D        0x11
#define HIT_WRITEBACK_INV_D     0x15
30 
/*
 * Fetch-and-lock operations.  The lock state is cleared by executing an
 * Index Invalidate, Index Writeback Invalidate, Hit Invalidate, or Hit
 * Writeback Invalidate operation to the locked line, or via an Index
 * Store Tag operation with the lock bit reset in the TagLo register.
 */
#define FETCH_AND_LOCK_I        0x1c
#define FETCH_AND_LOCK_D        0x1d
41 
42 
/*
 * DMA transfer direction, used by r4k_dma_cache_sync() below to decide
 * which cache maintenance is required.  NOTE(review): values appear to
 * mirror the Linux dma_data_direction enum -- confirm before relying on
 * numeric compatibility.
 */
enum dma_data_direction
{
    DMA_BIDIRECTIONAL = 0,  /* device may both read and write the buffer */
    DMA_TO_DEVICE = 1,      /* transfer from memory toward the device    */
    DMA_FROM_DEVICE = 2,    /* transfer from the device into memory      */
    DMA_NONE = 3,           /* no transfer direction specified           */
};
50 
/*
 * R4000-specific cacheops
 */
#define CREATE_DIRTY_EXCL_D     0x0d
#define FILL                    0x14
#define HIT_WRITEBACK_I         0x18
#define HIT_WRITEBACK_D         0x19

/*
 * R4000SC and R4400SC-specific cacheops
 */
#define INDEX_INVALIDATE_SI     0x02
#define INDEX_WRITEBACK_INV_SD  0x03
#define INDEX_LOAD_TAG_SI       0x06
#define INDEX_LOAD_TAG_SD       0x07
#define INDEX_STORE_TAG_SI      0x0A
#define INDEX_STORE_TAG_SD      0x0B
#define CREATE_DIRTY_EXCL_SD    0x0f
#define HIT_INVALIDATE_SI       0x12
#define HIT_INVALIDATE_SD       0x13
#define HIT_WRITEBACK_INV_SD    0x17
#define HIT_WRITEBACK_SD        0x1b
#define HIT_SET_VIRTUAL_SI      0x1e
#define HIT_SET_VIRTUAL_SD      0x1f

/*
 * R5000-specific cacheops
 */
#define R5K_PAGE_INVALIDATE_S   0x17

/*
 * RM7000-specific cacheops
 */
#define PAGE_INVALIDATE_T       0x16

/*
 * R10000-specific cacheops
 *
 * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
 * Most of the _S cacheops are identical to the R4000SC _SD cacheops.
 *
 * Several opcodes are deliberately reused across CPU families (e.g. 0x14
 * is FILL on R4000 but CACHE_BARRIER here; 0x17 is HIT_WRITEBACK_INV_SD /
 * R5K_PAGE_INVALIDATE_S / HIT_WRITEBACK_INV_S): only use the constants
 * that match the core actually being targeted.
 */
#define INDEX_WRITEBACK_INV_S   0x03
#define INDEX_LOAD_TAG_S        0x07
#define INDEX_STORE_TAG_S       0x0B
#define HIT_INVALIDATE_S        0x13
#define CACHE_BARRIER           0x14
#define HIT_WRITEBACK_INV_S     0x17
#define INDEX_LOAD_DATA_I       0x18
#define INDEX_LOAD_DATA_D       0x19
#define INDEX_LOAD_DATA_S       0x1b
#define INDEX_STORE_DATA_I      0x1c
#define INDEX_STORE_DATA_D      0x1d
#define INDEX_STORE_DATA_S      0x1f
104 
/*
 * Issue one CACHE instruction of type 'op' on the line containing 'addr'.
 * 'op' must be a compile-time constant (the "i" constraint).  The "R"
 * constraint passes the byte at 'addr' as a memory operand so the
 * assembler can encode the base+offset addressing form; the constraint
 * itself does not read or write data.  .set mips3 is needed because the
 * CACHE opcode is not part of the base MIPS I ISA; .set push/pop restore
 * the caller's assembler mode exactly.
 */
#define cache_op(op, addr)          \
    __asm__ __volatile__(        \
        ".set   push\n"             \
        ".set   noreorder\n"        \
        ".set   mips3\n"            \
        "cache  %0, %1\n"           \
        ".set   pop\n"              \
        :                           \
        : "i" (op), "R" (*(unsigned char *)(addr)))
114 
/*
 * Apply cache operation 'op' to 32 consecutive 16-byte cache lines
 * starting at 'base', i.e. one invocation covers a contiguous 0x200-byte
 * window.  The 0x10 stride hard-codes a 16-byte line size (the "16" in
 * the name).
 *
 * Fixes vs. the previous version:
 *  - no trailing semicolon in the expansion, so the macro composes
 *    safely inside unbraced if/else and call sites keep their own ';'
 *  - assembler state is restored with .set push/pop (consistent with
 *    cache_op above) instead of blindly forcing mips0/reorder, which
 *    could clobber a caller's prevailing .set mode.
 */
#define cache16_unroll32(base, op)                  \
    __asm__ __volatile__(                       \
    "   .set push                   \n" \
    "   .set noreorder                  \n" \
    "   .set mips3                  \n" \
    "   cache %1, 0x000(%0); cache %1, 0x010(%0)    \n" \
    "   cache %1, 0x020(%0); cache %1, 0x030(%0)    \n" \
    "   cache %1, 0x040(%0); cache %1, 0x050(%0)    \n" \
    "   cache %1, 0x060(%0); cache %1, 0x070(%0)    \n" \
    "   cache %1, 0x080(%0); cache %1, 0x090(%0)    \n" \
    "   cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)    \n" \
    "   cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)    \n" \
    "   cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)    \n" \
    "   cache %1, 0x100(%0); cache %1, 0x110(%0)    \n" \
    "   cache %1, 0x120(%0); cache %1, 0x130(%0)    \n" \
    "   cache %1, 0x140(%0); cache %1, 0x150(%0)    \n" \
    "   cache %1, 0x160(%0); cache %1, 0x170(%0)    \n" \
    "   cache %1, 0x180(%0); cache %1, 0x190(%0)    \n" \
    "   cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)    \n" \
    "   cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)    \n" \
    "   cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)    \n" \
    "   .set pop                    \n" \
        :                           \
        : "r" (base),                       \
          "i" (op))
140 
141 
/* Invalidate the I-cache line selected by index 'addr'. */
static inline void flush_icache_line_indexed(rt_ubase_t addr)
{
    cache_op(INDEX_INVALIDATE_I, addr);
}
146 
/* Write back and invalidate the D-cache line selected by index 'addr'. */
static inline void flush_dcache_line_indexed(rt_ubase_t addr)
{
    cache_op(INDEX_WRITEBACK_INV_D, addr);
}
151 
/* Invalidate the I-cache line holding 'addr', if cached (hit operation). */
static inline void flush_icache_line(rt_ubase_t addr)
{
    cache_op(HIT_INVALIDATE_I, addr);
}
156 
/* Fetch the line holding 'addr' into the I-cache and lock it there. */
static inline void lock_icache_line(rt_ubase_t addr)
{
    cache_op(FETCH_AND_LOCK_I, addr);
}
161 
/* Fetch the line holding 'addr' into the D-cache and lock it there. */
static inline void lock_dcache_line(rt_ubase_t addr)
{
    cache_op(FETCH_AND_LOCK_D, addr);
}
166 
/* Write back and invalidate the D-cache line holding 'addr', if cached. */
static inline void flush_dcache_line(rt_ubase_t addr)
{
    cache_op(HIT_WRITEBACK_INV_D, addr);
}
171 
/*
 * Invalidate the D-cache line holding 'addr' WITHOUT writing it back:
 * any dirty data in that line is discarded.
 */
static inline void invalidate_dcache_line(rt_ubase_t addr)
{
    cache_op(HIT_INVALIDATE_D, addr);
}
blast_dcache16(void)176 static inline void blast_dcache16(void)
177 {
178     rt_ubase_t start = KSEG0BASE;
179     rt_ubase_t end = start + g_mips_core.dcache_size;
180     rt_ubase_t addr;
181 
182     for (addr = start; addr < end; addr += g_mips_core.dcache_line_size)
183         cache16_unroll32(addr, INDEX_WRITEBACK_INV_D);
184 }
185 
inv_dcache16(void)186 static inline void inv_dcache16(void)
187 {
188     rt_ubase_t start = KSEG0BASE;
189     rt_ubase_t end = start + g_mips_core.dcache_size;
190     rt_ubase_t addr;
191 
192     for (addr = start; addr < end; addr += g_mips_core.dcache_line_size)
193         cache16_unroll32(addr, HIT_INVALIDATE_D);
194 }
195 
blast_icache16(void)196 static inline void blast_icache16(void)
197 {
198     rt_ubase_t start = KSEG0BASE;
199     rt_ubase_t end = start + g_mips_core.icache_size;
200     rt_ubase_t addr;
201 
202     for (addr = start; addr < end; addr += g_mips_core.icache_line_size)
203         cache16_unroll32(addr, INDEX_INVALIDATE_I);
204 }
205 
/*
 * r4k cache driver entry points, implemented in the corresponding .c
 * file.  Range-based routines take a starting address and a size in
 * bytes; r4k_dma_cache_sync() selects the required maintenance from the
 * DMA 'direction'.
 */
void r4k_cache_init(void);
void r4k_cache_flush_all(void);
void r4k_icache_flush_all(void);
void r4k_icache_flush_range(rt_ubase_t addr, rt_ubase_t size);
void r4k_icache_lock_range(rt_ubase_t addr, rt_ubase_t size);
void r4k_dcache_inv(rt_ubase_t addr, rt_ubase_t size);
void r4k_dcache_wback_inv(rt_ubase_t addr, rt_ubase_t size);
void r4k_dma_cache_sync(rt_ubase_t addr, rt_size_t size, enum dma_data_direction direction);
214 #endif
215 
216 #endif /* _MIPS_CACHE_H_ */
217