/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author             Notes
 * 2020-03-17     bigmagic           first version
 */

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one cache level by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9, x12: clobbered
 */
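/*
 * The field decoding below assumes the classic 32-bit CCSIDR_EL1
 * layout (no FEAT_CCIDX). Illustrative C sketch of what the next
 * few instructions compute; the names are hypothetical:
 *
 *   lineshift = (ccsidr & 0x7) + 4;       // log2(line size in bytes)
 *   maxway    = (ccsidr >> 3)  & 0x3ff;   // associativity - 1
 *   maxset    = (ccsidr >> 13) & 0x7fff;  // number of sets - 1
 *   wayshift  = clz(maxway);              // 32-bit clz: way field position
 */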
.globl __asm_dcache_level
__asm_dcache_level:
    lsl    x12, x0, #1
    msr    csselr_el1, x12        /* select cache level */
    isb                /* sync change of csselr_el1 */
    mrs    x6, ccsidr_el1        /* read the new ccsidr_el1 */
    and    x2, x6, #7        /* x2 <- log2(cache line size)-4 */
    add    x2, x2, #4        /* x2 <- log2(cache line size) */
    mov    x3, #0x3ff
    and    x3, x3, x6, lsr #3    /* x3 <- maximum way number */
    clz    w5, w3            /* bit position of #ways */
    mov    x4, #0x7fff
    and    x4, x4, x6, lsr #13    /* x4 <- maximum set number */
    /* x12 <- cache level << 1 */
    /* x2 <- line length offset */
    /* x3 <- number of cache ways - 1 */
    /* x4 <- number of cache sets - 1 */
    /* x5 <- bit position of #ways */

loop_set:
    mov    x6, x3            /* x6 <- working copy of #ways */
loop_way:
    lsl    x7, x6, x5
    orr    x9, x12, x7        /* map way and level to cisw value */
    lsl    x7, x4, x2
    orr    x9, x9, x7        /* map set number to cisw value */
    tbz    w1, #0, 1f
    dc    isw, x9            /* invalidate by set/way */
    b    2f
1:    dc    cisw, x9        /* clean & invalidate by set/way */
2:    subs    x6, x6, #1        /* decrement the way */
    b.ge    loop_way
    subs    x4, x4, #1        /* decrement the set */
    b.ge    loop_set

    ret
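/*
 * Sketch of the set/way operand assembled in x9 above, matching the
 * Arm ARM DC ISW/CISW encoding (field positions are computed from
 * CCSIDR_EL1 at run time; this is illustrative only):
 *
 *   x9 = (level << 1) | (way << wayshift) | (set << lineshift);
 */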

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache levels by SET/WAY.
 */
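/*
 * CLIDR_EL1 fields used by the level walk below (illustrative C,
 * hypothetical names):
 *
 *   loc   = (clidr >> 24) & 0x7;           // Level of Coherency
 *   ctype = (clidr >> (3 * level)) & 0x7;  // 0 none, 1 I-only, 2 D-only,
 *                                          // 3 split, 4 unified
 */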
.globl __asm_dcache_all
__asm_dcache_all:
    mov    x1, x0
    dsb    sy
    mrs    x10, clidr_el1        /* read clidr_el1 */
    lsr    x11, x10, #24
    and    x11, x11, #0x7        /* x11 <- loc */
    cbz    x11, finished        /* if loc is 0, exit */
    mov    x15, lr
    mov    x0, #0            /* start flush at cache level 0 */
    /* x0  <- cache level */
    /* x10 <- clidr_el1 */
    /* x11 <- loc */
    /* x15 <- return address */

loop_level:
    lsl    x12, x0, #1
    add    x12, x12, x0        /* x12 <- tripled cache level */
    lsr    x12, x10, x12
    and    x12, x12, #7        /* x12 <- cache type */
    cmp    x12, #2
    b.lt    skip            /* skip if no cache or icache only */
    bl    __asm_dcache_level    /* x1 = 0 flush, 1 invalidate */
skip:
    add    x0, x0, #1        /* increment cache level */
    cmp    x11, x0
    b.gt    loop_level

    mov    x0, #0
    msr    csselr_el1, x0        /* restore csselr_el1 */
    dsb    sy
    isb
    mov    lr, x15

finished:
    ret

.globl __asm_flush_dcache_all
__asm_flush_dcache_all:
    mov    x0, #0
    b    __asm_dcache_all

.globl __asm_invalidate_dcache_all
__asm_invalidate_dcache_all:
    mov    x0, #0x1
    b    __asm_dcache_all
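/*
 * Hypothetical C-side declarations for the entry points above (a
 * sketch; the project's actual header may differ):
 *
 *   void __asm_flush_dcache_all(void);
 *   void __asm_invalidate_dcache_all(void);
 */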

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate the data cache in the range [start, end)
 *
 * x0: start address
 * x1: end address (exclusive)
 */
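/*
 * CTR_EL0.DminLine (bits [19:16]) holds log2 of the smallest data
 * cache line in words; illustrative C for the instructions below
 * (hypothetical names):
 *
 *   linesize = 4 << ((ctr >> 16) & 0xf);   // bytes
 *   start   &= ~(linesize - 1);            // align down to a line
 */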
.globl __asm_flush_dcache_range
__asm_flush_dcache_range:
    mrs    x3, ctr_el0
    lsr    x3, x3, #16
    and    x3, x3, #0xf        /* x3 <- ctr_el0.dminline */
    mov    x2, #4
    lsl    x2, x2, x3        /* cache line size */

    /* x2 <- minimal cache line size in cache system */
    sub    x3, x2, #1
    bic    x0, x0, x3        /* align start down to a line boundary */

1:  dc    civac, x0    /* clean & invalidate data or unified cache */
    add    x0, x0, x2
    cmp    x0, x1
    b.lo    1b
    dsb    sy
    ret
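/*
 * Typical use from C, e.g. before a device DMA-reads from a buffer
 * (hypothetical names; the end address is exclusive):
 *
 *   __asm_flush_dcache_range((unsigned long)buf,
 *                            (unsigned long)buf + len);
 */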

/* void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate the data cache in the range [start, end)
 *
 * x0: start address
 * x1: end address (exclusive)
 */
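/*
 * Note: the start address is rounded down to a cache line boundary
 * and whole lines are invalidated, so dirty data sharing the first
 * or last line with [start, end) is discarded too; callers should
 * use line-aligned buffers with this routine.
 */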
.globl __asm_invalidate_dcache_range
__asm_invalidate_dcache_range:
    mrs    x3, ctr_el0
    lsr    x3, x3, #16
    and    x3, x3, #0xf        /* x3 <- ctr_el0.dminline */
    mov    x2, #4
    lsl    x2, x2, x3        /* cache line size */

    /* x2 <- minimal cache line size in cache system */
    sub    x3, x2, #1
    bic    x0, x0, x3        /* align start down to a line boundary */

1:  dc   ivac, x0    /* invalidate data or unified cache */
    add    x0, x0, x2
    cmp    x0, x1
    b.lo    1b
    dsb    sy
    ret
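/*
 * Typical use from C, e.g. before reading a buffer a device has
 * DMA-written (hypothetical names; the end address is exclusive):
 *
 *   __asm_invalidate_dcache_range((unsigned long)buf,
 *                                 (unsigned long)buf + len);
 */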

/* void __asm_invalidate_icache_range(start, end)
 *
 * invalidate the icache in the range [start, end)
 *
 * x0: start address
 * x1: end address (exclusive)
 */
.globl __asm_invalidate_icache_range
__asm_invalidate_icache_range:
    mrs    x3, ctr_el0
    and    x3, x3, #0xf        /* x3 <- ctr_el0.iminline */
    mov    x2, #4
    lsl    x2, x2, x3        /* cache line size */

    /* x2 <- minimal cache line size in cache system */
    sub    x3, x2, #1
    bic    x0, x0, x3        /* align start down to a line boundary */

1:  ic   ivau, x0    /* invalidate instruction or unified cache */
    add    x0, x0, x2
    cmp    x0, x1
    b.lo    1b
    dsb    sy
    ret
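/*
 * Note: this routine ends with dsb sy but no isb; when invalidating
 * for newly written code, the caller must still execute an isb
 * before branching into the range.
 */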

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate the entire icache to the point of unification,
 * inner shareable (ic ialluis).
 */
.globl __asm_invalidate_icache_all
__asm_invalidate_icache_all:
    dsb     sy
    ic      ialluis
    isb     sy
    ret

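/*
 * __asm_flush_l3_cache
 *
 * stub: no outer (L3/system) cache maintenance is performed here;
 * returns 0 in x0 to report success.
 */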
.globl __asm_flush_l3_cache
__asm_flush_l3_cache:
    mov    x0, #0            /* return status as success */
    ret