/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CACHE_H
#define __ASM_CACHE_H

#define L1_CACHE_SHIFT		(6)
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
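
/*
 * Worked example: with L1_CACHE_SHIFT = 6, L1_CACHE_BYTES evaluates to
 * 1 << 6 = 64, i.e. the 64-byte L1 line size common on arm64 parts.
 */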

#define CLIDR_LOUU_SHIFT	27
#define CLIDR_LOC_SHIFT		24
#define CLIDR_LOUIS_SHIFT	21

#define CLIDR_LOUU(clidr)	(((clidr) >> CLIDR_LOUU_SHIFT) & 0x7)
#define CLIDR_LOC(clidr)	(((clidr) >> CLIDR_LOC_SHIFT) & 0x7)
#define CLIDR_LOUIS(clidr)	(((clidr) >> CLIDR_LOUIS_SHIFT) & 0x7)

/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
#define CLIDR_CTYPE(clidr, level)	\
	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
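
/*
 * Illustrative sketch (not part of this header): decoding the type of a
 * given cache level with the accessors above, e.g.
 *
 *	u64 clidr = read_sysreg(clidr_el1);
 *	unsigned int ctype = CLIDR_CTYPE(clidr, 2);
 *
 * where, architecturally, Ctypen reads 0b000 for no cache, 0b001 for
 * instruction only, 0b010 for data only, 0b011 for separate instruction
 * and data, and 0b100 for a unified cache.
 */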

/* Ttypen, bits [2(n - 1) + 34 : 2(n - 1) + 33], for n = 1 to 7 */
#define CLIDR_TTYPE_SHIFT(level)	(2 * ((level) - 1) + CLIDR_EL1_Ttypen_SHIFT)
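
/*
 * Worked example (assuming CLIDR_EL1_Ttypen_SHIFT = 33, as generated in
 * <asm/sysreg.h>): CLIDR_TTYPE_SHIFT(1) is bit 33 and CLIDR_TTYPE_SHIFT(7)
 * is bit 45, matching the Ttypen layout described above.
 */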

/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN	(128)
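
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * because of the guarantee above, a kmalloc()ed buffer is safe to hand
 * straight to the DMA API without extra padding:
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	dma = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
 *
 * 128 bytes corresponds to the largest Cache Writeback Granule the kernel
 * expects to encounter; see cache_line_size_of_cpu() below.
 */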

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/kasan-enabled.h>

#include <asm/cputype.h>
#include <asm/mte-def.h>
#include <asm/sysreg.h>

#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT)
#elif defined(CONFIG_KASAN_HW_TAGS)
static inline unsigned int arch_slab_minalign(void)
{
	return kasan_hw_tags_enabled() ? MTE_GRANULE_SIZE :
					 __alignof__(unsigned long long);
}
#define arch_slab_minalign() arch_slab_minalign()
#endif
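
/*
 * Illustrative note (not part of this header): with KASAN_HW_TAGS built
 * in but MTE disabled at boot, arch_slab_minalign() falls back to
 * __alignof__(unsigned long long), i.e. 8 bytes; an MTE-enabled boot
 * returns MTE_GRANULE_SIZE (16 bytes) so that slab objects sit on
 * taggable granule boundaries.
 */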

#define CTR_L1IP(ctr)		SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
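
/*
 * CTR_L1IP() extracts the L1 instruction cache indexing policy (VPIPT,
 * VIPT or PIPT) from a CTR_EL0 value; the ICACHEF_* flags below record
 * the properties derived from it at boot.
 */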

#define ICACHEF_ALIASING	0
#define ICACHEF_VPIPT		1
extern unsigned long __icache_flags;

/*
 * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
 * permitted in the I-cache.
 */
static inline int icache_is_aliasing(void)
{
	return test_bit(ICACHEF_ALIASING, &__icache_flags);
}

static __always_inline int icache_is_vpipt(void)
{
	return test_bit(ICACHEF_VPIPT, &__icache_flags);
}
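
/*
 * Illustrative note (not part of this header): I-cache maintenance code
 * uses these predicates to pick the cheapest sufficient operation, e.g.
 * invalidating the whole I-cache (IC IALLUIS) when icache_is_aliasing()
 * is true, since by-VA IC IVAU is only enough for a non-aliasing
 * (PIPT-like) I-cache.
 */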

static inline u32 cache_type_cwg(void)
{
	return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype());
}

#define __read_mostly		__section(".data..read_mostly")

static inline int cache_line_size_of_cpu(void)
{
	u32 cwg = cache_type_cwg();

	return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}
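
/*
 * Worked example: CTR_EL0.CWG encodes the writeback granule as
 * 4 << CWG bytes, so CWG = 4 yields 64 bytes. CWG = 0 means the granule
 * is not reported, in which case we conservatively assume
 * ARCH_DMA_MINALIGN (128 bytes), the largest granule supported.
 */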

int cache_line_size(void);

/*
 * Read the effective value of CTR_EL0.
 *
 * According to the ARM ARM for ARMv8-A (ARM DDI 0487C.a),
 * section D10.2.33 "CTR_EL0, Cache Type Register":
 *
 * CTR_EL0.IDC reports the data cache clean requirements for
 * instruction to data coherence.
 *
 *  0 - dcache clean to PoU is required unless:
 *     (CLIDR_EL1.LoC == 0) || (CLIDR_EL1.LoUIS == 0 && CLIDR_EL1.LoUU == 0)
 *  1 - dcache clean to PoU is not required for i-to-d coherence.
 *
 * This routine returns the CTR_EL0 value with the IDC field updated to
 * its effective state.
 */
static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
{
	u32 ctr = read_cpuid_cachetype();

	if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
		u64 clidr = read_sysreg(clidr_el1);

		if (CLIDR_LOC(clidr) == 0 ||
		    (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0))
			ctr |= BIT(CTR_EL0_IDC_SHIFT);
	}

	return ctr;
}
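
/*
 * Illustrative sketch (not part of this header): callers test the
 * effective IDC bit rather than the raw register, e.g.
 *
 *	if (read_cpuid_effective_cachetype() & BIT(CTR_EL0_IDC_SHIFT))
 *		... no D-cache clean to PoU is needed before executing
 *		    newly written code ...
 */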

#endif	/* __ASSEMBLY__ */

#endif