/*
 * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef CPU_DATA_H
#define CPU_DATA_H

#include <platform_def.h>	/* CACHE_WRITEBACK_GRANULE required */

#include <bl31/ehf.h>

/* Size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE		12

#ifdef __aarch64__

/* 8-bytes aligned size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE_ALIGNED	((PSCI_CPU_DATA_SIZE + 7) & ~7)

#if ENABLE_RME
/* Size of cpu_context array */
#define CPU_DATA_CONTEXT_NUM		3
/* Offset of cpu_ops_ptr, size 8 bytes */
#define CPU_DATA_CPU_OPS_PTR		0x18
#else /* ENABLE_RME */
#define CPU_DATA_CONTEXT_NUM		2
#define CPU_DATA_CPU_OPS_PTR		0x10
#endif /* ENABLE_RME */

#if ENABLE_PAUTH
/* 8-bytes aligned offset of apiakey[2], size 16 bytes */
#define	CPU_DATA_APIAKEY_OFFSET		(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
#define CPU_DATA_CRASH_BUF_OFFSET	(0x10 + CPU_DATA_APIAKEY_OFFSET)
#else /* ENABLE_PAUTH */
#define CPU_DATA_CRASH_BUF_OFFSET	(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
#endif /* ENABLE_PAUTH */

/* need enough space in crash buffer to save 8 registers */
#define CPU_DATA_CRASH_BUF_SIZE		64
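/*
 * Illustrative worked example (an assumption about the build configuration,
 * not something fixed by this header): with both ENABLE_RME and ENABLE_PAUTH
 * set, the assembler-visible offsets above resolve to:
 *
 *   PSCI_CPU_DATA_SIZE_ALIGNED = (12 + 7) & ~7       = 0x10
 *   CPU_DATA_CPU_OPS_PTR                             = 0x18
 *   CPU_DATA_APIAKEY_OFFSET    = 0x8 + 0x10 + 0x18   = 0x30
 *   CPU_DATA_CRASH_BUF_OFFSET  = 0x10 + 0x30         = 0x40
 *
 * i.e. three 8-byte cpu_context pointers, the 8-byte cpu_ops_ptr, the
 * 16-byte (8-byte aligned) psci_cpu_data, then apiakey[2] followed by the
 * crash buffer. The CASSERTs further down check these values against the
 * actual cpu_data_t layout.
 */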

#else	/* !__aarch64__ */

#if CRASH_REPORTING
#error "Crash reporting is not supported in AArch32"
#endif
#define CPU_DATA_CPU_OPS_PTR		0x0
#define CPU_DATA_CRASH_BUF_OFFSET	(0x4 + PSCI_CPU_DATA_SIZE)

#endif	/* __aarch64__ */

#if CRASH_REPORTING
#define CPU_DATA_CRASH_BUF_END		(CPU_DATA_CRASH_BUF_OFFSET + \
						CPU_DATA_CRASH_BUF_SIZE)
#else
#define CPU_DATA_CRASH_BUF_END		CPU_DATA_CRASH_BUF_OFFSET
#endif

/* buffer space for EHF data is sizeof(pe_exc_data_t) */
#define CPU_DATA_EHF_DATA_SIZE		8
#define CPU_DATA_EHF_DATA_BUF_OFFSET	CPU_DATA_CRASH_BUF_END

#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
#define CPU_DATA_EHF_DATA_BUF_END	(CPU_DATA_EHF_DATA_BUF_OFFSET + \
						CPU_DATA_EHF_DATA_SIZE)
#else
#define CPU_DATA_EHF_DATA_BUF_END	CPU_DATA_EHF_DATA_BUF_OFFSET
#endif	/* EL3_EXCEPTION_HANDLING */

/* cpu_data size is the data size rounded up to the platform cache line size */
#define CPU_DATA_SIZE			(((CPU_DATA_EHF_DATA_BUF_END + \
					CACHE_WRITEBACK_GRANULE - 1) / \
						CACHE_WRITEBACK_GRANULE) * \
							CACHE_WRITEBACK_GRANULE)
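
/*
 * Sketch of the rounding above (the numbers are assumptions, not values
 * mandated by this header): with CACHE_WRITEBACK_GRANULE = 64 and a raw
 * CPU_DATA_EHF_DATA_BUF_END of 0x48 (72 bytes), the integer division
 * rounds up to 2 granules, so CPU_DATA_SIZE = 0x80 (128 bytes). The
 * CASSERT against sizeof(cpu_data_t) below catches any mismatch with the
 * C structure definition.
 */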

#if ENABLE_RUNTIME_INSTRUMENTATION
/* Temporary space to store PMF timestamps from assembly code */
#define CPU_DATA_PMF_TS_COUNT		1
#define CPU_DATA_PMF_TS0_OFFSET		CPU_DATA_EHF_DATA_BUF_END
#define CPU_DATA_PMF_TS0_IDX		0
#endif

#ifndef __ASSEMBLER__

#include <assert.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/psci/psci.h>

#include <platform_def.h>

/* Offsets for the cpu_data structure */
#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
		(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)

#if PLAT_PCPU_DATA_SIZE
#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
		(cpu_data_t, platform_cpu_data)
#endif

typedef enum context_pas {
	CPU_CONTEXT_SECURE = 0,
	CPU_CONTEXT_NS,
#if ENABLE_RME
	CPU_CONTEXT_REALM,
#endif
	CPU_CONTEXT_NUM
} context_pas_t;

/*******************************************************************************
 * Function & variable prototypes
 ******************************************************************************/

/*******************************************************************************
 * Cache of frequently used per-cpu data:
 *   Pointers to non-secure, realm, and secure security state contexts
 *   Address of the crash stack
 * It is aligned to the cache line boundary to allow efficient concurrent
 * manipulation of these pointers on different cpus
 *
 * The data structure and the _cpu_data accessors should not be used directly
 * by components that have per-cpu members. The member access macros should be
 * used for this.
 ******************************************************************************/
typedef struct cpu_data {
#ifdef __aarch64__
	void *cpu_context[CPU_DATA_CONTEXT_NUM];
#endif /* __aarch64__ */
	uintptr_t cpu_ops_ptr;
	struct psci_cpu_data psci_svc_cpu_data;
#if ENABLE_PAUTH
	uint64_t apiakey[2];
#endif
#if CRASH_REPORTING
	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
#endif
#if PLAT_PCPU_DATA_SIZE
	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
	pe_exc_data_t ehf_data;
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;

extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

#ifdef __aarch64__
CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM,
		assert_cpu_data_context_num_mismatch);
#endif

#if ENABLE_PAUTH
CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
	(cpu_data_t, apiakey),
	assert_cpu_data_pauth_stack_offset_mismatch);
#endif

#if CRASH_REPORTING
/* verify assembler offsets match data structures */
CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, crash_buf),
	assert_cpu_data_crash_stack_offset_mismatch);
#endif

#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
CASSERT(CPU_DATA_EHF_DATA_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, ehf_data),
	assert_cpu_data_ehf_stack_offset_mismatch);
#endif

CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
		assert_cpu_data_size_mismatch);

CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
		(cpu_data_t, cpu_ops_ptr),
		assert_cpu_data_cpu_ops_ptr_offset_mismatch);

#if ENABLE_RUNTIME_INSTRUMENTATION
CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
		(cpu_data_t, cpu_data_pmf_ts[0]),
		assert_cpu_data_pmf_ts0_offset_mismatch);
#endif

struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);

#ifdef __aarch64__
/* Return the cpu_data structure for the current CPU. */
static inline struct cpu_data *_cpu_data(void)
{
	return (cpu_data_t *)read_tpidr_el3();
}
#else
struct cpu_data *_cpu_data(void);
#endif

/*
 * Returns the index of the cpu_context array for the given security state.
 * All accesses to cpu_context should be through this helper to make sure
 * an access is not out-of-bounds. The function assumes security_state is
 * valid.
 */
static inline context_pas_t get_cpu_context_index(uint32_t security_state)
{
	if (security_state == SECURE) {
		return CPU_CONTEXT_SECURE;
	} else {
#if ENABLE_RME
		if (security_state == NON_SECURE) {
			return CPU_CONTEXT_NS;
		} else {
			assert(security_state == REALM);
			return CPU_CONTEXT_REALM;
		}
#else
		assert(security_state == NON_SECURE);
		return CPU_CONTEXT_NS;
#endif
	}
}
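
/*
 * Example usage (a minimal sketch, not a definition provided by this header):
 * context management code would typically combine this helper with the
 * per-cpu accessor macros defined below to fetch the saved context pointer
 * for a given security state, e.g.:
 *
 *	void *ctx;
 *	ctx = get_cpu_data(cpu_context[get_cpu_context_index(NON_SECURE)]);
 */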

/**************************************************************************
 * APIs for initialising and accessing per-cpu data
 *************************************************************************/

void init_cpu_data_ptr(void);
void init_cpu_ops(void);

#define get_cpu_data(_m)		   _cpu_data()->_m
#define set_cpu_data(_m, _v)		   _cpu_data()->_m = (_v)
#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
/* ((cpu_data_t *)0)->_m is a dummy to get the sizeof the struct member _m */
#define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define inv_cpu_data(_m)	   inv_dcache_range((uintptr_t)		  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define flush_cpu_data_by_index(_ix, _m)	\
				   flush_dcache_range((uintptr_t)	  \
					 &(_cpu_data_by_index(_ix)->_m),  \
						sizeof(((cpu_data_t *)0)->_m))

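/*
 * Example usage (illustrative only; "foo" is a hypothetical per-cpu member,
 * not one defined in cpu_data_t): components with per-cpu state should go
 * through these macros rather than dereferencing cpu_data_t directly:
 *
 *	set_cpu_data(foo, 1U);
 *	val = get_cpu_data_by_index(core_idx, foo);
 *	flush_cpu_data(foo);	(writes back only that member's cache lines)
 */
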

#endif /* __ASSEMBLER__ */
#endif /* CPU_DATA_H */