/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-09-15     Bernard      first version
 * 2019-07-28     zdzn         add smp support
 * 2023-02-21     GuEe-GUI     move cpu ofw init to setup
 * 2024-04-29     Shell        Add generic ticket spinlock using C11 atomic
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#include <cpu.h>

#define DBG_TAG "libcpu.aarch64.cpu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#ifdef RT_USING_SMP

#define REPORT_ERR(retval) LOG_E("got error code %d in %s(), %s:%d", (retval), __func__, __FILE__, __LINE__)
#define CHECK_RETVAL(retval) if (retval) {REPORT_ERR(retval);}
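/*
 * Bounds-checked accessors for the per-cpu tables below: an out-of-range
 * cpuid yields the ID_ERROR / NULL sentinel instead of touching the table.
 */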
#define cpuid_to_hwid(cpuid) \
    ((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? rt_cpu_mpidr_early[cpuid] : ID_ERROR)
#define set_hwid(cpuid, hwid) \
    ((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (rt_cpu_mpidr_early[cpuid] = (hwid)) : ID_ERROR)
#define get_cpu_node(cpuid) \
    ((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? _cpu_node[cpuid] : NULL)
#define set_cpu_node(cpuid, node) \
    ((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (_cpu_node[cpuid] = node) : NULL)

typedef rt_hw_spinlock_t arch_spinlock_t;
struct cpu_ops_t *cpu_ops_tbl[RT_CPUS_NR];

#ifdef RT_USING_SMART
// table translating logical cpu ids to mpidr, which is a 64-bit value
rt_uint64_t rt_cpu_mpidr_early[RT_CPUS_NR] rt_weak = {[0 ... RT_CPUS_NR - 1] = ID_ERROR};
#else
/* The common mpidr_el1 table; redefine it in the BSP if the mapping differs */
rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
{
    [0] = 0x80000000,
    [1] = 0x80000001,
    [2] = 0x80000002,
    [3] = 0x80000003,
    [4] = 0x80000004,
    [5] = 0x80000005,
    [6] = 0x80000006,
    [7] = 0x80000007,
    [RT_CPUS_NR] = 0
};
#endif /* RT_USING_SMART */

/* ticket spinlock implementation, used when C11 atomics are supported */
#if __STDC_VERSION__ >= 201112L
#include <stdatomic.h>

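/*
 * The 32-bit _value packs the two 16-bit ticket fields, so trylock can
 * operate on the whole lock word with a single compare-and-swap, while
 * lock/unlock touch the owner/next halves individually.
 */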
union _spinlock
{
    _Atomic(rt_uint32_t) _value;
    struct
    {
        _Atomic(rt_uint16_t) owner;
        _Atomic(rt_uint16_t) next;
    } ticket;
};

void rt_hw_spin_lock_init(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;

    /**
     * Note: this store is atomic anyway on arm64, even without the atomic
     * API; the explicit call simply documents the intent.
     */
    atomic_store_explicit(&lock->_value, 0, memory_order_relaxed);
}

rt_bool_t rt_hw_spin_trylock(rt_hw_spinlock_t *_lock)
{
    rt_bool_t rc;
    rt_uint32_t readonce;
    union _spinlock temp;
    union _spinlock *lock = (void *)_lock;

    readonce = atomic_load_explicit(&lock->_value, memory_order_acquire);
    temp._value = readonce;

    if (temp.ticket.owner != temp.ticket.next)
    {
        /* another ticket is pending, so the lock is currently held */
        rc = RT_FALSE;
    }
    else
    {
        /* claim the next ticket; the CAS fails if another CPU raced us */
        temp.ticket.next += 1;
        rc = atomic_compare_exchange_strong_explicit(
            &lock->_value, &readonce, temp._value,
            memory_order_acquire, memory_order_relaxed);
    }
    return rc;
}

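/*
 * Load the owner field with acquire semantics via LDAXRH; the exclusive
 * access also arms the local monitor, so the WFE in the lock loop is woken
 * when the unlocking CPU writes the lock word.
 */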
rt_inline rt_base_t _load_acq_exclusive(_Atomic(rt_uint16_t) *halfword)
{
    rt_uint32_t old;
    __asm__ volatile("ldaxrh %w0, [%1]"
                     : "=&r"(old)
                     : "r"(halfword)
                     : "memory");
    return old;
}

rt_inline void _send_event_local(void)
{
    __asm__ volatile("sevl");
}

rt_inline void _wait_for_event(void)
{
    __asm__ volatile("wfe" ::: "memory");
}

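/*
 * Take a ticket with a relaxed fetch-add, then spin with SEVL/WFE until the
 * owner field reaches our ticket number; the acquire loads order the
 * critical section after the lock acquisition.
 */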
void rt_hw_spin_lock(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;
    rt_uint16_t ticket =
        atomic_fetch_add_explicit(&lock->ticket.next, 1, memory_order_relaxed);

    if (atomic_load_explicit(&lock->ticket.owner, memory_order_acquire) !=
        ticket)
    {
        _send_event_local();
        do
        {
            _wait_for_event();
        }
        while (_load_acq_exclusive(&lock->ticket.owner) != ticket);
    }
}

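/*
 * Release the lock by advancing owner; the release store both publishes the
 * critical section and wakes the waiter holding the next ticket.
 */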
void rt_hw_spin_unlock(rt_hw_spinlock_t *_lock)
{
    union _spinlock *lock = (void *)_lock;
    atomic_fetch_add_explicit(&lock->ticket.owner, 1, memory_order_release);
}

#endif /* __STDC_VERSION__ >= 201112L */

static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    // load cpu_hw_ids into the cpuid_to_hwid table and
    // cpu_ops into cpu_ops_tbl
    if (num_cpus > RT_CPUS_NR)
    {
        LOG_W("num_cpus (%d) greater than RT_CPUS_NR (%d)", num_cpus, RT_CPUS_NR);
        num_cpus = RT_CPUS_NR;
    }

    for (int i = 0; i < num_cpus; i++)
    {
        set_hwid(i, cpu_hw_ids[i]);
        cpu_ops_tbl[i] = cpu_ops[i];
    }
    return 0;
}

/** init cpus with hardcoded information or with information parsed from the FDT */
static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    int retval;

    // first set up cpu_ops_tbl and the cpuid_to_hwid table
    if (num_cpus > 0)
    {
        retval = _cpus_init_data_hardcoded(num_cpus, cpu_hw_ids, cpu_ops);
    }
    else
    {
        retval = -1;
    }

    if (retval)
        return retval;

    // use cpuid_to_hwid and cpu_ops_tbl to call each core's cpu_init method,
    // assuming that cpuid 0 (the boot core) has already been initialized
    for (int i = 1; i < RT_CPUS_NR; i++)
    {
        if (rt_cpu_mpidr_early[i] == ID_ERROR)
        {
            LOG_E("Failed to find hardware id of CPU %d", i);
            continue;
        }

        if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_init)
        {
            retval = cpu_ops_tbl[i]->cpu_init(i, RT_NULL);
            CHECK_RETVAL(retval);
        }
        else
        {
            LOG_E("Failed to find cpu_init for cpu %d with cpu_ops[%p], cpu_ops->cpu_init[%p]",
                  i, cpu_ops_tbl[i], cpu_ops_tbl[i] ? cpu_ops_tbl[i]->cpu_init : RT_NULL);
        }
    }
    return 0;
}

/**
 * @brief boot the secondary cpus with hardcoded data
 *
 * @param num_cpus number of cpus
 * @param cpu_hw_ids each element is the hwid of cpu[i]
 * @param cpu_ops each element is a pointer to the cpu_ops of cpu[i]
 * @return int 0 on success, a negative error code otherwise
 */
int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
    int retval = 0;
    if (num_cpus < 1 || !cpu_hw_ids || !cpu_ops)
    {
        return -1;
    }

    retval = _cpus_init(num_cpus, cpu_hw_ids, cpu_ops);
    CHECK_RETVAL(retval);

    return retval;
}
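
/*
 * A minimal usage sketch (not part of this file): a BSP with a hypothetical
 * `my_cpu_ops` table would typically call, during early board init:
 *
 *     static rt_uint64_t hwids[] = { 0x80000000, 0x80000001 };
 *     static struct cpu_ops_t *ops[] = { &my_cpu_ops, &my_cpu_ops };
 *     rt_hw_cpu_boot_secondary(2, hwids, ops);
 *
 * where `my_cpu_ops` provides the cpu_init callback used above.
 */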

#endif /* RT_USING_SMP */

/**
 * @addtogroup ARM CPU
 */
/*@{*/

const char *rt_hw_cpu_arch(void)
{
    return "aarch64";
}

/*@}*/