/*
 * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#pragma once

#ifndef ASSEMBLY

#include <stdbool.h>
#include <lk/compiler.h>
#include <lk/reg.h>
#include <arch/arm.h>

#if ARM_ISA_ARMV7M
#include <arch/arm/cm.h>
#endif

__BEGIN_CDECLS

#if ARM_ISA_ARMV7 || (ARM_ISA_ARMV6 && !__thumb__)
#define ENABLE_CYCLE_COUNTER 1

// override of some routines
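/*
 * CF is assumed to be the compiler-fence macro from lk/compiler.h. It sits
 * on the critical-section side of each cps instruction (before cpsie, after
 * cpsid) so the compiler cannot move protected memory accesses out of the
 * window where interrupts are masked.
 */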
static inline void arch_enable_ints(void) {
    CF;
    __asm__ volatile("cpsie i");
}

static inline void arch_disable_ints(void) {
    __asm__ volatile("cpsid i");
    CF;
}

static inline bool arch_ints_disabled(void) {
    unsigned int state;

#if ARM_ISA_ARMV7M
    __asm__ volatile("mrs %0, primask" : "=r"(state));
    state &= 0x1;
#else
    __asm__ volatile("mrs %0, cpsr" : "=r"(state));
    state &= (1<<7);
#endif

    return !!state;
}
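
/*
 * A minimal usage sketch (the save/restore pairing shown is illustrative,
 * not an API defined by this header):
 *
 *   bool was_disabled = arch_ints_disabled();
 *   arch_disable_ints();
 *   // ... touch state shared with interrupt context ...
 *   if (!was_disabled)
 *       arch_enable_ints();
 */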

static inline void arch_enable_fiqs(void) {
    CF;
    __asm__ volatile("cpsie f");
}

static inline void arch_disable_fiqs(void) {
    __asm__ volatile("cpsid f");
    CF;
}

static inline bool arch_fiqs_disabled(void) {
    unsigned int state;

    __asm__ volatile("mrs %0, cpsr" : "=r"(state));
    state &= (1<<6);

    return !!state;
}

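/*
 * On ARMv7-M, IPSR holds the active exception number and reads as zero in
 * thread mode, so any nonzero value means the cpu is running an exception
 * handler. IPSR_ISR_Msk is assumed to come from the CMSIS-style definitions
 * pulled in via arch/arm/cm.h.
 */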
static inline bool arch_in_int_handler(void) {
#if ARM_ISA_ARMV7M
    uint32_t ipsr;
    __asm__ volatile("mrs %0, ipsr" : "=r"(ipsr));
    return (ipsr & IPSR_ISR_Msk);
#else
    /* set by the interrupt glue to track that the cpu is inside a handler */
    extern bool __arm_in_handler;

    return __arm_in_handler;
#endif
}

static inline ulong arch_cycle_count(void) {
#if ARM_ISA_ARMV7M
#if ENABLE_CYCLE_COUNTER
#define DWT_CYCCNT (0xE0001004)
    return *REG32(DWT_CYCCNT);
#else
    return 0;
#endif
#elif ARM_ISA_ARMV7
    uint32_t count;
    __asm__ volatile("mrc p15, 0, %0, c9, c13, 0"
        : "=r" (count)
    );
    return count;
#else
    //#warning no arch_cycle_count implementation
    return 0;
#endif
}
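
/*
 * Reading these counters assumes they were enabled during platform init.
 * On ARMv7-M, DWT_CYCCNT only counts once DEMCR.TRCENA and DWT_CTRL.CYCCNTENA
 * are set; on ARMv7-A, PMCCNTR (c9, c13, 0) requires the PMU cycle counter to
 * be enabled via PMCR and PMCNTENSET. A hypothetical init sketch for the
 * M-profile case:
 *
 *   *REG32(0xE000EDFC) |= (1U << 24); // DEMCR.TRCENA: enable DWT/ITM
 *   *REG32(0xE0001000) |= 1U;         // DWT_CTRL.CYCCNTENA: start the counter
 */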

#if WITH_SMP && ARM_ISA_ARMV7
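/* compose the cpu number from MPIDR affinity fields: Aff1 (the cluster id,
 * bits [15:8]) shifted by SMP_CPU_CLUSTER_SHIFT, ored with Aff0 (the cpu id
 * within the cluster, bits [7:0]). Example with assumed config values
 * SMP_CPU_ID_BITS = 9, SMP_CPU_CLUSTER_SHIFT = 1: mpidr 0x101 (cluster 1,
 * cpu 1) -> ((0x101 & 0x1ff) >> 8 << 1) | (0x101 & 0xff) = 2 | 1 = 3. */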
static inline uint arch_curr_cpu_num(void) {
    uint32_t mpidr = arm_read_mpidr();
    return ((mpidr & ((1U << SMP_CPU_ID_BITS) - 1)) >> 8 << SMP_CPU_CLUSTER_SHIFT) | (mpidr & 0xff);
}
#else
static inline uint arch_curr_cpu_num(void) {
    return 0;
}
#endif

/* struct thread is defined in kernel/thread.h */

#if !ARM_ISA_ARMV7M
/* use the cpu-local thread context pointer register (TPIDRPRW) to store
 * current_thread, which stays correct per cpu under SMP */
static inline struct thread *arch_get_current_thread(void) {
    return (struct thread *)arm_read_tpidrprw();
}

static inline void arch_set_current_thread(struct thread *t) {
    arm_write_tpidrprw((uint32_t)t);
}
#else // ARM_ISA_ARMV7M

/* use a global pointer to store the current_thread */
extern struct thread *_current_thread;

static inline struct thread *arch_get_current_thread(void) {
    return _current_thread;
}

static inline void arch_set_current_thread(struct thread *t) {
    _current_thread = t;
}

#endif // !ARM_ISA_ARMV7M

#elif ARM_ISA_ARMV6M // Cortex-M0, Cortex-M0+

static inline void arch_enable_fiqs(void) {
    CF;
    __asm__ volatile("cpsie f");
}

static inline void arch_disable_fiqs(void) {
    __asm__ volatile("cpsid f");
    CF;
}

static inline bool arch_fiqs_disabled(void) {
    unsigned int state;

    __asm__ volatile("mrs %0, cpsr" : "=r"(state));
    state &= (1<<6);

    return !!state;
}

static inline void arch_enable_ints(void) {
    CF;
    __asm__ volatile("cpsie i");
}

static inline void arch_disable_ints(void) {
    __asm__ volatile("cpsid i");
    CF;
}

static inline bool arch_ints_disabled(void) {
    unsigned int state;

    __asm__ volatile("mrs %0, primask" : "=r"(state));
    state &= 0x1;
    return !!state;
}

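/* Cortex-M0/M0+ cores have no DWT cycle counter to read, so report 0 */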
static inline ulong arch_cycle_count(void) {
    return 0;
}

static inline uint arch_curr_cpu_num(void) {
    return 0;
}

/* use a global pointer to store the current_thread */
extern struct thread *_current_thread;

static inline struct thread *arch_get_current_thread(void) {
    return _current_thread;
}

static inline void arch_set_current_thread(struct thread *t) {
    _current_thread = t;
}

#else // pre-armv6 || (armv6 && thumb)

#error pre-armv6 or armv6 + thumb unimplemented

// Pre-armv6 targets, and thumb1-only builds inside an armv6 compile, are
// no longer maintained due to disuse and the complexity of supporting them.

#endif

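/*
 * mb/wmb/rmb order against all observers via dsb; on SMP builds the smp_*
 * variants only need dmb, and on uniprocessor builds they degrade to CF, a
 * compiler-only fence. DSB, DMB and CF are assumed to be macros supplied by
 * the arch/arm and lk/compiler.h headers.
 */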
#define mb()        DSB
#define wmb()       DSB
#define rmb()       DSB

#if WITH_SMP
#define smp_mb()    DMB
#define smp_wmb()   DMB
#define smp_rmb()   DMB
#else
#define smp_mb()    CF
#define smp_wmb()   CF
#define smp_rmb()   CF
#endif

__END_CDECLS

#endif // ASSEMBLY