/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-18     Bernard      the first version
 * 2006-04-25     Bernard      add rt_hw_context_switch_interrupt declaration
 * 2006-09-24     Bernard      add rt_hw_context_switch_to declaration
 * 2012-12-29     Bernard      add rt_hw_exception_install declaration
 * 2017-10-17     Hichard      add some macros
 * 2018-11-17     Jesven       add rt_hw_spinlock_t
 *                             add smp support
 * 2019-05-18     Bernard      add empty definition for not enable cache case
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 * 2023-10-16     Shell        Support a new backtrace framework
 */

#ifndef __RT_HW_H__
#define __RT_HW_H__

#include <rtdef.h>

#if defined (RT_USING_CACHE) || defined(RT_USING_SMP) || defined(RT_HW_INCLUDE_CPUPORT)
#include <cpuport.h> /* include spinlock, cache ops, etc. */
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Some macro definitions
 */
#ifndef HWREG64
#define HWREG64(x)          (*((volatile rt_uint64_t *)(x)))
#endif
#ifndef HWREG32
#define HWREG32(x)          (*((volatile rt_uint32_t *)(x)))
#endif
#ifndef HWREG16
#define HWREG16(x)          (*((volatile rt_uint16_t *)(x)))
#endif
#ifndef HWREG8
#define HWREG8(x)           (*((volatile rt_uint8_t *)(x)))
#endif

#ifndef RT_CPU_CACHE_LINE_SZ
#define RT_CPU_CACHE_LINE_SZ    32
#endif

enum RT_HW_CACHE_OPS
{
    RT_HW_CACHE_FLUSH      = 0x01,
    RT_HW_CACHE_INVALIDATE = 0x02,
};

/*
 * CPU interfaces
 */
#ifdef RT_USING_CACHE

#ifdef RT_USING_SMART
#include <cache.h>
#endif

void rt_hw_cpu_icache_enable(void);
void rt_hw_cpu_icache_disable(void);
rt_base_t rt_hw_cpu_icache_status(void);
void rt_hw_cpu_icache_ops(int ops, void* addr, int size);

void rt_hw_cpu_dcache_enable(void);
void rt_hw_cpu_dcache_disable(void);
rt_base_t rt_hw_cpu_dcache_status(void);
void rt_hw_cpu_dcache_ops(int ops, void* addr, int size);
#else

/* define cache ops as empty */
#define rt_hw_cpu_icache_enable(...)
#define rt_hw_cpu_icache_disable(...)
#define rt_hw_cpu_icache_ops(...)
#define rt_hw_cpu_dcache_enable(...)
#define rt_hw_cpu_dcache_disable(...)
#define rt_hw_cpu_dcache_ops(...)

#define rt_hw_cpu_icache_status(...)    0
#define rt_hw_cpu_dcache_status(...)    0

#endif
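
/*
 * Usage sketch (illustrative only, not part of this header's API): flushing
 * and invalidating a DMA buffer with the cache operations declared above.
 * The buffer name and size are hypothetical; on builds without RT_USING_CACHE
 * these calls expand to nothing via the empty macros above.
 *
 *     static rt_uint8_t dma_buf[4 * RT_CPU_CACHE_LINE_SZ];
 *
 *     // CPU wrote dma_buf; push it to memory before the device reads it
 *     rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, dma_buf, sizeof(dma_buf));
 *
 *     // device wrote dma_buf; drop stale lines before the CPU reads it
 *     rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, dma_buf, sizeof(dma_buf));
 */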

void rt_hw_cpu_reset(void);
void rt_hw_cpu_shutdown(void);

const char *rt_hw_cpu_arch(void);

rt_uint8_t *rt_hw_stack_init(void       *entry,
                             void       *parameter,
                             rt_uint8_t *stack_addr,
                             void       *exit);

#ifdef RT_USING_HW_STACK_GUARD
void rt_hw_stack_guard_init(rt_thread_t thread);
#endif

/*
 * Interrupt handler definition
 */
typedef void (*rt_isr_handler_t)(int vector, void *param);

struct rt_irq_desc
{
    rt_isr_handler_t handler;
    void            *param;

#ifdef RT_USING_INTERRUPT_INFO
    char             name[RT_NAME_MAX];
    rt_uint32_t      counter;
#ifdef RT_USING_SMP
    rt_ubase_t       cpu_counter[RT_CPUS_NR];
#endif
#endif
};

/*
 * Interrupt interfaces
 */
void rt_hw_interrupt_init(void);
void rt_hw_interrupt_mask(int vector);
void rt_hw_interrupt_umask(int vector);
rt_isr_handler_t rt_hw_interrupt_install(int              vector,
                                         rt_isr_handler_t handler,
                                         void            *param,
                                         const char      *name);
void rt_hw_interrupt_uninstall(int              vector,
                               rt_isr_handler_t handler,
                               void            *param);

#ifdef RT_USING_SMP
rt_base_t rt_hw_local_irq_disable(void);
void rt_hw_local_irq_enable(rt_base_t level);

rt_base_t rt_cpus_lock(void);
void rt_cpus_unlock(rt_base_t level);

#define rt_hw_interrupt_disable rt_cpus_lock
#define rt_hw_interrupt_enable  rt_cpus_unlock
#else
rt_base_t rt_hw_interrupt_disable(void);
void rt_hw_interrupt_enable(rt_base_t level);

#define rt_hw_local_irq_disable rt_hw_interrupt_disable
#define rt_hw_local_irq_enable  rt_hw_interrupt_enable

#endif /* RT_USING_SMP */
rt_bool_t rt_hw_interrupt_is_disabled(void);

/*
 * Context interfaces
 */
#ifdef RT_USING_SMP
void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
void rt_hw_context_switch_to(rt_ubase_t to, struct rt_thread *to_thread);
void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
#else
void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
void rt_hw_context_switch_to(rt_ubase_t to);
void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread);
#endif /* RT_USING_SMP */

/**
 * Hardware Layer Backtrace Service
 */
struct rt_hw_backtrace_frame {
    rt_uintptr_t fp;
    rt_uintptr_t pc;
};

rt_err_t rt_hw_backtrace_frame_get(rt_thread_t thread, struct rt_hw_backtrace_frame *frame);

rt_err_t rt_hw_backtrace_frame_unwind(rt_thread_t thread, struct rt_hw_backtrace_frame *frame);

void rt_hw_console_output(const char *str);

void rt_hw_show_memory(rt_uint32_t addr, rt_size_t size);

/*
 * Exception interfaces
 */
void rt_hw_exception_install(rt_err_t (*exception_handle)(void *context));

/*
 * Delay interfaces
 */
void rt_hw_us_delay(rt_uint32_t us);

int rt_hw_cpu_id(void);

#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
/**
 * ipi function
 */
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask);
#endif

#ifdef RT_USING_SMP

void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock);

extern rt_hw_spinlock_t _cpus_lock;

#define __RT_HW_SPIN_LOCK_INITIALIZER(lockname) {0}

#define __RT_HW_SPIN_LOCK_UNLOCKED(lockname)    \
    (rt_hw_spinlock_t) __RT_HW_SPIN_LOCK_INITIALIZER(lockname)

#define RT_DEFINE_HW_SPINLOCK(x)    rt_hw_spinlock_t x = __RT_HW_SPIN_LOCK_UNLOCKED(x)
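
/*
 * Usage sketch (illustrative only): guarding a short cross-CPU critical
 * section with a hardware spinlock. "my_lock" is a hypothetical name; local
 * interrupts are typically disabled while the lock is held so the holder
 * cannot be preempted on its own CPU.
 *
 *     RT_DEFINE_HW_SPINLOCK(my_lock);
 *
 *     rt_base_t level = rt_hw_local_irq_disable();
 *     rt_hw_spin_lock(&my_lock);
 *     // ... access data shared between CPUs ...
 *     rt_hw_spin_unlock(&my_lock);
 *     rt_hw_local_irq_enable(level);
 */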

/**
 * boot secondary cpu
 */
void rt_hw_secondary_cpu_up(void);

/**
 * secondary cpu idle function
 */
void rt_hw_secondary_cpu_idle_exec(void);

#else /* !RT_USING_SMP */

#define RT_DEFINE_HW_SPINLOCK(x)    rt_ubase_t x

#define rt_hw_spin_lock(lock)   *(lock) = rt_hw_interrupt_disable()
#define rt_hw_spin_unlock(lock) rt_hw_interrupt_enable(*(lock))

#endif /* RT_USING_SMP */

#ifndef RT_USING_CACHE
#define rt_hw_isb()
#define rt_hw_dmb()
#define rt_hw_dsb()
#endif /* RT_USING_CACHE */

#ifdef __cplusplus
}
#endif

#endif /* __RT_HW_H__ */