/*
 * Copyright (c) 2015 Stefan Kristiansson
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#pragma once

#include <lk/compiler.h>
#include <arch/or1k.h>

#ifndef ASSEMBLY
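/* Enable interrupts by setting the external interrupt (IEE) and tick timer
 * (TEE) exception enable bits in the supervision register (SR).
 */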
static inline void arch_enable_ints(void) {
    uint32_t sr = mfspr(OR1K_SPR_SYS_SR_ADDR);

    sr |= OR1K_SPR_SYS_SR_IEE_MASK | OR1K_SPR_SYS_SR_TEE_MASK;
    mtspr(OR1K_SPR_SYS_SR_ADDR, sr);
}

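/* Disable interrupts by clearing the same IEE and TEE bits in the SR. */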
static inline void arch_disable_ints(void) {
    uint32_t sr = mfspr(OR1K_SPR_SYS_SR_ADDR);

    sr &= ~(OR1K_SPR_SYS_SR_IEE_MASK | OR1K_SPR_SYS_SR_TEE_MASK);
    mtspr(OR1K_SPR_SYS_SR_ADDR, sr);
}

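/* Interrupts count as disabled only when both IEE and TEE are clear. */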
static inline bool arch_ints_disabled(void) {
    uint32_t sr = mfspr(OR1K_SPR_SYS_SR_ADDR);

    return !(sr & (OR1K_SPR_SYS_SR_IEE_MASK | OR1K_SPR_SYS_SR_TEE_MASK));
}

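/*
 * A minimal sketch of how callers typically pair these helpers (LK proper
 * goes through its spinlock/interrupt-save wrappers; this is illustrative
 * only):
 *
 *   bool already_disabled = arch_ints_disabled();
 *   arch_disable_ints();
 *   // ... critical section ...
 *   if (!already_disabled)
 *       arch_enable_ints();
 */
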
// The compiler's builtin atomics are used instead; these definitions are
// kept for reference but compiled out.
#if 0
static inline int atomic_add(volatile int *ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_or(volatile int *ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_and(volatile int *ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_swap(volatile int *ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
    __asm__ __volatile__(
        "1: l.lwa %0, 0(%1)  \n" /* load-linked the current value */
        "   l.sfeq %0, %2    \n" /* flag = (current == expected)? */
        "   l.bnf 1f         \n" /* mismatch: skip the store */
        "   l.nop            \n" /* branch delay slot */
        "   l.swa 0(%1), %3  \n" /* store-conditional the new value */
        "   l.bnf 1b         \n" /* reservation lost: retry from the load */
        "1: l.nop            \n" /* delay slot, doubles as the exit label */
        : "=&r"(oldval)
        : "r"(ptr), "r"(oldval), "r"(newval)
        : "cc", "memory");

    return oldval;
}
#endif

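/*
 * For reference, the disabled atomic_cmpxchg() above matches this compiler
 * builtin formulation (a sketch, not necessarily the exact call LK uses):
 *
 *   static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) {
 *       __atomic_compare_exchange_n(ptr, &oldval, newval, false,
 *                                   __ATOMIC_RELAXED, __ATOMIC_RELAXED);
 *       return oldval; // updated to the observed value on failure
 *   }
 */
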
/* use a global pointer to store the current_thread */
extern struct thread *_current_thread;

static inline struct thread *arch_get_current_thread(void) {
    return _current_thread;
}

static inline void arch_set_current_thread(struct thread *t) {
    _current_thread = t;
}

/* no cycle counter is wired up; always reports 0 */
static inline ulong arch_cycle_count(void) { return 0; }

/* single-CPU configuration: the current CPU is always CPU 0 */
static inline uint arch_curr_cpu_num(void) {
    return 0;
}

// Default barriers for architectures that generally don't need them.
// CF expands to lk/compiler.h's compiler fence, which blocks compiler
// reordering without emitting a barrier instruction.
// TODO: do we need these for or1k?
#define mb()        CF
#define wmb()       CF
#define rmb()       CF
#define smp_mb()    CF
#define smp_wmb()   CF
#define smp_rmb()   CF

#endif // !ASSEMBLY