/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-03-14     WangShun     first version
 */

#include <rtthread.h>

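/* Atomically swap the value at *ptr with val and return the previous value. */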
rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

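/* Atomically add val to *ptr and return the previous value. */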
rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

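/*
 * Atomically subtract val from *ptr and return the previous value.
 * The RISC-V A extension has no amosub, so the negated value is added instead.
 */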
rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
    val = -val;
#if __riscv_xlen == 32
    asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

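/* Atomically XOR *ptr with val and return the previous value. */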
rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoxor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoxor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

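/* Atomically AND *ptr with val and return the previous value. */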
rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoand.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoand.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

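/* Atomically OR *ptr with val and return the previous value. */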
rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
    return result;
}

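/* Atomically read *ptr: amoxor with x0 leaves the stored value unchanged and returns it. */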
rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoxor.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoxor.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#endif
    return result;
}

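/* Atomically write val to *ptr; the previous value swapped out by amoswap is discarded. */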
void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
}

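/*
 * Atomically set the flag at *ptr (OR with 1) and return the previous value;
 * a non-zero return means the flag was already set.
 */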
rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result = 0;
    rt_atomic_t temp = 1;
#if __riscv_xlen == 32
    asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
#endif
    return result;
}

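/* Atomically clear the flag at *ptr by AND-ing it with x0 (zero). */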
void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
{
    rt_atomic_t result = 0;
#if __riscv_xlen == 32
    asm volatile ("amoand.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#elif __riscv_xlen == 64
    asm volatile ("amoand.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#endif
}

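/*
 * Compare-and-exchange built on an LR/SC loop: if *ptr equals *old, store
 * desired to *ptr and return 1; otherwise write the observed value back to
 * *old and return 0.
 */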
rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t desired)
{
    rt_atomic_t tmp = *old;
    rt_atomic_t result = 0;
    rt_atomic_t scratch = 0; /* separate SC status register so tmp keeps the expected value across retries */
#if __riscv_xlen == 32
    asm volatile(
            "   fence iorw, ow\n"
            "1: lr.w.aq %[result], (%[ptr])\n"              /* load the current value */
            "   bne     %[result], %[tmp], 2f\n"            /* mismatch with expected: fail */
            "   sc.w.rl %[scratch], %[desired], (%[ptr])\n" /* try to store desired */
            "   bnez    %[scratch], 1b\n"                   /* store-conditional failed: retry */
            "   li      %[result], 1\n"                     /* success */
            "   j       3f\n"
            "2: sw      %[result], (%[old])\n"              /* report the observed value */
            "   li      %[result], 0\n"                     /* failure */
            "3:\n"
            : [result]"=&r" (result), [scratch]"=&r" (scratch)
            : [ptr]"r" (ptr), [tmp]"r" (tmp), [desired]"r" (desired), [old]"r" (old)
            : "memory");
#elif __riscv_xlen == 64
    asm volatile(
            "   fence iorw, ow\n"
            "1: lr.d.aq %[result], (%[ptr])\n"              /* load the current value */
            "   bne     %[result], %[tmp], 2f\n"            /* mismatch with expected: fail */
            "   sc.d.rl %[scratch], %[desired], (%[ptr])\n" /* try to store desired */
            "   bnez    %[scratch], 1b\n"                   /* store-conditional failed: retry */
            "   li      %[result], 1\n"                     /* success */
            "   j       3f\n"
            "2: sd      %[result], (%[old])\n"              /* report the observed value */
            "   li      %[result], 0\n"                     /* failure */
            "3:\n"
            : [result]"=&r" (result), [scratch]"=&r" (scratch)
            : [ptr]"r" (ptr), [tmp]"r" (tmp), [desired]"r" (desired), [old]"r" (old)
            : "memory");
#endif
    return result;
}

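/*
 * Usage sketch (illustrative only; ATOMIC_RISCV_USAGE_EXAMPLE is a
 * hypothetical guard, not an RT-Thread option): a lock-free increment built
 * on rt_hw_atomic_compare_exchange_strong(). On failure the expected value is
 * refreshed with the observed value, so the loop retries with current data.
 */
#ifdef ATOMIC_RISCV_USAGE_EXAMPLE
static rt_atomic_t example_counter = 0;

static rt_atomic_t example_atomic_increment(void)
{
    rt_atomic_t expected = rt_hw_atomic_load(&example_counter);

    /* Retry until the value we read is still current when we try to replace it. */
    while (!rt_hw_atomic_compare_exchange_strong(&example_counter, &expected, expected + 1))
    {
        /* expected now holds the value observed in example_counter. */
    }

    return expected + 1; /* the value this call published */
}
#endif /* ATOMIC_RISCV_USAGE_EXAMPLE */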