/*-
 * Copyright (c) 1998 Doug Rabson
 * Copyright (c) 2018-2022 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef ATOMIC_H
#define ATOMIC_H
#include <types.h>

/*
 * LOCK prefix, spliced in front of each instruction below so that its
 * read-modify-write cycle is atomic across CPUs.
 */
#define	BUS_LOCK	"lock ; "

/*
 * Atomic increment: a single LOCK-prefixed INC of the given operand width.
 */
#define build_atomic_inc(name, size, type)		\
static inline void name(type *ptr)			\
{							\
	asm volatile(BUS_LOCK "inc" size " %0"		\
			: "=m" (*ptr)			\
			:  "m" (*ptr));			\
}
build_atomic_inc(atomic_inc16, "w", uint16_t)
build_atomic_inc(atomic_inc32, "l", uint32_t)
build_atomic_inc(atomic_inc64, "q", uint64_t)

/*
 * Atomic decrement: a single LOCK-prefixed DEC of the given operand width.
 */
#define build_atomic_dec(name, size, type)		\
static inline void name(type *ptr)			\
{							\
	asm volatile(BUS_LOCK "dec" size " %0"		\
			: "=m" (*ptr)			\
			:  "m" (*ptr));			\
}
build_atomic_dec(atomic_dec16, "w", uint16_t)
build_atomic_dec(atomic_dec32, "l", uint32_t)
build_atomic_dec(atomic_dec64, "q", uint64_t)
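
/*
 * Illustrative usage sketch, not part of the original header; the counter
 * name is hypothetical.  The inc/dec primitives update memory atomically
 * but return nothing; use the *_return helpers further down when the
 * updated value is needed.
 *
 *	static uint32_t nr_active;
 *	atomic_inc32(&nr_active);	(emits "lock ; incl")
 *	atomic_dec32(&nr_active);	(emits "lock ; decl")
 */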

/*
 * Atomic swap: XCHG stores v into *ptr and returns the value *ptr held
 * before.  (XCHG with a memory operand is locked even without the prefix.)
 */
#define build_atomic_swap(name, size, type)		\
static inline type name(type *ptr, type v)		\
{							\
	asm volatile(BUS_LOCK "xchg" size " %1,%0"	\
			:  "+m" (*ptr), "+r" (v)	\
			:				\
			:  "cc", "memory");		\
	return v;					\
}
build_atomic_swap(atomic_swap32, "l", uint32_t)
build_atomic_swap(atomic_swap64, "q", uint64_t)
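
/*
 * Illustrative usage sketch, not part of the original header; the flag name
 * is hypothetical.  Because the previous value comes back, swap doubles as
 * a test-and-set:
 *
 *	static uint32_t claimed;
 *	if (atomic_swap32(&claimed, 1U) == 0U) {
 *		(we set it first; do the one-time work)
 *	}
 */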

/*
 * #define atomic_readandclear32(P) \
 * (return (*(uint32_t *)(P)); *(uint32_t *)(P) = 0U;)
 */
static inline uint32_t atomic_readandclear32(uint32_t *p)
{
	return atomic_swap32(p, 0U);
}

/*
 * #define atomic_readandclear64(P) \
 * (return (*(uint64_t *)(P)); *(uint64_t *)(P) = 0UL;)
 */
static inline uint64_t atomic_readandclear64(uint64_t *p)
{
	return atomic_swap64(p, 0UL);
}
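
/*
 * Illustrative usage sketch, not part of the original header; the bitmap
 * name is hypothetical.  Swapping in zero reads and clears the word in one
 * atomic step, so a bit set by another CPU in between cannot be lost:
 *
 *	static uint64_t pending_events;
 *	uint64_t events = atomic_readandclear64(&pending_events);
 *	(handle every bit set in events)
 */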

/*
 * Atomic compare-and-exchange: if *ptr == old, store new into *ptr.  Either
 * way, return the value read from *ptr; it equals old exactly when the
 * exchange happened.
 */
#define build_atomic_cmpxchg(name, size, type)			\
static inline type name(volatile type *ptr, type old, type new)	\
{								\
	type ret;						\
	asm volatile(BUS_LOCK "cmpxchg" size " %2,%1"		\
			: "=a" (ret), "+m" (*ptr)		\
			: "r" (new), "0" (old)			\
			: "memory");				\
	return ret;						\
}
build_atomic_cmpxchg(atomic_cmpxchg32, "l", uint32_t)
build_atomic_cmpxchg(atomic_cmpxchg64, "q", uint64_t)
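
/*
 * Illustrative usage sketch, not part of the original header; all names are
 * hypothetical.  The canonical pattern is a read-modify-CAS loop that
 * retries whenever another CPU changed the word in between:
 *
 *	static uint32_t shared_max;
 *
 *	static inline void update_max(uint32_t val)
 *	{
 *		uint32_t old;
 *		do {
 *			old = shared_max;
 *			if (val <= old) {
 *				break;
 *			}
 *		} while (atomic_cmpxchg32(&shared_max, old, val) != old);
 *	}
 */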

/*
 * Atomic exchange-and-add: XADD adds v to *ptr and returns the value *ptr
 * held before the addition.
 */
#define build_atomic_xadd(name, size, type)			\
static inline type name(type *ptr, type v)			\
{								\
	asm volatile(BUS_LOCK "xadd" size " %0,%1"		\
			: "+r" (v), "+m" (*ptr)			\
			:					\
			: "cc", "memory");			\
	return v;						\
}
build_atomic_xadd(atomic_xadd16, "w", uint16_t)
build_atomic_xadd(atomic_xadd32, "l", int32_t)
build_atomic_xadd(atomic_xadd64, "q", int64_t)
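
/*
 * Illustrative usage sketch, not part of the original header; the names are
 * hypothetical.  Because xadd returns the pre-add value, it hands out
 * unique tickets with no retry loop:
 *
 *	static int32_t next_ticket;
 *	int32_t my_ticket = atomic_xadd32(&next_ticket, 1);
 *	(my_ticket differs on every CPU that executes this)
 */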

/*
 * Unlike the xadd primitives above, these helpers return the new value the
 * target holds after the update.
 */
static inline int32_t atomic_add_return(int32_t *p, int32_t v)
{
	return (atomic_xadd32(p, v) + v);
}

static inline int32_t atomic_sub_return(int32_t *p, int32_t v)
{
	return (atomic_xadd32(p, -v) - v);
}

static inline int32_t atomic_inc_return(int32_t *v)
{
	return atomic_add_return(v, 1);
}

static inline int32_t atomic_dec_return(int32_t *v)
{
	return atomic_sub_return(v, 1);
}
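
/*
 * Illustrative usage sketch, not part of the original header; the names are
 * hypothetical.  Returning the post-update value is exactly what a
 * reference count needs to detect the final release:
 *
 *	static int32_t refcnt = 1;
 *	if (atomic_dec_return(&refcnt) == 0) {
 *		(last reference dropped; release the object)
 *	}
 */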

static inline int64_t atomic_add64_return(int64_t *p, int64_t v)
{
	return (atomic_xadd64(p, v) + v);
}

static inline int64_t atomic_sub64_return(int64_t *p, int64_t v)
{
	return (atomic_xadd64(p, -v) - v);
}

static inline int64_t atomic_inc64_return(int64_t *v)
{
	return atomic_add64_return(v, 1);
}

static inline int64_t atomic_dec64_return(int64_t *v)
{
	return atomic_sub64_return(v, 1);
}
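
/*
 * Illustrative usage sketch, not part of the original header; the counter
 * name is hypothetical.  The 64-bit variants suit cumulative counters that
 * must not wrap:
 *
 *	static int64_t total_exits;
 *	int64_t exits_so_far = atomic_inc64_return(&total_exits);
 */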

#endif /* ATOMIC_H */