#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#define _raw_read_unlock(l) \
    BUILD_BUG_ON(sizeof((l)->lock) != 4); /* Clang doesn't support %z in asm. */ \
    asm volatile ( "lock; decl %0" : "+m" ((l)->lock) :: "memory" )
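
/*
 * Illustrative expansion (a sketch only, not part of the build): for a
 * hypothetical 'rwlock_t my_lock' whose 32-bit 'lock' field holds the
 * reader count,
 *
 *     _raw_read_unlock(&my_lock);
 *
 * expands to roughly
 *
 *     BUILD_BUG_ON(sizeof(my_lock.lock) != 4);
 *     asm volatile ( "lock; decl %0"
 *                    : "+m" (my_lock.lock) :: "memory" );
 *
 * i.e. an atomic decrement of the reader count with a compiler-level
 * "memory" clobber, so accesses inside the read-side critical section
 * cannot be sunk past the unlock.
 */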

/*
 * On x86 the only reordering is of reads with older writes.  In the
 * lock case, the read in observe_head() can only be reordered with
 * writes that precede it, and moving a write _into_ a locked section
 * is OK.  In the release case, the write in add_sized() can only be
 * reordered with reads that follow it, and hoisting a read _into_ a
 * locked region is OK.
 */
#define arch_lock_acquire_barrier() barrier()
#define arch_lock_release_barrier() barrier()
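
/*
 * Sketch of how these barriers are assumed to be used by the generic
 * ticket-lock code (simplified pseudo-code; observe_head() and
 * add_sized() are the helpers named in the comment above, while
 * 'my_ticket' and the 'tickets' fields are illustrative):
 *
 *     // Acquire: spin until our ticket reaches the head.  A compiler
 *     // barrier suffices because the only hardware reordering would
 *     // move an older write past the read in observe_head(), i.e.
 *     // into the critical section, which is harmless.
 *     while ( observe_head(&lock->tickets) != my_ticket )
 *         arch_lock_relax();
 *     arch_lock_acquire_barrier();
 *
 *     // Release: a compiler barrier suffices because the write in
 *     // add_sized() can only be passed by a later read, i.e. that
 *     // read is hoisted into the critical section, which is harmless.
 *     arch_lock_release_barrier();
 *     add_sized(&lock->tickets.head, 1);
 *     arch_lock_signal();
 */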

/*
 * Spin-wait hooks: arch_lock_relax() is the body of the busy-wait loop
 * (cpu_relax(), i.e. the PAUSE hint, on x86), arch_lock_signal() is the
 * post-release wakeup (a no-op here, since x86 waiters simply poll),
 * and arch_lock_signal_wmb() orders prior stores with smp_wmb() before
 * signalling.
 */
#define arch_lock_relax() cpu_relax()
#define arch_lock_signal()
#define arch_lock_signal_wmb()      \
({                                  \
    smp_wmb();                      \
    arch_lock_signal();             \
})

#endif /* __ASM_SPINLOCK_H */