/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef SPINLOCK_H
#define SPINLOCK_H

#ifndef ASSEMBLER

#include <types.h>
#include <rtl.h>

/**
 * The architecture-dependent spinlock type: a FIFO ticket (queue) lock.
 * 'head' holds the next ticket number to hand out; 'tail' holds the ticket
 * number currently being served. The lock is free when head == tail.
 */
typedef struct _spinlock {
    uint32_t head;    /**< next ticket number to be taken */
    uint32_t tail;    /**< ticket number that currently owns the lock */
} spinlock_t;

/* Inline spinlock operations */
static inline void spinlock_init(spinlock_t *lock)
{
    (void)memset(lock, 0U, sizeof(spinlock_t));
}
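
/* Note: zeroing leaves head == tail, so the lock starts out free. A minimal
 * (hypothetical) usage sketch follows; the lock name is an example, not part
 * of this header:
 *
 *     static spinlock_t example_lock;
 *
 *     spinlock_init(&example_lock);
 *     spinlock_obtain(&example_lock);
 *     ... critical section ...
 *     spinlock_release(&example_lock);
 */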

static inline void spinlock_obtain(spinlock_t *lock)
{
    /* The lock function atomically increments and exchanges the head
     * counter of the queue. If the old head of the queue is equal to the
     * tail, we have locked the spinlock. Otherwise we have to wait.
     */
    asm volatile ("   movl $0x1,%%eax\n"
                  "   lock xaddl %%eax,%[head]\n"
                  "   cmpl %%eax,%[tail]\n"
                  "   jz 1f\n"
                  "2: pause\n"
                  "   cmpl %%eax,%[tail]\n"
                  "   jnz 2b\n"
                  "1:\n"
                  :
                  : [head] "m"(lock->head),
                    [tail] "m"(lock->tail)
                  : "cc", "memory", "eax");
}
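
/* For reference, a rough C equivalent of the acquire path above, sketched
 * with GCC atomic builtins. This sketch is illustrative only and is not part
 * of the header; the hypervisor uses the hand-written assembly:
 *
 *     uint32_t ticket = __atomic_fetch_add(&lock->head, 1U, __ATOMIC_ACQUIRE);
 *     while (__atomic_load_n(&lock->tail, __ATOMIC_ACQUIRE) != ticket) {
 *         __builtin_ia32_pause();   // same CPU hint as the 'pause' instruction
 *     }
 */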

static inline void spinlock_release(spinlock_t *lock)
{
    /* Increment the tail of the queue, handing the lock to the holder of
     * the next ticket (if any).
     */
    asm volatile ("   lock incl %[tail]\n"
                  :
                  : [tail] "m" (lock->tail)
                  : "cc", "memory");
}

#else /* ASSEMBLER */

/** The offset of the head element. Must match the layout of spinlock_t. */
#define SYNC_SPINLOCK_HEAD_OFFSET    0

/** The offset of the tail element. Must match the layout of spinlock_t. */
#define SYNC_SPINLOCK_TAIL_OFFSET    4

.macro spinlock_obtain lock
    movl $1, %eax
    lea \lock, %rbx
    lock xaddl %eax, SYNC_SPINLOCK_HEAD_OFFSET(%rbx)
    cmpl %eax, SYNC_SPINLOCK_TAIL_OFFSET(%rbx)
    jz 1f
2:
    pause
    cmpl %eax, SYNC_SPINLOCK_TAIL_OFFSET(%rbx)
    jnz 2b
1:
.endm

#define spinlock_obtain(x) spinlock_obtain lock = (x)

.macro spinlock_release lock
    lea \lock, %rbx
    lock incl SYNC_SPINLOCK_TAIL_OFFSET(%rbx)
.endm

#define spinlock_release(x) spinlock_release lock = (x)

#endif /* ASSEMBLER */

#define spinlock_irqsave_obtain(lock, p_rflags)     \
    do {                                            \
        CPU_INT_ALL_DISABLE(p_rflags);              \
        spinlock_obtain(lock);                      \
    } while (0)

#define spinlock_irqrestore_release(lock, rflags)   \
    do {                                            \
        spinlock_release(lock);                     \
        CPU_INT_ALL_RESTORE(rflags);                \
    } while (0)
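
/* Typical pairing of the IRQ-safe variants (illustrative sketch; 'rflags'
 * and 'dev_lock' are hypothetical names, and CPU_INT_ALL_DISABLE /
 * CPU_INT_ALL_RESTORE are expected to come from the CPU header used
 * alongside this file):
 *
 *     uint64_t rflags;
 *
 *     spinlock_irqsave_obtain(&dev_lock, &rflags);
 *     ... critical section, protected against local interrupts ...
 *     spinlock_irqrestore_release(&dev_lock, rflags);
 */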

#endif /* SPINLOCK_H */