/*
 * Copyright (C) 2015-2018 Alibaba Group Holding Limited
 */

#include "k_api.h"
#include "k_spin_lock.h"

#if (RHINO_CONFIG_CPU_NUM > 1)

/* not used on the linuxhost platform */

/* Debug logging is compiled out by default: the trailing // comment
   empties the macro body. Restore the printf to enable it. */
#define DBG_PRINTF(...) //printf(__VA_ARGS__)

#define KRHINO_SPINLOCK_FREE_VAL    0xB33FFFFFu
#define KRHINO_SPINLOCK_MAGIC_VAL   0xB33F0000u
#define KRHINO_SPINLOCK_MAGIC_MASK  0xFFFF0000u
#define KRHINO_SPINLOCK_MAGIC_SHIFT 16
#define KRHINO_SPINLOCK_CNT_MASK    0x0000FF00u
#define KRHINO_SPINLOCK_CNT_SHIFT   8
#define KRHINO_SPINLOCK_VAL_MASK    0x000000FFu
#define KRHINO_SPINLOCK_VAL_SHIFT   0
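
/*
 * Layout of the lock->owner word implied by the masks above (comment
 * added for clarity):
 *
 *   bits 31..16  magic pattern 0xB33F, marking an initialized lock
 *   bits 15..8   recursion count held by the owning core
 *   bits  7..0   id of the owning core (cpu_cur_get())
 *
 * KRHINO_SPINLOCK_FREE_VAL sets every count/value bit, which can never
 * match a real core id, so it unambiguously means "unlocked".
 */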

#ifdef RHINO_CONFIG_SPINLOCK_DEBUG
void k_cpu_spin_lock(kspinlock_t *lock, const char *fnName, int32_t line)
#else
void k_cpu_spin_lock(kspinlock_t *lock)
#endif
{
    uint32_t res;
    uint32_t recCnt;
    uint32_t cnt = (1 << 16);

    /* Lazily initialize the lock if the magic pattern is missing. */
    if ((lock->owner & KRHINO_SPINLOCK_MAGIC_MASK) != KRHINO_SPINLOCK_MAGIC_VAL) {
        lock->owner = KRHINO_SPINLOCK_FREE_VAL;
    }

    do {
        /* Lock mux if it's currently unlocked */
        res = (cpu_cur_get() << KRHINO_SPINLOCK_VAL_SHIFT) | KRHINO_SPINLOCK_MAGIC_VAL;
        cpu_atomic_compare_set(&lock->owner, KRHINO_SPINLOCK_FREE_VAL, &res);
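
        /*
         * Note (semantics inferred from the call sites in this file):
         * cpu_atomic_compare_set() atomically compares lock->owner with the
         * expected free value and, on a match, stores the new owner word;
         * either way the previously observed value comes back through &res,
         * so res == KRHINO_SPINLOCK_FREE_VAL means we just took the lock.
         */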

        /* If it wasn't free and we're the owner of the lock, we are locking recursively. */
        if ((res != KRHINO_SPINLOCK_FREE_VAL) &&
            (((res & KRHINO_SPINLOCK_VAL_MASK) >> KRHINO_SPINLOCK_VAL_SHIFT) == cpu_cur_get())) {
            /* Mux was already locked by us. Just increase count by one. */
            recCnt = (res & KRHINO_SPINLOCK_CNT_MASK) >> KRHINO_SPINLOCK_CNT_SHIFT;
            recCnt++;

#ifdef RHINO_CONFIG_SPINLOCK_DEBUG
            /* DBG_PRINTF("Recursive lock: recCnt=%d last non-recursive lock %s line %d, curr %s line %d\n",
                          recCnt, lock->last_lockfile, lock->last_lockline, fnName, line); */
#endif
            lock->owner = KRHINO_SPINLOCK_MAGIC_VAL | (recCnt << KRHINO_SPINLOCK_CNT_SHIFT) |
                          (cpu_cur_get() << KRHINO_SPINLOCK_VAL_SHIFT);
            break;
        }

        cnt--;

        if (cnt == 0) {
#ifdef RHINO_CONFIG_SPINLOCK_DEBUG
            /* DBG_PRINTF("Error! Timeout on mux! last non-recursive lock %s line %d, curr %s line %d\n",
                          lock->last_lockfile, lock->last_lockline, fnName, line); */
#endif
            DBG_PRINTF("Error! Timeout on mux! lock value %X, cpu_cur_get():%d\r\n",
                       lock->owner, cpu_cur_get());
            /* Re-arm the counter so the warning repeats periodically rather
               than only after the unsigned counter wraps around. */
            cnt = (1 << 16);
        }
    } while (res != KRHINO_SPINLOCK_FREE_VAL);

#ifdef RHINO_CONFIG_SPINLOCK_DEBUG
    if (res == KRHINO_SPINLOCK_FREE_VAL) {
        lock->last_lockfile = fnName;
        lock->last_lockline = line;
    }
#endif
}

#ifdef RHINO_CONFIG_SPINLOCK_DEBUG
void k_cpu_spin_unlock(kspinlock_t *lock, const char *fnName, int32_t line)
#else
void k_cpu_spin_unlock(kspinlock_t *lock)
#endif
{
    uint32_t res = 0;
    uint32_t recCnt;
#ifdef RHINO_CONFIG_SPINLOCK_DEBUG
    const char *lastLockedFn = lock->last_lockfile;
    int lastLockedLine = lock->last_lockline;

    lock->last_lockfile = fnName;
    lock->last_lockline = line;

    /* The saved values are only referenced by disabled debug prints;
       keep the compiler quiet about the unused locals. */
    (void)lastLockedFn;
    (void)lastLockedLine;
#endif

    if ((lock->owner & KRHINO_SPINLOCK_MAGIC_MASK) != KRHINO_SPINLOCK_MAGIC_VAL) {
        DBG_PRINTF("ERROR: k_cpu_spin_unlock: spinlock %p is uninitialized (0x%X)!\n",
                   lock, lock->owner);
    }

    /* Unlock if it's currently locked with a recursion count of 0. */
    res = KRHINO_SPINLOCK_FREE_VAL;
    cpu_atomic_compare_set(&lock->owner,
                           (cpu_cur_get() << KRHINO_SPINLOCK_VAL_SHIFT) | KRHINO_SPINLOCK_MAGIC_VAL,
                           &res);

    if (((res & KRHINO_SPINLOCK_VAL_MASK) >> KRHINO_SPINLOCK_VAL_SHIFT) == cpu_cur_get()) {
        if (((res & KRHINO_SPINLOCK_CNT_MASK) >> KRHINO_SPINLOCK_CNT_SHIFT) != 0) {
            /* We locked this, but the recursion count isn't zero. Decrement it and continue. */
            recCnt = (res & KRHINO_SPINLOCK_CNT_MASK) >> KRHINO_SPINLOCK_CNT_SHIFT;
            recCnt--;

            lock->owner = KRHINO_SPINLOCK_MAGIC_VAL | (recCnt << KRHINO_SPINLOCK_CNT_SHIFT) |
                          (cpu_cur_get() << KRHINO_SPINLOCK_VAL_SHIFT);
        }
    } else if (res == KRHINO_SPINLOCK_FREE_VAL) {
        DBG_PRINTF("ERROR: k_cpu_spin_unlock: lock %p was already unlocked!\n", lock);
    } else {
        DBG_PRINTF("ERROR: k_cpu_spin_unlock: lock %p wasn't locked by this core (%d) "
                   "but by core %d (ret=%x, lock=%x).\n", lock, cpu_cur_get(),
                   ((res & KRHINO_SPINLOCK_VAL_MASK) >> KRHINO_SPINLOCK_VAL_SHIFT),
                   res, lock->owner);
    }
    return;
}
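
/*
 * Usage sketch (illustrative only, not part of this file; assumes a
 * kspinlock_t whose owner word starts at the free value):
 *
 *     kspinlock_t lock = { .owner = KRHINO_SPINLOCK_FREE_VAL };
 *
 *     k_cpu_spin_lock(&lock);      spins until this core owns the lock
 *     k_cpu_spin_lock(&lock);      same core again: recursion count -> 1
 *     k_cpu_spin_unlock(&lock);    recursion count -> 0, lock still held
 *     k_cpu_spin_unlock(&lock);    owner word returns to FREE_VAL
 *
 * With RHINO_CONFIG_SPINLOCK_DEBUG defined, each call also takes the
 * caller's function name and line number for diagnostics.
 */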

extern volatile uint64_t g_cpu_flag;

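/*
 * The expected values below (2, 6, 14) are the bitmasks with bits
 * 1..CPU_NUM-1 set, which suggests each secondary core (core 0 being the
 * boot core) sets its own bit in g_cpu_flag once it is up; this is an
 * inference from the values, not documented here.
 */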
void k_wait_allcores(void)
{
    uint8_t loop = 1;

    while (loop) {
        switch (RHINO_CONFIG_CPU_NUM) {
            case 2:
                if (g_cpu_flag == 2u) {
                    loop = 0;
                }
                break;
            case 3:
                if (g_cpu_flag == 6u) {
                    loop = 0;
                }
                break;
            case 4:
                if (g_cpu_flag == 14u) {
                    loop = 0;
                }
                break;
            default:
                DBG_PRINTF("too many cpus!!!\n");
                break;
        }
    }
}
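
/*
 * A possible generalization (a sketch, not the shipped code): for N cores
 * the expected flag is ((1 << N) - 2), i.e. bits 1..N-1 set, which
 * reproduces 2, 6 and 14 for N = 2, 3 and 4 without the switch:
 *
 *     while (g_cpu_flag != ((((uint64_t)1u) << RHINO_CONFIG_CPU_NUM) - 2u)) {
 *         ;
 *     }
 */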

#endif /* RHINO_CONFIG_CPU_NUM > 1 */