/*
 * Copyright (C) 2015-2017 Alibaba Group Holding Limited
 */

#include "k_api.h"

kstat_t mutex_create(kmutex_t *mutex, const name_t *name, uint8_t mm_alloc_flag)
{
#if (RHINO_CONFIG_KOBJ_LIST > 0)
    CPSR_ALLOC();
#endif

    NULL_PARA_CHK(mutex);
    NULL_PARA_CHK(name);

    memset(mutex, 0, sizeof(kmutex_t));

    /* init the list */
    klist_init(&mutex->blk_obj.blk_list);
    mutex->blk_obj.blk_policy = BLK_POLICY_PRI;
    mutex->blk_obj.name       = name;
    mutex->mutex_task         = NULL;
    mutex->mutex_list         = NULL;
    mutex->mm_alloc_flag      = mm_alloc_flag;
#if (RHINO_CONFIG_TASK_DEL > 0)
    mutex->blk_obj.cancel     = 0u;
#endif

#if (RHINO_CONFIG_KOBJ_LIST > 0)
    RHINO_CRITICAL_ENTER();
    klist_insert(&(g_kobj_list.mutex_head), &mutex->mutex_item);
    RHINO_CRITICAL_EXIT();
#endif

    mutex->blk_obj.obj_type = RHINO_MUTEX_OBJ_TYPE;

    TRACE_MUTEX_CREATE(krhino_cur_task_get(), mutex, name);

    return RHINO_SUCCESS;
}

kstat_t krhino_mutex_create(kmutex_t *mutex, const name_t *name)
{
    return mutex_create(mutex, name, K_OBJ_STATIC_ALLOC);
}
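
/*
 * Illustrative usage only (not part of the kernel): a minimal sketch of
 * creating a statically allocated mutex and protecting a shared counter
 * from a task context. demo_mutex, shared_counter and demo_task are
 * hypothetical names, not kernel APIs.
 *
 *     static kmutex_t demo_mutex;
 *     static uint32_t shared_counter;
 *
 *     void demo_task(void *arg)
 *     {
 *         krhino_mutex_create(&demo_mutex, "demo_mutex");
 *
 *         if (krhino_mutex_lock(&demo_mutex, RHINO_WAIT_FOREVER) == RHINO_SUCCESS) {
 *             shared_counter++;                  // critical section
 *             krhino_mutex_unlock(&demo_mutex);
 *         }
 *     }
 */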

static void mutex_release(ktask_t *task, kmutex_t *mutex_rel)
{
    uint8_t new_pri;

    /* find suitable task prio */
    new_pri = mutex_pri_look(task, mutex_rel);
    if (new_pri != task->prio) {
        /* change prio */
        task_pri_change(task, new_pri);

        TRACE_MUTEX_RELEASE(g_active_task[cpu_cur_get()], task, new_pri);
    }
}

kstat_t krhino_mutex_del(kmutex_t *mutex)
{
    CPSR_ALLOC();
    klist_t *blk_list_head;

    if (mutex == NULL) {
        return RHINO_NULL_PTR;
    }

    RHINO_CRITICAL_ENTER();

    INTRPT_NESTED_LEVEL_CHK();

    if (mutex->blk_obj.obj_type != RHINO_MUTEX_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    if (mutex->mm_alloc_flag != K_OBJ_STATIC_ALLOC) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_DEL_ERR;
    }

    blk_list_head = &mutex->blk_obj.blk_list;

    mutex->blk_obj.obj_type = RHINO_OBJ_TYPE_NONE;

    if (mutex->mutex_task != NULL) {
        mutex_release(mutex->mutex_task, mutex);
    }

    /* all tasks blocked on this mutex are woken up */
    while (!is_klist_empty(blk_list_head)) {
        pend_task_rm(krhino_list_entry(blk_list_head->next, ktask_t, task_list));
    }

#if (RHINO_CONFIG_KOBJ_LIST > 0)
    klist_rm(&mutex->mutex_item);
#endif

    TRACE_MUTEX_DEL(g_active_task[cpu_cur_get()], mutex);

    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}

#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
kstat_t krhino_mutex_dyn_create(kmutex_t **mutex, const name_t *name)
{
    kstat_t   stat;
    kmutex_t *mutex_obj;

    if (mutex == NULL) {
        return RHINO_NULL_PTR;
    }

    mutex_obj = krhino_mm_alloc(sizeof(kmutex_t));
    if (mutex_obj == NULL) {
        return RHINO_NO_MEM;
    }

    stat = mutex_create(mutex_obj, name, K_OBJ_DYN_ALLOC);
    if (stat != RHINO_SUCCESS) {
        krhino_mm_free(mutex_obj);
        return stat;
    }

    *mutex = mutex_obj;

    return stat;
}
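
/*
 * Illustrative usage only: a minimal sketch of dynamic mutex creation and
 * deletion, assuming RHINO_CONFIG_KOBJ_DYN_ALLOC is enabled. dyn_mutex is a
 * hypothetical name.
 *
 *     kmutex_t *dyn_mutex = NULL;
 *
 *     if (krhino_mutex_dyn_create(&dyn_mutex, "dyn_mutex") == RHINO_SUCCESS) {
 *         krhino_mutex_lock(dyn_mutex, RHINO_WAIT_FOREVER);
 *         // critical section protected by dyn_mutex
 *         krhino_mutex_unlock(dyn_mutex);
 *
 *         // frees the kmutex_t allocated by krhino_mutex_dyn_create()
 *         krhino_mutex_dyn_del(dyn_mutex);
 *     }
 */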

kstat_t krhino_mutex_dyn_del(kmutex_t *mutex)
{
    CPSR_ALLOC();
    klist_t *blk_list_head;

    if (mutex == NULL) {
        return RHINO_NULL_PTR;
    }

    RHINO_CRITICAL_ENTER();

    INTRPT_NESTED_LEVEL_CHK();

    if (mutex->blk_obj.obj_type != RHINO_MUTEX_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    if (mutex->mm_alloc_flag != K_OBJ_DYN_ALLOC) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_DEL_ERR;
    }

    blk_list_head = &mutex->blk_obj.blk_list;

    mutex->blk_obj.obj_type = RHINO_OBJ_TYPE_NONE;

    if (mutex->mutex_task != NULL) {
        mutex_release(mutex->mutex_task, mutex);
    }

    /* all tasks blocked on this mutex are woken up */
    while (!is_klist_empty(blk_list_head)) {
        pend_task_rm(krhino_list_entry(blk_list_head->next, ktask_t, task_list));
    }

#if (RHINO_CONFIG_KOBJ_LIST > 0)
    klist_rm(&mutex->mutex_item);
#endif

    TRACE_MUTEX_DEL(g_active_task[cpu_cur_get()], mutex);

    RHINO_CRITICAL_EXIT_SCHED();

    krhino_mm_free(mutex);

    return RHINO_SUCCESS;
}
#endif

uint8_t mutex_pri_limit(ktask_t *task, uint8_t pri)
{
#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
    kmutex_t *mutex_tmp;
    uint8_t   high_pri;
    ktask_t  *first_blk_task;
    klist_t  *blk_list_head;

    high_pri = pri;

    for (mutex_tmp = task->mutex_list; mutex_tmp != NULL;
         mutex_tmp = mutex_tmp->mutex_list) {
        blk_list_head = &mutex_tmp->blk_obj.blk_list;

        if (!is_klist_empty(blk_list_head)) {
            first_blk_task = krhino_list_entry(blk_list_head->next, ktask_t, task_list);
            pri = first_blk_task->prio;
        }

        /* the task's prio can not be set lower than the highest prio among
           tasks blocked on the mutexes it holds */
        if (pri < high_pri) {
            high_pri = pri;
        }
    }

    return high_pri;
#else
    return pri;
#endif
}

uint8_t mutex_pri_look(ktask_t *task, kmutex_t *mutex_rel)
{
#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
    kmutex_t  *mutex_tmp;
    kmutex_t **prev;
    uint8_t    new_pri;
    uint8_t    pri;
    ktask_t   *first_blk_task;
    klist_t   *blk_list_head;

    /* the base prio of the task */
    new_pri = task->b_prio;

    /* the highest prio among tasks blocked on the mutexes the task still holds */
    pri  = new_pri;
    prev = &task->mutex_list;

    while ((mutex_tmp = *prev) != NULL) {
        if (mutex_tmp == mutex_rel) {
            /* remove the released mutex from the list and link the previous
               entry to the next one */
            *prev = mutex_tmp->mutex_list;
            continue;
        }

        blk_list_head = &mutex_tmp->blk_obj.blk_list;
        if (!is_klist_empty(blk_list_head)) {
            first_blk_task = krhino_list_entry(blk_list_head->next, ktask_t, task_list);
            pri = first_blk_task->prio;
        }

        if (new_pri > pri) {
            new_pri = pri;
        }

        prev = &mutex_tmp->mutex_list;
    }

    return new_pri;
#else
    return task->b_prio;
#endif
}
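
/*
 * Worked example of the inheritance bookkeeping above (illustrative; the
 * task and mutex names are hypothetical): task A has base prio 30 and holds
 * mutexes M1 and M2; task B (prio 20) is blocked on M1 and task C (prio 10)
 * is blocked on M2, so A has inherited prio 10. When A releases M2,
 * mutex_pri_look(A, M2) unlinks M2 from A's mutex_list, skips its waiters,
 * and returns 20, the highest prio still blocked on a mutex A holds. A's
 * prio value is therefore set to 20 (B's prio), not back to its base 30.
 */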

void mutex_task_pri_reset(ktask_t *task)
{
#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
    kmutex_t *mutex_tmp;
    ktask_t  *mutex_task;

    if (task->blk_obj->obj_type == RHINO_MUTEX_OBJ_TYPE) {
        mutex_tmp  = (kmutex_t *)(task->blk_obj);
        mutex_task = mutex_tmp->mutex_task;

        /* the highest prio among tasks still blocked on this mutex may now be
           lower than before, so recalculate the owner's prio */
        if (mutex_task->prio == task->prio) {
            mutex_release(mutex_task, NULL);
        }
    }
#endif
}

kstat_t krhino_mutex_lock(kmutex_t *mutex, tick_t ticks)
{
    CPSR_ALLOC();
    kstat_t  ret;
    ktask_t *mutex_task;
    uint8_t  cur_cpu_num;

    NULL_PARA_CHK(mutex);

    if (g_sys_stat == RHINO_STOPPED) {
        return RHINO_SUCCESS;
    }

    RHINO_CRITICAL_ENTER();

    cur_cpu_num = cpu_cur_get();
    TASK_CANCEL_CHK(mutex);

    INTRPT_NESTED_LEVEL_CHK();

    if (mutex->blk_obj.obj_type != RHINO_MUTEX_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    /* if the owner task locks the same mutex again, the lock is nested */
    if (g_active_task[cur_cpu_num] == mutex->mutex_task) {
        if (mutex->owner_nested == (mutex_nested_t)-1) {
            /* fatal error: the nesting counter overflowed, the system must be stopped */
            k_err_proc(RHINO_MUTEX_NESTED_OVF);
            RHINO_CRITICAL_EXIT();
            return RHINO_MUTEX_NESTED_OVF;
        } else {
            mutex->owner_nested++;
        }

        RHINO_CRITICAL_EXIT();

        return RHINO_MUTEX_OWNER_NESTED;
    }

    mutex_task = mutex->mutex_task;
    if (mutex_task == NULL) {
        /* get the lock */
        mutex->mutex_task = g_active_task[cur_cpu_num];
#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
        mutex->mutex_list = g_active_task[cur_cpu_num]->mutex_list;
        g_active_task[cur_cpu_num]->mutex_list = mutex;
#endif
        mutex->owner_nested = 1u;

        TRACE_MUTEX_GET(g_active_task[cur_cpu_num], mutex, ticks);

        RHINO_CRITICAL_EXIT();

        return RHINO_SUCCESS;
    }

    /* the mutex can not be taken; return immediately if ticks is RHINO_NO_WAIT */
    if (ticks == RHINO_NO_WAIT) {
        RHINO_CRITICAL_EXIT();
        return RHINO_NO_PEND_WAIT;
    }

    /* the scheduler is locked, so the task can not block; return immediately */
    if (g_sched_lock[cur_cpu_num] > 0u) {
        RHINO_CRITICAL_EXIT();
        return RHINO_SCHED_DISABLE;
    }

#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
    /* the current task has a higher prio than the owner and is about to block:
       this is priority inversion, so apply priority inheritance to the owner */
    if (g_active_task[cur_cpu_num]->prio < mutex_task->prio) {
        task_pri_change(mutex_task, g_active_task[cur_cpu_num]->prio);

        TRACE_TASK_PRI_INV(g_active_task[cur_cpu_num], mutex_task);
    }
#endif

    /* in any case, block the current task */
    pend_to_blk_obj(&mutex->blk_obj, g_active_task[cur_cpu_num], ticks);

    TRACE_MUTEX_GET_BLK(g_active_task[cur_cpu_num], mutex, ticks);

    RHINO_CRITICAL_EXIT_SCHED();

    RHINO_CPU_INTRPT_DISABLE();

    /* the task has been woken up; determine the reason for the wakeup */
    ret = pend_state_end_proc(g_active_task[cpu_cur_get()], &mutex->blk_obj);

    RHINO_CPU_INTRPT_ENABLE();

    return ret;
}
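
/*
 * Illustrative usage only: a minimal sketch of taking the mutex with a
 * bounded wait and handling the main return codes. krhino_ms_to_ticks() is
 * assumed to be available for the timeout conversion; demo_mutex is a
 * hypothetical name.
 *
 *     kstat_t ret = krhino_mutex_lock(&demo_mutex, krhino_ms_to_ticks(100));
 *
 *     switch (ret) {
 *     case RHINO_SUCCESS:             // mutex taken, owner_nested is 1
 *     case RHINO_MUTEX_OWNER_NESTED:  // already owned by this task, nesting +1
 *         // critical section
 *         krhino_mutex_unlock(&demo_mutex);
 *         break;
 *     case RHINO_BLK_TIMEOUT:         // the wait expired without getting the mutex
 *     default:
 *         break;
 *     }
 */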

kstat_t krhino_mutex_unlock(kmutex_t *mutex)
{
    CPSR_ALLOC();
    klist_t *blk_list_head;
    ktask_t *task;
    uint8_t  cur_cpu_num;

    NULL_PARA_CHK(mutex);

    if (g_sys_stat == RHINO_STOPPED) {
        return RHINO_SUCCESS;
    }

    RHINO_CRITICAL_ENTER();

    INTRPT_NESTED_LEVEL_CHK();

    if (mutex->blk_obj.obj_type != RHINO_MUTEX_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    cur_cpu_num = cpu_cur_get();

    /* the mutex must be released by its owner */
    if (g_active_task[cur_cpu_num] != mutex->mutex_task) {
        RHINO_CRITICAL_EXIT();
        return RHINO_MUTEX_NOT_RELEASED_BY_OWNER;
    }

    mutex->owner_nested--;

    if (mutex->owner_nested > 0u) {
        RHINO_CRITICAL_EXIT();
        return RHINO_MUTEX_OWNER_NESTED;
    }

    mutex_release(g_active_task[cur_cpu_num], mutex);

    blk_list_head = &mutex->blk_obj.blk_list;

    /* if no task is blocked on this mutex, just return */
    if (is_klist_empty(blk_list_head)) {
        /* no waiting task */
        mutex->mutex_task = NULL;

        TRACE_MUTEX_RELEASE_SUCCESS(g_active_task[cur_cpu_num], mutex);
        RHINO_CRITICAL_EXIT();

        return RHINO_SUCCESS;
    }

    /* there is at least one task blocked on this mutex */
    task = krhino_list_entry(blk_list_head->next, ktask_t, task_list);

    /* wake up the new owner, which is the highest prio task on the list */
    pend_task_wakeup(task);

    TRACE_MUTEX_TASK_WAKE(g_active_task[cur_cpu_num], task, mutex);

    /* hand the mutex over to the woken task */
    mutex->mutex_task = task;
#if (RHINO_CONFIG_MUTEX_INHERIT > 0)
    mutex->mutex_list = task->mutex_list;
    task->mutex_list  = mutex;
#endif
    mutex->owner_nested = 1u;

    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
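
/*
 * Illustrative note on recursive locking: the owner may lock the same mutex
 * again; each extra lock returns RHINO_MUTEX_OWNER_NESTED and increments
 * owner_nested, and each unlock before the last one also returns
 * RHINO_MUTEX_OWNER_NESTED. The mutex is only handed to a waiter (or left
 * free) when owner_nested drops to 0. A minimal sketch, using the
 * hypothetical demo_mutex from the earlier example:
 *
 *     krhino_mutex_lock(&demo_mutex, RHINO_WAIT_FOREVER);   // RHINO_SUCCESS
 *     krhino_mutex_lock(&demo_mutex, RHINO_WAIT_FOREVER);   // RHINO_MUTEX_OWNER_NESTED
 *     krhino_mutex_unlock(&demo_mutex);                     // RHINO_MUTEX_OWNER_NESTED
 *     krhino_mutex_unlock(&demo_mutex);                     // RHINO_SUCCESS, mutex released
 */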