1 /*
2 * Copyright (C) 2015-2017 Alibaba Group Holding Limited
3 */
4
5 #include "k_api.h"
6
7 #if (RHINO_CONFIG_BUF_QUEUE > 0)
8
buf_queue_create(kbuf_queue_t * queue,const name_t * name,void * buf,size_t size,size_t max_msg,uint8_t mm_alloc_flag,size_t type)9 static kstat_t buf_queue_create(kbuf_queue_t *queue, const name_t *name, void *buf, size_t size,
10 size_t max_msg, uint8_t mm_alloc_flag, size_t type)
11 {
12 #if (RHINO_CONFIG_KOBJ_LIST > 0)
13 CPSR_ALLOC();
14 #endif
15
16 NULL_PARA_CHK(queue);
17 NULL_PARA_CHK(buf);
18 NULL_PARA_CHK(name);
19
20 if (max_msg == 0u) {
21 return RHINO_INV_PARAM;
22 }
23
24 if (size == 0u) {
25 return RHINO_BUF_QUEUE_SIZE_ZERO;
26 }
27
28 memset(queue, 0, sizeof(kbuf_queue_t));
29
30 /* init the queue blocked list */
31 klist_init(&queue->blk_obj.blk_list);
32
33 queue->buf = buf;
34 queue->cur_num = 0u;
35 queue->peak_num = 0u;
36 queue->max_msg_size = max_msg;
37 queue->blk_obj.name = name;
38 queue->blk_obj.blk_policy = BLK_POLICY_PRI;
39 queue->mm_alloc_flag = mm_alloc_flag;
40 #if (RHINO_CONFIG_TASK_DEL > 0)
41 queue->blk_obj.cancel = 1u;
42 #endif
43
44 #if (RHINO_CONFIG_KOBJ_LIST > 0)
45 RHINO_CRITICAL_ENTER();
46 klist_insert(&(g_kobj_list.buf_queue_head), &queue->buf_queue_item);
47 RHINO_CRITICAL_EXIT();
48 #endif
49
50 queue->blk_obj.obj_type = RHINO_BUF_QUEUE_OBJ_TYPE;
51
52 ringbuf_init(&(queue->ringbuf), buf, size, type, max_msg);
53 queue->min_free_buf_size = queue->ringbuf.freesize;
54 TRACE_BUF_QUEUE_CREATE(krhino_cur_task_get(), queue);
55
56 return RHINO_SUCCESS;
57 }
58
krhino_buf_queue_create(kbuf_queue_t * queue,const name_t * name,void * buf,size_t size,size_t max_msg)59 kstat_t krhino_buf_queue_create(kbuf_queue_t *queue, const name_t *name,
60 void *buf, size_t size, size_t max_msg)
61 {
62 return buf_queue_create(queue, name, buf, size, max_msg, K_OBJ_STATIC_ALLOC, RINGBUF_TYPE_DYN);
63 }
64
krhino_fix_buf_queue_create(kbuf_queue_t * queue,const name_t * name,void * buf,size_t msg_size,size_t msg_num)65 kstat_t krhino_fix_buf_queue_create(kbuf_queue_t *queue, const name_t *name,
66 void *buf, size_t msg_size, size_t msg_num)
67 {
68 return buf_queue_create(queue, name, buf, msg_size * msg_num, msg_size, K_OBJ_STATIC_ALLOC, RINGBUF_TYPE_FIX);
69 }
70
/* Delete a statically created buf queue, waking every task pending on it. */
kstat_t krhino_buf_queue_del(kbuf_queue_t *queue)
{
    CPSR_ALLOC();
    klist_t *head;

    NULL_PARA_CHK(queue);

    RHINO_CRITICAL_ENTER();

    /* deleting a kernel object from interrupt context is not allowed */
    INTRPT_NESTED_LEVEL_CHK();

    if (queue->blk_obj.obj_type != RHINO_BUF_QUEUE_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    /* this API handles only statically allocated queues; dynamically
       allocated ones must go through krhino_buf_queue_dyn_del() */
    if (queue->mm_alloc_flag != K_OBJ_STATIC_ALLOC) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_DEL_ERR;
    }

    head = &queue->blk_obj.blk_list;

    /* invalidate the object first so concurrent users get a type error */
    queue->blk_obj.obj_type = RHINO_OBJ_TYPE_NONE;

    /* all task blocked on this queue is waken up */
    while (!is_klist_empty(head)) {
        pend_task_rm(krhino_list_entry(head->next, ktask_t, task_list));
    }

#if (RHINO_CONFIG_KOBJ_LIST > 0)
    klist_rm(&queue->buf_queue_item);
#endif

    ringbuf_reset(&queue->ringbuf);

    /* woken tasks may outrank the caller: exit with a reschedule */
    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
111
112 #if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
buf_queue_dyn_create(kbuf_queue_t ** queue,const name_t * name,size_t size,size_t max_msg,uint8_t type)113 static kstat_t buf_queue_dyn_create(kbuf_queue_t **queue, const name_t *name,
114 size_t size, size_t max_msg, uint8_t type)
115 {
116 kstat_t stat;
117 kbuf_queue_t *queue_obj;
118
119 NULL_PARA_CHK(queue);
120
121 if (size == 0u) {
122 return RHINO_BUF_QUEUE_SIZE_ZERO;
123 }
124
125 queue_obj = krhino_mm_alloc(sizeof(kbuf_queue_t));
126
127 if (queue_obj == NULL) {
128 return RHINO_NO_MEM;
129 }
130
131 queue_obj->buf = krhino_mm_alloc(size);
132
133 if (queue_obj->buf == NULL) {
134 krhino_mm_free(queue_obj);
135
136 return RHINO_NO_MEM;
137 }
138
139 stat = buf_queue_create(queue_obj, name, queue_obj->buf, size, max_msg,
140 K_OBJ_DYN_ALLOC, type);
141
142 if (stat != RHINO_SUCCESS) {
143 krhino_mm_free(queue_obj->buf);
144 krhino_mm_free(queue_obj);
145
146 return stat;
147 }
148
149 *queue = queue_obj;
150
151 return RHINO_SUCCESS;
152 }
153
krhino_buf_queue_dyn_create(kbuf_queue_t ** queue,const name_t * name,size_t size,size_t max_msg)154 kstat_t krhino_buf_queue_dyn_create(kbuf_queue_t **queue, const name_t *name,
155 size_t size, size_t max_msg)
156 {
157 return buf_queue_dyn_create(queue, name, size, max_msg, RINGBUF_TYPE_DYN);
158 }
159
krhino_fix_buf_queue_dyn_create(kbuf_queue_t ** queue,const name_t * name,size_t msg_size,size_t msg_num)160 kstat_t krhino_fix_buf_queue_dyn_create(kbuf_queue_t **queue, const name_t *name,
161 size_t msg_size, size_t msg_num)
162 {
163 return buf_queue_dyn_create(queue, name, msg_size * msg_num, msg_size, RINGBUF_TYPE_FIX);
164 }
165
/*
 * Delete a dynamically created buf queue: wake all pending tasks, then
 * release both the message buffer and the control block to the heap.
 */
kstat_t krhino_buf_queue_dyn_del(kbuf_queue_t *queue)
{
    CPSR_ALLOC();
    klist_t *head;

    NULL_PARA_CHK(queue);

    RHINO_CRITICAL_ENTER();

    /* deleting a kernel object from interrupt context is not allowed */
    INTRPT_NESTED_LEVEL_CHK();

    if (queue->blk_obj.obj_type != RHINO_BUF_QUEUE_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    /* only queues created via the dyn_create APIs may be freed here */
    if (queue->mm_alloc_flag != K_OBJ_DYN_ALLOC) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_DEL_ERR;
    }

    head = &queue->blk_obj.blk_list;

    /* invalidate the object first so concurrent users get a type error */
    queue->blk_obj.obj_type = RHINO_OBJ_TYPE_NONE;

    /* wake every task blocked on this queue */
    while (!is_klist_empty(head)) {
        pend_task_rm(krhino_list_entry(head->next, ktask_t, task_list));
    }

#if (RHINO_CONFIG_KOBJ_LIST > 0)
    klist_rm(&queue->buf_queue_item);
#endif

    ringbuf_reset(&queue->ringbuf);

    RHINO_CRITICAL_EXIT_SCHED();
    /* memory is released outside the critical section; the object was
       already invalidated above, so no one can legitimately reach it */
    krhino_mm_free(queue->buf);
    krhino_mm_free(queue);

    return RHINO_SUCCESS;
}
207 #endif
208
/*
 * Core send path shared by the public send APIs.
 * If a receiver is already blocked on the queue, the message bypasses
 * the ring buffer and is copied straight into that task's buffer;
 * otherwise it is pushed into the ring buffer.
 */
static kstat_t buf_queue_send(kbuf_queue_t *queue, void *msg, size_t msg_size)
{
    CPSR_ALLOC();
    klist_t *head;
    ktask_t *task;
    kstat_t err;
    uint8_t cur_cpu_num;

    RHINO_CRITICAL_ENTER();

    if (queue->blk_obj.obj_type != RHINO_BUF_QUEUE_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    /* cur_cpu_num is only consumed by the TRACE_* macros; the (void)
       cast silences the unused-variable warning when tracing is off */
    cur_cpu_num = cpu_cur_get();
    (void)cur_cpu_num;

    if (msg_size > queue->max_msg_size) {
        TRACE_BUF_QUEUE_MAX(g_active_task[cur_cpu_num], queue, msg, msg_size);
        RHINO_CRITICAL_EXIT();
        return RHINO_BUF_QUEUE_MSG_SIZE_OVERFLOW;
    }

    if (msg_size == 0) {
        RHINO_CRITICAL_EXIT();
        return RHINO_INV_PARAM;
    }

    head = &queue->blk_obj.blk_list;

    /* buf queue is not full here, if there is no blocked receive task */
    if (is_klist_empty(head)) {

        err = ringbuf_push(&(queue->ringbuf), msg, msg_size);

        if (err != RHINO_SUCCESS) {
            RHINO_CRITICAL_EXIT();
            /* translate the ring buffer error into the queue error space */
            if (err == RHINO_RINGBUF_FULL) {
                err = RHINO_BUF_QUEUE_FULL;
            }
            return err;
        }

        queue->cur_num++;

        /* statistics: high-water mark of buffered messages ... */
        if (queue->peak_num < queue->cur_num) {
            queue->peak_num = queue->cur_num;
        }

        /* ... and low-water mark of free buffer space */
        if (queue->min_free_buf_size > queue->ringbuf.freesize) {
            queue->min_free_buf_size = queue->ringbuf.freesize;
        }

        TRACE_BUF_QUEUE_POST(g_active_task[cur_cpu_num], queue, msg, msg_size);

        RHINO_CRITICAL_EXIT();

        return RHINO_SUCCESS;
    }

    /* a receiver is waiting: copy directly into its buffer (task->msg was
       published by krhino_buf_queue_recv before it blocked).
       NOTE(review): assumes the receiver's buffer can hold up to
       max_msg_size bytes -- not visible here, confirm against callers */
    task = krhino_list_entry(head->next, ktask_t, task_list);
    memcpy(task->msg, msg, msg_size);
    task->bq_msg_size = msg_size;

    pend_task_wakeup(task);

    TRACE_BUF_QUEUE_TASK_WAKE(g_active_task[cur_cpu_num], task, queue);

    /* the woken receiver may preempt: exit with a reschedule */
    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
282
krhino_buf_queue_send(kbuf_queue_t * queue,void * msg,size_t size)283 kstat_t krhino_buf_queue_send(kbuf_queue_t *queue, void *msg, size_t size)
284 {
285 NULL_PARA_CHK(queue);
286 NULL_PARA_CHK(msg);
287
288 return buf_queue_send(queue, msg, size);
289 }
290
/*
 * Receive one message, blocking for up to `ticks` if the queue is empty.
 * On success *size holds the length of the received message; on every
 * failure path it is set to 0.
 */
kstat_t krhino_buf_queue_recv(kbuf_queue_t *queue, tick_t ticks, void *msg, size_t *size)
{
    CPSR_ALLOC();
    kstat_t ret;
    uint8_t cur_cpu_num;

    NULL_PARA_CHK(queue);
    NULL_PARA_CHK(msg);
    NULL_PARA_CHK(size);

    RHINO_CRITICAL_ENTER();

    cur_cpu_num = cpu_cur_get();
    /* honour a pending task-cancel request before doing anything else */
    TASK_CANCEL_CHK(queue);

    /* a blocking receive must not be issued from interrupt context */
    if ((g_intrpt_nested_level[cur_cpu_num] > 0u) && (ticks != RHINO_NO_WAIT)) {
        RHINO_CRITICAL_EXIT();
        return RHINO_NOT_CALLED_BY_INTRPT;
    }

    if (queue->blk_obj.obj_type != RHINO_BUF_QUEUE_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    /* fast path: a message is already buffered */
    if (queue->cur_num > 0u) {
        ringbuf_pop(&(queue->ringbuf), msg, size);
        queue->cur_num--;
        RHINO_CRITICAL_EXIT();
        return RHINO_SUCCESS;
    }

    if (ticks == RHINO_NO_WAIT) {
        *size = 0u;
        RHINO_CRITICAL_EXIT();
        return RHINO_NO_PEND_WAIT;
    }

    /* cannot block while the scheduler is locked */
    if (g_sched_lock[cur_cpu_num] > 0u) {
        *size = 0u;
        RHINO_CRITICAL_EXIT();
        return RHINO_SCHED_DISABLE;
    }

    /* publish the receive buffer so a sender can copy straight into it
       (see buf_queue_send), then block this task on the queue */
    g_active_task[cur_cpu_num]->msg = msg;
    pend_to_blk_obj(&queue->blk_obj, g_active_task[cur_cpu_num], ticks);

    TRACE_BUF_QUEUE_GET_BLK(g_active_task[cur_cpu_num], queue, ticks);

    RHINO_CRITICAL_EXIT_SCHED();

    RHINO_CPU_INTRPT_DISABLE();

    /* the task may have resumed on a different CPU: re-read the id */
    cur_cpu_num = cpu_cur_get();

    /* translate the wakeup reason (message delivered / timeout / object
       deleted / task cancelled) into a return status */
    ret = pend_state_end_proc(g_active_task[cur_cpu_num], &queue->blk_obj);

    switch (ret) {
        case RHINO_SUCCESS:
            /* the sender recorded the copied length in bq_msg_size */
            *size = g_active_task[cur_cpu_num]->bq_msg_size;
            break;

        default:
            *size = 0u;
            break;
    }

    RHINO_CPU_INTRPT_ENABLE();

    return ret;
}
362
krhino_buf_queue_flush(kbuf_queue_t * queue)363 kstat_t krhino_buf_queue_flush(kbuf_queue_t *queue)
364 {
365 CPSR_ALLOC();
366
367 NULL_PARA_CHK(queue);
368
369 RHINO_CRITICAL_ENTER();
370
371 INTRPT_NESTED_LEVEL_CHK();
372
373 if (queue->blk_obj.obj_type != RHINO_BUF_QUEUE_OBJ_TYPE) {
374 RHINO_CRITICAL_EXIT();
375 return RHINO_KOBJ_TYPE_ERR;
376 }
377
378 ringbuf_reset(&(queue->ringbuf));
379 queue->cur_num = 0u;
380 queue->peak_num = 0u;
381 queue->min_free_buf_size = queue->ringbuf.freesize;
382
383 RHINO_CRITICAL_EXIT();
384
385 return RHINO_SUCCESS;
386 }
387
krhino_buf_queue_info_get(kbuf_queue_t * queue,kbuf_queue_info_t * info)388 kstat_t krhino_buf_queue_info_get(kbuf_queue_t *queue, kbuf_queue_info_t *info)
389 {
390 CPSR_ALLOC();
391
392 NULL_PARA_CHK(queue);
393 NULL_PARA_CHK(info);
394
395 RHINO_CRITICAL_ENTER();
396
397 if (queue->blk_obj.obj_type != RHINO_BUF_QUEUE_OBJ_TYPE) {
398 RHINO_CRITICAL_EXIT();
399 return RHINO_KOBJ_TYPE_ERR;
400 }
401
402 info->free_buf_size = queue->ringbuf.freesize;
403 info->min_free_buf_size = queue->min_free_buf_size;
404 info->buf_size = queue->ringbuf.end - queue->ringbuf.buf;
405 info->max_msg_size = queue->max_msg_size;
406 info->cur_num = queue->cur_num;
407 info->peak_num = queue->peak_num;
408
409 RHINO_CRITICAL_EXIT();
410
411 return RHINO_SUCCESS;
412 }
413
414 #endif
415
416