/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-08-25     armink       the first version
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 */

#include <rthw.h>
#include <rtdevice.h>

/**
 * ring block buffer object initialization
 *
 * @param rbb ring block buffer object
 * @param buf buffer
 * @param buf_size buffer size
 * @param block_set block set
 * @param blk_max_num max block number
 *
 * @note If your application requires aligned access, make sure the buffer address is aligned.
 */
void rt_rbb_init(rt_rbb_t rbb, rt_uint8_t *buf, rt_size_t buf_size, rt_rbb_blk_t block_set, rt_size_t blk_max_num)
{
    rt_size_t i;

    RT_ASSERT(rbb);
    RT_ASSERT(buf);
    RT_ASSERT(block_set);

    rbb->buf = buf;
    rbb->buf_size = buf_size;
    rbb->blk_set = block_set;
    rbb->blk_max_num = blk_max_num;
    rbb->tail = &rbb->blk_list;
    rt_slist_init(&rbb->blk_list);
    rt_slist_init(&rbb->free_list);
    /* initialize block status */
    for (i = 0; i < blk_max_num; i++)
    {
        block_set[i].status = RT_RBB_BLK_UNUSED;
        rt_slist_init(&block_set[i].list);
        rt_slist_insert(&rbb->free_list, &block_set[i].list);
    }
    rt_spin_lock_init(&(rbb->spinlock));
}
RTM_EXPORT(rt_rbb_init);
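
/*
 * A minimal usage sketch for static initialization (illustrative only; all
 * names are hypothetical, and rt_align() assumes a recent rtdef.h). The
 * buffer is aligned as the note above recommends:
 *
 *     #define LOG_BUF_SIZE  1024
 *     #define LOG_BLK_NUM   8
 *
 *     static struct rt_rbb     log_rbb;
 *     rt_align(RT_ALIGN_SIZE)
 *     static rt_uint8_t        log_buf[LOG_BUF_SIZE];
 *     static struct rt_rbb_blk log_blk_set[LOG_BLK_NUM];
 *
 *     void log_rbb_setup(void)
 *     {
 *         rt_rbb_init(&log_rbb, log_buf, LOG_BUF_SIZE, log_blk_set, LOG_BLK_NUM);
 *     }
 */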

#ifdef RT_USING_HEAP

/**
 * ring block buffer object create
 *
 * @param buf_size buffer size
 * @param blk_max_num max block number
 *
 * @return != RT_NULL: ring block buffer object
 *            RT_NULL: create failed
 */
rt_rbb_t rt_rbb_create(rt_size_t buf_size, rt_size_t blk_max_num)
{
    rt_rbb_t rbb = RT_NULL;
    rt_uint8_t *buf;
    rt_rbb_blk_t blk_set;

    rbb = (rt_rbb_t)rt_malloc(sizeof(struct rt_rbb));
    if (!rbb)
    {
        return RT_NULL;
    }

    buf = (rt_uint8_t *)rt_malloc(buf_size);
    if (!buf)
    {
        rt_free(rbb);
        return RT_NULL;
    }

    blk_set = (rt_rbb_blk_t)rt_malloc(sizeof(struct rt_rbb_blk) * blk_max_num);
    if (!blk_set)
    {
        rt_free(buf);
        rt_free(rbb);
        return RT_NULL;
    }

    rt_rbb_init(rbb, buf, buf_size, blk_set, blk_max_num);

    return rbb;
}
RTM_EXPORT(rt_rbb_create);

/**
 * ring block buffer object destroy
 *
 * @param rbb ring block buffer object
 */
void rt_rbb_destroy(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    rt_free(rbb->buf);
    rt_free(rbb->blk_set);
    rt_free(rbb);
}
RTM_EXPORT(rt_rbb_destroy);
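
/*
 * A usage sketch for the heap-based API (illustrative only): create a rbb
 * with a 1 KB buffer split across at most 8 blocks, then destroy it when the
 * buffer is no longer needed.
 *
 *     rt_rbb_t rbb = rt_rbb_create(1024, 8);
 *     if (rbb == RT_NULL)
 *     {
 *         rt_kprintf("rbb create failed\n");
 *         return;
 *     }
 *     ... produce and consume blocks ...
 *     rt_rbb_destroy(rbb);
 */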

#endif /* RT_USING_HEAP */

static rt_rbb_blk_t find_empty_blk_in_set(rt_rbb_t rbb)
{
    struct rt_rbb_blk *blk;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->free_list))
    {
        return RT_NULL;
    }
    blk = rt_slist_first_entry(&rbb->free_list, struct rt_rbb_blk, list);
    rt_slist_remove(&rbb->free_list, &blk->list);
    RT_ASSERT(blk->status == RT_RBB_BLK_UNUSED);
    return blk;
}

rt_inline void list_append(rt_rbb_t rbb, rt_slist_t *n)
{
    /* append the node to the tail */
    rbb->tail->next = n;
    n->next = RT_NULL;
    /* save tail node */
    rbb->tail = n;
}

rt_inline rt_slist_t *list_remove(rt_rbb_t rbb, rt_slist_t *n)
{
    rt_slist_t *l = &rbb->blk_list;
    struct rt_slist_node *node = l;

    /* find the node before n, starting from the list head */
    while (node->next && node->next != n) node = node->next;
    /* remove the node when it was found */
    if (node->next != RT_NULL)
    {
        node->next = node->next->next;
        n->next = RT_NULL;
        /* update the tail node */
        if (rbb->tail == n)
            rbb->tail = node;
    }
    return l;
}

/**
 * Allocate a block of the given size. The block will be added to blk_list when the allocation succeeds.
 *
 * @param rbb ring block buffer object
 * @param blk_size block size
 *
 * @note If your application requires aligned access, make sure blk_size is aligned.
 *
 * @return != RT_NULL: allocated block
 *            RT_NULL: allocate failed
 */
rt_rbb_blk_t rt_rbb_blk_alloc(rt_rbb_t rbb, rt_size_t blk_size)
{
    rt_base_t level;
    rt_size_t empty1 = 0, empty2 = 0;
    rt_rbb_blk_t head, tail, new_rbb = RT_NULL;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_size < (1L << 24));

    level = rt_spin_lock_irqsave(&(rbb->spinlock));

    new_rbb = find_empty_blk_in_set(rbb);

    if (new_rbb)
    {
        if (rt_slist_isempty(&rbb->blk_list) == 0)
        {
            head = rt_slist_first_entry(&rbb->blk_list, struct rt_rbb_blk, list);
            /* get tail rbb blk object */
            tail = rt_slist_entry(rbb->tail, struct rt_rbb_blk, list);
            if (head->buf <= tail->buf)
            {
                /**
                 *            head              tail
                 * +--------+--------+--------+--------+--------+
                 * | empty2 | block1 | block2 | block3 | empty1 |
                 * +--------+--------+--------+--------+--------+
                 * rbb->buf
                 */
                empty1 = (rbb->buf + rbb->buf_size) - (tail->buf + tail->size);
                empty2 = head->buf - rbb->buf;

                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else if (empty2 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = rbb->buf;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space */
                    rt_slist_insert(&rbb->free_list, &new_rbb->list);
                    new_rbb = RT_NULL;
                }
            }
            else
            {
                /**
                 *   tail                       head
                 * +--------+-----------------+--------+--------+
                 * | block3 |      empty1     | block1 | block2 |
                 * +--------+-----------------+--------+--------+
                 * rbb->buf
                 */
                empty1 = head->buf - (tail->buf + tail->size);

                if (empty1 >= blk_size)
                {
                    list_append(rbb, &new_rbb->list);
                    new_rbb->status = RT_RBB_BLK_INITED;
                    new_rbb->buf = tail->buf + tail->size;
                    new_rbb->size = blk_size;
                }
                else
                {
                    /* no space */
                    rt_slist_insert(&rbb->free_list, &new_rbb->list);
                    new_rbb = RT_NULL;
                }
            }
        }
        else
        {
            /* the list is empty */
            if (blk_size <= rbb->buf_size)
            {
                list_append(rbb, &new_rbb->list);
                new_rbb->status = RT_RBB_BLK_INITED;
                new_rbb->buf = rbb->buf;
                new_rbb->size = blk_size;
            }
            else
            {
                /* no space */
                rt_slist_insert(&rbb->free_list, &new_rbb->list);
                new_rbb = RT_NULL;
            }
        }
    }

    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);

    return new_rbb;
}
RTM_EXPORT(rt_rbb_blk_alloc);

/**
 * put a block to the ring block buffer object
 *
 * @param block the block
 */
void rt_rbb_blk_put(rt_rbb_blk_t block)
{
    RT_ASSERT(block);
    RT_ASSERT(block->status == RT_RBB_BLK_INITED);

    block->status = RT_RBB_BLK_PUT;
}
RTM_EXPORT(rt_rbb_blk_put);
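
/*
 * Producer-side sketch (illustrative only; `data` and its length are
 * hypothetical): reserve a block, fill it, then publish it with
 * rt_rbb_blk_put() so a consumer can see it. A RT_NULL result means no free
 * block or no contiguous space is currently available.
 *
 *     rt_rbb_blk_t blk = rt_rbb_blk_alloc(rbb, 128);
 *     if (blk != RT_NULL)
 *     {
 *         rt_memcpy(blk->buf, data, 128);
 *         rt_rbb_blk_put(blk);
 *     }
 */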

/**
 * get a block from the ring block buffer object
 *
 * @param rbb ring block buffer object
 *
 * @return != RT_NULL: block
 *            RT_NULL: get failed
 */
rt_rbb_blk_t rt_rbb_blk_get(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_rbb_blk_t block = RT_NULL;
    rt_slist_t *node;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return RT_NULL;

    level = rt_spin_lock_irqsave(&(rbb->spinlock));

    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        block = rt_slist_entry(node, struct rt_rbb_blk, list);
        if (block->status == RT_RBB_BLK_PUT)
        {
            block->status = RT_RBB_BLK_GET;
            goto __exit;
        }
    }
    /* not found */
    block = RT_NULL;

__exit:

    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);

    return block;
}
RTM_EXPORT(rt_rbb_blk_get);

/**
 * return the block size
 *
 * @param block the block
 *
 * @return block size
 */
rt_size_t rt_rbb_blk_size(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->size;
}
RTM_EXPORT(rt_rbb_blk_size);

/**
 * return the block buffer
 *
 * @param block the block
 *
 * @return block buffer
 */
rt_uint8_t *rt_rbb_blk_buf(rt_rbb_blk_t block)
{
    RT_ASSERT(block);

    return block->buf;
}
RTM_EXPORT(rt_rbb_blk_buf);

/**
 * free the block
 *
 * @param rbb ring block buffer object
 * @param block the block
 */
void rt_rbb_blk_free(rt_rbb_t rbb, rt_rbb_blk_t block)
{
    rt_base_t level;

    RT_ASSERT(rbb);
    RT_ASSERT(block);
    RT_ASSERT(block->status != RT_RBB_BLK_UNUSED);

    level = rt_spin_lock_irqsave(&(rbb->spinlock));
    /* remove it from the rbb block list */
    list_remove(rbb, &block->list);
    block->status = RT_RBB_BLK_UNUSED;
    rt_slist_insert(&rbb->free_list, &block->list);
    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);
}
RTM_EXPORT(rt_rbb_blk_free);
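
/*
 * Consumer-side sketch (illustrative only; consume() is a hypothetical
 * handler): take the oldest put block, read it through the accessors above,
 * then recycle it with rt_rbb_blk_free() so its space can be reused.
 *
 *     rt_rbb_blk_t blk = rt_rbb_blk_get(rbb);
 *     if (blk != RT_NULL)
 *     {
 *         consume(rt_rbb_blk_buf(blk), rt_rbb_blk_size(blk));
 *         rt_rbb_blk_free(rbb, blk);
 *     }
 */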

/**
 * get a continuous block queue by the given size
 *
 *   tail                       head
 * +--------+----------------+--------+--------+--------+
 * | block3 |     empty1     | block1 | block2 |fragment|
 * +--------+----------------+--------+--------+--------+
 *                           |<-- return_size -->|      |
 *                           |<---- queue_data_len ---->|
 *
 *   tail                       head
 * +--------+----------------+--------+--------+--------+
 * | block3 |     empty1     | block1 | block2 |fragment|
 * +--------+----------------+--------+--------+--------+
 *                           |<-- return_size -->| out of len(b1+b2+b3)
 *                           |<------------ queue_data_len ------------>|
 *
 * @param rbb ring block buffer object
 * @param queue_data_len the maximum queue data size; the returned size will not exceed it
 * @param blk_queue continuous block queue
 *
 * @return the total data size of the block queue
 */
rt_size_t rt_rbb_blk_queue_get(rt_rbb_t rbb, rt_size_t queue_data_len, rt_rbb_blk_queue_t blk_queue)
{
    rt_base_t level;
    rt_size_t data_total_size = 0;
    rt_slist_t *node, *tmp = RT_NULL;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_spin_lock_irqsave(&(rbb->spinlock));

    for (node = rt_slist_first(&rbb->blk_list); node; node = tmp)
    {
        /* save the next node first, so the loop never dereferences a null node */
        tmp = rt_slist_next(node);
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status == RT_RBB_BLK_PUT)
            {
                /* save the first put status block to the queue */
                blk_queue->blocks = last_block;
                blk_queue->blk_num = 0;
            }
            else
            {
                /* the first block must be in put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions will break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             * 3. data_total_size would go out of range
             */
            if (block->status != RT_RBB_BLK_PUT ||
                last_block->buf > block->buf ||
                data_total_size + block->size > queue_data_len)
            {
                break;
            }
            /* backup the last block */
            last_block = block;
        }
        /* take the current block into the queue */
        data_total_size += last_block->size;
        last_block->status = RT_RBB_BLK_GET;
        blk_queue->blk_num++;
    }

    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);

    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_get);

/**
 * get the total length of all blocks on the block queue
 *
 * @param blk_queue the block queue
 *
 * @return total length
 */
rt_size_t rt_rbb_blk_queue_len(rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i = 0, data_total_size = 0;
    rt_rbb_blk_t blk;

    RT_ASSERT(blk_queue);

    for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
    {
        data_total_size += blk->size;
        blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
    }
    return data_total_size;
}
RTM_EXPORT(rt_rbb_blk_queue_len);

/**
 * return the block queue buffer
 *
 * @param blk_queue the block queue
 *
 * @return block queue buffer
 */
rt_uint8_t *rt_rbb_blk_queue_buf(rt_rbb_blk_queue_t blk_queue)
{
    RT_ASSERT(blk_queue);

    return blk_queue->blocks[0].buf;
}
RTM_EXPORT(rt_rbb_blk_queue_buf);

/**
 * free the block queue
 *
 * @param rbb ring block buffer object
 * @param blk_queue the block queue
 */
void rt_rbb_blk_queue_free(rt_rbb_t rbb, rt_rbb_blk_queue_t blk_queue)
{
    rt_size_t i = 0;
    rt_rbb_blk_t blk, next_blk;

    RT_ASSERT(rbb);
    RT_ASSERT(blk_queue);

    for (blk = blk_queue->blocks; i < blk_queue->blk_num; i++)
    {
        /* save the next block first, because freeing unlinks the current one */
        next_blk = rt_slist_entry(blk->list.next, struct rt_rbb_blk, list);
        rt_rbb_blk_free(rbb, blk);
        blk = next_blk;
    }
}
RTM_EXPORT(rt_rbb_blk_queue_free);
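
/*
 * Block queue sketch (illustrative only; send() is a hypothetical handler,
 * e.g. a DMA transfer that wants one contiguous region): collect up to 256
 * bytes of continuous put blocks, hand the region to the consumer, then
 * release the whole queue in one call.
 *
 *     struct rt_rbb_blk_queue blk_queue;
 *     rt_size_t len = rt_rbb_blk_queue_get(rbb, 256, &blk_queue);
 *     if (len > 0)
 *     {
 *         send(rt_rbb_blk_queue_buf(&blk_queue), len);
 *         rt_rbb_blk_queue_free(rbb, &blk_queue);
 *     }
 */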

/**
 * Blocks in put status whose buffers are continuous can form a block queue.
 * This function returns the length of the next block queue that could be formed.
 *
 * @param rbb ring block buffer object
 *
 * @return the length of the next formable block queue
 */
rt_size_t rt_rbb_next_blk_queue_len(rt_rbb_t rbb)
{
    rt_base_t level;
    rt_size_t data_len = 0;
    rt_slist_t *node;
    rt_rbb_blk_t last_block = RT_NULL, block;

    RT_ASSERT(rbb);

    if (rt_slist_isempty(&rbb->blk_list))
        return 0;

    level = rt_spin_lock_irqsave(&(rbb->spinlock));

    for (node = rt_slist_first(&rbb->blk_list); node; node = rt_slist_next(node))
    {
        if (!last_block)
        {
            last_block = rt_slist_entry(node, struct rt_rbb_blk, list);
            if (last_block->status != RT_RBB_BLK_PUT)
            {
                /* the first block must be in put status */
                last_block = RT_NULL;
                continue;
            }
        }
        else
        {
            block = rt_slist_entry(node, struct rt_rbb_blk, list);
            /*
             * the following conditions will break the loop:
             * 1. the current block is not in put status
             * 2. the last block and the current block are not continuous
             */
            if (block->status != RT_RBB_BLK_PUT || last_block->buf > block->buf)
            {
                break;
            }
            /* backup the last block */
            last_block = block;
        }
        data_len += last_block->size;
    }

    rt_spin_unlock_irqrestore(&(rbb->spinlock), level);

    return data_len;
}
RTM_EXPORT(rt_rbb_next_blk_queue_len);
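
/*
 * Pairing sketch (illustrative only): query how much continuous put data is
 * available first, then request exactly that much as a block queue, so the
 * get call below returns the full queried length.
 *
 *     rt_size_t next_len = rt_rbb_next_blk_queue_len(rbb);
 *     if (next_len > 0)
 *     {
 *         struct rt_rbb_blk_queue blk_queue;
 *         rt_rbb_blk_queue_get(rbb, next_len, &blk_queue);
 *         ... use and free the queue as shown above ...
 *     }
 */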

/**
 * get the ring block buffer object buffer size
 *
 * @param rbb ring block buffer object
 *
 * @return buffer size
 */
rt_size_t rt_rbb_get_buf_size(rt_rbb_t rbb)
{
    RT_ASSERT(rbb);

    return rbb->buf_size;
}
RTM_EXPORT(rt_rbb_get_buf_size);