/* buf.c - Buffer management */

/*
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdio.h>
#include <bt_errno.h>
#include <stddef.h>
#include <string.h>
#include <ble_os.h>
#include <misc/byteorder.h>
#include <stdlib.h>
#include <net/buf.h>
#include <misc/util.h>
#include "common/log.h"
#ifdef CONFIG_BT_USE_MM
#include <umm_heap.h>
#include <mm.h>
#endif

#if defined(CONFIG_NET_BUF_LOG)
#define SYS_LOG_DOMAIN "net/buf"
#define SYS_LOG_LEVEL CONFIG_SYS_LOG_NET_BUF_LEVEL
#include <logging/yoc_syslog.h>

#define NET_BUF_DBG(fmt, ...) SYS_LOG_DBG("(%p) " fmt, k_current_get(), \
                                          ##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) SYS_LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) SYS_LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) SYS_LOG_INF(fmt, ##__VA_ARGS__)
#define NET_BUF_ASSERT(cond) do { if (!(cond)) {                      \
            NET_BUF_ERR("assert: '" #cond "' failed");                \
        } } while (0)
#else

#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#define NET_BUF_ASSERT(cond)
#endif /* CONFIG_NET_BUF_LOG */

#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif

#define MAX_POOL_LIST_SIZE (15)

/* Upstream net_buf binds the pool list to linker-defined symbols
 * (_net_buf_pool_list[] .. _net_buf_pool_list_end[]); this port keeps a
 * runtime registry instead, which each pool joins via net_buf_pool_init().
 */
static struct net_buf_pool *net_buf_pool_list[MAX_POOL_LIST_SIZE] = {0};
static struct net_buf_pool **net_buf_pool_list_end = net_buf_pool_list;

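/*
 * Illustrative usage sketch (the pool-defining macro and its exact
 * parameters come from net/buf.h, so treat the names here as assumptions):
 * define a pool, register it once at startup, then allocate from it.
 *
 *   NET_BUF_POOL_FIXED_DEFINE(app_pool, 4, 64, NULL);
 *
 *   void app_bufs_init(void)
 *   {
 *       net_buf_pool_init(&app_pool); // registers the pool, inits its LIFO
 *   }
 */
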
int net_buf_pool_init(struct net_buf_pool *pool)
{
    if (net_buf_pool_list_end >= net_buf_pool_list + MAX_POOL_LIST_SIZE) {
        return -1;
    }

    k_lifo_init(&pool->free);
    *net_buf_pool_list_end = pool;
    net_buf_pool_list_end++;

    return 0;
}

struct net_buf_pool *net_buf_pool_get(int id)
{
    return net_buf_pool_list[id];
}

static int pool_id(struct net_buf_pool *pool)
{
    int count = net_buf_pool_list_end - net_buf_pool_list;
    int i;

    /* Check the bounds before dereferencing the slot */
    for (i = 0; i < count && net_buf_pool_list[i]; i++) {
        if (net_buf_pool_list[i] == pool) {
            return i;
        }
    }

    return -1;
}

int net_buf_pool_is_free(int id)
{
    struct net_buf_pool *pool = net_buf_pool_get(id);

    return (pool->buf_count ==
            pool->uninit_count + k_lifo_num_get(&pool->free));
}

int net_buf_poll_is_all_free(void)
{
    int count = net_buf_pool_list_end - net_buf_pool_list;

    while (count) {
        if (!net_buf_pool_is_free(count - 1)) {
            return 0;
        }
        count--;
    }

    return 1;
}

int net_buf_id(struct net_buf *buf)
{
    struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

    return buf - pool->__bufs;
}

static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
                                              u16_t uninit_count)
{
    struct net_buf *buf;

    buf = &pool->__bufs[pool->buf_count - uninit_count];

    buf->pool_id = pool_id(pool);

    return buf;
}

void net_buf_reset(struct net_buf *buf)
{
    __ASSERT_NO_MSG(buf->flags == 0U);
    __ASSERT_NO_MSG(buf->frags == NULL);

    net_buf_simple_reset(&buf->b);
}

static u8_t *generic_data_ref(struct net_buf *buf, u8_t *data)
{
    u8_t *ref_count;

    ref_count = data - 1;
    (*ref_count)++;

    return data;
}

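/*
 * The ref-counted allocators in this file share one layout convention: a
 * one-byte reference count is stored immediately before the data pointer
 * the caller sees. Sketch of a heap_data_alloc() result for *size == 4:
 *
 *   malloc'd block:  [ref_count][d0][d1][d2][d3]
 *                     ^          ^
 *                     data - 1   returned data pointer
 *
 * generic_data_ref() and heap_data_unref() both step back one byte to
 * reach the count; the CONFIG_BT_USE_MM path below uses a bt_u32_t prefix
 * instead.
 */
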
static u8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
                                 k_timeout_t timeout)
{
    /* The k_mem_pool-backed variable-size allocator is disabled in this
     * port; the allocation always fails.
     */
#if 0
    struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
    struct k_mem_pool *pool = buf_pool->alloc->alloc_data;
    struct k_mem_block block;
    u8_t *ref_count;

    /* Reserve extra space for k_mem_block_id and ref-count (u8_t) */
    if (k_mem_pool_alloc(pool, &block,
                         sizeof(struct k_mem_block_id) + 1 + *size,
                         timeout)) {
        return NULL;
    }

    /* Save the block descriptor info at the start of the actual block */
    memcpy(block.data, &block.id, sizeof(block.id));

    ref_count = (u8_t *)block.data + sizeof(block.id);
    *ref_count = 1U;

    /* Return pointer to the byte following the ref count */
    return ref_count + 1;
#endif
    return NULL;
}

static void mem_pool_data_unref(struct net_buf *buf, u8_t *data)
{
#if 0
    struct k_mem_block_id id;
    u8_t *ref_count;

    ref_count = data - 1;
    if (--(*ref_count)) {
        return;
    }

    /* Need to copy to a local variable due to alignment */
    memcpy(&id, ref_count - sizeof(id), sizeof(id));
    k_mem_pool_free_id(&id);
#endif
}

const struct net_buf_data_cb net_buf_var_cb = {
    .alloc = mem_pool_data_alloc,
    .ref = generic_data_ref,
    .unref = mem_pool_data_unref,
};

static u8_t *fixed_data_alloc(struct net_buf *buf, size_t *size,
                              k_timeout_t timeout)
{
    struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
    const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

    *size = MIN(fixed->data_size, *size);

#ifdef CONFIG_BT_USE_MM
    /* Heap-backed variant: prepend a bt_u32_t reference count */
    bt_u32_t *ref_count;
    unsigned int key;

    key = irq_lock();
    ref_count = mm_malloc(USR_HEAP, sizeof(*ref_count) + *size,
                          __builtin_return_address(0));
    irq_unlock(key);
    if (!ref_count) {
        return NULL;
    }

    *ref_count = 1;

    return (u8_t *)(ref_count + 1);
#else
    /* Static variant: each buffer owns a fixed slot in the data pool */
    return fixed->data_pool + fixed->data_size * net_buf_id(buf);
#endif
}

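/*
 * Illustrative address math for the static (non-CONFIG_BT_USE_MM) case,
 * assuming a pool defined with data_size == 64: the buffer with
 * net_buf_id(buf) == 2 gets the slot at data_pool + 128. Buffers and data
 * slots are paired one-to-one, so nothing needs freeing on unref.
 */
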
static void fixed_data_unref(struct net_buf *buf, u8_t *data)
{
#ifdef CONFIG_BT_USE_MM
    bt_u32_t *ref_count;
    unsigned int key;

    ref_count = (bt_u32_t *)(data - sizeof(*ref_count));
    if (--(*ref_count)) {
        return;
    }

    key = irq_lock();
    mm_free(USR_HEAP, ref_count, __builtin_return_address(0));
    irq_unlock(key);
#else
    /* Nothing needed for fixed-size data pools */
#endif
}

const struct net_buf_data_cb net_buf_fixed_cb = {
    .alloc = fixed_data_alloc,
    .unref = fixed_data_unref,
};

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)

static u8_t *heap_data_alloc(struct net_buf *buf, size_t *size,
                             k_timeout_t timeout)
{
    u8_t *ref_count;

    ref_count = malloc(1 + *size);
    if (!ref_count) {
        return NULL;
    }

    *ref_count = 1U;

    return ref_count + 1;
}

static void heap_data_unref(struct net_buf *buf, u8_t *data)
{
    u8_t *ref_count;

    ref_count = data - 1;
    if (--(*ref_count)) {
        return;
    }

    /* Pair with the malloc() in heap_data_alloc() */
    free(ref_count);
}

static const struct net_buf_data_cb net_buf_heap_cb = {
    .alloc = heap_data_alloc,
    .ref = generic_data_ref,
    .unref = heap_data_unref,
};

const struct net_buf_data_alloc net_buf_heap_alloc = {
    .cb = &net_buf_heap_cb,
};

#endif /* CONFIG_HEAP_MEM_POOL_SIZE > 0 */

static u8_t *data_alloc(struct net_buf *buf, size_t *size, k_timeout_t timeout)
{
    struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

    return pool->alloc->cb->alloc(buf, size, timeout);
}

static u8_t *data_ref(struct net_buf *buf, u8_t *data)
{
    struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

    return pool->alloc->cb->ref(buf, data);
}

static void data_unref(struct net_buf *buf, u8_t *data)
{
    struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

    if (buf->flags & NET_BUF_EXTERNAL_DATA) {
        /* Externally owned data is never freed here */
        return;
    }

    pool->alloc->cb->unref(buf, data);
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
                                        k_timeout_t timeout, const char *func,
                                        int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
                                  k_timeout_t timeout)
#endif
{
    bt_u32_t alloc_start = k_uptime_get_32();
#if defined(CONFIG_NET_BUF_LOG)
    size_t req_size = size;
#endif
    struct net_buf *buf;
    unsigned int key;

    __ASSERT_NO_MSG(pool);

    NET_BUF_DBG("%s():%d: pool %p size %zu timeout %d", func, line, pool,
                size, timeout);

    /* We need to lock interrupts temporarily to prevent race conditions
     * when accessing pool->uninit_count.
     */
    key = irq_lock();

    /* If there are uninitialized buffers we're guaranteed to succeed
     * with the allocation one way or another.
     */
    if (pool->uninit_count) {
        u16_t uninit_count;

        /* If this is not the first access to the pool, we can
         * be opportunistic and try to fetch a previously used
         * buffer from the LIFO with K_NO_WAIT.
         */
        if (pool->uninit_count < pool->buf_count) {
            buf = k_lifo_get(&pool->free, K_NO_WAIT);
            if (buf) {
                irq_unlock(key);
                goto success;
            }
        }

        uninit_count = pool->uninit_count--;
        irq_unlock(key);

        buf = pool_get_uninit(pool, uninit_count);
        goto success;
    }

    irq_unlock(key);

#if defined(CONFIG_NET_BUF_LOG) && SYS_LOG_LEVEL >= SYS_LOG_LEVEL_WARNING
    if (timeout == K_FOREVER) {
        bt_u32_t ref = k_uptime_get_32();

        buf = k_lifo_get(&pool->free, K_NO_WAIT);
        while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
            NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
                         func, line, pool->name);
#else
            NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
                         func, line, pool);
#endif
            buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
            NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
                         func, line, pool->name,
                         (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
            NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
                         func, line, pool,
                         (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
        }
    } else {
        buf = k_lifo_get(&pool->free, timeout);
    }
#else
    buf = k_lifo_get(&pool->free, timeout);
#endif
    if (!buf) {
        NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
        return NULL;
    }

success:
    NET_BUF_DBG("allocated buf %p", buf);

    if (size) {
        /* Charge the time already spent waiting for the buffer
         * against the timeout for the data allocation.
         */
        if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
            bt_u32_t diff = k_uptime_get_32() - alloc_start;

            timeout -= MIN(timeout, diff);
        }

        buf->__buf = data_alloc(buf, &size, timeout);
        if (!buf->__buf) {
            NET_BUF_ERR("%s():%d: Failed to allocate data",
                        func, line);
            net_buf_destroy(buf);
            return NULL;
        }

        NET_BUF_ASSERT(req_size <= size);
    } else {
        buf->__buf = NULL;
    }

    buf->ref = 1U;
    buf->flags = 0U;
    buf->frags = NULL;
    buf->size = size;
    net_buf_reset(buf);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
    pool->avail_count--;
    __ASSERT_NO_MSG(pool->avail_count >= 0);
#endif

    return buf;
}
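
/*
 * Illustrative call site (pool name and sizes are assumptions): allocate,
 * reserve protocol headroom, fill, then drop the initial reference.
 *
 *   struct net_buf *buf = net_buf_alloc_len(&app_pool, 64, K_MSEC(100));
 *   if (buf) {
 *       net_buf_reserve(buf, 4);              // headroom for a header
 *       net_buf_add_mem(buf, payload, plen);  // append payload bytes
 *       net_buf_unref(buf);                   // release when done
 *   }
 */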

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
                                          k_timeout_t timeout, const char *func,
                                          int line)
{
    const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

    return net_buf_alloc_len_debug(pool, fixed->data_size, timeout, func,
                                   line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool,
                                    k_timeout_t timeout)
{
    const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

    return net_buf_alloc_len(pool, fixed->data_size, timeout);
}
#endif

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
                                              void *data, size_t size,
                                              k_timeout_t timeout,
                                              const char *func, int line)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
                                        void *data, size_t size,
                                        k_timeout_t timeout)
#endif
{
    struct net_buf *buf;

#if defined(CONFIG_NET_BUF_LOG)
    buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line);
#else
    buf = net_buf_alloc_len(pool, 0, timeout);
#endif
    if (!buf) {
        return NULL;
    }

    net_buf_simple_init_with_data(&buf->b, data, size);
    buf->flags = NET_BUF_EXTERNAL_DATA;

    return buf;
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_get_debug(struct kfifo *fifo, k_timeout_t timeout,
                                  const char *func, int line)
#else
struct net_buf *net_buf_get(struct kfifo *fifo, k_timeout_t timeout)
#endif
{
    struct net_buf *buf, *frag;

    NET_BUF_DBG("%s():%d: fifo %p", func, line, fifo);

    buf = k_fifo_get(fifo, timeout);
    if (!buf) {
        return NULL;
    }

    NET_BUF_DBG("%s():%d: buf %p fifo %p", func, line, buf, fifo);

    /* Get any fragments belonging to this buffer */
    for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
        frag->frags = k_fifo_get(fifo, K_NO_WAIT);
        __ASSERT_NO_MSG(frag->frags);

        /* The fragments flag is only for FIFO-internal usage */
        frag->flags &= ~NET_BUF_FRAGS;
    }

    /* Mark the end of the fragment list */
    frag->frags = NULL;

    return buf;
}

void net_buf_simple_init_with_data(struct net_buf_simple *buf,
                                   void *data, size_t size)
{
    buf->__buf = data;
    buf->data = data;
    buf->size = size;
    buf->len = size;
}

void net_buf_simple_reserve(struct net_buf_simple *buf, size_t reserve)
{
    __ASSERT_NO_MSG(buf);
    __ASSERT_NO_MSG(buf->len == 0U);
    NET_BUF_DBG("buf %p reserve %zu", buf, reserve);

    buf->data = buf->__buf + reserve;
}
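
/*
 * Layout reminder for net_buf_simple (derived from the accessors below):
 *
 *   __buf               data         data + len             __buf + size
 *     |<--- headroom --->|<-- len -->|<------ tailroom ------>|
 *
 * net_buf_simple_push() grows into the headroom,
 * net_buf_simple_add() grows into the tailroom, and
 * net_buf_simple_pull() consumes bytes from the front of len.
 */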

void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
    struct net_buf *tail;
    unsigned int key;

    __ASSERT_NO_MSG(list);
    __ASSERT_NO_MSG(buf);

    for (tail = buf; tail->frags; tail = tail->frags) {
        tail->flags |= NET_BUF_FRAGS;
    }

    key = irq_lock();
    sys_slist_append_list(list, &buf->node, &tail->node);
    irq_unlock(key);
}

struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
    struct net_buf *buf, *frag;
    unsigned int key;

    __ASSERT_NO_MSG(list);

    key = irq_lock();
    buf = (void *)sys_slist_get(list);
    irq_unlock(key);

    if (!buf) {
        return NULL;
    }

    /* Get any fragments belonging to this buffer */
    for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
        key = irq_lock();
        frag->frags = (void *)sys_slist_get(list);
        irq_unlock(key);

        __ASSERT_NO_MSG(frag->frags);

        /* The fragments flag is only for list-internal usage */
        frag->flags &= ~NET_BUF_FRAGS;
    }

    /* Mark the end of the fragment list */
    frag->frags = NULL;

    return buf;
}
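
/*
 * Queueing convention used by the slist helpers above and the FIFO
 * helpers below: a chained buffer is flattened into consecutive queue
 * entries, and NET_BUF_FRAGS on an entry means "the next entry is my
 * fragment". The flag exists only while the buffers sit in the queue;
 * the getters clear it as they relink the chain via frag->frags.
 */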

void net_buf_put(struct kfifo *fifo, struct net_buf *buf)
{
    struct net_buf *tail;

    if (NULL == fifo) {
        BT_WARN("fifo is NULL");
        return;
    }

    if (NULL == buf) {
        BT_WARN("buf is NULL");
        return;
    }

    for (tail = buf; tail->frags; tail = tail->frags) {
        tail->flags |= NET_BUF_FRAGS;
    }

    k_fifo_put(fifo, buf);
}

#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
    __ASSERT_NO_MSG(buf);

    while (buf) {
        struct net_buf *frags = buf->frags;
        struct net_buf_pool *pool;

#if defined(CONFIG_NET_BUF_LOG)
        if (!buf->ref) {
            NET_BUF_ERR("%s():%d: buf %p double free", func, line,
                        buf);
            return;
        }
#endif
        NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
                    buf->pool_id, buf->frags);

        if (--buf->ref > 0) {
            return;
        }

        if (buf->__buf) {
            data_unref(buf, buf->__buf);
            buf->__buf = NULL;
        }

        buf->data = NULL;
        buf->frags = NULL;

        pool = net_buf_pool_get(buf->pool_id);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
        pool->avail_count++;
        __ASSERT_NO_MSG(pool->avail_count <= pool->buf_count);
#endif

        if (pool->destroy) {
            pool->destroy(buf);
        } else {
            net_buf_destroy(buf);
        }

        /* Continue unreferencing the fragment chain, if any */
        if (!frags) {
            return;
        }

        buf = frags;
    }
}

struct net_buf *net_buf_ref(struct net_buf *buf)
{
    __ASSERT_NO_MSG(buf);

    NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
                buf, buf->ref, buf->pool_id);
    buf->ref++;

    return buf;
}

struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout)
{
    bt_u32_t alloc_start = k_uptime_get_32();
    struct net_buf_pool *pool;
    struct net_buf *clone;

    __ASSERT_NO_MSG(buf);

    pool = net_buf_pool_get(buf->pool_id);

    clone = net_buf_alloc_len(pool, 0, timeout);
    if (!clone) {
        return NULL;
    }

    /* If the pool supports data referencing use that. Otherwise
     * we need to allocate new data and make a copy.
     */
    if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) {
        clone->__buf = data_ref(buf, buf->__buf);
        clone->data = buf->data;
        clone->len = buf->len;
        clone->size = buf->size;
    } else {
        size_t size = buf->size;

        /* Charge the time spent allocating the clone against the
         * timeout for the data allocation.
         */
        if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
            bt_u32_t diff = k_uptime_get_32() - alloc_start;

            timeout -= MIN(timeout, diff);
        }

        clone->__buf = data_alloc(clone, &size, timeout);
        if (!clone->__buf || size < buf->size) {
            net_buf_destroy(clone);
            return NULL;
        }

        clone->size = size;
        clone->data = clone->__buf + net_buf_headroom(buf);
        net_buf_add_mem(clone, buf->data, buf->len);
    }

    return clone;
}
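
/*
 * Note on the two clone paths above: with a ref-capable allocator the
 * clone shares the original's data bytes (both must be unref'd before the
 * bytes are released, and a write through one is visible through the
 * other); without one, the clone gets a private copy with the same
 * headroom and length.
 */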

struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
    __ASSERT_NO_MSG(buf);

    while (buf->frags) {
        buf = buf->frags;
    }

    return buf;
}

void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
    __ASSERT_NO_MSG(parent);
    __ASSERT_NO_MSG(frag);

    if (parent->frags) {
        net_buf_frag_last(frag)->frags = parent->frags;
    }
    /* Take ownership of the fragment reference */
    parent->frags = frag;
}

struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
    __ASSERT_NO_MSG(frag);

    if (!head) {
        return net_buf_ref(frag);
    }

    net_buf_frag_insert(net_buf_frag_last(head), frag);

    return head;
}
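
/*
 * Illustrative chain building (buffer names are assumptions): with no head
 * yet, frag_add takes an extra reference on the fragment and returns it as
 * the head; otherwise it appends at the tail and takes over the caller's
 * reference, so the whole chain is released by net_buf_unref(head).
 *
 *   struct net_buf *head = NULL;
 *
 *   head = net_buf_frag_add(head, buf_a);  // head == buf_a, extra ref taken
 *   head = net_buf_frag_add(head, buf_b);  // buf_a->frags == buf_b
 */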

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
                                       struct net_buf *frag,
                                       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
    struct net_buf *next_frag;

    __ASSERT_NO_MSG(frag);

    if (parent) {
        __ASSERT_NO_MSG(parent->frags);
        __ASSERT_NO_MSG(parent->frags == frag);
        parent->frags = frag->frags;
    }

    next_frag = frag->frags;

    frag->frags = NULL;

#if defined(CONFIG_NET_BUF_LOG)
    net_buf_unref_debug(frag, func, line);
#else
    net_buf_unref(frag);
#endif

    return next_frag;
}

size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
                         size_t offset, size_t len)
{
    struct net_buf *frag;
    size_t to_copy;
    size_t copied;

    len = MIN(len, dst_len);

    frag = src;

    /* Find the right fragment to start copying from */
    while (frag && offset >= frag->len) {
        offset -= frag->len;
        frag = frag->frags;
    }

    /* Traverse the fragment chain until len bytes are copied */
    copied = 0;
    while (frag && len > 0) {
        to_copy = MIN(len, frag->len - offset);
        memcpy((u8_t *)dst + copied, frag->data + offset, to_copy);

        copied += to_copy;

        /* to_copy is always <= len */
        len -= to_copy;
        frag = frag->frags;

        /* After the first iteration, this value will be 0 */
        offset = 0;
    }

    return copied;
}
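
/*
 * Illustrative use (sizes are assumptions): copy up to 128 bytes starting
 * 10 bytes into a fragment chain. The return value is the number of bytes
 * actually copied, which may be short if the chain or dst is smaller.
 *
 *   u8_t flat[128];
 *   size_t n = net_buf_linearize(flat, sizeof(flat), head, 10, 128);
 */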

/* This helper routine appends multiple bytes; if there is no room for the
 * data in the current fragment, it allocates a new fragment (via
 * allocate_cb, which must be non-NULL) and adds it to the buffer. It
 * assumes that the buffer has at least one fragment.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
                            const void *value, k_timeout_t timeout,
                            net_buf_allocator_cb allocate_cb, void *user_data)
{
    struct net_buf *frag = net_buf_frag_last(buf);
    size_t added_len = 0;
    const u8_t *value8 = value;

    do {
        u16_t count = MIN(len, net_buf_tailroom(frag));

        net_buf_add_mem(frag, value8, count);
        len -= count;
        added_len += count;
        value8 += count;

        if (len == 0) {
            return added_len;
        }

        frag = allocate_cb(timeout, user_data);
        if (!frag) {
            return added_len;
        }

        net_buf_frag_add(buf, frag);
    } while (1);

    /* Unreachable */
    return 0;
}
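
/*
 * Illustrative allocator callback (the pool name is an assumption): the
 * callback hands net_buf_append_bytes() fresh fragments on demand; a
 * short return value tells the caller how much actually fit.
 *
 *   static struct net_buf *app_alloc_frag(k_timeout_t timeout,
 *                                         void *user_data)
 *   {
 *       return net_buf_alloc_fixed(&app_pool, timeout);
 *   }
 *
 *   size_t n = net_buf_append_bytes(head, data_len, data, K_NO_WAIT,
 *                                   app_alloc_frag, NULL);
 */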

#if defined(CONFIG_NET_BUF_SIMPLE_LOG)
#define NET_BUF_SIMPLE_DBG(fmt, ...) NET_BUF_DBG(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_ERR(fmt, ...) NET_BUF_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_WARN(fmt, ...) NET_BUF_WARN(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_INFO(fmt, ...) NET_BUF_INFO(fmt, ##__VA_ARGS__)
#else
#define NET_BUF_SIMPLE_DBG(fmt, ...)
#define NET_BUF_SIMPLE_ERR(fmt, ...)
#define NET_BUF_SIMPLE_WARN(fmt, ...)
#define NET_BUF_SIMPLE_INFO(fmt, ...)
#endif /* CONFIG_NET_BUF_SIMPLE_LOG */

void net_buf_simple_clone(const struct net_buf_simple *original,
                          struct net_buf_simple *clone)
{
    memcpy(clone, original, sizeof(struct net_buf_simple));
}

void *net_buf_simple_add(struct net_buf_simple *buf, size_t len)
{
    u8_t *tail = net_buf_simple_tail(buf);

    NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

    __ASSERT_NO_MSG(net_buf_simple_tailroom(buf) >= len);

    buf->len += len;

    return tail;
}

void *net_buf_simple_add_mem(struct net_buf_simple *buf, const void *mem,
                             size_t len)
{
    NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

    return memcpy(net_buf_simple_add(buf, len), mem, len);
}

u8_t *net_buf_simple_add_u8(struct net_buf_simple *buf, u8_t val)
{
    u8_t *u8;

    NET_BUF_SIMPLE_DBG("buf %p val 0x%02x", buf, val);

    u8 = net_buf_simple_add(buf, 1);
    *u8 = val;

    return u8;
}

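/*
 * Illustrative encoder (the NET_BUF_SIMPLE_DEFINE macro comes from
 * net/buf.h and the field values are assumptions): the add_* helpers
 * append in wire order, e.g. an opcode byte followed by a little-endian
 * 16-bit handle.
 *
 *   NET_BUF_SIMPLE_DEFINE(msg, 16);
 *
 *   net_buf_simple_add_u8(&msg, 0x01);      // opcode
 *   net_buf_simple_add_le16(&msg, 0x0004);  // handle, LE on the wire
 */
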
void net_buf_simple_add_le16(struct net_buf_simple *buf, u16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le16(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be16(struct net_buf_simple *buf, u16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be16(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_le24(struct net_buf_simple *buf, bt_u32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le24(val, net_buf_simple_add(buf, 3));
}

void net_buf_simple_add_be24(struct net_buf_simple *buf, bt_u32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be24(val, net_buf_simple_add(buf, 3));
}

void net_buf_simple_add_le32(struct net_buf_simple *buf, bt_u32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le32(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be32(struct net_buf_simple *buf, bt_u32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be32(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_le48(struct net_buf_simple *buf, u64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_le48(val, net_buf_simple_add(buf, 6));
}

void net_buf_simple_add_be48(struct net_buf_simple *buf, u64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_be48(val, net_buf_simple_add(buf, 6));
}

void net_buf_simple_add_le64(struct net_buf_simple *buf, u64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_le64(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be64(struct net_buf_simple *buf, u64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_be64(val, net_buf_simple_add(buf, sizeof(val)));
}

void *net_buf_simple_push(struct net_buf_simple *buf, size_t len)
{
    NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

    __ASSERT_NO_MSG(net_buf_simple_headroom(buf) >= len);

    buf->data -= len;
    buf->len += len;

    return buf->data;
}

void net_buf_simple_push_le16(struct net_buf_simple *buf, u16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le16(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be16(struct net_buf_simple *buf, u16_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be16(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_u8(struct net_buf_simple *buf, u8_t val)
{
    u8_t *data = net_buf_simple_push(buf, 1);

    *data = val;
}

void net_buf_simple_push_le24(struct net_buf_simple *buf, bt_u32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le24(val, net_buf_simple_push(buf, 3));
}

void net_buf_simple_push_be24(struct net_buf_simple *buf, bt_u32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be24(val, net_buf_simple_push(buf, 3));
}

void net_buf_simple_push_le32(struct net_buf_simple *buf, bt_u32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_le32(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be32(struct net_buf_simple *buf, bt_u32_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

    sys_put_be32(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_le48(struct net_buf_simple *buf, u64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_le48(val, net_buf_simple_push(buf, 6));
}

void net_buf_simple_push_be48(struct net_buf_simple *buf, u64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_be48(val, net_buf_simple_push(buf, 6));
}

void net_buf_simple_push_le64(struct net_buf_simple *buf, u64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_le64(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be64(struct net_buf_simple *buf, u64_t val)
{
    NET_BUF_SIMPLE_DBG("buf %p val %" PRIu64, buf, val);

    sys_put_be64(val, net_buf_simple_push(buf, sizeof(val)));
}

void *net_buf_simple_pull(struct net_buf_simple *buf, size_t len)
{
    NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

    __ASSERT_NO_MSG(buf->len >= len);

    buf->len -= len;

    return buf->data += len;
}

void *net_buf_simple_pull_mem(struct net_buf_simple *buf, size_t len)
{
    void *data = buf->data;

    NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

    __ASSERT_NO_MSG(buf->len >= len);

    buf->len -= len;
    buf->data += len;

    return data;
}

u8_t net_buf_simple_pull_u8(struct net_buf_simple *buf)
{
    u8_t val;

    val = buf->data[0];
    net_buf_simple_pull(buf, 1);

    return val;
}

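/*
 * Illustrative decoder (field meanings are assumptions): the pull_*
 * helpers consume from the front of the buffer in wire order, mirroring
 * the add_* encoder sketch above.
 *
 *   u8_t opcode  = net_buf_simple_pull_u8(&msg);
 *   u16_t handle = net_buf_simple_pull_le16(&msg);
 */
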
u16_t net_buf_simple_pull_le16(struct net_buf_simple *buf)
{
    u16_t val;

    val = UNALIGNED_GET((u16_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le16_to_cpu(val);
}

u16_t net_buf_simple_pull_be16(struct net_buf_simple *buf)
{
    u16_t val;

    val = UNALIGNED_GET((u16_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be16_to_cpu(val);
}

bt_u32_t net_buf_simple_pull_le24(struct net_buf_simple *buf)
{
    struct uint24 {
        bt_u32_t u24:24;
    } __packed val;

    val = UNALIGNED_GET((struct uint24 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le24_to_cpu(val.u24);
}

bt_u32_t net_buf_simple_pull_be24(struct net_buf_simple *buf)
{
    struct uint24 {
        bt_u32_t u24:24;
    } __packed val;

    val = UNALIGNED_GET((struct uint24 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be24_to_cpu(val.u24);
}

bt_u32_t net_buf_simple_pull_le32(struct net_buf_simple *buf)
{
    bt_u32_t val;

    val = UNALIGNED_GET((bt_u32_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le32_to_cpu(val);
}

bt_u32_t net_buf_simple_pull_be32(struct net_buf_simple *buf)
{
    bt_u32_t val;

    val = UNALIGNED_GET((bt_u32_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be32_to_cpu(val);
}

u64_t net_buf_simple_pull_le48(struct net_buf_simple *buf)
{
    struct uint48 {
        u64_t u48:48;
    } __packed val;

    val = UNALIGNED_GET((struct uint48 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le48_to_cpu(val.u48);
}

u64_t net_buf_simple_pull_be48(struct net_buf_simple *buf)
{
    struct uint48 {
        u64_t u48:48;
    } __packed val;

    val = UNALIGNED_GET((struct uint48 *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be48_to_cpu(val.u48);
}

u64_t net_buf_simple_pull_le64(struct net_buf_simple *buf)
{
    u64_t val;

    val = UNALIGNED_GET((u64_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_le64_to_cpu(val);
}

u64_t net_buf_simple_pull_be64(struct net_buf_simple *buf)
{
    u64_t val;

    val = UNALIGNED_GET((u64_t *)buf->data);
    net_buf_simple_pull(buf, sizeof(val));

    return sys_be64_to_cpu(val);
}

size_t net_buf_simple_headroom(struct net_buf_simple *buf)
{
    return buf->data - buf->__buf;
}

size_t net_buf_simple_tailroom(struct net_buf_simple *buf)
{
    return buf->size - net_buf_simple_headroom(buf) - buf->len;
}