/* buf.c - Buffer management */

/*
 * Copyright (c) 2015-2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define LOG_MODULE_NAME net_buf
#define LOG_LEVEL CONFIG_NET_BUF_LOG_LEVEL

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <stdio.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/net_buf.h>

#if defined(CONFIG_NET_BUF_LOG)
#define NET_BUF_DBG(fmt, ...) LOG_DBG("(%p) " fmt, k_current_get(), \
				      ##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) LOG_INF(fmt, ##__VA_ARGS__)
#else
#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#endif /* CONFIG_NET_BUF_LOG */

#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif

#define GET_ALIGN(pool) MAX(sizeof(void *), pool->alloc->alignment)
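
/*
 * Illustrative layout sketch, inferred from the allocators below (not a
 * normative description): variable-size and heap allocations prepend a
 * one-byte reference count to the data, padded out to GET_ALIGN(pool) so
 * that the pointer handed back to the net_buf stays suitably aligned:
 *
 *	[ ref_count (1 byte) | padding ....... | data ............. ]
 *	^                                       ^
 *	start of heap block                     pointer returned to net_buf
 */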

/* Linker-defined symbol bound to the static pool structs */
STRUCT_SECTION_START_EXTERN(net_buf_pool);

struct net_buf_pool *net_buf_pool_get(int id)
{
	struct net_buf_pool *pool;

	STRUCT_SECTION_GET(net_buf_pool, id, &pool);

	return pool;
}

static int pool_id(struct net_buf_pool *pool)
{
	return pool - TYPE_SECTION_START(net_buf_pool);
}

int net_buf_id(const struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
				      __alignof__(struct net_buf));
	ptrdiff_t offset = (uint8_t *)buf - (uint8_t *)pool->__bufs;

	return offset / struct_size;
}
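
/*
 * Hedged illustration of the index arithmetic above: with a hypothetical
 * pool whose buffers carry 4 bytes of user data on a 32-bit target,
 * sizeof(struct net_buf) + 4 rounded up to __alignof__(struct net_buf)
 * might yield a struct_size of, say, 24; a buf located 48 bytes into
 * pool->__bufs would then get id 48 / 24 = 2. The exact numbers depend on
 * the architecture and Kconfig, so treat this purely as an illustration.
 */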

static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
					      uint16_t uninit_count)
{
	size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
				      __alignof__(struct net_buf));
	size_t byte_offset = (pool->buf_count - uninit_count) * struct_size;
	struct net_buf *buf;

	buf = (struct net_buf *)(((uint8_t *)pool->__bufs) + byte_offset);

	buf->pool_id = pool_id(pool);
	buf->user_data_size = pool->user_data_size;

	return buf;
}

void net_buf_reset(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf->flags == 0U);
	__ASSERT_NO_MSG(buf->frags == NULL);

	net_buf_simple_reset(&buf->b);
}

static uint8_t *generic_data_ref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	uint8_t *ref_count;

	ref_count = data - GET_ALIGN(buf_pool);
	(*ref_count)++;

	return data;
}

static uint8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
				    k_timeout_t timeout)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;
	uint8_t *ref_count;
	void *b;

	if (buf_pool->alloc->alignment == 0) {
		/* Reserve extra space for a ref-count (uint8_t) */
		b = k_heap_alloc(pool, sizeof(void *) + *size, timeout);
	} else {
		if (*size < buf_pool->alloc->alignment) {
			NET_BUF_DBG("Requested size %zu is smaller than alignment %zu",
				    *size, buf_pool->alloc->alignment);
			return NULL;
		}

		/* Reserve extra space for a ref-count (uint8_t) */
		b = k_heap_aligned_alloc(pool,
					 buf_pool->alloc->alignment,
					 GET_ALIGN(buf_pool) +
					 ROUND_UP(*size, buf_pool->alloc->alignment),
					 timeout);
	}

	if (b == NULL) {
		return NULL;
	}

	ref_count = (uint8_t *)b;
	*ref_count = 1U;

	/* Return a pointer to the data area that follows the ref-count
	 * byte and its alignment padding.
	 */
	return ref_count + GET_ALIGN(buf_pool);
}

static void mem_pool_data_unref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;
	uint8_t *ref_count;

	ref_count = data - GET_ALIGN(buf_pool);
	if (--(*ref_count)) {
		return;
	}

	/* The ref-count byte marks the start of the original allocation */
	k_heap_free(pool, ref_count);
}

const struct net_buf_data_cb net_buf_var_cb = {
	.alloc = mem_pool_data_alloc,
	.ref = generic_data_ref,
	.unref = mem_pool_data_unref,
};

static uint8_t *fixed_data_alloc(struct net_buf *buf, size_t *size,
				 k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	*size = pool->alloc->max_alloc_size;

	return fixed->data_pool + *size * net_buf_id(buf);
}

static void fixed_data_unref(struct net_buf *buf, uint8_t *data)
{
	/* Nothing needed for fixed-size data pools */
}

const struct net_buf_data_cb net_buf_fixed_cb = {
	.alloc = fixed_data_alloc,
	.unref = fixed_data_unref,
};
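
/*
 * Hedged usage sketch for a fixed-size pool (pool name and sizes are
 * hypothetical, not part of this file): each buffer is backed by a
 * max_alloc_size slice of one contiguous data area, indexed by
 * net_buf_id().
 *
 *	NET_BUF_POOL_FIXED_DEFINE(rx_pool, 8, 128, 0, NULL);
 *
 *	struct net_buf *buf = net_buf_alloc_fixed(&rx_pool, K_MSEC(100));
 *
 *	if (buf) {
 *		net_buf_add_mem(buf, payload, payload_len);
 *		...
 *		net_buf_unref(buf);
 *	}
 */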

#if (K_HEAP_MEM_POOL_SIZE > 0)

static uint8_t *heap_data_alloc(struct net_buf *buf, size_t *size,
				k_timeout_t timeout)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	uint8_t *ref_count;

	ref_count = k_malloc(GET_ALIGN(buf_pool) + *size);
	if (!ref_count) {
		return NULL;
	}

	*ref_count = 1U;

	return ref_count + GET_ALIGN(buf_pool);
}

static void heap_data_unref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	uint8_t *ref_count;

	ref_count = data - GET_ALIGN(buf_pool);
	if (--(*ref_count)) {
		return;
	}

	k_free(ref_count);
}

static const struct net_buf_data_cb net_buf_heap_cb = {
	.alloc = heap_data_alloc,
	.ref = generic_data_ref,
	.unref = heap_data_unref,
};

const struct net_buf_data_alloc net_buf_heap_alloc = {
	.cb = &net_buf_heap_cb,
	.max_alloc_size = 0,
};

#endif /* K_HEAP_MEM_POOL_SIZE > 0 */

static uint8_t *data_alloc(struct net_buf *buf, size_t *size, k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->alloc(buf, size, timeout);
}

static uint8_t *data_ref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->ref(buf, data);
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
					k_timeout_t timeout, const char *func,
					int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
				  k_timeout_t timeout)
#endif
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	struct net_buf *buf;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(pool);

	NET_BUF_DBG("%s():%d: pool %p size %zu", func, line, pool, size);

	/* We need to prevent race conditions
	 * when accessing pool->uninit_count.
	 */
	key = k_spin_lock(&pool->lock);

	/* If there are uninitialized buffers we're guaranteed to succeed
	 * with the allocation one way or another.
	 */
	if (pool->uninit_count) {
		uint16_t uninit_count;

		/* If this is not the first access to the pool, we can
		 * be opportunistic and try to fetch a previously used
		 * buffer from the LIFO with K_NO_WAIT.
		 */
		if (pool->uninit_count < pool->buf_count) {
			buf = k_lifo_get(&pool->free, K_NO_WAIT);
			if (buf) {
				k_spin_unlock(&pool->lock, key);
				goto success;
			}
		}

		uninit_count = pool->uninit_count--;
		k_spin_unlock(&pool->lock, key);

		buf = pool_get_uninit(pool, uninit_count);
		goto success;
	}

	k_spin_unlock(&pool->lock, key);

#if defined(CONFIG_NET_BUF_LOG) && (CONFIG_NET_BUF_LOG_LEVEL >= LOG_LEVEL_WRN)
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		uint32_t ref = k_uptime_get_32();

		buf = k_lifo_get(&pool->free, K_NO_WAIT);
		while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
				     func, line, pool->name);
#else
			NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
				     func, line, pool);
#endif
			buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
				     func, line, pool->name,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
			NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
				     func, line, pool,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
		}
	} else {
		buf = k_lifo_get(&pool->free, timeout);
	}
#else
	buf = k_lifo_get(&pool->free, timeout);
#endif
	if (!buf) {
		NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
		return NULL;
	}

success:
	NET_BUF_DBG("allocated buf %p", buf);

	if (size) {
		__maybe_unused size_t req_size = size;

		timeout = sys_timepoint_timeout(end);
		buf->__buf = data_alloc(buf, &size, timeout);
		if (!buf->__buf) {
			NET_BUF_ERR("%s():%d: Failed to allocate data",
				    func, line);
			net_buf_destroy(buf);
			return NULL;
		}

		__ASSERT_NO_MSG(req_size <= size);
	} else {
		buf->__buf = NULL;
	}

	buf->ref = 1U;
	buf->flags = 0U;
	buf->frags = NULL;
	buf->size = size;
	memset(buf->user_data, 0, buf->user_data_size);
	net_buf_reset(buf);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
	atomic_dec(&pool->avail_count);
	__ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0);
	pool->max_used = MAX(pool->max_used,
			     pool->buf_count - atomic_get(&pool->avail_count));
#endif
	return buf;
}
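
/*
 * Hedged usage sketch (pool name, sizes and timeout are hypothetical): a
 * variable-size pool hands out a net_buf and then a data area of at least
 * the requested size, within whatever remains of the caller's timeout.
 *
 *	NET_BUF_POOL_VAR_DEFINE(tx_pool, 8, 1024, 0, NULL);
 *
 *	struct net_buf *buf = net_buf_alloc_len(&tx_pool, 64, K_MSEC(50));
 *
 *	if (!buf) {
 *		return -ENOMEM;
 *	}
 *	net_buf_add_le16(buf, 0x1234);
 *	...
 *	net_buf_unref(buf);
 */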

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
					  k_timeout_t timeout, const char *func,
					  int line)
{
	return net_buf_alloc_len_debug(pool, pool->alloc->max_alloc_size, timeout, func,
				       line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool,
				    k_timeout_t timeout)
{
	return net_buf_alloc_len(pool, pool->alloc->max_alloc_size, timeout);
}
#endif

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
					      void *data, size_t size,
					      k_timeout_t timeout,
					      const char *func, int line)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
					void *data, size_t size,
					k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

#if defined(CONFIG_NET_BUF_LOG)
	buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line);
#else
	buf = net_buf_alloc_len(pool, 0, timeout);
#endif
	if (!buf) {
		return NULL;
	}

	net_buf_simple_init_with_data(&buf->b, data, size);
	buf->flags = NET_BUF_EXTERNAL_DATA;

	return buf;
}
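
/*
 * Hedged sketch of wrapping externally owned memory (array and pool names
 * are hypothetical): the resulting buffer references the caller's storage
 * and is marked NET_BUF_EXTERNAL_DATA, so the pool never frees it.
 *
 *	static uint8_t frame[64];
 *
 *	struct net_buf *buf = net_buf_alloc_with_data(&rx_pool, frame,
 *						      sizeof(frame), K_NO_WAIT);
 */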

static struct k_spinlock net_buf_slist_lock;

void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(list);
	__ASSERT_NO_MSG(buf);

	key = k_spin_lock(&net_buf_slist_lock);
	sys_slist_append(list, &buf->node);
	k_spin_unlock(&net_buf_slist_lock, key);
}

struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
	struct net_buf *buf;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(list);

	key = k_spin_lock(&net_buf_slist_lock);

	buf = (void *)sys_slist_get(list);

	k_spin_unlock(&net_buf_slist_lock, key);

	return buf;
}
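
/*
 * Hedged sketch of queueing buffers on a sys_slist_t (list name is
 * hypothetical): put appends to the tail and get pops the head, so the
 * list behaves as a FIFO guarded by the single global spinlock above.
 *
 *	static sys_slist_t tx_queue = SYS_SLIST_STATIC_INIT(&tx_queue);
 *
 *	net_buf_slist_put(&tx_queue, buf);
 *	...
 *	struct net_buf *next = net_buf_slist_get(&tx_queue);
 */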

#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
	__ASSERT_NO_MSG(buf);

	while (buf) {
		struct net_buf *frags = buf->frags;
		struct net_buf_pool *pool;

		__ASSERT(buf->ref, "buf %p double free", buf);
		if (!buf->ref) {
#if defined(CONFIG_NET_BUF_LOG)
			NET_BUF_ERR("%s():%d: buf %p double free", func, line,
				    buf);
#endif
			return;
		}
		NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
			    buf->pool_id, buf->frags);

		if (--buf->ref > 0) {
			return;
		}

		buf->data = NULL;
		buf->frags = NULL;

		pool = net_buf_pool_get(buf->pool_id);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
		atomic_inc(&pool->avail_count);
		__ASSERT_NO_MSG(atomic_get(&pool->avail_count) <= pool->buf_count);
#endif

		if (pool->destroy) {
			pool->destroy(buf);
		} else {
			net_buf_destroy(buf);
		}

		buf = frags;
	}
}

struct net_buf *net_buf_ref(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf);

	NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
		    buf, buf->ref, buf->pool_id);
	buf->ref++;
	return buf;
}

struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout)
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	struct net_buf_pool *pool;
	struct net_buf *clone;

	__ASSERT_NO_MSG(buf);

	pool = net_buf_pool_get(buf->pool_id);

	clone = net_buf_alloc_len(pool, 0, timeout);
	if (!clone) {
		return NULL;
	}

	/* If the pool supports data referencing use that. Otherwise
	 * we need to allocate new data and make a copy.
	 */
	if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) {
		clone->__buf = buf->__buf ? data_ref(buf, buf->__buf) : NULL;
		clone->data = buf->data;
		clone->len = buf->len;
		clone->size = buf->size;
	} else {
		size_t size = buf->size;

		timeout = sys_timepoint_timeout(end);

		clone->__buf = data_alloc(clone, &size, timeout);
		if (!clone->__buf || size < buf->size) {
			net_buf_destroy(clone);
			return NULL;
		}

		clone->size = size;
		clone->data = clone->__buf + net_buf_headroom(buf);
		net_buf_add_mem(clone, buf->data, buf->len);
	}

	/* user_data_size should be the same for buffers from the same pool */
	__ASSERT(buf->user_data_size == clone->user_data_size, "Unexpected user data size");

	memcpy(clone->user_data, buf->user_data, clone->user_data_size);

	return clone;
}
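
/*
 * Hedged illustration of the two clone paths above: with a ref-counting
 * pool the clone shares the original data area, while external-data and
 * non-ref pools get a fresh allocation plus a memcpy. Names hypothetical.
 *
 *	struct net_buf *dup = net_buf_clone(orig, K_NO_WAIT);
 *
 *	if (dup) {
 *		// dup->data may alias orig->data; treat the bytes as
 *		// read-only unless the pool deep-copied them.
 *	}
 */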

int net_buf_user_data_copy(struct net_buf *dst, const struct net_buf *src)
{
	__ASSERT_NO_MSG(dst);
	__ASSERT_NO_MSG(src);

	if (dst == src) {
		return 0;
	}

	if (dst->user_data_size < src->user_data_size) {
		return -EINVAL;
	}

	memcpy(dst->user_data, src->user_data, src->user_data_size);

	return 0;
}

struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf);

	while (buf->frags) {
		buf = buf->frags;
	}

	return buf;
}

void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
	__ASSERT_NO_MSG(parent);
	__ASSERT_NO_MSG(frag);

	if (parent->frags) {
		net_buf_frag_last(frag)->frags = parent->frags;
	}
	/* Take ownership of the fragment reference */
	parent->frags = frag;
}

struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
	__ASSERT_NO_MSG(frag);

	if (!head) {
		return net_buf_ref(frag);
	}

	net_buf_frag_insert(net_buf_frag_last(head), frag);

	return head;
}
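
/*
 * Hedged sketch of building a fragment chain (variable names are
 * hypothetical): net_buf_frag_add() appends at the tail of the chain.
 *
 *	struct net_buf *head = NULL;
 *
 *	head = net_buf_frag_add(head, frag1);
 *	head = net_buf_frag_add(head, frag2);	// appended after frag1
 *	...
 *	net_buf_unref(head);	// walks the chain as references hit zero
 */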

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
				       struct net_buf *frag,
				       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
	struct net_buf *next_frag;

	__ASSERT_NO_MSG(frag);

	if (parent) {
		__ASSERT_NO_MSG(parent->frags);
		__ASSERT_NO_MSG(parent->frags == frag);
		parent->frags = frag->frags;
	}

	next_frag = frag->frags;

	frag->frags = NULL;

#if defined(CONFIG_NET_BUF_LOG)
	net_buf_unref_debug(frag, func, line);
#else
	net_buf_unref(frag);
#endif

	return next_frag;
}

size_t net_buf_linearize(void *dst, size_t dst_len, const struct net_buf *src,
			 size_t offset, size_t len)
{
	const struct net_buf *frag;
	size_t to_copy;
	size_t copied;

	len = MIN(len, dst_len);

	frag = src;

	/* find the right fragment to start copying from */
	while (frag && offset >= frag->len) {
		offset -= frag->len;
		frag = frag->frags;
	}

	/* traverse the fragment chain until len bytes are copied */
	copied = 0;
	while (frag && len > 0) {
		to_copy = MIN(len, frag->len - offset);
		memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);

		copied += to_copy;

		/* to_copy is always <= len */
		len -= to_copy;
		frag = frag->frags;

		/* after the first iteration, this value will be 0 */
		offset = 0;
	}

	return copied;
}
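
/*
 * Hedged usage sketch (buffer and sizes are hypothetical): copy up to 128
 * bytes starting at offset 4 of a possibly fragmented buffer into flat
 * storage; the return value is the number of bytes actually copied.
 *
 *	uint8_t flat[128];
 *	size_t n = net_buf_linearize(flat, sizeof(flat), buf, 4, 128);
 */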

/* This helper routine appends multiple bytes. If there is no room for the
 * data in the current fragment, a new fragment is allocated and added to
 * the buffer. It assumes that the buffer has at least one fragment.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
			    const void *value, k_timeout_t timeout,
			    net_buf_allocator_cb allocate_cb, void *user_data)
{
	struct net_buf *frag = net_buf_frag_last(buf);
	size_t added_len = 0;
	const uint8_t *value8 = value;
	size_t max_size;

	do {
		uint16_t count = MIN(len, net_buf_tailroom(frag));

		net_buf_add_mem(frag, value8, count);
		len -= count;
		added_len += count;
		value8 += count;

		if (len == 0) {
			return added_len;
		}

		if (allocate_cb) {
			frag = allocate_cb(timeout, user_data);
		} else {
			struct net_buf_pool *pool;

			/* Allocate from the original pool if no callback has
			 * been provided.
			 */
			pool = net_buf_pool_get(buf->pool_id);
			max_size = pool->alloc->max_alloc_size;
			frag = net_buf_alloc_len(pool,
						 max_size ? MIN(len, max_size) : len,
						 timeout);
		}

		if (!frag) {
			return added_len;
		}

		net_buf_frag_add(buf, frag);
	} while (1);

	/* Unreachable */
	return 0;
}
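
/*
 * Hedged usage sketch (names are hypothetical): append a payload that may
 * be larger than the tailroom of the last fragment; a return value shorter
 * than the requested length means allocating a further fragment failed.
 *
 *	size_t written = net_buf_append_bytes(buf, sizeof(payload), payload,
 *					      K_NO_WAIT, NULL, NULL);
 *
 *	if (written < sizeof(payload)) {
 *		// out of buffers: handle the partial append
 *	}
 */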

size_t net_buf_data_match(const struct net_buf *buf, size_t offset, const void *data, size_t len)
{
	const uint8_t *dptr = data;
	const uint8_t *bptr;
	size_t compared = 0;
	size_t to_compare;

	if (!buf || !data) {
		return compared;
	}

	/* find the right fragment to start comparison */
	while (buf && offset >= buf->len) {
		offset -= buf->len;
		buf = buf->frags;
	}

	while (buf && len > 0) {
		bptr = buf->data + offset;
		to_compare = MIN(len, buf->len - offset);

		for (size_t i = 0; i < to_compare; ++i) {
			if (dptr[compared] != bptr[i]) {
				return compared;
			}
			compared++;
		}

		len -= to_compare;
		buf = buf->frags;
		offset = 0;
	}

	return compared;
}
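
/*
 * Hedged usage sketch (pattern and buffer are hypothetical): compare a flat
 * byte pattern against a possibly fragmented buffer starting at offset 0; a
 * return value equal to sizeof(pattern) means every byte matched.
 *
 *	static const uint8_t pattern[] = { 0xde, 0xad, 0xbe, 0xef };
 *
 *	if (net_buf_data_match(buf, 0, pattern, sizeof(pattern)) ==
 *	    sizeof(pattern)) {
 *		// prefix matches
 *	}
 */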