1 /**
2  * @file
3  * Packet buffer management
4  */
5 
6 /**
7  * @defgroup pbuf Packet buffers (PBUF)
8  * @ingroup infrastructure
9  *
10  * Packets are built from the pbuf data structure. It supports dynamic
11  * memory allocation for packet contents or can reference externally
12  * managed packet contents both in RAM and ROM. Quick allocation for
13  * incoming packets is provided through pools with fixed-size pbufs.
14  *
15  * A packet may span over multiple pbufs, chained as a singly linked
16  * list. This is called a "pbuf chain".
17  *
18  * Multiple packets may be queued, also using this singly linked list.
19  * This is called a "packet queue".
20  *
21  * So, a packet queue consists of one or more pbuf chains, each of
22  * which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
23  * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
24  *
25  * The difference between a pbuf chain and a packet queue is subtle
26  * but precise.
27  *
28  * The last pbuf of a packet has a ->tot_len field that equals the
29  * ->len field. It can be found by traversing the list. If the last
30  * pbuf of a packet has a ->next field other than NULL, more packets
31  * are on the queue.
32  *
33  * Therefore, when looping through the pbufs of a single packet, the
34  * loop end condition is (tot_len == p->len), NOT (next == NULL).
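 *
 * As a minimal sketch (not part of the pbuf API), walking the pbufs of a
 * single packet, given some already-received struct pbuf *p, looks like:
  @code{.c}
struct pbuf *q;
u16_t total = 0;
for (q = p; q != NULL; q = q->next) {
  total += q->len;               // accumulate per-pbuf lengths
  if (q->tot_len == q->len) {
    break;                       // last pbuf of this packet reached
  }
}
// total now equals p->tot_len
  @endcode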
35  *
36  * Example of custom pbuf usage for zero-copy RX:
37   @code{.c}
38 typedef struct my_custom_pbuf
39 {
40    struct pbuf_custom p;
41    void* dma_descriptor;
42 } my_custom_pbuf_t;
43 
44 LWIP_MEMPOOL_DECLARE(RX_POOL, 10, sizeof(my_custom_pbuf_t), "Zero-copy RX PBUF pool");
45 
46 void my_pbuf_free_custom(void* p)
47 {
48   my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)p;
49 
50   LOCK_INTERRUPTS();
51   free_rx_dma_descriptor(my_pbuf->dma_descriptor);
52   LWIP_MEMPOOL_FREE(RX_POOL, my_pbuf);
53   UNLOCK_INTERRUPTS();
54 }
55 
56 void eth_rx_irq()
57 {
58   dma_descriptor*   dma_desc = get_RX_DMA_descriptor_from_ethernet();
59   my_custom_pbuf_t* my_pbuf  = (my_custom_pbuf_t*)LWIP_MEMPOOL_ALLOC(RX_POOL);
60 
61   my_pbuf->p.custom_free_function = my_pbuf_free_custom;
62   my_pbuf->dma_descriptor         = dma_desc;
63 
64   invalidate_cpu_cache(dma_desc->rx_data, dma_desc->rx_length);
65 
66   struct pbuf* p = pbuf_alloced_custom(PBUF_RAW,
67      dma_desc->rx_length,
68      PBUF_REF,
69      &my_pbuf->p,
70      dma_desc->rx_data,
71      dma_desc->max_buffer_size);
72 
73   if(netif->input(p, netif) != ERR_OK) {
74     pbuf_free(p);
75   }
76 }
77   @endcode
78  */
79 
80 /*
81  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
82  * All rights reserved.
83  *
84  * Redistribution and use in source and binary forms, with or without modification,
85  * are permitted provided that the following conditions are met:
86  *
87  * 1. Redistributions of source code must retain the above copyright notice,
88  *    this list of conditions and the following disclaimer.
89  * 2. Redistributions in binary form must reproduce the above copyright notice,
90  *    this list of conditions and the following disclaimer in the documentation
91  *    and/or other materials provided with the distribution.
92  * 3. The name of the author may not be used to endorse or promote products
93  *    derived from this software without specific prior written permission.
94  *
95  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
96  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
97  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
98  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
99  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
100  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
101  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
102  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
103  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
104  * OF SUCH DAMAGE.
105  *
106  * This file is part of the lwIP TCP/IP stack.
107  *
108  * Author: Adam Dunkels <adam@sics.se>
109  *
110  */
111 
112 #include "lwip/opt.h"
113 
114 #include "lwip/stats.h"
115 #include "lwip/def.h"
116 #include "lwip/mem.h"
117 #include "lwip/memp.h"
118 #include "lwip/pbuf.h"
119 #include "lwip/sys.h"
120 #if LWIP_TCP && TCP_QUEUE_OOSEQ
121 #include "lwip/priv/tcp_priv.h"
122 #endif
123 #if LWIP_CHECKSUM_ON_COPY
124 #include "lwip/inet_chksum.h"
125 #endif
126 
127 #include <string.h>
128 
129 #define SIZEOF_STRUCT_PBUF        LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
130 /* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
131    aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
132 #if LWIP_XR_EXT_MBUF_SUPPORT
133 /* The real size of the pool buffer is not equal to PBUF_POOL_BUFSIZE */
134 #define PBUF_POOL_BUFSIZE_ALIGNED (LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) + LWIP_XR_EXT_MBUF_HEAD_SPACE + LWIP_XR_EXT_MBUF_TAIL_SPACE)
135 #if LWIP_XR_EXT_PBUF_POOL_SMALL
136 #define PBUF_POOL_SMALL_BUFSIZE_ALIGNED (LWIP_MEM_ALIGN_SIZE(LWIP_XR_EXT_PBUF_POOL_SMALL_BUFSIZE) + LWIP_XR_EXT_MBUF_HEAD_SPACE + LWIP_XR_EXT_MBUF_TAIL_SPACE)
137 #endif /* LWIP_XR_EXT_PBUF_POOL_SMALL */
138 #else /* (LWIP_XR_EXT_MBUF_SUPPORT) */
139 #define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)
140 #endif
141 
142 #if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
143 #define PBUF_POOL_IS_EMPTY()
144 #else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
145 
146 #if !NO_SYS
147 #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
148 #include "lwip/tcpip.h"
149 #define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL()  do { \
150   if (tcpip_callback_with_block(pbuf_free_ooseq_callback, NULL, 0) != ERR_OK) { \
151       SYS_ARCH_PROTECT(old_level); \
152       pbuf_free_ooseq_pending = 0; \
153       SYS_ARCH_UNPROTECT(old_level); \
154   } } while(0)
155 #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
156 #endif /* !NO_SYS */
157 
158 volatile u8_t pbuf_free_ooseq_pending;
159 #define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()
160 
161 /**
162  * Attempt to reclaim some memory from queued out-of-sequence TCP segments
163  * if we run out of pool pbufs. It's better to give priority to new packets
164  * if we're running out.
165  *
166  * This must be done in the correct thread context, therefore this function
167  * can only be used with NO_SYS=0 and through tcpip_callback.
168  */
169 #if !NO_SYS
170 static
171 #endif /* !NO_SYS */
172 void
173 pbuf_free_ooseq(void)
174 {
175   struct tcp_pcb* pcb;
176   SYS_ARCH_SET(pbuf_free_ooseq_pending, 0);
177 
178   for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
179     if (NULL != pcb->ooseq) {
180       /** Free the ooseq pbufs of one PCB only */
181       LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
182       tcp_segs_free(pcb->ooseq);
183       pcb->ooseq = NULL;
184       return;
185     }
186   }
187 }
188 
189 #if !NO_SYS
190 /**
191  * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
192  */
193 static void
194 pbuf_free_ooseq_callback(void *arg)
195 {
196   LWIP_UNUSED_ARG(arg);
197   pbuf_free_ooseq();
198 }
199 #endif /* !NO_SYS */
200 
201 /** Queue a call to pbuf_free_ooseq if not already queued. */
202 static void
203 pbuf_pool_is_empty(void)
204 {
205 #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
206   SYS_ARCH_SET(pbuf_free_ooseq_pending, 1);
207 #else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
208   u8_t queued;
209   SYS_ARCH_DECL_PROTECT(old_level);
210   SYS_ARCH_PROTECT(old_level);
211   queued = pbuf_free_ooseq_pending;
212   pbuf_free_ooseq_pending = 1;
213   SYS_ARCH_UNPROTECT(old_level);
214 
215   if (!queued) {
216     /* queue a call to pbuf_free_ooseq if not already queued */
217     PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
218   }
219 #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
220 }
221 #endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
222 
223 /**
224  * @ingroup pbuf
225  * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
226  *
227  * The actual memory allocated for the pbuf is determined by the
228  * layer at which the pbuf is allocated and the requested size
229  * (from the size parameter).
230  *
231  * @param layer flag to define header size
232  * @param length size of the pbuf's payload
233  * @param type this parameter decides how and where the pbuf
234  * should be allocated as follows:
235  *
236  * - PBUF_RAM: buffer memory for pbuf is allocated as one large
237  *             chunk. This includes protocol headers as well.
238  * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
239  *             protocol headers. Additional headers must be prepended
240  *             by allocating another pbuf and chain in to the front of
241  *             the ROM pbuf. It is assumed that the memory used is really
242  *             similar to ROM in that it is immutable and will not be
243  *             changed. Memory which is dynamic should generally not
244  *             be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
245  * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
246  *             protocol headers. It is assumed that the pbuf is only
247  *             being used in a single thread. If the pbuf gets queued,
248  *             then pbuf_take should be called to copy the buffer.
249  * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
250  *              the pbuf pool that is allocated during pbuf_init().
251  *
252  * @return the allocated pbuf. If multiple pbufs were allocated, this
253  * is the first pbuf of a pbuf chain.
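 *
 * A minimal usage sketch (error handling beyond the NULL check omitted):
 * allocate a contiguous 100-byte payload at the transport layer, fill it,
 * and release it again.
  @code{.c}
struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 100, PBUF_RAM);
if (p != NULL) {
  memset(p->payload, 0, p->len);  // PBUF_RAM payload is one contiguous block
  // ... hand the pbuf to the stack or a netif here ...
  pbuf_free(p);                   // drop our reference when done
}
  @endcode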
254  */
255 #if (LWIP_XR_EXT_MBUF_SUPPORT && LWIP_XR_EXT_PBUF_POOL_SMALL)
256 struct pbuf *
257 pbuf_alloc_ext(pbuf_layer layer, u16_t length, pbuf_type type, u8_t pbuf_pool_small)
258 #else /* (LWIP_XR_EXT_MBUF_SUPPORT && LWIP_XR_EXT_PBUF_POOL_SMALL) */
259 struct pbuf *
260 pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
261 #endif /* (LWIP_XR_EXT_MBUF_SUPPORT && LWIP_XR_EXT_PBUF_POOL_SMALL) */
262 {
263   struct pbuf *p, *q, *r;
264   u16_t offset;
265 #if LWIP_XR_EXT_MBUF_SUPPORT
266   u8_t tail_space = 0;
267 #if LWIP_XR_EXT_PBUF_POOL_SMALL
268   u16_t pbuf_pool_bufsize_aligned;
269 #endif /* LWIP_XR_EXT_PBUF_POOL_SMALL */
270 #endif /* LWIP_XR_EXT_MBUF_SUPPORT */
271   s32_t rem_len; /* remaining length */
272   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));
273 
274   /* determine header offset */
275   switch (layer) {
276   case PBUF_TRANSPORT:
277     /* add room for transport (often TCP) layer header */
278     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
279     break;
280   case PBUF_IP:
281     /* add room for IP layer header */
282     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
283     break;
284   case PBUF_LINK:
285     /* add room for link layer header */
286     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
287     break;
288   case PBUF_RAW_TX:
289     /* add room for encapsulating link layer headers (e.g. 802.11) */
290     offset = PBUF_LINK_ENCAPSULATION_HLEN;
291     break;
292   case PBUF_RAW:
293 #if LWIP_XR_EXT_MBUF_SUPPORT
294   case PBUF_MBUF_RAW:
295 #endif
296     /* no offset (e.g. RX buffers or chain successors) */
297     offset = 0;
298     break;
299   default:
300     LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
301     return NULL;
302   }
303 
304 #if LWIP_XR_EXT_MBUF_SUPPORT
305   /**
306    * Reserve head and tail space if necessary.
307    *  Note: PBUF_POOL pbufs have a fixed size that includes head and tail space, so always reserve it.
308    */
309   if ((type == PBUF_POOL) ||
310       ((type == PBUF_RAM) && (layer != PBUF_MBUF_RAW) &&
311        (length + offset > LWIP_XR_EXT_MBUF_HEAD_SPACE + LWIP_XR_EXT_MBUF_TAIL_SPACE))) {
312     offset += LWIP_XR_EXT_MBUF_HEAD_SPACE;
313     tail_space = LWIP_XR_EXT_MBUF_TAIL_SPACE;
314   }
315 #endif
316 
317   switch (type) {
318   case PBUF_POOL:
319     /* allocate head of pbuf chain into p */
320 #if (LWIP_XR_EXT_MBUF_SUPPORT && LWIP_XR_EXT_PBUF_POOL_SMALL)
321     p = (struct pbuf *)memp_malloc(pbuf_pool_small ? MEMP_PBUF_POOL_SMALL : MEMP_PBUF_POOL);
322 #else /* (LWIP_XR_EXT_MBUF_SUPPORT && LWIP_XR_EXT_PBUF_POOL_SMALL) */
323     p = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
324 #endif
325     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
326     if (p == NULL) {
327       PBUF_POOL_IS_EMPTY();
328       return NULL;
329     }
330     p->type = type;
331     p->next = NULL;
332 
333     /* make the payload pointer point 'offset' bytes into pbuf data memory */
334     p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset)));
335     LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
336             ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
337     /* the total length of the pbuf chain is the requested size */
338     p->tot_len = length;
339     /* set the length of the first pbuf in the chain */
340 #if LWIP_XR_EXT_MBUF_SUPPORT
341     /* reserve tail space in the first pbuf, because an mbuf contains only one pbuf */
342 #if LWIP_XR_EXT_PBUF_POOL_SMALL
343     if (pbuf_pool_small) {
344       pbuf_pool_bufsize_aligned = PBUF_POOL_SMALL_BUFSIZE_ALIGNED;
345       p->mb_flags |= PBUF_FLAG_POOL_SMALL;
346     } else {
347       pbuf_pool_bufsize_aligned = PBUF_POOL_BUFSIZE_ALIGNED;
348       p->mb_flags &= ~PBUF_FLAG_POOL_SMALL;
349     }
350     p->len = LWIP_MIN(length, pbuf_pool_bufsize_aligned - LWIP_MEM_ALIGN_SIZE(offset) - tail_space);
351     LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
352                 ((u8_t*)p->payload + p->len <=
353                  (u8_t*)p + SIZEOF_STRUCT_PBUF + pbuf_pool_bufsize_aligned));
354     LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
355       (pbuf_pool_bufsize_aligned - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
356 #else /* LWIP_XR_EXT_PBUF_POOL_SMALL */
357     p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset) - tail_space);
358     LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
359                 ((u8_t*)p->payload + p->len <=
360                  (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
361     LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
362       (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
363 #endif /* LWIP_XR_EXT_PBUF_POOL_SMALL */
364     LWIP_ASSERT("pbuf_alloc: bad pbuf length", p->len == length);
365 #else /* LWIP_XR_EXT_MBUF_SUPPORT */
366     p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset));
367     LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
368                 ((u8_t*)p->payload + p->len <=
369                  (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
370     LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
371       (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
372 #endif
373     /* set reference count (needed here in case we fail) */
374     p->ref = 1;
375 
376     /* now allocate the tail of the pbuf chain */
377 
378     /* remember first pbuf for linkage in next iteration */
379     r = p;
380     /* remaining length to be allocated */
381     rem_len = length - p->len;
382     /* any remaining pbufs to be allocated? */
383     while (rem_len > 0) {
384       q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
385       if (q == NULL) {
386         PBUF_POOL_IS_EMPTY();
387         /* free chain so far allocated */
388         pbuf_free(p);
389         /* bail out unsuccessfully */
390         return NULL;
391       }
392       q->type = type;
393       q->flags = 0;
394       q->next = NULL;
395       /* make previous pbuf point to this pbuf */
396       r->next = q;
397       /* set total length of this pbuf and next in chain */
398       LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff);
399       q->tot_len = (u16_t)rem_len;
400       /* this pbuf length is pool size, unless smaller sized tail */
401       q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED);
402       q->payload = (void *)((u8_t *)q + SIZEOF_STRUCT_PBUF);
403       LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
404               ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
405       LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
406                   ((u8_t*)p->payload + p->len <=
407                    (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
408       q->ref = 1;
409       /* calculate remaining length to be allocated */
410       rem_len -= q->len;
411       /* remember this pbuf for linkage in next iteration */
412       r = q;
413     }
414     /* end of chain */
415     /*r->next = NULL;*/
416 
417     break;
418   case PBUF_RAM:
419     /* If pbuf is to be allocated in RAM, allocate memory for it. */
420 #if LWIP_XR_EXT_MBUF_SUPPORT
421     p = (struct pbuf*)mem_malloc(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length) + tail_space);
422 #else
423     p = (struct pbuf*)mem_malloc(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length));
424 #endif
425     if (p == NULL) {
426       return NULL;
427     }
428     /* Set up internal structure of the pbuf. */
429     p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset));
430     p->len = p->tot_len = length;
431     p->next = NULL;
432     p->type = type;
433 
434     LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
435            ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
436     break;
437   /* pbuf references existing (non-volatile static constant) ROM payload? */
438   case PBUF_ROM:
439   /* pbuf references existing (externally allocated) RAM payload? */
440   case PBUF_REF:
441     /* only allocate memory for the pbuf structure */
442     p = (struct pbuf *)memp_malloc(MEMP_PBUF);
443     if (p == NULL) {
444       LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
445                   ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n",
446                   (type == PBUF_ROM) ? "ROM" : "REF"));
447       return NULL;
448     }
449     /* caller must set this field properly, afterwards */
450     p->payload = NULL;
451     p->len = p->tot_len = length;
452     p->next = NULL;
453     p->type = type;
454     break;
455   default:
456     LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
457     return NULL;
458   }
459   /* set reference count */
460   p->ref = 1;
461   /* set flags */
462   p->flags = 0;
463 #if LWIP_XR_EXT_MBUF_SUPPORT
464   p->mb_flags = (tail_space > 0) ? PBUF_FLAG_MBUF_SPACE : 0;
465 #if LWIP_XR_EXT_PBUF_POOL_SMALL
466   if (pbuf_pool_small)
467     p->mb_flags |= PBUF_FLAG_POOL_SMALL;
468 #endif /* LWIP_XR_EXT_PBUF_POOL_SMALL */
469 #endif /* LWIP_XR_EXT_MBUF_SUPPORT */
470   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
471   return p;
472 }
473 
474 #if LWIP_SUPPORT_CUSTOM_PBUF
475 /**
476  * @ingroup pbuf
477  * Initialize a custom pbuf (already allocated).
478  *
479  * @param l flag to define header size
480  * @param length size of the pbuf's payload
481  * @param type type of the pbuf (only used to treat the pbuf accordingly, as
482  *        this function allocates no memory)
483  * @param p pointer to the custom pbuf to initialize (already allocated)
484  * @param payload_mem pointer to the buffer that is used for payload and headers,
485  *        must be at least big enough to hold 'length' plus the header size,
486  *        may be NULL if set later.
487  *        ATTENTION: The caller is responsible for correct alignment of this buffer!!
488  * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
489  *        big enough to hold 'length' plus the header size
490  */
491 struct pbuf*
492 pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
493                     void *payload_mem, u16_t payload_mem_len)
494 {
495   u16_t offset;
496   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
497 
498   /* determine header offset */
499   switch (l) {
500   case PBUF_TRANSPORT:
501     /* add room for transport (often TCP) layer header */
502     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
503     break;
504   case PBUF_IP:
505     /* add room for IP layer header */
506     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
507     break;
508   case PBUF_LINK:
509     /* add room for link layer header */
510     offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
511     break;
512   case PBUF_RAW_TX:
513     /* add room for encapsulating link layer headers (e.g. 802.11) */
514     offset = PBUF_LINK_ENCAPSULATION_HLEN;
515     break;
516   case PBUF_RAW:
517     offset = 0;
518     break;
519   default:
520     LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0);
521     return NULL;
522   }
523 
524   if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
525     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
526     return NULL;
527   }
528 
529   p->pbuf.next = NULL;
530   if (payload_mem != NULL) {
531     p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
532   } else {
533     p->pbuf.payload = NULL;
534   }
535   p->pbuf.flags = PBUF_FLAG_IS_CUSTOM;
536   p->pbuf.len = p->pbuf.tot_len = length;
537   p->pbuf.type = type;
538   p->pbuf.ref = 1;
539   return &p->pbuf;
540 }
541 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
542 
543 /**
544  * @ingroup pbuf
545  * Shrink a pbuf chain to a desired length.
546  *
547  * @param p pbuf to shrink.
548  * @param new_len desired new length of pbuf chain
549  *
550  * Depending on the desired length, the first few pbufs in a chain might
551  * be skipped and left unchanged. The new last pbuf in the chain will be
552  * resized, and any remaining pbufs will be freed.
553  *
554  * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
555  * @note May not be called on a packet queue.
556  *
557  * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
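 *
 * A minimal sketch with hypothetical sizes: shrink a 1500-byte chain down
 * to the 60 bytes that were actually used.
  @code{.c}
struct pbuf *p = pbuf_alloc(PBUF_RAW, 1500, PBUF_POOL);
if (p != NULL) {
  // ... suppose only the first 60 bytes get filled in ...
  pbuf_realloc(p, 60);  // p->tot_len becomes 60, surplus pbufs are freed
}
  @endcode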
558  */
559 void
560 pbuf_realloc(struct pbuf *p, u16_t new_len)
561 {
562   struct pbuf *q;
563   u16_t rem_len; /* remaining length */
564   s32_t grow;
565 
566   LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
567   LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL ||
568               p->type == PBUF_ROM ||
569               p->type == PBUF_RAM ||
570               p->type == PBUF_REF);
571 
572   /* desired length larger than current length? */
573   if (new_len >= p->tot_len) {
574     /* enlarging not yet supported */
575     return;
576   }
577 
578   /* the pbuf chain's total length changes by (new_len - p->tot_len) bytes
579    * (always negative here, since enlarging was rejected above) */
580   grow = new_len - p->tot_len;
581 
582   /* first, step over any pbufs that should remain in the chain */
583   rem_len = new_len;
584   q = p;
585   /* should this pbuf be kept? */
586   while (rem_len > q->len) {
587     /* decrease remaining length by pbuf length */
588     rem_len -= q->len;
589     /* decrease total length indicator */
590     LWIP_ASSERT("grow < max_u16_t", grow < 0xffff);
591     q->tot_len += (u16_t)grow;
592     /* proceed to next pbuf in chain */
593     q = q->next;
594     LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
595   }
596   /* we have now reached the new last pbuf (in q) */
597   /* rem_len == desired length for pbuf q */
598 
599   /* shrink allocated memory for PBUF_RAM */
600   /* (other types merely adjust their length fields) */
601   if ((q->type == PBUF_RAM) && (rem_len != q->len)
602 #if LWIP_SUPPORT_CUSTOM_PBUF
603       && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
604 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
605      ) {
606     /* reallocate and adjust the length of the pbuf that will be split */
607 #if LWIP_XR_EXT_MBUF_SUPPORT
608     /* reserve tail space if (q->mb_flags & PBUF_FLAG_MBUF_SPACE) */
609     u8_t tail_space = (q->mb_flags & PBUF_FLAG_MBUF_SPACE) ? LWIP_XR_EXT_MBUF_TAIL_SPACE : 0;
610     q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len + tail_space);
611 #else /* LWIP_XR_EXT_MBUF_SUPPORT */
612     q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len);
613 #endif /* LWIP_XR_EXT_MBUF_SUPPORT */
614     LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
615   }
616   /* adjust length fields for new last pbuf */
617   q->len = rem_len;
618   q->tot_len = q->len;
619 
620   /* any remaining pbufs in chain? */
621   if (q->next != NULL) {
622     /* free remaining pbufs in chain */
623     pbuf_free(q->next);
624   }
625   /* q is last packet in chain */
626   q->next = NULL;
627 
628 }
629 
630 #if LWIP_XR_EXT_MBUF_SUPPORT
631 /**
632  * Count the empty space at the head of pbuf
633  *
634  * @param p pbuf to count
635  * @return the number of empty bytes at the head of pbuf
636  */
637 s32_t
638 pbuf_head_space(struct pbuf *p)
639 {
640   LWIP_ASSERT("p != NULL", p != NULL);
641 
642   u16_t type = p->type;
643   if (type == PBUF_RAM || type == PBUF_POOL)
644     return ((u8_t *)p->payload - ((u8_t *)p + SIZEOF_STRUCT_PBUF));
645 
646   return 0;
647 }
648 #endif /* LWIP_XR_EXT_MBUF_SUPPORT */
649 
650 /**
651  * Adjusts the payload pointer to hide or reveal headers in the payload.
652  * @see pbuf_header.
653  *
654  * @param p pbuf to change the header size.
655  * @param header_size_increment Number of bytes to increment header size.
656  * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types
657  *
658  * @return non-zero on failure, zero on success.
659  *
660  */
661 static u8_t
662 pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force)
663 {
664   u16_t type;
665   void *payload;
666   u16_t increment_magnitude;
667 
668   LWIP_ASSERT("p != NULL", p != NULL);
669   if ((header_size_increment == 0) || (p == NULL)) {
670     return 0;
671   }
672 
673   if (header_size_increment < 0) {
674     increment_magnitude = -header_size_increment;
675     /* Check that we aren't going to move off the end of the pbuf */
676     LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;);
677   } else {
678     increment_magnitude = header_size_increment;
679 #if 0
680     /* Can't assert these as some callers speculatively call
681          pbuf_header() to see if it's OK.  Will return 1 below instead. */
682     /* Check that we've got the correct type of pbuf to work with */
683     LWIP_ASSERT("p->type == PBUF_RAM || p->type == PBUF_POOL",
684                 p->type == PBUF_RAM || p->type == PBUF_POOL);
685     /* Check that we aren't going to move off the beginning of the pbuf */
686     LWIP_ASSERT("p->payload - increment_magnitude >= p + SIZEOF_STRUCT_PBUF",
687                 (u8_t *)p->payload - increment_magnitude >= (u8_t *)p + SIZEOF_STRUCT_PBUF);
688 #endif
689   }
690 
691   type = p->type;
692   /* remember current payload pointer */
693   payload = p->payload;
694 
695   /* pbuf types containing payloads? */
696   if (type == PBUF_RAM || type == PBUF_POOL) {
697     /* set new payload pointer */
698     p->payload = (u8_t *)p->payload - header_size_increment;
699     /* boundary check fails? */
700     if ((u8_t *)p->payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) {
701       LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE,
702         ("pbuf_header: failed as %p < %p (not enough space for new header size)\n",
703         (void *)p->payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF)));
704       /* restore old payload pointer */
705       p->payload = payload;
706       /* bail out unsuccessfully */
707       return 1;
708     }
709   /* pbuf types referring to external payloads? */
710   } else if (type == PBUF_REF || type == PBUF_ROM) {
711     /* hide a header in the payload? */
712     if ((header_size_increment < 0) && (increment_magnitude <= p->len)) {
713       /* increase payload pointer */
714       p->payload = (u8_t *)p->payload - header_size_increment;
715     } else if ((header_size_increment > 0) && force) {
716       p->payload = (u8_t *)p->payload - header_size_increment;
717     } else {
718       /* cannot expand payload to front (yet!)
719        * bail out unsuccessfully */
720       return 1;
721     }
722   } else {
723     /* Unknown type */
724     LWIP_ASSERT("bad pbuf type", 0);
725     return 1;
726   }
727   /* modify pbuf length fields */
728   p->len += header_size_increment;
729   p->tot_len += header_size_increment;
730 
731   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_header: old %p new %p (%"S16_F")\n",
732     (void *)payload, (void *)p->payload, header_size_increment));
733 
734   return 0;
735 }
736 
737 /**
738  * Adjusts the payload pointer to hide or reveal headers in the payload.
739  *
740  * Adjusts the ->payload pointer so that space for a header
741  * (dis)appears in the pbuf payload.
742  *
743  * The ->payload, ->tot_len and ->len fields are adjusted.
744  *
745  * @param p pbuf to change the header size.
746  * @param header_size_increment Number of bytes to increment header size which
747  * increases the size of the pbuf. New space is on the front.
748  * (Using a negative value decreases the header size.)
749  * If header_size_increment is 0, this function does nothing and returns successfully.
750  *
751  * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
752  * the call will fail. A check is made that the increase in header size does
753  * not move the payload pointer in front of the start of the buffer.
754  * @return non-zero on failure, zero on success.
755  *
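 * A minimal sketch: reveal room for a hypothetical 8-byte header in front
 * of the current payload of pbuf p, then hide it again (fill_in_my_header
 * is an assumed helper, not part of lwIP).
  @code{.c}
if (pbuf_header(p, 8) == 0) {
  // p->payload moved 8 bytes towards the start of the buffer;
  // p->len and p->tot_len both grew by 8
  fill_in_my_header(p->payload);  // hypothetical helper
  pbuf_header(p, -8);             // hide those 8 bytes again
}
  @endcode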
756  */
757 u8_t
758 pbuf_header(struct pbuf *p, s16_t header_size_increment)
759 {
760    return pbuf_header_impl(p, header_size_increment, 0);
761 }
762 
763 /**
764  * Same as pbuf_header but does not check if 'header_size > 0' is allowed.
765  * This is used internally only, to allow PBUF_REF for RX.
766  */
767 u8_t
768 pbuf_header_force(struct pbuf *p, s16_t header_size_increment)
769 {
770    return pbuf_header_impl(p, header_size_increment, 1);
771 }
772 
773 /**
774  * @ingroup pbuf
775  * Dereference a pbuf chain or queue and deallocate any no-longer-used
776  * pbufs at the head of this chain or queue.
777  *
778  * Decrements the pbuf reference count. If it reaches zero, the pbuf is
779  * deallocated.
780  *
781  * For a pbuf chain, this is repeated for each pbuf in the chain,
782  * up to the first pbuf which has a non-zero reference count after
783  * decrementing. So, when all reference counts are one, the whole
784  * chain is freed.
785  *
786  * @param p The pbuf (chain) to be dereferenced.
787  *
788  * @return the number of pbufs that were de-allocated
789  * from the head of the chain.
790  *
791  * @note MUST NOT be called on a packet queue (Not verified to work yet).
792  * @note the reference counter of a pbuf equals the number of pointers
793  * that refer to the pbuf (or into the pbuf).
794  *
795  * @internal examples:
796  *
797  * Assuming existing chains a->b->c with the following reference
798  * counts, calling pbuf_free(a) results in:
799  *
800  * 1->2->3 becomes ...1->3
801  * 3->3->3 becomes 2->3->3
802  * 1->1->2 becomes ......1
803  * 2->1->1 becomes 1->1->1
804  * 1->1->1 becomes .......
805  *
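 * A minimal code sketch of the usual reference pairing (assuming the
 * allocation succeeds):
  @code{.c}
struct pbuf *p = pbuf_alloc(PBUF_RAW, 64, PBUF_RAM);  // p->ref == 1
pbuf_ref(p);   // p->ref == 2, e.g. because p is also placed on a queue
pbuf_free(p);  // p->ref == 1, pbuf still alive
pbuf_free(p);  // p->ref == 0, memory is released
  @endcode
 *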
806  */
807 u8_t
808 pbuf_free(struct pbuf *p)
809 {
810   u16_t type;
811   struct pbuf *q;
812   u8_t count;
813 
814   if (p == NULL) {
815     LWIP_ASSERT("p != NULL", p != NULL);
816     /* if assertions are disabled, proceed with debug output */
817     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
818       ("pbuf_free(p == NULL) was called.\n"));
819     return 0;
820   }
821   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));
822 
823   PERF_START;
824 
825   LWIP_ASSERT("pbuf_free: sane type",
826     p->type == PBUF_RAM || p->type == PBUF_ROM ||
827     p->type == PBUF_REF || p->type == PBUF_POOL);
828 
829   count = 0;
830   /* de-allocate all consecutive pbufs from the head of the chain that
831    * obtain a zero reference count after decrementing */
832   while (p != NULL) {
833     u16_t ref;
834     SYS_ARCH_DECL_PROTECT(old_level);
835     /* Since decrementing ref cannot be guaranteed to be a single machine operation
836      * we must protect it. We put the new ref into a local variable to prevent
837      * further protection. */
838     SYS_ARCH_PROTECT(old_level);
839     /* all pbufs in a chain are referenced at least once */
840     LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
841     /* decrease reference count (number of pointers to pbuf) */
842     ref = --(p->ref);
843     SYS_ARCH_UNPROTECT(old_level);
844     /* this pbuf is no longer referenced to? */
845     if (ref == 0) {
846       /* remember next pbuf in chain for next iteration */
847       q = p->next;
848       LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
849       type = p->type;
850 #if LWIP_SUPPORT_CUSTOM_PBUF
851       /* is this a custom pbuf? */
852       if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
853         struct pbuf_custom *pc = (struct pbuf_custom*)p;
854         LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
855         pc->custom_free_function(p);
856       } else
857 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
858       {
859         /* is this a pbuf from the pool? */
860         if (type == PBUF_POOL) {
861 #if (LWIP_XR_EXT_MBUF_SUPPORT && LWIP_XR_EXT_PBUF_POOL_SMALL)
862           memp_free((p->mb_flags & PBUF_FLAG_POOL_SMALL) ? MEMP_PBUF_POOL_SMALL : MEMP_PBUF_POOL, p);
863 #else /* (LWIP_XR_EXT_MBUF_SUPPORT && LWIP_XR_EXT_PBUF_POOL_SMALL) */
864           memp_free(MEMP_PBUF_POOL, p);
865 #endif /* (LWIP_XR_EXT_MBUF_SUPPORT && LWIP_XR_EXT_PBUF_POOL_SMALL) */
866         /* is this a ROM or RAM referencing pbuf? */
867         } else if (type == PBUF_ROM || type == PBUF_REF) {
868           memp_free(MEMP_PBUF, p);
869         /* type == PBUF_RAM */
870         } else {
871           mem_free(p);
872         }
873       }
874       count++;
875       /* proceed to next pbuf */
876       p = q;
877     /* p->ref > 0, this pbuf is still referenced to */
878     /* (and so the remaining pbufs in chain as well) */
879     } else {
880       LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref));
881       /* stop walking through the chain */
882       p = NULL;
883     }
884   }
885   PERF_STOP("pbuf_free");
886   /* return number of de-allocated pbufs */
887   return count;
888 }
889 
890 /**
891  * Count number of pbufs in a chain
892  *
893  * @param p first pbuf of chain
894  * @return the number of pbufs in a chain
895  */
896 u16_t
897 pbuf_clen(const struct pbuf *p)
898 {
899   u16_t len;
900 
901   len = 0;
902   while (p != NULL) {
903     ++len;
904     p = p->next;
905   }
906   return len;
907 }
908 
909 /**
910  * @ingroup pbuf
911  * Increment the reference count of the pbuf.
912  *
913  * @param p pbuf to increase reference counter of
914  *
915  */
916 void
917 pbuf_ref(struct pbuf *p)
918 {
919   /* pbuf given? */
920   if (p != NULL) {
921     SYS_ARCH_INC(p->ref, 1);
922   }
923 }
924 
925 /**
926  * @ingroup pbuf
927  * Concatenate two pbufs (each may be a pbuf chain) and take over
928  * the caller's reference of the tail pbuf.
929  *
930  * @note The caller MAY NOT reference the tail pbuf afterwards.
931  * Use pbuf_chain() for that purpose.
932  *
933  * @see pbuf_chain()
934  */
935 void
936 pbuf_cat(struct pbuf *h, struct pbuf *t)
937 {
938   struct pbuf *p;
939 
940   LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)",
941              ((h != NULL) && (t != NULL)), return;);
942 
943   /* proceed to last pbuf of chain */
944   for (p = h; p->next != NULL; p = p->next) {
945     /* add total length of second chain to all totals of first chain */
946     p->tot_len += t->tot_len;
947   }
948   /* { p is last pbuf of first h chain, p->next == NULL } */
949   LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
950   LWIP_ASSERT("p->next == NULL", p->next == NULL);
951   /* add total length of second chain to last pbuf total of first chain */
952   p->tot_len += t->tot_len;
953   /* chain last pbuf of head (p) with first of tail (t) */
954   p->next = t;
955   /* p->next now references t, but the caller will drop its reference to t,
956    * so the net effect is that the reference count of t is unchanged.
957    */
958 }
959 
960 /**
961  * @ingroup pbuf
962  * Chain two pbufs (or pbuf chains) together.
963  *
964  * The caller MUST call pbuf_free(t) once it has stopped
965  * using it. Use pbuf_cat() instead if you no longer use t.
966  *
967  * @param h head pbuf (chain)
968  * @param t tail pbuf (chain)
969  * @note The pbufs MUST belong to the same packet.
970  * @note MAY NOT be called on a packet queue.
971  *
972  * The ->tot_len fields of all pbufs of the head chain are adjusted.
973  * The ->next field of the last pbuf of the head chain is adjusted.
974  * The ->ref field of the first pbuf of the tail chain is adjusted.
975  *
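 * A minimal sketch contrasting pbuf_chain() with pbuf_cat(), given two
 * pbufs h and t owned by the caller:
  @code{.c}
pbuf_chain(h, t);  // h's chain now ends in t; t->ref was incremented
// ... the caller may keep using t here ...
pbuf_free(t);      // drop the caller's own reference to t when done
// With pbuf_cat(h, t) instead, the caller's reference to t is handed
// over to h, and t must not be freed or accessed by the caller anymore.
  @endcode
 *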
976  */
977 void
978 pbuf_chain(struct pbuf *h, struct pbuf *t)
979 {
980   pbuf_cat(h, t);
981   /* t is now referenced by h */
982   pbuf_ref(t);
983   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
984 }
985 
986 /**
987  * Dechains the first pbuf from its succeeding pbufs in the chain.
988  *
989  * Makes p->tot_len field equal to p->len.
990  * @param p pbuf to dechain
991  * @return remainder of the pbuf chain, or NULL if it was de-allocated.
992  * @note May not be called on a packet queue.
993  */
994 struct pbuf *
995 pbuf_dechain(struct pbuf *p)
996 {
997   struct pbuf *q;
998   u8_t tail_gone = 1;
999   /* tail */
1000   q = p->next;
1001   /* pbuf has successor in chain? */
1002   if (q != NULL) {
1003     /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
1004     LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len);
1005     /* enforce invariant if assertion is disabled */
1006     q->tot_len = p->tot_len - p->len;
1007     /* decouple pbuf from remainder */
1008     p->next = NULL;
1009     /* total length of pbuf p is its own length only */
1010     p->tot_len = p->len;
1011     /* q is no longer referenced by p, free it */
1012     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q));
1013     tail_gone = pbuf_free(q);
1014     if (tail_gone > 0) {
1015       LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
1016                   ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q));
1017     }
1018     /* return remaining tail or NULL if deallocated */
1019   }
1020   /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
1021   LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
1022   return ((tail_gone > 0) ? NULL : q);
1023 }
1024 
1025 /**
1026  * @ingroup pbuf
1027  * Create PBUF_RAM copies of pbufs.
1028  *
1029  * Used to queue packets on behalf of the lwIP stack, such as
1030  * ARP based queueing.
1031  *
1032  * @note You MUST explicitly use p = pbuf_take(p);
1033  *
1034  * @note Only one packet is copied, no packet queue!
1035  *
1036  * @param p_to pbuf destination of the copy
1037  * @param p_from pbuf source of the copy
1038  *
1039  * @return ERR_OK if pbuf was copied
1040  *         ERR_ARG if one of the pbufs is NULL or p_to is not big
1041  *                 enough to hold p_from
1042  */
1043 err_t
1044 pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from)
1045 {
1046   u16_t offset_to=0, offset_from=0, len;
1047 
1048   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n",
1049     (const void*)p_to, (const void*)p_from));
1050 
1051   /* is the target big enough to hold the source? */
1052   LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) &&
1053              (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;);
1054 
1055   /* iterate through pbuf chain */
1056   do
1057   {
1058     /* copy one part of the original chain */
1059     if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
1060       /* complete current p_from fits into current p_to */
1061       len = p_from->len - offset_from;
1062     } else {
1063       /* current p_from does not fit into current p_to */
1064       len = p_to->len - offset_to;
1065     }
1066     MEMCPY((u8_t*)p_to->payload + offset_to, (u8_t*)p_from->payload + offset_from, len);
1067     offset_to += len;
1068     offset_from += len;
1069     LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
1070     LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
1071     if (offset_from >= p_from->len) {
1072       /* on to next p_from (if any) */
1073       offset_from = 0;
1074       p_from = p_from->next;
1075     }
1076     if (offset_to == p_to->len) {
1077       /* on to next p_to (if any) */
1078       offset_to = 0;
1079       p_to = p_to->next;
1080       LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL) , return ERR_ARG;);
1081     }
1082 
1083     if ((p_from != NULL) && (p_from->len == p_from->tot_len)) {
1084       /* don't copy more than one packet! */
1085       LWIP_ERROR("pbuf_copy() does not allow packet queues!",
1086                  (p_from->next == NULL), return ERR_VAL;);
1087     }
1088     if ((p_to != NULL) && (p_to->len == p_to->tot_len)) {
1089       /* don't copy more than one packet! */
1090       LWIP_ERROR("pbuf_copy() does not allow packet queues!",
1091                   (p_to->next == NULL), return ERR_VAL;);
1092     }
1093   } while (p_from);
1094   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n"));
1095   return ERR_OK;
1096 }
1097 
1098 /**
1099  * @ingroup pbuf
1100  * Copy (part of) the contents of a packet buffer
1101  * to an application supplied buffer.
1102  *
1103  * @param buf the pbuf from which to copy data
1104  * @param dataptr the application supplied buffer
1105  * @param len length of data to copy (dataptr must be big enough). No more
1106  * than buf->tot_len will be copied, irrespective of len
1107  * @param offset offset into the packet buffer from where to begin copying len bytes
1108  * @return the number of bytes copied, or 0 on failure
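 *
 * A minimal sketch: copy the first 14 bytes (e.g. an Ethernet header) of
 * a possibly chained pbuf p into a local, contiguous buffer.
  @code{.c}
u8_t hdr[14];
u16_t copied = pbuf_copy_partial(p, hdr, sizeof(hdr), 0);
if (copied == sizeof(hdr)) {
  // hdr now holds a contiguous copy, even if p is a chain of pbufs
}
  @endcode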
1109  */
1110 u16_t
1111 pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
1112 {
1113   const struct pbuf *p;
1114   u16_t left;
1115   u16_t buf_copy_len;
1116   u16_t copied_total = 0;
1117 
1118   LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
1119   LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);
1120 
1121   left = 0;
1122 
1123   /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1124   for (p = buf; len != 0 && p != NULL; p = p->next) {
1125     if ((offset != 0) && (offset >= p->len)) {
1126       /* don't copy from this buffer -> on to the next */
1127       offset -= p->len;
1128     } else {
1129       /* copy from this buffer. maybe only partially. */
1130       buf_copy_len = p->len - offset;
1131       if (buf_copy_len > len) {
1132         buf_copy_len = len;
1133       }
1134       /* copy the necessary parts of the buffer */
1135       MEMCPY(&((char*)dataptr)[left], &((char*)p->payload)[offset], buf_copy_len);
1136       copied_total += buf_copy_len;
1137       left += buf_copy_len;
1138       len -= buf_copy_len;
1139       offset = 0;
1140     }
1141   }
1142   return copied_total;
1143 }
1144 
1145 #if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1146 /**
1147  * This method modifies a 'pbuf chain', so that its total length is
1148  * smaller than 64K. The remainder of the original pbuf chain is stored
1149  * in *rest.
1150  * This function never creates new pbufs, but splits an existing chain
1151  * in two parts. The tot_len of the modified pbuf chain will likely be
1152  * smaller than 64K.
1153  * 'packet queues' are not supported by this function.
1154  *
1155  * @param p the pbuf queue to be split
1156  * @param rest pointer to store the remainder (after the first 64K)
1157  */
1158 void pbuf_split_64k(struct pbuf *p, struct pbuf **rest)
1159 {
1160   *rest = NULL;
1161   if ((p != NULL) && (p->next != NULL)) {
1162     u16_t tot_len_front = p->len;
1163     struct pbuf *i = p;
1164     struct pbuf *r = p->next;
1165 
1166     /* continue until the total length (summed up as u16_t) overflows */
1167     while ((r != NULL) && ((u16_t)(tot_len_front + r->len) > tot_len_front)) {
1168       tot_len_front += r->len;
1169       i = r;
1170       r = r->next;
1171     }
1172     /* i now points to the last pbuf of the first part. Set its next
1173        pointer to NULL */
1174     i->next = NULL;
1175 
1176     if (r != NULL) {
1177       /* Update the tot_len field in the first part */
1178       for (i = p; i != NULL; i = i->next) {
1179         i->tot_len -= r->tot_len;
1180         LWIP_ASSERT("tot_len/len mismatch in last pbuf",
1181                     (i->next != NULL) || (i->tot_len == i->len));
1182       }
1183       if (p->flags & PBUF_FLAG_TCP_FIN) {
1184         r->flags |= PBUF_FLAG_TCP_FIN;
1185       }
1186 
1187       /* tot_len field in rest does not need modifications */
1188       /* reference counters do not need modifications */
1189       *rest = r;
1190     }
1191   }
1192 }
1193 #endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1194 
1195 /* Actual implementation of pbuf_skip() but returning const pointer... */
1196 static const struct pbuf*
1197 pbuf_skip_const(const struct pbuf* in, u16_t in_offset, u16_t* out_offset)
1198 {
1199   u16_t offset_left = in_offset;
1200   const struct pbuf* q = in;
1201 
1202   /* get the correct pbuf */
1203   while ((q != NULL) && (q->len <= offset_left)) {
1204     offset_left -= q->len;
1205     q = q->next;
1206   }
1207   if (out_offset != NULL) {
1208     *out_offset = offset_left;
1209   }
1210   return q;
1211 }
1212 
1213 /**
1214  * @ingroup pbuf
1215  * Skip a number of bytes at the start of a pbuf
1216  *
1217  * @param in input pbuf
1218  * @param in_offset offset to skip
1219  * @param out_offset resulting offset in the returned pbuf
1220  * @return the pbuf in the queue where the offset is
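 *
 * A minimal sketch: locate the pbuf that holds absolute offset 100 of a
 * chain p and read the byte stored there.
  @code{.c}
u16_t rel;
struct pbuf *q = pbuf_skip(p, 100, &rel);
if (q != NULL) {
  u8_t b = ((u8_t *)q->payload)[rel];  // byte at absolute offset 100
}
  @endcode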
1221  */
1222 struct pbuf*
1223 pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset)
1224 {
1225   return (struct pbuf*)(size_t)pbuf_skip_const(in, in_offset, out_offset);
1226 }
1227 
1228 /**
1229  * @ingroup pbuf
1230  * Copy application supplied data into a pbuf.
1231  * This function can only be used to copy the equivalent of buf->tot_len data.
1232  *
1233  * @param buf pbuf to fill with data
1234  * @param dataptr application supplied data buffer
1235  * @param len length of the application supplied data buffer
1236  *
1237  * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
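 *
 * A minimal sketch: copy an application buffer into a freshly allocated
 * pbuf (error handling kept to the checks shown).
  @code{.c}
const char msg[] = "hello";
struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, sizeof(msg), PBUF_RAM);
if ((p != NULL) && (pbuf_take(p, msg, sizeof(msg)) == ERR_OK)) {
  // p now holds a private copy of msg and can be queued safely
}
  @endcode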
1238  */
1239 err_t
1240 pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len)
1241 {
1242   struct pbuf *p;
1243   u16_t buf_copy_len;
1244   u16_t total_copy_len = len;
1245   u16_t copied_total = 0;
1246 
1247   LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;);
1248   LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
1249   LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;);
1250 
1251   if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) {
1252     return ERR_ARG;
1253   }
1254 
1255   /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1256   for (p = buf; total_copy_len != 0; p = p->next) {
1257     LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL);
1258     buf_copy_len = total_copy_len;
1259     if (buf_copy_len > p->len) {
1260       /* this pbuf cannot hold all remaining data */
1261       buf_copy_len = p->len;
1262     }
1263     /* copy the necessary parts of the buffer */
1264     MEMCPY(p->payload, &((const char*)dataptr)[copied_total], buf_copy_len);
1265     total_copy_len -= buf_copy_len;
1266     copied_total += buf_copy_len;
1267   }
1268   LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len);
1269   return ERR_OK;
1270 }
1271 
1272 /**
1273  * @ingroup pbuf
1274  * Same as pbuf_take() but puts data at an offset
1275  *
1276  * @param buf pbuf to fill with data
1277  * @param dataptr application supplied data buffer
1278  * @param len length of the application supplied data buffer
1279  * @param offset offset in pbuf where to copy dataptr to
1280  *
1281  * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
1282  */
1283 err_t
1284 pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset)
1285 {
1286   u16_t target_offset;
1287   struct pbuf* q = pbuf_skip(buf, offset, &target_offset);
1288 
1289   /* return requested data if pbuf is OK */
1290   if ((q != NULL) && (q->tot_len >= target_offset + len)) {
1291     u16_t remaining_len = len;
1292     const u8_t* src_ptr = (const u8_t*)dataptr;
1293     /* copy the part that goes into the first pbuf */
1294     u16_t first_copy_len = LWIP_MIN(q->len - target_offset, len);
1295     MEMCPY(((u8_t*)q->payload) + target_offset, dataptr, first_copy_len);
1296     remaining_len -= first_copy_len;
1297     src_ptr += first_copy_len;
1298     if (remaining_len > 0) {
1299       return pbuf_take(q->next, src_ptr, remaining_len);
1300     }
1301     return ERR_OK;
1302   }
1303   return ERR_MEM;
1304 }
1305 
1306 /**
1307  * @ingroup pbuf
1308  * Creates a single pbuf out of a queue of pbufs.
1309  *
1310  * @remark: Either the source pbuf 'p' is freed by this function or the original
1311  *          pbuf 'p' is returned, therefore the caller has to check the result!
1312  *
1313  * @param p the source pbuf
1314  * @param layer pbuf_layer of the new pbuf
1315  *
1316  * @return a new, single pbuf (p->next is NULL)
1317  *         or the old pbuf if allocation fails
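 *
 * A minimal sketch: flatten a possibly chained pbuf before handing it to
 * code that expects contiguous memory (note the returned pointer must be
 * used in place of the old one).
  @code{.c}
p = pbuf_coalesce(p, PBUF_RAW);
if (p->next == NULL) {
  // the whole payload is now contiguous in a single PBUF_RAM pbuf
}
  @endcode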
1318  */
1319 struct pbuf*
1320 pbuf_coalesce(struct pbuf *p, pbuf_layer layer)
1321 {
1322   struct pbuf *q;
1323   err_t err;
1324   if (p->next == NULL) {
1325     return p;
1326   }
1327   q = pbuf_alloc(layer, p->tot_len, PBUF_RAM);
1328   if (q == NULL) {
1329     /* @todo: what do we do now? */
1330     return p;
1331   }
1332   err = pbuf_copy(q, p);
1333   if(err != ERR_OK)
1334   {
1335       LWIP_ASSERT("pbuf_copy failed", err == ERR_OK);
1336   }
1337   pbuf_free(p);
1338   return q;
1339 }
1340 
1341 #if LWIP_CHECKSUM_ON_COPY
1342 /**
1343  * Copies data into a single pbuf (*not* into a pbuf queue!) and updates
1344  * the checksum while copying
1345  *
1346  * @param p the pbuf to copy data into
1347  * @param start_offset offset of p->payload where to copy the data to
1348  * @param dataptr data to copy into the pbuf
1349  * @param len length of data to copy into the pbuf
1350  * @param chksum pointer to the checksum which is updated
1351  * @return ERR_OK if successful, another error if the data does not fit
1352  *         within the (first) pbuf (no pbuf queues!)
1353  */
1354 err_t
1355 pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr,
1356                  u16_t len, u16_t *chksum)
1357 {
1358   u32_t acc;
1359   u16_t copy_chksum;
1360   char *dst_ptr;
1361   LWIP_ASSERT("p != NULL", p != NULL);
1362   LWIP_ASSERT("dataptr != NULL", dataptr != NULL);
1363   LWIP_ASSERT("chksum != NULL", chksum != NULL);
1364   LWIP_ASSERT("len != 0", len != 0);
1365 
1366   if ((start_offset >= p->len) || (start_offset + len > p->len)) {
1367     return ERR_ARG;
1368   }
1369 
1370   dst_ptr = ((char*)p->payload) + start_offset;
1371   copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len);
1372   if ((start_offset & 1) != 0) {
1373     copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum);
1374   }
1375   acc = *chksum;
1376   acc += copy_chksum;
1377   *chksum = FOLD_U32T(acc);
1378   return ERR_OK;
1379 }
1380 #endif /* LWIP_CHECKSUM_ON_COPY */
1381 
1382 /**
1383  * @ingroup pbuf
1384  * Get one byte from the specified position in a pbuf
1385  * WARNING: returns zero for offset >= p->tot_len
1386  *
1387  * @param p pbuf to parse
1388  * @param offset offset into p of the byte to return
1389  * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len
1390  */
1391 u8_t
1392 pbuf_get_at(const struct pbuf* p, u16_t offset)
1393 {
1394   int ret = pbuf_try_get_at(p, offset);
1395   if (ret >= 0) {
1396     return (u8_t)ret;
1397   }
1398   return 0;
1399 }
1400 
1401 /**
1402  * @ingroup pbuf
1403  * Get one byte from the specified position in a pbuf
1404  *
1405  * @param p pbuf to parse
1406  * @param offset offset into p of the byte to return
1407  * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len
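 *
 * A minimal sketch: probe a byte without the zero-on-error ambiguity of
 * pbuf_get_at().
  @code{.c}
int b = pbuf_try_get_at(p, 20);
if (b >= 0) {
  u8_t value = (u8_t)b;  // valid byte read from offset 20
} else {
  // offset 20 is beyond p->tot_len
}
  @endcode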
1408  */
1409 int
1410 pbuf_try_get_at(const struct pbuf* p, u16_t offset)
1411 {
1412   u16_t q_idx;
1413   const struct pbuf* q = pbuf_skip_const(p, offset, &q_idx);
1414 
1415   /* return requested data if pbuf is OK */
1416   if ((q != NULL) && (q->len > q_idx)) {
1417     return ((u8_t*)q->payload)[q_idx];
1418   }
1419   return -1;
1420 }
1421 
1422 /**
1423  * @ingroup pbuf
1424  * Put one byte to the specified position in a pbuf
1425  * WARNING: silently ignores offset >= p->tot_len
1426  *
1427  * @param p pbuf to fill
1428  * @param offset offset into p of the byte to write
1429  * @param data byte to write at an offset into p
1430  */
1431 void
1432 pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data)
1433 {
1434   u16_t q_idx;
1435   struct pbuf* q = pbuf_skip(p, offset, &q_idx);
1436 
1437   /* write requested data if pbuf is OK */
1438   if ((q != NULL) && (q->len > q_idx)) {
1439     ((u8_t*)q->payload)[q_idx] = data;
1440   }
1441 }
1442 
1443 /**
1444  * @ingroup pbuf
1445  * Compare pbuf contents at specified offset with memory s2, both of length n
1446  *
1447  * @param p pbuf to compare
1448  * @param offset offset into p at which to start comparing
1449  * @param s2 buffer to compare
1450  * @param n length of buffer to compare
1451  * @return zero if equal, nonzero otherwise
1452  *         (0xffff if p is too short, diffoffset+1 otherwise)
1453  */
1454 u16_t
1455 pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n)
1456 {
1457   u16_t start = offset;
1458   const struct pbuf* q = p;
1459   u16_t i;
1460 
1461   /* pbuf long enough to perform check? */
1462   if(p->tot_len < (offset + n)) {
1463     return 0xffff;
1464   }
1465 
1466   /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. */
1467   while ((q != NULL) && (q->len <= start)) {
1468     start -= q->len;
1469     q = q->next;
1470   }
1471 
1472   /* return requested data if pbuf is OK */
1473   for (i = 0; i < n; i++) {
1474     /* We know pbuf_get_at() succeeds because of p->tot_len check above. */
1475     u8_t a = pbuf_get_at(q, start + i);
1476     u8_t b = ((const u8_t*)s2)[i];
1477     if (a != b) {
1478       return i+1;
1479     }
1480   }
1481   return 0;
1482 }
1483 
1484 /**
1485  * @ingroup pbuf
1486  * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset
1487  * start_offset.
1488  *
1489  * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1490  *        return value 'not found'
1491  * @param mem search for the contents of this buffer
1492  * @param mem_len length of 'mem'
1493  * @param start_offset offset into p at which to start searching
1494  * @return 0xFFFF if mem was not found in p, or the index where it was found
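 *
 * A minimal sketch: locate a header terminator (here the HTTP-style
 * "\r\n\r\n", chosen only as an example) in a received pbuf chain p.
  @code{.c}
u16_t idx = pbuf_memfind(p, "\r\n\r\n", 4, 0);
if (idx != 0xFFFF) {
  // the terminator starts at offset idx within the pbuf chain
}
  @endcode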
1495  */
1496 u16_t
1497 pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset)
1498 {
1499   u16_t i;
1500   u16_t max = p->tot_len - mem_len;
1501   if (p->tot_len >= mem_len + start_offset) {
1502     for (i = start_offset; i <= max; i++) {
1503       u16_t plus = pbuf_memcmp(p, i, mem, mem_len);
1504       if (plus == 0) {
1505         return i;
1506       }
1507     }
1508   }
1509   return 0xFFFF;
1510 }
1511 
1512 /**
1513  * Find occurrence of the nul-terminated string substr in pbuf p,
1514  * starting at offset 0.
1515  * WARNING: in contrast to strstr(), this one does not stop at the first \0 in
1516  * the pbuf/source string!
1517  *
1518  * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1519  *        return value 'not found'
1520  * @param substr string to search for in p, maximum length is 0xFFFE
1521  * @return 0xFFFF if substr was not found in p or the index where it was found
1522  */
1523 u16_t
1524 pbuf_strstr(const struct pbuf* p, const char* substr)
1525 {
1526   size_t substr_len;
1527   if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) {
1528     return 0xFFFF;
1529   }
1530   substr_len = strlen(substr);
1531   if (substr_len >= 0xFFFF) {
1532     return 0xFFFF;
1533   }
1534   return pbuf_memfind(p, substr, (u16_t)substr_len, 0);
1535 }
1536