/**
 * @file
 * Packet buffer management
 *
 * Packets are built from the pbuf data structure. It supports dynamic
 * memory allocation for packet contents or can reference externally
 * managed packet contents both in RAM and ROM. Quick allocation for
 * incoming packets is provided through pools with fixed sized pbufs.
 *
 * A packet may span over multiple pbufs, chained as a singly linked
 * list. This is called a "pbuf chain".
 *
 * Multiple packets may be queued, also using this singly linked list.
 * This is called a "packet queue".
 *
 * So, a packet queue consists of one or more pbuf chains, each of
 * which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
 * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
 *
 * The difference between a pbuf chain and a packet queue is subtle
 * but precise:
 *
 * The last pbuf of a packet has a ->tot_len field that equals the
 * ->len field. It can be found by traversing the list. If the last
 * pbuf of a packet has a ->next field other than NULL, more packets
 * are on the queue.
 *
 * Therefore, when looping through the pbufs of a single packet, the
 * loop end condition is (tot_len == p->len), NOT (next == NULL).
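 *
 * An illustrative sketch (not from the original sources) of walking the
 * pbufs of one packet with that end condition; 'p' and process() are
 * placeholders:
 *
 *   struct pbuf *q;
 *   for (q = p; q != NULL; q = q->next) {
 *     process(q->payload, q->len);    // handle this part of the packet
 *     if (q->tot_len == q->len) {
 *       break;                        // last pbuf of this packet reached
 *     }
 *   }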
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *
 */

#include "lwip/opt.h"

#include "lwip/stats.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/memp.h"
#include "lwip/pbuf.h"
#include "lwip/sys.h"
#include "arch/perf.h"
#if LWIP_TCP && TCP_QUEUE_OOSEQ
#include "lwip/tcp_impl.h"
#endif
#if LWIP_CHECKSUM_ON_COPY
#include "lwip/inet_chksum.h"
#endif

#include <string.h>

#define SIZEOF_STRUCT_PBUF        LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
   aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)

#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
#define PBUF_POOL_IS_EMPTY()
#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */

#if !NO_SYS
#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
#include "lwip/tcpip.h"
#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL()  do { \
  if(tcpip_callback_with_block(pbuf_free_ooseq_callback, NULL, 0) != ERR_OK) { \
      SYS_ARCH_PROTECT(old_level); \
      pbuf_free_ooseq_pending = 0; \
      SYS_ARCH_UNPROTECT(old_level); \
  } } while(0)
#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
#endif /* !NO_SYS */

volatile u8_t pbuf_free_ooseq_pending;
#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()

/**
 * Attempt to reclaim some memory from queued out-of-sequence TCP segments
 * if we run out of pool pbufs. It's better to give priority to new packets
 * if we're running out.
 *
 * This must be done in the correct thread context, therefore this function
 * can only be used with NO_SYS=0 and through tcpip_callback.
 */
#if !NO_SYS
static
#endif /* !NO_SYS */
void
pbuf_free_ooseq(void)
{
  struct tcp_pcb* pcb;
  SYS_ARCH_DECL_PROTECT(old_level);

  SYS_ARCH_PROTECT(old_level);
  pbuf_free_ooseq_pending = 0;
  SYS_ARCH_UNPROTECT(old_level);

  for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
    if (NULL != pcb->ooseq) {
      /** Free the ooseq pbufs of one PCB only */
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
      tcp_segs_free(pcb->ooseq);
      pcb->ooseq = NULL;
      return;
    }
  }
}

#if !NO_SYS
/**
 * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
 */
static void
pbuf_free_ooseq_callback(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  pbuf_free_ooseq();
}
#endif /* !NO_SYS */

/** Queue a call to pbuf_free_ooseq if not already queued. */
static void
pbuf_pool_is_empty(void)
{
#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
  SYS_ARCH_DECL_PROTECT(old_level);
  SYS_ARCH_PROTECT(old_level);
  pbuf_free_ooseq_pending = 1;
  SYS_ARCH_UNPROTECT(old_level);
#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
  u8_t queued;
  SYS_ARCH_DECL_PROTECT(old_level);
  SYS_ARCH_PROTECT(old_level);
  queued = pbuf_free_ooseq_pending;
  pbuf_free_ooseq_pending = 1;
  SYS_ARCH_UNPROTECT(old_level);

  if(!queued) {
    /* queue a call to pbuf_free_ooseq if not already queued */
    PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
  }
#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
}
#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */

/**
 * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
 *
 * The actual memory allocated for the pbuf is determined by the
 * layer at which the pbuf is allocated and the requested size
 * (from the size parameter).
 *
 * @param layer flag to define header size
 * @param length size of the pbuf's payload
 * @param type this parameter decides how and where the pbuf
 * should be allocated as follows:
 *
 * - PBUF_RAM: buffer memory for pbuf is allocated as one large
 *             chunk. This includes protocol headers as well.
 * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
 *             protocol headers. Additional headers must be prepended
 *             by allocating another pbuf and chaining it to the front of
 *             the ROM pbuf. It is assumed that the memory used is really
 *             similar to ROM in that it is immutable and will not be
 *             changed. Memory which is dynamic should generally not
 *             be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
 * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
 *             protocol headers. It is assumed that the pbuf is only
 *             being used in a single thread. If the pbuf gets queued,
 *             then pbuf_take should be called to copy the buffer.
 * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
 *              the pbuf pool that is allocated during pbuf_init().
 *
 * @return the allocated pbuf. If multiple pbufs were allocated, this
 * is the first pbuf of a pbuf chain.
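 *
 * A minimal usage sketch (illustrative only; the payload size of 64 bytes
 * and the zero fill are arbitrary assumptions):
 *
 *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 64, PBUF_RAM);
 *   if (p != NULL) {
 *     memset(p->payload, 0, p->len);  // PBUF_RAM: one contiguous buffer
 *     // ... hand the pbuf to the stack, or drop it again ...
 *     pbuf_free(p);
 *   }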
 */
struct pbuf *
pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
{
  struct pbuf *p, *q, *r;
  u16_t offset;
  s32_t rem_len; /* remaining length */
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));

  /* determine header offset */
  switch (layer) {
  case PBUF_TRANSPORT:
    /* add room for transport (often TCP) layer header */
    offset = PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
    break;
  case PBUF_IP:
    /* add room for IP layer header */
    offset = PBUF_LINK_HLEN + PBUF_IP_HLEN;
    break;
  case PBUF_LINK:
    /* add room for link layer header */
    offset = PBUF_LINK_HLEN;
    break;
  case PBUF_RAW:
    offset = 0;
    break;
  default:
    LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
    return NULL;
  }

  switch (type) {
  case PBUF_POOL:
    /* allocate head of pbuf chain into p */
    p = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
    if (p == NULL) {
      PBUF_POOL_IS_EMPTY();
      return NULL;
    }
    p->type = type;
    p->next = NULL;

    /* make the payload pointer point 'offset' bytes into pbuf data memory */
    p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset)));
    LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
            ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
    /* the total length of the pbuf chain is the requested size */
    p->tot_len = length;
    /* set the length of the first pbuf in the chain */
    p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset));
    LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                ((u8_t*)p->payload + p->len <=
                 (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
    LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
      (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
    /* set reference count (needed here in case we fail) */
    p->ref = 1;

    /* now allocate the tail of the pbuf chain */

    /* remember first pbuf for linkage in next iteration */
    r = p;
    /* remaining length to be allocated */
    rem_len = length - p->len;
    /* any remaining pbufs to be allocated? */
    while (rem_len > 0) {
      q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
      if (q == NULL) {
        PBUF_POOL_IS_EMPTY();
        /* free chain so far allocated */
        pbuf_free(p);
        /* bail out unsuccessfully */
        return NULL;
      }
      q->type = type;
      q->flags = 0;
      q->next = NULL;
      /* make previous pbuf point to this pbuf */
      r->next = q;
      /* set total length of this pbuf and next in chain */
      LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff);
      q->tot_len = (u16_t)rem_len;
      /* this pbuf length is pool size, unless smaller sized tail */
      q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED);
      q->payload = (void *)((u8_t *)q + SIZEOF_STRUCT_PBUF);
      LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
              ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
      LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                  ((u8_t*)p->payload + p->len <=
                   (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
      q->ref = 1;
      /* calculate remaining length to be allocated */
      rem_len -= q->len;
      /* remember this pbuf for linkage in next iteration */
      r = q;
    }
    /* end of chain */
    /*r->next = NULL;*/

    break;
  case PBUF_RAM:
    /* If pbuf is to be allocated in RAM, allocate memory for it. */
    p = (struct pbuf*)mem_malloc(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length));
    if (p == NULL) {
      return NULL;
    }
    /* Set up internal structure of the pbuf. */
    p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset));
    p->len = p->tot_len = length;
    p->next = NULL;
    p->type = type;

    LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
           ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
    break;
  /* pbuf references existing (non-volatile static constant) ROM payload? */
  case PBUF_ROM:
  /* pbuf references existing (externally allocated) RAM payload? */
  case PBUF_REF:
    /* only allocate memory for the pbuf structure */
    p = (struct pbuf *)memp_malloc(MEMP_PBUF);
    if (p == NULL) {
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
                  ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n",
                  (type == PBUF_ROM) ? "ROM" : "REF"));
      return NULL;
    }
    /* caller must set this field properly, afterwards */
    p->payload = NULL;
    p->len = p->tot_len = length;
    p->next = NULL;
    p->type = type;
    break;
  default:
    LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
    return NULL;
  }
  /* set reference count */
  p->ref = 1;
  /* set flags */
  p->flags = 0;
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
  return p;
}

#if LWIP_SUPPORT_CUSTOM_PBUF
/** Initialize a custom pbuf (already allocated).
 *
 * @param layer flag to define header size
 * @param length size of the pbuf's payload
 * @param type type of the pbuf (only used to treat the pbuf accordingly, as
 *        this function allocates no memory)
 * @param p pointer to the custom pbuf to initialize (already allocated)
 * @param payload_mem pointer to the buffer that is used for payload and headers,
 *        must be at least big enough to hold 'length' plus the header size,
 *        may be NULL if set later.
 *        ATTENTION: The caller is responsible for correct alignment of this buffer!!
 * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
 *        big enough to hold 'length' plus the header size
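 *
 * Illustrative sketch (not part of lwIP), assuming a driver-owned rx
 * descriptor 'my_desc' that embeds a struct pbuf_custom 'pc' and a
 * suitably aligned buffer 'buf'; all names here are hypothetical:
 *
 *   static void my_free(struct pbuf *p) {
 *     // return the enclosing descriptor to the driver's rx pool
 *   }
 *   ...
 *   my_desc.pc.custom_free_function = my_free;
 *   struct pbuf *p = pbuf_alloced_custom(PBUF_RAW, frame_len, PBUF_REF,
 *                                        &my_desc.pc, my_desc.buf,
 *                                        sizeof(my_desc.buf));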
 */
struct pbuf*
pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
                    void *payload_mem, u16_t payload_mem_len)
{
  u16_t offset;
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));

  /* determine header offset */
  switch (l) {
  case PBUF_TRANSPORT:
    /* add room for transport (often TCP) layer header */
    offset = PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
    break;
  case PBUF_IP:
    /* add room for IP layer header */
    offset = PBUF_LINK_HLEN + PBUF_IP_HLEN;
    break;
  case PBUF_LINK:
    /* add room for link layer header */
    offset = PBUF_LINK_HLEN;
    break;
  case PBUF_RAW:
    offset = 0;
    break;
  default:
    LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0);
    return NULL;
  }

  if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
    return NULL;
  }

  p->pbuf.next = NULL;
  if (payload_mem != NULL) {
    p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
  } else {
    p->pbuf.payload = NULL;
  }
  p->pbuf.flags = PBUF_FLAG_IS_CUSTOM;
  p->pbuf.len = p->pbuf.tot_len = length;
  p->pbuf.type = type;
  p->pbuf.ref = 1;
  return &p->pbuf;
}
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */

/**
 * Shrink a pbuf chain to a desired length.
 *
 * @param p pbuf to shrink.
 * @param new_len desired new length of pbuf chain
 *
 * Depending on the desired length, the first few pbufs in a chain might
 * be skipped and left unchanged. The new last pbuf in the chain will be
 * resized, and any remaining pbufs will be freed.
 *
 * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
 * @note May not be called on a packet queue.
 *
 * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
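 *
 * Illustrative sketch: trimming a received chain down to the length found
 * in a (hypothetical) protocol header before further processing:
 *
 *   if (reported_len < p->tot_len) {
 *     pbuf_realloc(p, reported_len);  // p->tot_len becomes reported_len
 *   }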
 */
void
pbuf_realloc(struct pbuf *p, u16_t new_len)
{
  struct pbuf *q;
  u16_t rem_len; /* remaining length */
  s32_t grow;

  LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
  LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL ||
              p->type == PBUF_ROM ||
              p->type == PBUF_RAM ||
              p->type == PBUF_REF);

  /* desired length larger than current length? */
  if (new_len >= p->tot_len) {
    /* enlarging not yet supported */
    return;
  }

  /* the pbuf chain grows by (new_len - p->tot_len) bytes
   * (which may be negative in case of shrinking) */
  grow = new_len - p->tot_len;

  /* first, step over any pbufs that should remain in the chain */
  rem_len = new_len;
  q = p;
  /* should this pbuf be kept? */
  while (rem_len > q->len) {
    /* decrease remaining length by pbuf length */
    rem_len -= q->len;
    /* decrease total length indicator */
    LWIP_ASSERT("grow < max_u16_t", grow < 0xffff);
    q->tot_len += (u16_t)grow;
    /* proceed to next pbuf in chain */
    q = q->next;
    LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
  }
  /* we have now reached the new last pbuf (in q) */
  /* rem_len == desired length for pbuf q */

  /* shrink allocated memory for PBUF_RAM */
  /* (other types merely adjust their length fields) */
  if ((q->type == PBUF_RAM) && (rem_len != q->len)) {
    /* reallocate and adjust the length of the pbuf that will be split */
    q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len);
    LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
  }
  /* adjust length fields for new last pbuf */
  q->len = rem_len;
  q->tot_len = q->len;

  /* any remaining pbufs in chain? */
  if (q->next != NULL) {
    /* free remaining pbufs in chain */
    pbuf_free(q->next);
  }
  /* q is last pbuf in chain */
  q->next = NULL;

}

/**
 * Adjusts the payload pointer to hide or reveal headers in the payload.
 *
 * Adjusts the ->payload pointer so that space for a header
 * (dis)appears in the pbuf payload.
 *
 * The ->payload, ->tot_len and ->len fields are adjusted.
 *
 * @param p pbuf to change the header size.
 * @param header_size_increment Number of bytes to increment header size which
 * increases the size of the pbuf. New space is on the front.
 * (Using a negative value decreases the header size.)
 * If header_size_increment is 0, this function does nothing and returns successfully.
 *
 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
 * the call will fail. A check is made that the increase in header size does
 * not move the payload pointer in front of the start of the buffer.
 * @return non-zero on failure, zero on success.
 *
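 * Illustrative sketch (sizes are arbitrary): making room for an 8-byte
 * header in front of the payload and hiding it again afterwards:
 *
 *   if (pbuf_header(p, 8) == 0) {   // payload now starts 8 bytes earlier
 *     // ... write the 8-byte header at p->payload ...
 *     pbuf_header(p, -8);           // hide the header again
 *   }
 *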
 */
u8_t
pbuf_header(struct pbuf *p, s16_t header_size_increment)
{
  u16_t type;
  void *payload;
  u16_t increment_magnitude;

  LWIP_ASSERT("p != NULL", p != NULL);
  if ((header_size_increment == 0) || (p == NULL)) {
    return 0;
  }

  if (header_size_increment < 0){
    increment_magnitude = -header_size_increment;
    /* Check that we aren't going to move off the end of the pbuf */
    LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;);
  } else {
    increment_magnitude = header_size_increment;
#if 0
    /* Can't assert these as some callers speculatively call
         pbuf_header() to see if it's OK.  Will return 1 below instead. */
    /* Check that we've got the correct type of pbuf to work with */
    LWIP_ASSERT("p->type == PBUF_RAM || p->type == PBUF_POOL",
                p->type == PBUF_RAM || p->type == PBUF_POOL);
    /* Check that we aren't going to move off the beginning of the pbuf */
    LWIP_ASSERT("p->payload - increment_magnitude >= p + SIZEOF_STRUCT_PBUF",
                (u8_t *)p->payload - increment_magnitude >= (u8_t *)p + SIZEOF_STRUCT_PBUF);
#endif
  }

  type = p->type;
  /* remember current payload pointer */
  payload = p->payload;

  /* pbuf types containing payloads? */
  if (type == PBUF_RAM || type == PBUF_POOL) {
    /* set new payload pointer */
    p->payload = (u8_t *)p->payload - header_size_increment;
    /* boundary check fails? */
    if ((u8_t *)p->payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) {
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
        ("pbuf_header: failed as %p < %p (not enough space for new header size)\n",
        (void *)p->payload, (void *)(p + 1)));
      /* restore old payload pointer */
      p->payload = payload;
      /* bail out unsuccessfully */
      return 1;
    }
  /* pbuf types referring to external payloads? */
  } else if (type == PBUF_REF || type == PBUF_ROM) {
    /* hide a header in the payload? */
    if ((header_size_increment < 0) && (increment_magnitude <= p->len)) {
      /* increase payload pointer */
      p->payload = (u8_t *)p->payload - header_size_increment;
    } else {
      /* cannot expand payload to front (yet!)
       * bail out unsuccessfully */
      return 1;
    }
  } else {
    /* Unknown type */
    LWIP_ASSERT("bad pbuf type", 0);
    return 1;
  }
  /* modify pbuf length fields */
  p->len += header_size_increment;
  p->tot_len += header_size_increment;

  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_header: old %p new %p (%"S16_F")\n",
    (void *)payload, (void *)p->payload, header_size_increment));

  return 0;
}

/**
 * Dereference a pbuf chain or queue and deallocate any no-longer-used
 * pbufs at the head of this chain or queue.
 *
 * Decrements the pbuf reference count. If it reaches zero, the pbuf is
 * deallocated.
 *
 * For a pbuf chain, this is repeated for each pbuf in the chain,
 * up to the first pbuf which has a non-zero reference count after
 * decrementing. So, when all reference counts are one, the whole
 * chain is freed.
 *
 * @param p The pbuf (chain) to be dereferenced.
 *
 * @return the number of pbufs that were de-allocated
 * from the head of the chain.
 *
 * @note MUST NOT be called on a packet queue (Not verified to work yet).
 * @note the reference counter of a pbuf equals the number of pointers
 * that refer to the pbuf (or into the pbuf).
 *
 * @internal examples:
 *
 * Assuming existing chains a->b->c with the following reference
 * counts, calling pbuf_free(a) results in:
 *
 * 1->2->3 becomes ...1->3
 * 3->3->3 becomes 2->3->3
 * 1->1->2 becomes ......1
 * 2->1->1 becomes 1->1->1
 * 1->1->1 becomes .......
 *
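 * Another illustrative sketch: a freshly allocated pbuf has ref == 1, so a
 * single pbuf_free() releases it; after an extra pbuf_ref() two calls are
 * needed:
 *
 *   struct pbuf *p = pbuf_alloc(PBUF_RAW, 10, PBUF_RAM);
 *   pbuf_ref(p);     // ref becomes 2
 *   pbuf_free(p);    // ref becomes 1, nothing deallocated, returns 0
 *   pbuf_free(p);    // ref becomes 0, pbuf deallocated, returns 1
 *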
 */
u8_t
pbuf_free(struct pbuf *p)
{
  u16_t type;
  struct pbuf *q;
  u8_t count;

  if (p == NULL) {
    LWIP_ASSERT("p != NULL", p != NULL);
    /* if assertions are disabled, proceed with debug output */
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
      ("pbuf_free(p == NULL) was called.\n"));
    return 0;
  }
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));

  PERF_START;

  LWIP_ASSERT("pbuf_free: sane type",
    p->type == PBUF_RAM || p->type == PBUF_ROM ||
    p->type == PBUF_REF || p->type == PBUF_POOL);

  count = 0;
  /* de-allocate all consecutive pbufs from the head of the chain that
   * obtain a zero reference count after decrementing */
  while (p != NULL) {
    u16_t ref;
    SYS_ARCH_DECL_PROTECT(old_level);
    /* Since decrementing ref cannot be guaranteed to be a single machine operation
     * we must protect it. We put the new ref into a local variable to prevent
     * further protection. */
    SYS_ARCH_PROTECT(old_level);
    /* all pbufs in a chain are referenced at least once */
    LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
    /* decrease reference count (number of pointers to pbuf) */
    ref = --(p->ref);
    SYS_ARCH_UNPROTECT(old_level);
    /* is this pbuf no longer referenced? */
    if (ref == 0) {
      /* remember next pbuf in chain for next iteration */
      q = p->next;
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
      type = p->type;
#if LWIP_SUPPORT_CUSTOM_PBUF
      /* is this a custom pbuf? */
      if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
        struct pbuf_custom *pc = (struct pbuf_custom*)p;
        LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
        pc->custom_free_function(p);
      } else
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
      {
        /* is this a pbuf from the pool? */
        if (type == PBUF_POOL) {
          memp_free(MEMP_PBUF_POOL, p);
        /* is this a ROM or RAM referencing pbuf? */
        } else if (type == PBUF_ROM || type == PBUF_REF) {
          memp_free(MEMP_PBUF, p);
        /* type == PBUF_RAM */
        } else {
          mem_free(p);
        }
      }
      count++;
      /* proceed to next pbuf */
      p = q;
    /* p->ref > 0, this pbuf is still referenced */
    /* (and so are the remaining pbufs in the chain) */
    } else {
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref));
      /* stop walking through the chain */
      p = NULL;
    }
  }
  PERF_STOP("pbuf_free");
  /* return number of de-allocated pbufs */
  return count;
}

/**
 * Count number of pbufs in a chain
 *
 * @param p first pbuf of chain
 * @return the number of pbufs in a chain
 */

u8_t
pbuf_clen(struct pbuf *p)
{
  u8_t len;

  len = 0;
  while (p != NULL) {
    ++len;
    p = p->next;
  }
  return len;
}

/**
 * Increment the reference count of the pbuf.
 *
 * @param p pbuf to increase reference counter of
 *
 */
void
pbuf_ref(struct pbuf *p)
{
  SYS_ARCH_DECL_PROTECT(old_level);
  /* pbuf given? */
  if (p != NULL) {
    SYS_ARCH_PROTECT(old_level);
    ++(p->ref);
    SYS_ARCH_UNPROTECT(old_level);
  }
}

/**
 * Concatenate two pbufs (each may be a pbuf chain) and take over
 * the caller's reference of the tail pbuf.
 *
 * @note The caller MAY NOT reference the tail pbuf afterwards.
 * Use pbuf_chain() for that purpose.
 *
 * @see pbuf_chain()
 */

void
pbuf_cat(struct pbuf *h, struct pbuf *t)
{
  struct pbuf *p;

  LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)",
             ((h != NULL) && (t != NULL)), return;);

  /* proceed to last pbuf of chain */
  for (p = h; p->next != NULL; p = p->next) {
    /* add total length of second chain to all totals of first chain */
    p->tot_len += t->tot_len;
  }
  /* { p is last pbuf of first h chain, p->next == NULL } */
  LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
  LWIP_ASSERT("p->next == NULL", p->next == NULL);
  /* add total length of second chain to last pbuf total of first chain */
  p->tot_len += t->tot_len;
  /* chain last pbuf of head (p) with first of tail (t) */
  p->next = t;
  /* p->next now references t, but the caller will drop its reference to t,
   * so the net change to the reference count of t is zero.
   */
}

/**
 * Chain two pbufs (or pbuf chains) together.
 *
 * The caller MUST call pbuf_free(t) once it has stopped
 * using it. Use pbuf_cat() instead if you no longer use t.
 *
 * @param h head pbuf (chain)
 * @param t tail pbuf (chain)
 * @note The pbufs MUST belong to the same packet.
 * @note MAY NOT be called on a packet queue.
 *
 * The ->tot_len fields of all pbufs of the head chain are adjusted.
 * The ->next field of the last pbuf of the head chain is adjusted.
 * The ->ref field of the first pbuf of the tail chain is adjusted.
 *
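 * Illustrative sketch of the difference to pbuf_cat() (h and t are
 * placeholders for separately allocated pbufs):
 *
 *   pbuf_chain(h, t);   // h now references t, caller keeps its reference
 *   pbuf_free(t);       // caller drops its own reference when done with t
 *   pbuf_free(h);       // frees the whole chain, including t
 *
 * With pbuf_cat(h, t) the middle call must be omitted, since the caller's
 * reference to t is handed over to the chain.
 *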
 */
void
pbuf_chain(struct pbuf *h, struct pbuf *t)
{
  pbuf_cat(h, t);
  /* t is now referenced by h */
  pbuf_ref(t);
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
}

/**
 * Dechains the first pbuf from its succeeding pbufs in the chain.
 *
 * Makes p->tot_len field equal to p->len.
 * @param p pbuf to dechain
 * @return remainder of the pbuf chain, or NULL if it was de-allocated.
 * @note May not be called on a packet queue.
 */
struct pbuf *
pbuf_dechain(struct pbuf *p)
{
  struct pbuf *q;
  u8_t tail_gone = 1;
  /* tail */
  q = p->next;
  /* pbuf has successor in chain? */
  if (q != NULL) {
    /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
    LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len);
    /* enforce invariant if assertion is disabled */
    q->tot_len = p->tot_len - p->len;
    /* decouple pbuf from remainder */
    p->next = NULL;
    /* total length of pbuf p is its own length only */
    p->tot_len = p->len;
    /* q is no longer referenced by p, free it */
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q));
    tail_gone = pbuf_free(q);
    if (tail_gone > 0) {
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
                  ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q));
    }
    /* return remaining tail or NULL if deallocated */
  }
  /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
  LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
  return ((tail_gone > 0) ? NULL : q);
}

/**
 *
 * Create PBUF_RAM copies of pbufs.
 *
 * Used to queue packets on behalf of the lwIP stack, such as
 * ARP based queueing.
 *
 * @note You MUST explicitly use p = pbuf_take(p);
 *
 * @note Only one packet is copied, no packet queue!
 *
 * @param p_to pbuf destination of the copy
 * @param p_from pbuf source of the copy
 *
 * @return ERR_OK if pbuf was copied
 *         ERR_ARG if one of the pbufs is NULL or p_to is not big
 *                 enough to hold p_from
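 *
 * Illustrative sketch: copying a (possibly chained) pbuf into a single
 * freshly allocated PBUF_RAM pbuf of the same total length:
 *
 *   struct pbuf *q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
 *   if ((q != NULL) && (pbuf_copy(q, p) == ERR_OK)) {
 *     // q now holds a contiguous copy of p's contents
 *   }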
 */
err_t
pbuf_copy(struct pbuf *p_to, struct pbuf *p_from)
{
  u16_t offset_to=0, offset_from=0, len;

  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n",
    (void*)p_to, (void*)p_from));

  /* is the target big enough to hold the source? */
  LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) &&
             (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;);

  /* iterate through pbuf chain */
  do
  {
    /* copy one part of the original chain */
    if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
      /* complete current p_from fits into current p_to */
      len = p_from->len - offset_from;
    } else {
      /* current p_from does not fit into current p_to */
      len = p_to->len - offset_to;
    }
    MEMCPY((u8_t*)p_to->payload + offset_to, (u8_t*)p_from->payload + offset_from, len);
    offset_to += len;
    offset_from += len;
    LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
    LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
    if (offset_from >= p_from->len) {
      /* on to next p_from (if any) */
      offset_from = 0;
      p_from = p_from->next;
    }
    if (offset_to == p_to->len) {
      /* on to next p_to (if any) */
      offset_to = 0;
      p_to = p_to->next;
      LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL) , return ERR_ARG;);
    }

    if((p_from != NULL) && (p_from->len == p_from->tot_len)) {
      /* don't copy more than one packet! */
      LWIP_ERROR("pbuf_copy() does not allow packet queues!\n",
                 (p_from->next == NULL), return ERR_VAL;);
    }
    if((p_to != NULL) && (p_to->len == p_to->tot_len)) {
      /* don't copy more than one packet! */
      LWIP_ERROR("pbuf_copy() does not allow packet queues!\n",
                  (p_to->next == NULL), return ERR_VAL;);
    }
  } while (p_from);
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n"));
  return ERR_OK;
}

/**
 * Copy (part of) the contents of a packet buffer
 * to an application supplied buffer.
 *
 * @param buf the pbuf from which to copy data
 * @param dataptr the application supplied buffer
 * @param len length of data to copy (dataptr must be big enough). No more
 * than buf->tot_len will be copied, irrespective of len
 * @param offset offset into the packet buffer from where to begin copying len bytes
 * @return the number of bytes copied, or 0 on failure
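 *
 * Illustrative sketch (buffer size and offset are arbitrary assumptions):
 * extracting 4 bytes starting 2 bytes into the packet:
 *
 *   u8_t tmp[4];
 *   u16_t copied = pbuf_copy_partial(p, tmp, sizeof(tmp), 2);
 *   // copied is 4 unless the packet is shorter than 6 bytes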
 */
u16_t
pbuf_copy_partial(struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
{
  struct pbuf *p;
  u16_t left;
  u16_t buf_copy_len;
  u16_t copied_total = 0;

  LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
  LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);

  left = 0;

  if((buf == NULL) || (dataptr == NULL)) {
    return 0;
  }

  /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
  for(p = buf; len != 0 && p != NULL; p = p->next) {
    if ((offset != 0) && (offset >= p->len)) {
      /* don't copy from this buffer -> on to the next */
      offset -= p->len;
    } else {
      /* copy from this buffer. maybe only partially. */
      buf_copy_len = p->len - offset;
      if (buf_copy_len > len)
          buf_copy_len = len;
      /* copy the necessary parts of the buffer */
      MEMCPY(&((char*)dataptr)[left], &((char*)p->payload)[offset], buf_copy_len);
      copied_total += buf_copy_len;
      left += buf_copy_len;
      len -= buf_copy_len;
      offset = 0;
    }
  }
  return copied_total;
}

/**
 * Copy application supplied data into a pbuf.
 * This function can only be used to copy the equivalent of buf->tot_len data.
 *
 * @param buf pbuf to fill with data
 * @param dataptr application supplied data buffer
 * @param len length of the application supplied data buffer
 *
 * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
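 *
 * Illustrative sketch ('data' and 'data_len' are placeholders): filling a
 * freshly allocated pbuf with application data:
 *
 *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, data_len, PBUF_RAM);
 *   if ((p != NULL) && (pbuf_take(p, data, data_len) == ERR_OK)) {
 *     // p->payload now holds a copy of 'data'
 *   }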
 */
err_t
pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len)
{
  struct pbuf *p;
  u16_t buf_copy_len;
  u16_t total_copy_len = len;
  u16_t copied_total = 0;

  LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;);
  LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;);

  if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) {
    return ERR_ARG;
  }

  /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
  for(p = buf; total_copy_len != 0; p = p->next) {
    LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL);
    buf_copy_len = total_copy_len;
    if (buf_copy_len > p->len) {
      /* this pbuf cannot hold all remaining data */
      buf_copy_len = p->len;
    }
    /* copy the necessary parts of the buffer */
    MEMCPY(p->payload, &((char*)dataptr)[copied_total], buf_copy_len);
    total_copy_len -= buf_copy_len;
    copied_total += buf_copy_len;
  }
  LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len);
  return ERR_OK;
}

/**
 * Creates a single pbuf out of a queue of pbufs.
 *
 * @remark: Either the source pbuf 'p' is freed by this function or the original
 *          pbuf 'p' is returned, therefore the caller has to check the result!
 *
 * @param p the source pbuf
 * @param layer pbuf_layer of the new pbuf
 *
 * @return a new, single pbuf (p->next is NULL)
 *         or the old pbuf if allocation fails
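 *
 * Illustrative sketch (note that the result replaces the input pointer):
 *
 *   p = pbuf_coalesce(p, PBUF_RAW);
 *   if (p->next == NULL) {
 *     // success: the packet now lives in one contiguous pbuf
 *   }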
 */
struct pbuf*
pbuf_coalesce(struct pbuf *p, pbuf_layer layer)
{
  struct pbuf *q;
  err_t err;
  if (p->next == NULL) {
    return p;
  }
  q = pbuf_alloc(layer, p->tot_len, PBUF_RAM);
  if (q == NULL) {
    /* @todo: what do we do now? */
    return p;
  }
  err = pbuf_copy(q, p);
  LWIP_ASSERT("pbuf_copy failed", err == ERR_OK);
  pbuf_free(p);
  return q;
}

#if LWIP_CHECKSUM_ON_COPY
/**
 * Copies data into a single pbuf (*not* into a pbuf queue!) and updates
 * the checksum while copying
 *
 * @param p the pbuf to copy data into
 * @param start_offset offset of p->payload where to copy the data to
 * @param dataptr data to copy into the pbuf
 * @param len length of data to copy into the pbuf
 * @param chksum pointer to the checksum which is updated
 * @return ERR_OK if successful, another error if the data does not fit
 *         within the (first) pbuf (no pbuf queues!)
 */
err_t
pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr,
                 u16_t len, u16_t *chksum)
{
  u32_t acc;
  u16_t copy_chksum;
  char *dst_ptr;
  LWIP_ASSERT("p != NULL", p != NULL);
  LWIP_ASSERT("dataptr != NULL", dataptr != NULL);
  LWIP_ASSERT("chksum != NULL", chksum != NULL);
  LWIP_ASSERT("len != 0", len != 0);

  if ((start_offset >= p->len) || (start_offset + len > p->len)) {
    return ERR_ARG;
  }

  dst_ptr = ((char*)p->payload) + start_offset;
  copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len);
  if ((start_offset & 1) != 0) {
    copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum);
  }
  acc = *chksum;
  acc += copy_chksum;
  *chksum = FOLD_U32T(acc);
  return ERR_OK;
}
#endif /* LWIP_CHECKSUM_ON_COPY */

/** Get one byte from the specified position in a pbuf
 * WARNING: returns zero for offset >= p->tot_len
 *
 * @param p pbuf to parse
 * @param offset offset into p of the byte to return
 * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len
 */
u8_t
pbuf_get_at(struct pbuf* p, u16_t offset)
{
  u16_t copy_from = offset;
  struct pbuf* q = p;

  /* get the correct pbuf */
  while ((q != NULL) && (q->len <= copy_from)) {
    copy_from -= q->len;
    q = q->next;
  }
  /* return requested data if pbuf is OK */
  if ((q != NULL) && (q->len > copy_from)) {
    return ((u8_t*)q->payload)[copy_from];
  }
  return 0;
}

/** Compare pbuf contents at specified offset with memory s2, both of length n
 *
 * @param p pbuf to compare
 * @param offset offset into p at which to start comparing
 * @param s2 buffer to compare
 * @param n length of buffer to compare
 * @return zero if equal, nonzero otherwise
 *         (0xffff if p is too short, diffoffset+1 otherwise)
 */
u16_t
pbuf_memcmp(struct pbuf* p, u16_t offset, const void* s2, u16_t n)
{
  u16_t start = offset;
  struct pbuf* q = p;

  /* get the correct pbuf */
  while ((q != NULL) && (q->len <= start)) {
    start -= q->len;
    q = q->next;
  }
  /* return requested data if pbuf is OK */
  if ((q != NULL) && (q->len > start)) {
    u16_t i;
    for(i = 0; i < n; i++) {
      u8_t a = pbuf_get_at(q, start + i);
      u8_t b = ((u8_t*)s2)[i];
      if (a != b) {
        return i+1;
      }
    }
    return 0;
  }
  return 0xffff;
}

/** Find occurrence of mem (with length mem_len) in pbuf p, starting at offset
 * start_offset.
 *
 * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
 *        return value 'not found'
 * @param mem search for the contents of this buffer
 * @param mem_len length of 'mem'
 * @param start_offset offset into p at which to start searching
 * @return 0xFFFF if substr was not found in p or the index where it was found
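 *
 * Illustrative sketch: locating the end of a (hypothetical) HTTP header in
 * a received pbuf chain:
 *
 *   u16_t end = pbuf_memfind(p, "\r\n\r\n", 4, 0);
 *   if (end != 0xFFFF) {
 *     // the empty line starts at offset 'end'; the body follows at end + 4
 *   }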
 */
u16_t
pbuf_memfind(struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset)
{
  u16_t i;
  u16_t max = p->tot_len - mem_len;
  if (p->tot_len >= mem_len + start_offset) {
    for(i = start_offset; i <= max; ) {
      u16_t plus = pbuf_memcmp(p, i, mem, mem_len);
      if (plus == 0) {
        return i;
      } else {
        i += plus;
      }
    }
  }
  return 0xFFFF;
}

/** Find occurrence of substr with length substr_len in pbuf p, starting at
 * offset 0.
 * WARNING: in contrast to strstr(), this one does not stop at the first \0 in
 * the pbuf/source string!
 *
 * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
 *        return value 'not found'
 * @param substr string to search for in p, maximum length is 0xFFFE
 * @return 0xFFFF if substr was not found in p or the index where it was found
 */
u16_t
pbuf_strstr(struct pbuf* p, const char* substr)
{
  size_t substr_len;
  if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) {
    return 0xFFFF;
  }
  substr_len = strlen(substr);
  if (substr_len >= 0xFFFF) {
    return 0xFFFF;
  }
  return pbuf_memfind(p, substr, (u16_t)substr_len, 0);
}