1 /**
2 * @file
3 * Packet buffer management
4 */
5
6 /**
7 * @defgroup pbuf Packet buffers (PBUF)
8 * @ingroup infrastructure
9 *
10 * Packets are built from the pbuf data structure. It supports dynamic
11 * memory allocation for packet contents or can reference externally
12 * managed packet contents both in RAM and ROM. Quick allocation for
13 * incoming packets is provided through pools with fixed sized pbufs.
14 *
15 * A packet may span over multiple pbufs, chained as a singly linked
16 * list. This is called a "pbuf chain".
17 *
18 * Multiple packets may be queued, also using this singly linked list.
19 * This is called a "packet queue".
20 *
21 * So, a packet queue consists of one or more pbuf chains, each of
22 * which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
23 * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
24 *
25 * The difference between a pbuf chain and a packet queue is precise
26 * but subtle.
27 *
28 * The last pbuf of a packet has a ->tot_len field that equals the
29 * ->len field. It can be found by traversing the list. If the last
30 * pbuf of a packet has a ->next field other than NULL, more packets
31 * are on the queue.
32 *
33 * Therefore, when looping through the pbufs of a single packet, the
34 * loop end condition is (p->tot_len == p->len), NOT (p->next == NULL).
35 *
36 * Example of custom pbuf usage: @ref zerocopyrx
37 */
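
/*
 * A minimal traversal sketch for a single packet, assuming 'p' points to the
 * first pbuf of that packet and 'handle_fragment' is a hypothetical
 * application callback: the loop ends once the pbuf whose ->tot_len equals
 * its ->len has been processed, NOT when ->next becomes NULL.
 *
 *   struct pbuf *q;
 *   for (q = p; q != NULL; q = q->next) {
 *     handle_fragment(q->payload, q->len);
 *     if (q->tot_len == q->len) {
 *       break;  // last pbuf of this packet
 *     }
 *   }
 */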
38
39 /*
40 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without modification,
44 * are permitted provided that the following conditions are met:
45 *
46 * 1. Redistributions of source code must retain the above copyright notice,
47 * this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright notice,
49 * this list of conditions and the following disclaimer in the documentation
50 * and/or other materials provided with the distribution.
51 * 3. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
55 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
56 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
57 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
58 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
59 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
62 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
63 * OF SUCH DAMAGE.
64 *
65 * This file is part of the lwIP TCP/IP stack.
66 *
67 * Author: Adam Dunkels <adam@sics.se>
68 *
69 */
70
71 #include "lwip/opt.h"
72
73 #include "lwip/pbuf.h"
74 #include "lwip/stats.h"
75 #include "lwip/def.h"
76 #include "lwip/mem.h"
77 #include "lwip/memp.h"
78 #include "lwip/sys.h"
79 #include "lwip/netif.h"
80 #if LWIP_TCP && TCP_QUEUE_OOSEQ
81 #include "lwip/priv/tcp_priv.h"
82 #endif
83 #if LWIP_CHECKSUM_ON_COPY
84 #include "lwip/inet_chksum.h"
85 #endif
86
87 #include <string.h>
88
89 #define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
90 /* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
91 aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
92 #define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)
93
94 static const struct pbuf *
95 pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset);
96
97 #if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
98 #define PBUF_POOL_IS_EMPTY()
99 #else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
100
101 #if !NO_SYS
102 #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
103 #include "lwip/tcpip.h"
104 #define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \
105 if (tcpip_try_callback(pbuf_free_ooseq_callback, NULL) != ERR_OK) { \
106 SYS_ARCH_PROTECT(old_level); \
107 pbuf_free_ooseq_pending = 0; \
108 SYS_ARCH_UNPROTECT(old_level); \
109 } } while(0)
110 #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
111 #endif /* !NO_SYS */
112
113 volatile u8_t pbuf_free_ooseq_pending;
114 #define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()
115
116 /**
117 * Attempt to reclaim some memory from queued out-of-sequence TCP segments
118 * if we run out of pool pbufs. It's better to give priority to new packets
119 * if we're running out.
120 *
121 * This must be done in the correct thread context; therefore, this function
122 * can only be used with NO_SYS=0 and through tcpip_callback.
123 */
124 #if !NO_SYS
125 static
126 #endif /* !NO_SYS */
127 void
128 pbuf_free_ooseq(void)
129 {
130 struct tcp_pcb *pcb;
131 SYS_ARCH_SET(pbuf_free_ooseq_pending, 0);
132
133 for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
134 if (pcb->ooseq != NULL) {
135 /** Free the ooseq pbufs of one PCB only */
136 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
137 tcp_free_ooseq(pcb);
138 return;
139 }
140 }
141 }
142
143 #if !NO_SYS
144 /**
145 * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
146 */
147 static void
148 pbuf_free_ooseq_callback(void *arg)
149 {
150 LWIP_UNUSED_ARG(arg);
151 pbuf_free_ooseq();
152 }
153 #endif /* !NO_SYS */
154
155 /** Queue a call to pbuf_free_ooseq if not already queued. */
156 static void
157 pbuf_pool_is_empty(void)
158 {
159 #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
160 SYS_ARCH_SET(pbuf_free_ooseq_pending, 1);
161 #else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
162 u8_t queued;
163 SYS_ARCH_DECL_PROTECT(old_level);
164 SYS_ARCH_PROTECT(old_level);
165 queued = pbuf_free_ooseq_pending;
166 pbuf_free_ooseq_pending = 1;
167 SYS_ARCH_UNPROTECT(old_level);
168
169 if (!queued) {
170 /* queue a call to pbuf_free_ooseq if not already queued */
171 PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
172 }
173 #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
174 }
175 #endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
176
177 /* Initialize members of struct pbuf after allocation */
178 static void
179 pbuf_init_alloced_pbuf(struct pbuf *p, void *payload, u16_t tot_len, u16_t len, pbuf_type type, u8_t flags)
180 {
181 p->next = NULL;
182 p->payload = payload;
183 p->tot_len = tot_len;
184 p->len = len;
185 p->type_internal = (u8_t)type;
186 p->flags = flags;
187 p->ref = 1;
188 p->if_idx = NETIF_NO_INDEX;
189 #if ESP_PBUF
190 p->l2_owner = NULL;
191 p->l2_buf = NULL;
192 #endif
193 }
194
195 /**
196 * @ingroup pbuf
197 * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
198 *
199 * The actual memory allocated for the pbuf is determined by the
200 * layer at which the pbuf is allocated and the requested size
201 * (the 'length' parameter).
202 *
203 * @param layer pbuf layer at which the pbuf will be used; determines the header space reserved in front of the payload
204 * @param length size of the pbuf's payload
205 * @param type this parameter decides how and where the pbuf
206 * should be allocated as follows:
207 *
208 * - PBUF_RAM: buffer memory for pbuf is allocated as one large
209 * chunk. This includes protocol headers as well.
210 * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
211 * protocol headers. Additional headers must be prepended
212 * by allocating another pbuf and chain in to the front of
213 * the ROM pbuf. It is assumed that the memory used is really
214 * similar to ROM in that it is immutable and will not be
215 * changed. Memory which is dynamic should generally not
216 * be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
217 * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
218 * protocol headers. It is assumed that the pbuf is only
219 * being used in a single thread. If the pbuf gets queued,
220 * then pbuf_take should be called to copy the buffer.
221 * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
222 * the pbuf pool that is allocated during pbuf_init().
223 *
224 * @return the allocated pbuf. If multiple pbufs were allocated, this
225 * is the first pbuf of a pbuf chain.
226 */
227 struct pbuf *
228 pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
229 {
230 struct pbuf *p;
231 u16_t offset = (u16_t)layer;
232 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));
233
234 switch (type) {
235 case PBUF_REF: /* fall through */
236 case PBUF_ROM:
237 p = pbuf_alloc_reference(NULL, length, type);
238 break;
239 case PBUF_POOL: {
240 struct pbuf *q, *last;
241 u16_t rem_len; /* remaining length */
242 p = NULL;
243 last = NULL;
244 rem_len = length;
245 do {
246 u16_t qlen;
247 q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
248 if (q == NULL) {
249 PBUF_POOL_IS_EMPTY();
250 /* free chain so far allocated */
251 if (p) {
252 pbuf_free(p);
253 }
254 /* bail out unsuccessfully */
255 return NULL;
256 }
257 qlen = LWIP_MIN(rem_len, (u16_t)(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)));
258 pbuf_init_alloced_pbuf(q, LWIP_MEM_ALIGN((void *)((u8_t *)q + SIZEOF_STRUCT_PBUF + offset)),
259 rem_len, qlen, type, 0);
260 LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
261 ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
262 LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
263 (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
264 if (p == NULL) {
265 /* allocated head of pbuf chain (into p) */
266 p = q;
267 } else {
268 /* make previous pbuf point to this pbuf */
269 last->next = q;
270 }
271 last = q;
272 rem_len = (u16_t)(rem_len - qlen);
273 offset = 0;
274 } while (rem_len > 0);
275 break;
276 }
277 case PBUF_RAM: {
278 u16_t payload_len = (u16_t)(LWIP_MEM_ALIGN_SIZE(offset) + LWIP_MEM_ALIGN_SIZE(length));
279 mem_size_t alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF) + payload_len);
280
281 /* bug #50040: Check for integer overflow when calculating alloc_len */
282 if ((payload_len < LWIP_MEM_ALIGN_SIZE(length)) ||
283 (alloc_len < LWIP_MEM_ALIGN_SIZE(length))) {
284 return NULL;
285 }
286
287 /* If pbuf is to be allocated in RAM, allocate memory for it. */
288 p = (struct pbuf *)mem_malloc(alloc_len);
289 if (p == NULL) {
290 return NULL;
291 }
292 pbuf_init_alloced_pbuf(p, LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)),
293 length, length, type, 0);
294 LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
295 ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
296 break;
297 }
298 default:
299 LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
300 return NULL;
301 }
302 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
303 return p;
304 }
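
/*
 * Usage sketch (the UDP send is just one example; 'data', 'len' and 'upcb'
 * are assumed to exist in the application):
 *
 *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM);
 *   if (p != NULL) {
 *     if (pbuf_take(p, data, len) == ERR_OK) {
 *       udp_send(upcb, p);
 *     }
 *     pbuf_free(p);
 *   }
 */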
305
306 /**
307 * @ingroup pbuf
308 * Allocates a pbuf for referenced data.
309 * Referenced data can be volatile (PBUF_REF) or long-lived (PBUF_ROM).
310 *
311 * Only the pbuf structure itself is allocated (from the MEMP_PBUF pool);
312 * no payload memory is allocated, and the pbuf's payload pointer is set
313 * to the buffer supplied by the caller.
314 *
315 * @param payload referenced payload
316 * @param length size of the pbuf's payload
317 * @param type this parameter decides how and where the pbuf
318 * should be allocated as follows:
319 *
320 * - PBUF_ROM: It is assumed that the memory used is really
321 * similar to ROM in that it is immutable and will not be
322 * changed. Memory which is dynamic should generally not
323 * be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
324 * - PBUF_REF: It is assumed that the pbuf is only
325 * being used in a single thread. If the pbuf gets queued,
326 * then pbuf_take should be called to copy the buffer.
327 *
328 * @return the allocated pbuf.
329 */
330 struct pbuf *
331 pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type)
332 {
333 struct pbuf *p;
334 LWIP_ASSERT("invalid pbuf_type", (type == PBUF_REF) || (type == PBUF_ROM));
335 /* only allocate memory for the pbuf structure */
336 p = (struct pbuf *)memp_malloc(MEMP_PBUF);
337 if (p == NULL) {
338 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
339 ("pbuf_alloc_reference: Could not allocate MEMP_PBUF for PBUF_%s.\n",
340 (type == PBUF_ROM) ? "ROM" : "REF"));
341 return NULL;
342 }
343 pbuf_init_alloced_pbuf(p, payload, length, length, type, 0);
344 return p;
345 }
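
/*
 * Sketch of referencing immutable data without copying it (the static string
 * stands in for any long-lived, read-only buffer):
 *
 *   static const char resp[] = "HTTP/1.0 200 OK\r\n\r\n";
 *   struct pbuf *p = pbuf_alloc_reference(LWIP_CONST_CAST(void *, resp),
 *                                         sizeof(resp) - 1, PBUF_ROM);
 *   if (p != NULL) {
 *     ... hand p to the stack or chain it behind a header pbuf ...
 *     pbuf_free(p);
 *   }
 */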
346
347
348 #if LWIP_SUPPORT_CUSTOM_PBUF
349 /**
350 * @ingroup pbuf
351 * Initialize a custom pbuf (already allocated).
352 * Example of custom pbuf usage: @ref zerocopyrx
353 *
354 * @param l pbuf layer the pbuf will be used at; determines the header space reserved at the start of 'payload_mem'
355 * @param length size of the pbuf's payload
356 * @param type type of the pbuf (only used to treat the pbuf accordingly, as
357 * this function allocates no memory)
358 * @param p pointer to the custom pbuf to initialize (already allocated)
359 * @param payload_mem pointer to the buffer that is used for payload and headers,
360 * must be at least big enough to hold 'length' plus the header size,
361 * may be NULL if set later.
362 * ATTENTION: The caller is responsible for correct alignment of this buffer!!
363 * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
364 * big enough to hold 'length' plus the header size
365 */
366 struct pbuf *
367 pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
368 void *payload_mem, u16_t payload_mem_len)
369 {
370 u16_t offset = (u16_t)l;
371 void *payload;
372 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
373
374 if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
375 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
376 return NULL;
377 }
378
379 if (payload_mem != NULL) {
380 payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
381 } else {
382 payload = NULL;
383 }
384 pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM);
385 return &p->pbuf;
386 }
387 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
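
/*
 * Zero-copy RX sketch for a custom pbuf wrapping a driver-owned DMA buffer;
 * 'my_rx_desc_t' and 'my_release_rx_desc' are hypothetical driver names:
 *
 *   typedef struct my_rx_desc {
 *     struct pbuf_custom pc;   // must be the first member
 *     void *dma_buf;
 *   } my_rx_desc_t;
 *
 *   static void my_pbuf_free(struct pbuf *p)
 *   {
 *     my_release_rx_desc((my_rx_desc_t *)p);  // return the buffer to the driver
 *   }
 *
 *   // on reception of 'len' bytes into desc->dma_buf:
 *   desc->pc.custom_free_function = my_pbuf_free;
 *   struct pbuf *p = pbuf_alloced_custom(PBUF_RAW, len, PBUF_REF,
 *                                        &desc->pc, desc->dma_buf, buf_size);
 */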
388
389 /**
390 * @ingroup pbuf
391 * Shrink a pbuf chain to a desired length.
392 *
393 * @param p pbuf to shrink.
394 * @param new_len desired new length of pbuf chain
395 *
396 * Depending on the desired length, the first few pbufs in a chain might
397 * be skipped and left unchanged. The new last pbuf in the chain will be
398 * resized, and any remaining pbufs will be freed.
399 *
400 * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
401 * @note May not be called on a packet queue.
402 *
403 * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
404 */
405 void
406 pbuf_realloc(struct pbuf *p, u16_t new_len)
407 {
408 struct pbuf *q;
409 u16_t rem_len; /* remaining length */
410 u16_t shrink;
411
412 LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
413
414 /* desired length larger than current length? */
415 if (new_len >= p->tot_len) {
416 /* enlarging not yet supported */
417 return;
418 }
419
420 /* the pbuf chain shrinks by (p->tot_len - new_len) bytes
421 * (enlarging was already ruled out by the early return above) */
422 shrink = (u16_t)(p->tot_len - new_len);
423
424 /* first, step over any pbufs that should remain in the chain */
425 rem_len = new_len;
426 q = p;
427 /* should this pbuf be kept? */
428 while (rem_len > q->len) {
429 /* decrease remaining length by pbuf length */
430 rem_len = (u16_t)(rem_len - q->len);
431 /* decrease total length indicator */
432 q->tot_len = (u16_t)(q->tot_len - shrink);
433 /* proceed to next pbuf in chain */
434 q = q->next;
435 LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
436 }
437 /* we have now reached the new last pbuf (in q) */
438 /* rem_len == desired length for pbuf q */
439
440 /* shrink allocated memory for PBUF_RAM */
441 /* (other types merely adjust their length fields) */
442 if (pbuf_match_allocsrc(q, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) && (rem_len != q->len)
443 #if LWIP_SUPPORT_CUSTOM_PBUF
444 && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
445 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
446 ) {
447 /* reallocate and adjust the length of the pbuf that will be split */
448 q = (struct pbuf *)mem_trim(q, (mem_size_t)(((u8_t *)q->payload - (u8_t *)q) + rem_len));
449 LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
450 }
451 /* adjust length fields for new last pbuf */
452 q->len = rem_len;
453 q->tot_len = q->len;
454
455 /* any remaining pbufs in chain? */
456 if (q->next != NULL) {
457 /* free remaining pbufs in chain */
458 pbuf_free(q->next);
459 }
460 /* q is the last pbuf in the chain */
461 q->next = NULL;
462
463 }
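
/*
 * Shrinking sketch: trim a chain allocated for a maximum sized frame down to
 * the number of bytes actually received ('rx_len' is assumed to come from the
 * driver and to be <= p->tot_len):
 *
 *   struct pbuf *p = pbuf_alloc(PBUF_RAW, 1536, PBUF_POOL);
 *   if (p != NULL) {
 *     ... DMA the frame into the chain ...
 *     pbuf_realloc(p, rx_len);   // frees any now-unused pbufs at the tail
 *   }
 */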
464
465 /**
466 * Adjusts the payload pointer to reveal headers in the payload.
467 * @see pbuf_add_header.
468 *
469 * @param p pbuf to change the header size.
470 * @param header_size_increment Number of bytes to increment header size.
471 * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types
472 *
473 * @return non-zero on failure, zero on success.
474 *
475 */
476 static u8_t
477 pbuf_add_header_impl(struct pbuf *p, size_t header_size_increment, u8_t force)
478 {
479 u16_t type_internal;
480 void *payload;
481 u16_t increment_magnitude;
482
483 LWIP_ASSERT("p != NULL", p != NULL);
484 if ((p == NULL) || (header_size_increment > 0xFFFF)) {
485 return 1;
486 }
487 if (header_size_increment == 0) {
488 return 0;
489 }
490
491 increment_magnitude = (u16_t)header_size_increment;
492 /* Do not allow tot_len to wrap as a result. */
493 if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) {
494 return 1;
495 }
496
497 type_internal = p->type_internal;
498
499 /* pbuf types containing payloads? */
500 if (type_internal & PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS) {
501 /* set new payload pointer */
502 payload = (u8_t *)p->payload - header_size_increment;
503 /* boundary check fails? */
504 if ((u8_t *)payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) {
505 LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE,
506 ("pbuf_add_header: failed as %p < %p (not enough space for new header size)\n",
507 (void *)payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF)));
508 /* bail out unsuccessfully */
509 return 1;
510 }
511 /* pbuf types referring to external payloads? */
512 } else {
513 /* hide a header in the payload? */
514 if (force) {
515 payload = (u8_t *)p->payload - header_size_increment;
516 } else {
517 /* cannot expand payload to front (yet!)
518 * bail out unsuccessfully */
519 return 1;
520 }
521 }
522 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_add_header: old %p new %p (%"U16_F")\n",
523 (void *)p->payload, (void *)payload, increment_magnitude));
524
525 /* modify pbuf fields */
526 p->payload = payload;
527 p->len = (u16_t)(p->len + increment_magnitude);
528 p->tot_len = (u16_t)(p->tot_len + increment_magnitude);
529
530
531 return 0;
532 }
533
534 /**
535 * Adjusts the payload pointer to reveal headers in the payload.
536 *
537 * Adjusts the ->payload pointer so that space for a header
538 * appears in the pbuf payload.
539 *
540 * The ->payload, ->tot_len and ->len fields are adjusted.
541 *
542 * @param p pbuf to change the header size.
543 * @param header_size_increment Number of bytes to increment header size which
544 * increases the size of the pbuf. New space is on the front.
545 * If header_size_increment is 0, this function does nothing and returns successful.
546 *
547 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
548 * the call will fail. A check is made that the increase in header size does
549 * not move the payload pointer in front of the start of the buffer.
550 *
551 * @return non-zero on failure, zero on success.
552 *
553 */
554 u8_t
555 pbuf_add_header(struct pbuf *p, size_t header_size_increment)
556 {
557 return pbuf_add_header_impl(p, header_size_increment, 0);
558 }
559
560 /**
561 * Same as @ref pbuf_add_header but does not check if 'header_size > 0' is allowed.
562 * This is used internally only, to allow PBUF_REF for RX.
563 */
564 u8_t
565 pbuf_add_header_force(struct pbuf *p, size_t header_size_increment)
566 {
567 return pbuf_add_header_impl(p, header_size_increment, 1);
568 }
569
570 /**
571 * Adjusts the payload pointer to hide headers in the payload.
572 *
573 * Adjusts the ->payload pointer so that space for a header
574 * disappears in the pbuf payload.
575 *
576 * The ->payload, ->tot_len and ->len fields are adjusted.
577 *
578 * @param p pbuf to change the header size.
579 * @param header_size_decrement Number of bytes to decrement header size which
580 * decreases the size of the pbuf.
581 * If header_size_decrement is 0, this function does nothing and returns successful.
582 * @return non-zero on failure, zero on success.
583 *
584 */
585 u8_t
586 pbuf_remove_header(struct pbuf *p, size_t header_size_decrement)
587 {
588 void *payload;
589 u16_t increment_magnitude;
590
591 LWIP_ASSERT("p != NULL", p != NULL);
592 if ((p == NULL) || (header_size_decrement > 0xFFFF)) {
593 return 1;
594 }
595 if (header_size_decrement == 0) {
596 return 0;
597 }
598
599 increment_magnitude = (u16_t)header_size_decrement;
600 /* Check that we aren't going to move off the end of the pbuf */
601 LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;);
602
603 /* remember current payload pointer */
604 payload = p->payload;
605 LWIP_UNUSED_ARG(payload); /* only used in LWIP_DEBUGF below */
606
607 /* increase payload pointer (guarded by length check above) */
608 p->payload = (u8_t *)p->payload + header_size_decrement;
609 /* modify pbuf length fields */
610 p->len = (u16_t)(p->len - increment_magnitude);
611 p->tot_len = (u16_t)(p->tot_len - increment_magnitude);
612
613 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_remove_header: old %p new %p (%"U16_F")\n",
614 (void *)payload, (void *)p->payload, increment_magnitude));
615
616 return 0;
617 }
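
/*
 * Header handling sketch (the 8-byte header and 'my_header' buffer are
 * illustrative assumptions):
 *
 *   if (pbuf_add_header(p, 8) == 0) {
 *     // p->payload now points 8 bytes earlier; write the header there
 *     MEMCPY(p->payload, my_header, 8);
 *   }
 *
 *   // on the receive path, hide a header again before handing the payload
 *   // to the next layer:
 *   pbuf_remove_header(p, 8);
 */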
618
619 static u8_t
620 pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force)
621 {
622 if (header_size_increment < 0) {
623 return pbuf_remove_header(p, (size_t) - header_size_increment);
624 } else {
625 return pbuf_add_header_impl(p, (size_t)header_size_increment, force);
626 }
627 }
628
629 /**
630 * Adjusts the payload pointer to hide or reveal headers in the payload.
631 *
632 * Adjusts the ->payload pointer so that space for a header
633 * (dis)appears in the pbuf payload.
634 *
635 * The ->payload, ->tot_len and ->len fields are adjusted.
636 *
637 * @param p pbuf to change the header size.
638 * @param header_size_increment Number of bytes to increment header size which
639 * increases the size of the pbuf. New space is on the front.
640 * (Using a negative value decreases the header size.)
641 * If header_size_increment is 0, this function does nothing and returns successful.
642 *
643 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
644 * the call will fail. A check is made that the increase in header size does
645 * not move the payload pointer in front of the start of the buffer.
646 * @return non-zero on failure, zero on success.
647 *
648 */
649 u8_t
650 pbuf_header(struct pbuf *p, s16_t header_size_increment)
651 {
652 return pbuf_header_impl(p, header_size_increment, 0);
653 }
654
655 /**
656 * Same as pbuf_header but does not check if 'header_size > 0' is allowed.
657 * This is used internally only, to allow PBUF_REF for RX.
658 */
659 u8_t
660 pbuf_header_force(struct pbuf *p, s16_t header_size_increment)
661 {
662 return pbuf_header_impl(p, header_size_increment, 1);
663 }
664
665 /** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len)
666 *
667 * @param q pbufs to operate on
668 * @param size The number of bytes to remove from the beginning of the pbuf list.
669 * While size >= p->len, pbufs are freed.
670 * ATTENTION: this works in the opposite direction from @ref pbuf_header, and
671 * takes a u16_t, not an s16_t!
672 * @return the new head pbuf
673 */
674 struct pbuf *
675 pbuf_free_header(struct pbuf *q, u16_t size)
676 {
677 struct pbuf *p = q;
678 u16_t free_left = size;
679 while (free_left && p) {
680 if (free_left >= p->len) {
681 struct pbuf *f = p;
682 free_left = (u16_t)(free_left - p->len);
683 p = p->next;
684 f->next = 0;
685 pbuf_free(f);
686 } else {
687 pbuf_remove_header(p, free_left);
688 free_left = 0;
689 }
690 }
691 return p;
692 }
693
694 /**
695 * @ingroup pbuf
696 * Dereference a pbuf chain or queue and deallocate any no-longer-used
697 * pbufs at the head of this chain or queue.
698 *
699 * Decrements the pbuf reference count. If it reaches zero, the pbuf is
700 * deallocated.
701 *
702 * For a pbuf chain, this is repeated for each pbuf in the chain,
703 * up to the first pbuf which has a non-zero reference count after
704 * decrementing. So, when all reference counts are one, the whole
705 * chain is freed.
706 *
707 * @param p The pbuf (chain) to be dereferenced.
708 *
709 * @return the number of pbufs that were de-allocated
710 * from the head of the chain.
711 *
712 * @note MUST NOT be called on a packet queue (Not verified to work yet).
713 * @note the reference counter of a pbuf equals the number of pointers
714 * that refer to the pbuf (or into the pbuf).
715 *
716 * @internal examples:
717 *
718 * Assuming existing chains a->b->c with the following reference
719 * counts, calling pbuf_free(a) results in:
720 *
721 * 1->2->3 becomes ...1->3
722 * 3->3->3 becomes 2->3->3
723 * 1->1->2 becomes ......1
724 * 2->1->1 becomes 1->1->1
725 * 1->1->1 becomes .......
726 *
727 */
728 u8_t
729 pbuf_free(struct pbuf *p)
730 {
731 u8_t alloc_src;
732 struct pbuf *q;
733 u8_t count;
734
735 if (p == NULL) {
736 LWIP_ASSERT("p != NULL", p != NULL);
737 /* if assertions are disabled, proceed with debug output */
738 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
739 ("pbuf_free(p == NULL) was called.\n"));
740 return 0;
741 }
742 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));
743
744 PERF_START;
745
746 count = 0;
747 /* de-allocate all consecutive pbufs from the head of the chain that
748 * obtain a zero reference count after decrementing */
749 while (p != NULL) {
750 LWIP_PBUF_REF_T ref;
751 SYS_ARCH_DECL_PROTECT(old_level);
752 /* Since decrementing ref cannot be guaranteed to be a single machine operation,
753 * we must protect it. We copy the new ref into a local variable so that the
754 * test below does not need to be protected as well. */
755 SYS_ARCH_PROTECT(old_level);
756 /* all pbufs in a chain are referenced at least once */
757 LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
758 /* decrease reference count (number of pointers to pbuf) */
759 ref = --(p->ref);
760 SYS_ARCH_UNPROTECT(old_level);
761 /* this pbuf is no longer referenced to? */
762 if (ref == 0) {
763 /* remember next pbuf in chain for next iteration */
764 q = p->next;
765 LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
766 alloc_src = pbuf_get_allocsrc(p);
767 #if LWIP_SUPPORT_CUSTOM_PBUF
768 /* is this a custom pbuf? */
769 if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
770 struct pbuf_custom *pc = (struct pbuf_custom *)p;
771 LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
772 pc->custom_free_function(p);
773 } else
774 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
775 {
776 /* is this a pbuf from the pool? */
777 if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) {
778 memp_free(MEMP_PBUF_POOL, p);
779 /* is this a ROM or RAM referencing pbuf? */
780 } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF) {
781 #if ESP_PBUF
782 if (p->l2_owner != NULL && p->l2_buf != NULL && p->l2_owner->l2_buffer_free_notify != NULL) {
783 p->l2_owner->l2_buffer_free_notify(p->l2_owner, p->l2_buf);
784 }
785 #endif
786 memp_free(MEMP_PBUF, p);
787 /* type == PBUF_RAM */
788 } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) {
789 mem_free(p);
790 } else {
791 /* @todo: support freeing other types */
792 LWIP_ASSERT("invalid pbuf type", 0);
793 }
794 }
795 count++;
796 /* proceed to next pbuf */
797 p = q;
798 /* p->ref > 0, this pbuf is still referenced to */
799 /* (and so the remaining pbufs in chain as well) */
800 } else {
801 LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, (u16_t)ref));
802 /* stop walking through the chain */
803 p = NULL;
804 }
805 }
806 PERF_STOP("pbuf_free");
807 /* return number of de-allocated pbufs */
808 return count;
809 }
810
811 /**
812 * Count number of pbufs in a chain
813 *
814 * @param p first pbuf of chain
815 * @return the number of pbufs in a chain
816 */
817 u16_t
818 pbuf_clen(const struct pbuf *p)
819 {
820 u16_t len;
821
822 len = 0;
823 while (p != NULL) {
824 ++len;
825 p = p->next;
826 }
827 return len;
828 }
829
830 /**
831 * @ingroup pbuf
832 * Increment the reference count of the pbuf.
833 *
834 * @param p pbuf to increase reference counter of
835 *
836 */
837 void
838 pbuf_ref(struct pbuf *p)
839 {
840 /* pbuf given? */
841 if (p != NULL) {
842 SYS_ARCH_SET(p->ref, (LWIP_PBUF_REF_T)(p->ref + 1));
843 LWIP_ASSERT("pbuf ref overflow", p->ref > 0);
844 }
845 }
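
/*
 * Reference counting sketch: a module that keeps a packet beyond the call
 * that delivered it takes its own reference and drops it later
 * ('my_queue_push'/'my_queue_pop' are hypothetical helpers):
 *
 *   void my_enqueue(struct pbuf *p)
 *   {
 *     pbuf_ref(p);        // e.g. ref goes from 1 to 2
 *     my_queue_push(p);
 *   }
 *
 *   void my_process_one(void)
 *   {
 *     struct pbuf *p = my_queue_pop();
 *     ... process ...
 *     pbuf_free(p);       // drops our reference; the chain is only
 *   }                     // deallocated once the count reaches zero
 */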
846
847 /**
848 * @ingroup pbuf
849 * Concatenate two pbufs (each may be a pbuf chain) and take over
850 * the caller's reference of the tail pbuf.
851 *
852 * @note The caller MAY NOT reference the tail pbuf afterwards.
853 * Use pbuf_chain() for that purpose.
854 *
855 * This function explicitly does not check for tot_len overflow to prevent
856 * failing to queue too long pbufs. This can produce invalid pbufs, so
857 * handle with care!
858 *
859 * @see pbuf_chain()
860 */
861 void
862 pbuf_cat(struct pbuf *h, struct pbuf *t)
863 {
864 struct pbuf *p;
865
866 LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)",
867 ((h != NULL) && (t != NULL)), return;);
868
869 /* proceed to last pbuf of chain */
870 for (p = h; p->next != NULL; p = p->next) {
871 /* add total length of second chain to all totals of first chain */
872 p->tot_len = (u16_t)(p->tot_len + t->tot_len);
873 }
874 /* { p is last pbuf of first h chain, p->next == NULL } */
875 LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
876 LWIP_ASSERT("p->next == NULL", p->next == NULL);
877 /* add total length of second chain to last pbuf total of first chain */
878 p->tot_len = (u16_t)(p->tot_len + t->tot_len);
879 /* chain last pbuf of head (p) with first of tail (t) */
880 p->next = t;
881 /* p->next now references t, but the caller will drop its reference to t,
882 * so the net effect is no change to the reference count of t.
883 */
884 }
885
886 /**
887 * @ingroup pbuf
888 * Chain two pbufs (or pbuf chains) together.
889 *
890 * The caller MUST call pbuf_free(t) once it has stopped
891 * using it. Use pbuf_cat() instead if you no longer use t.
892 *
893 * @param h head pbuf (chain)
894 * @param t tail pbuf (chain)
895 * @note The pbufs MUST belong to the same packet.
896 * @note MAY NOT be called on a packet queue.
897 *
898 * The ->tot_len fields of all pbufs of the head chain are adjusted.
899 * The ->next field of the last pbuf of the head chain is adjusted.
900 * The ->ref field of the first pbuf of the tail chain is adjusted.
901 *
902 */
903 void
904 pbuf_chain(struct pbuf *h, struct pbuf *t)
905 {
906 pbuf_cat(h, t);
907 /* t is now referenced by h */
908 pbuf_ref(t);
909 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
910 }
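
/*
 * Ownership sketch showing the difference between pbuf_cat() and pbuf_chain()
 * (both 'h' and 't' are assumed to have been allocated by the caller):
 *
 *   pbuf_cat(h, t);     // h takes over the caller's reference to t:
 *                       // do NOT use or free t afterwards
 *
 *   pbuf_chain(h, t);   // t->ref is incremented: the caller keeps its own
 *   ...                 // reference and must still call
 *   pbuf_free(t);       // when it is done with t
 */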
911
912 /**
913 * Dechains the first pbuf from its succeeding pbufs in the chain.
914 *
915 * Makes p->tot_len field equal to p->len.
916 * @param p pbuf to dechain
917 * @return remainder of the pbuf chain, or NULL if it was de-allocated.
918 * @note May not be called on a packet queue.
919 */
920 struct pbuf *
921 pbuf_dechain(struct pbuf *p)
922 {
923 struct pbuf *q;
924 u8_t tail_gone = 1;
925 /* tail */
926 q = p->next;
927 /* pbuf has successor in chain? */
928 if (q != NULL) {
929 /* assert tot_len invariant: (p->tot_len == p->len + (p->next ? p->next->tot_len : 0)) */
930 LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len);
931 /* enforce invariant if assertion is disabled */
932 q->tot_len = (u16_t)(p->tot_len - p->len);
933 /* decouple pbuf from remainder */
934 p->next = NULL;
935 /* total length of pbuf p is its own length only */
936 p->tot_len = p->len;
937 /* q is no longer referenced by p, free it */
938 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q));
939 tail_gone = pbuf_free(q);
940 if (tail_gone > 0) {
941 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
942 ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q));
943 }
944 /* return remaining tail or NULL if deallocated */
945 }
946 /* assert tot_len invariant: (p->tot_len == p->len + (p->next ? p->next->tot_len : 0)) */
947 LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
948 return ((tail_gone > 0) ? NULL : q);
949 }
950
951 /**
952 * @ingroup pbuf
953 * Create PBUF_RAM copies of pbufs.
954 *
955 * Used to queue packets on behalf of the lwIP stack, such as
956 * ARP based queueing.
957 *
958 * @note The destination pbuf (chain) must be preallocated with a tot_len of
 * at least p_from->tot_len, e.g. via pbuf_alloc().
959 *
960 * @note Only one packet is copied, no packet queue!
961 *
962 * @param p_to pbuf destination of the copy
963 * @param p_from pbuf source of the copy
964 *
965 * @return ERR_OK if pbuf was copied
966 * ERR_ARG if one of the pbufs is NULL or p_to is not big
967 * enough to hold p_from
968 */
969 err_t
970 pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from)
971 {
972 size_t offset_to = 0, offset_from = 0, len;
973
974 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n",
975 (const void *)p_to, (const void *)p_from));
976
977 /* is the target big enough to hold the source? */
978 LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) &&
979 (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;);
980
981 /* iterate through pbuf chain */
982 do {
983 /* copy one part of the original chain */
984 if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
985 /* complete current p_from fits into current p_to */
986 len = p_from->len - offset_from;
987 } else {
988 /* current p_from does not fit into current p_to */
989 len = p_to->len - offset_to;
990 }
991 MEMCPY((u8_t *)p_to->payload + offset_to, (u8_t *)p_from->payload + offset_from, len);
992 offset_to += len;
993 offset_from += len;
994 LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
995 LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
996 if (offset_from >= p_from->len) {
997 /* on to next p_from (if any) */
998 offset_from = 0;
999 p_from = p_from->next;
1000 }
1001 if (offset_to == p_to->len) {
1002 /* on to next p_to (if any) */
1003 offset_to = 0;
1004 p_to = p_to->next;
1005 LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL), return ERR_ARG;);
1006 }
1007
1008 if ((p_from != NULL) && (p_from->len == p_from->tot_len)) {
1009 /* don't copy more than one packet! */
1010 LWIP_ERROR("pbuf_copy() does not allow packet queues!",
1011 (p_from->next == NULL), return ERR_VAL;);
1012 }
1013 if ((p_to != NULL) && (p_to->len == p_to->tot_len)) {
1014 /* don't copy more than one packet! */
1015 LWIP_ERROR("pbuf_copy() does not allow packet queues!",
1016 (p_to->next == NULL), return ERR_VAL;);
1017 }
1018 } while (p_from);
1019 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n"));
1020 return ERR_OK;
1021 }
1022
1023 /**
1024 * @ingroup pbuf
1025 * Copy (part of) the contents of a packet buffer
1026 * to an application supplied buffer.
1027 *
1028 * @param buf the pbuf from which to copy data
1029 * @param dataptr the application supplied buffer
1030 * @param len length of data to copy (dataptr must be big enough). No more
1031 * than buf->tot_len will be copied, irrespective of len
1032 * @param offset offset into the packet buffer from where to begin copying len bytes
1033 * @return the number of bytes copied, or 0 on failure
1034 */
1035 u16_t
1036 pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
1037 {
1038 const struct pbuf *p;
1039 u16_t left = 0;
1040 u16_t buf_copy_len;
1041 u16_t copied_total = 0;
1042
1043 LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
1044 LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);
1045
1046 /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1047 for (p = buf; len != 0 && p != NULL; p = p->next) {
1048 if ((offset != 0) && (offset >= p->len)) {
1049 /* don't copy from this buffer -> on to the next */
1050 offset = (u16_t)(offset - p->len);
1051 } else {
1052 /* copy from this buffer. maybe only partially. */
1053 buf_copy_len = (u16_t)(p->len - offset);
1054 if (buf_copy_len > len) {
1055 buf_copy_len = len;
1056 }
1057 /* copy the necessary parts of the buffer */
1058 MEMCPY(&((char *)dataptr)[left], &((char *)p->payload)[offset], buf_copy_len);
1059 copied_total = (u16_t)(copied_total + buf_copy_len);
1060 left = (u16_t)(left + buf_copy_len);
1061 len = (u16_t)(len - buf_copy_len);
1062 offset = 0;
1063 }
1064 }
1065 return copied_total;
1066 }
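
/*
 * Sketch: peek at the first bytes of a packet by copying them into a small
 * stack buffer (the 20-byte length is arbitrary):
 *
 *   u8_t hdr[20];
 *   if (pbuf_copy_partial(p, hdr, sizeof(hdr), 0) == sizeof(hdr)) {
 *     ... parse hdr ...
 *   }
 */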
1067
1068 /**
1069 * @ingroup pbuf
1070 * Get part of a pbuf's payload as contiguous memory. The returned memory is
1071 * either a pointer into the pbuf's payload or, if split over multiple pbufs,
1072 * a copy into the user-supplied buffer.
1073 *
1074 * @param p the pbuf from which to copy data
1075 * @param buffer the application supplied buffer
1076 * @param bufsize size of the application supplied buffer
1077 * @param len length of data that must be available as contiguous memory
1078 * ('buffer' must be at least 'len' bytes)
1079 * @param offset offset into the packet buffer at which the 'len' bytes start
1080 * @return pointer to the contiguous data (into the pbuf or copied into 'buffer'), or NULL on failure
1081 */
1082 void *
1083 pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset)
1084 {
1085 const struct pbuf *q;
1086 u16_t out_offset;
1087
1088 LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;);
1089 LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;);
1090 LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;);
1091
1092 q = pbuf_skip_const(p, offset, &out_offset);
1093 if (q != NULL) {
1094 if (q->len >= (out_offset + len)) {
1095 /* all data in this pbuf, return zero-copy */
1096 return (u8_t *)q->payload + out_offset;
1097 }
1098 /* need to copy */
1099 if (pbuf_copy_partial(q, buffer, len, out_offset) != len) {
1100 /* copying failed: pbuf is too short */
1101 return NULL;
1102 }
1103 return buffer;
1104 }
1105 /* pbuf is too short (offset does not fit in) */
1106 return NULL;
1107 }
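
/*
 * Sketch: get a contiguous view of bytes 14..33 of a frame, paying for a copy
 * only if that range happens to be split across pbufs:
 *
 *   u8_t storage[20];
 *   const u8_t *hdr = (const u8_t *)pbuf_get_contiguous(p, storage,
 *                                                       sizeof(storage), 20, 14);
 *   if (hdr != NULL) {
 *     ... hdr points either into the pbuf or into 'storage' ...
 *   }
 */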
1108
1109 #if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1110 /**
1111 * This method modifies a 'pbuf chain', so that its total length is
1112 * smaller than 64K. The remainder of the original pbuf chain is stored
1113 * in *rest.
1114 * This function never creates new pbufs, but splits an existing chain
1115 * in two parts. The tot_len of the modified packet queue will likely be
1116 * smaller than 64K.
1117 * 'packet queues' are not supported by this function.
1118 *
1119 * @param p the pbuf queue to be split
1120 * @param rest pointer to store the remainder (after the first 64K)
1121 */
1122 void pbuf_split_64k(struct pbuf *p, struct pbuf **rest)
1123 {
1124 *rest = NULL;
1125 if ((p != NULL) && (p->next != NULL)) {
1126 u16_t tot_len_front = p->len;
1127 struct pbuf *i = p;
1128 struct pbuf *r = p->next;
1129
1130 /* continue until the total length (summed up as u16_t) overflows */
1131 while ((r != NULL) && ((u16_t)(tot_len_front + r->len) >= tot_len_front)) {
1132 tot_len_front = (u16_t)(tot_len_front + r->len);
1133 i = r;
1134 r = r->next;
1135 }
1136 /* i now points to last packet of the first segment. Set next
1137 pointer to NULL */
1138 i->next = NULL;
1139
1140 if (r != NULL) {
1141 /* Update the tot_len field in the first part */
1142 for (i = p; i != NULL; i = i->next) {
1143 i->tot_len = (u16_t)(i->tot_len - r->tot_len);
1144 LWIP_ASSERT("tot_len/len mismatch in last pbuf",
1145 (i->next != NULL) || (i->tot_len == i->len));
1146 }
1147 if (p->flags & PBUF_FLAG_TCP_FIN) {
1148 r->flags |= PBUF_FLAG_TCP_FIN;
1149 }
1150
1151 /* tot_len field in rest does not need modifications */
1152 /* reference counters do not need modifications */
1153 *rest = r;
1154 }
1155 }
1156 }
1157 #endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1158
1159 /* Actual implementation of pbuf_skip() but returning const pointer... */
1160 static const struct pbuf *
1161 pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset)
1162 {
1163 u16_t offset_left = in_offset;
1164 const struct pbuf *q = in;
1165
1166 /* get the correct pbuf */
1167 while ((q != NULL) && (q->len <= offset_left)) {
1168 offset_left = (u16_t)(offset_left - q->len);
1169 q = q->next;
1170 }
1171 if (out_offset != NULL) {
1172 *out_offset = offset_left;
1173 }
1174 return q;
1175 }
1176
1177 /**
1178 * @ingroup pbuf
1179 * Skip a number of bytes at the start of a pbuf
1180 *
1181 * @param in input pbuf
1182 * @param in_offset offset to skip
1183 * @param out_offset resulting offset in the returned pbuf
1184 * @return the pbuf in the queue where the offset is
1185 */
1186 struct pbuf *
1187 pbuf_skip(struct pbuf *in, u16_t in_offset, u16_t *out_offset)
1188 {
1189 const struct pbuf *out = pbuf_skip_const(in, in_offset, out_offset);
1190 return LWIP_CONST_CAST(struct pbuf *, out);
1191 }
1192
1193 /**
1194 * @ingroup pbuf
1195 * Copy application supplied data into a pbuf.
1196 * At most buf->tot_len bytes can be copied; larger requests fail with ERR_MEM.
1197 *
1198 * @param buf pbuf to fill with data
1199 * @param dataptr application supplied data buffer
1200 * @param len length of the application supplied data buffer
1201 *
1202 * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
1203 */
1204 err_t
1205 pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len)
1206 {
1207 struct pbuf *p;
1208 size_t buf_copy_len;
1209 size_t total_copy_len = len;
1210 size_t copied_total = 0;
1211
1212 LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;);
1213 LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
1214 LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;);
1215
1216 if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) {
1217 return ERR_ARG;
1218 }
1219
1220 /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1221 for (p = buf; total_copy_len != 0; p = p->next) {
1222 LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL);
1223 buf_copy_len = total_copy_len;
1224 if (buf_copy_len > p->len) {
1225 /* this pbuf cannot hold all remaining data */
1226 buf_copy_len = p->len;
1227 }
1228 /* copy the necessary parts of the buffer */
1229 MEMCPY(p->payload, &((const char *)dataptr)[copied_total], buf_copy_len);
1230 total_copy_len -= buf_copy_len;
1231 copied_total += buf_copy_len;
1232 }
1233 LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len);
1234 return ERR_OK;
1235 }
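
/*
 * Sketch: fill a freshly allocated pbuf (chain) from an application buffer
 * 'data' of 'len' bytes:
 *
 *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_POOL);
 *   if ((p != NULL) && (pbuf_take(p, data, len) == ERR_OK)) {
 *     ... p now holds a private copy of 'data', spread over the chain ...
 *   }
 */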
1236
1237 /**
1238 * @ingroup pbuf
1239 * Same as pbuf_take() but puts data at an offset
1240 *
1241 * @param buf pbuf to fill with data
1242 * @param dataptr application supplied data buffer
1243 * @param len length of the application supplied data buffer
1244 * @param offset offset in pbuf where to copy dataptr to
1245 *
1246 * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
1247 */
1248 err_t
1249 pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset)
1250 {
1251 u16_t target_offset;
1252 struct pbuf *q = pbuf_skip(buf, offset, &target_offset);
1253
1254 /* return requested data if pbuf is OK */
1255 if ((q != NULL) && (q->tot_len >= target_offset + len)) {
1256 u16_t remaining_len = len;
1257 const u8_t *src_ptr = (const u8_t *)dataptr;
1258 /* copy the part that goes into the first pbuf */
1259 u16_t first_copy_len;
1260 LWIP_ASSERT("check pbuf_skip result", target_offset < q->len);
1261 first_copy_len = (u16_t)LWIP_MIN(q->len - target_offset, len);
1262 MEMCPY(((u8_t *)q->payload) + target_offset, dataptr, first_copy_len);
1263 remaining_len = (u16_t)(remaining_len - first_copy_len);
1264 src_ptr += first_copy_len;
1265 if (remaining_len > 0) {
1266 return pbuf_take(q->next, src_ptr, remaining_len);
1267 }
1268 return ERR_OK;
1269 }
1270 return ERR_MEM;
1271 }
1272
1273 /**
1274 * @ingroup pbuf
1275 * Creates a single pbuf out of a queue of pbufs.
1276 *
1277 * @remark: Either the source pbuf 'p' is freed by this function or the original
1278 * pbuf 'p' is returned, therefore the caller has to check the result!
1279 *
1280 * @param p the source pbuf
1281 * @param layer pbuf_layer of the new pbuf
1282 *
1283 * @return a new, single pbuf (p->next is NULL)
1284 * or the old pbuf if allocation fails
1285 */
1286 struct pbuf *
1287 pbuf_coalesce(struct pbuf *p, pbuf_layer layer)
1288 {
1289 struct pbuf *q;
1290 if (p->next == NULL) {
1291 return p;
1292 }
1293 q = pbuf_clone(layer, PBUF_RAM, p);
1294 if (q == NULL) {
1295 /* @todo: what do we do now? */
1296 return p;
1297 }
1298 pbuf_free(p);
1299 return q;
1300 }
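
/*
 * Sketch: flatten a received chain before handing it to code that expects a
 * contiguous buffer; the original pointer must not be used afterwards since
 * a new pbuf may have been returned:
 *
 *   p = pbuf_coalesce(p, PBUF_RAW);
 *   if (p->next == NULL) {
 *     ... p->payload now covers all p->tot_len bytes contiguously ...
 *   } else {
 *     ... allocation failed, p is still the original chain ...
 *   }
 */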
1301
1302 /**
1303 * @ingroup pbuf
1304 * Allocates a new pbuf of same length (via pbuf_alloc()) and copies the source
1305 * pbuf into this new pbuf (using pbuf_copy()).
1306 *
1307 * @param layer pbuf_layer of the new pbuf
1308 * @param type this parameter decides how and where the pbuf should be allocated
1309 * (@see pbuf_alloc())
1310 * @param p the source pbuf
1311 *
1312 * @return a new pbuf or NULL if allocation fails
1313 */
1314 struct pbuf *
1315 pbuf_clone(pbuf_layer layer, pbuf_type type, struct pbuf *p)
1316 {
1317 struct pbuf *q;
1318 err_t err;
1319 q = pbuf_alloc(layer, p->tot_len, type);
1320 if (q == NULL) {
1321 return NULL;
1322 }
1323 err = pbuf_copy(q, p);
1324 LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */
1325 LWIP_ASSERT("pbuf_copy failed", err == ERR_OK);
1326 return q;
1327 }
1328
1329 #if LWIP_CHECKSUM_ON_COPY
1330 /**
1331 * Copies data into a single pbuf (*not* into a pbuf queue!) and updates
1332 * the checksum while copying
1333 *
1334 * @param p the pbuf to copy data into
1335 * @param start_offset offset of p->payload where to copy the data to
1336 * @param dataptr data to copy into the pbuf
1337 * @param len length of data to copy into the pbuf
1338 * @param chksum pointer to the checksum which is updated
1339 * @return ERR_OK if successful, another error if the data does not fit
1340 * within the (first) pbuf (no pbuf queues!)
1341 */
1342 err_t
1343 pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr,
1344 u16_t len, u16_t *chksum)
1345 {
1346 u32_t acc;
1347 u16_t copy_chksum;
1348 char *dst_ptr;
1349 LWIP_ASSERT("p != NULL", p != NULL);
1350 LWIP_ASSERT("dataptr != NULL", dataptr != NULL);
1351 LWIP_ASSERT("chksum != NULL", chksum != NULL);
1352 LWIP_ASSERT("len != 0", len != 0);
1353
1354 if ((start_offset >= p->len) || (start_offset + len > p->len)) {
1355 return ERR_ARG;
1356 }
1357
1358 dst_ptr = ((char *)p->payload) + start_offset;
1359 copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len);
1360 if ((start_offset & 1) != 0) {
1361 copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum);
1362 }
1363 acc = *chksum;
1364 acc += copy_chksum;
1365 *chksum = FOLD_U32T(acc);
1366 return ERR_OK;
1367 }
1368 #endif /* LWIP_CHECKSUM_ON_COPY */
1369
1370 /**
1371 * @ingroup pbuf
1372 * Get one byte from the specified position in a pbuf
1373 * WARNING: returns zero for offset >= p->tot_len
1374 *
1375 * @param p pbuf to parse
1376 * @param offset offset into p of the byte to return
1377 * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len
1378 */
1379 u8_t
1380 pbuf_get_at(const struct pbuf *p, u16_t offset)
1381 {
1382 int ret = pbuf_try_get_at(p, offset);
1383 if (ret >= 0) {
1384 return (u8_t)ret;
1385 }
1386 return 0;
1387 }
1388
1389 /**
1390 * @ingroup pbuf
1391 * Get one byte from the specified position in a pbuf
1392 *
1393 * @param p pbuf to parse
1394 * @param offset offset into p of the byte to return
1395 * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len
1396 */
1397 int
1398 pbuf_try_get_at(const struct pbuf *p, u16_t offset)
1399 {
1400 u16_t q_idx;
1401 const struct pbuf *q = pbuf_skip_const(p, offset, &q_idx);
1402
1403 /* return requested data if pbuf is OK */
1404 if ((q != NULL) && (q->len > q_idx)) {
1405 return ((u8_t *)q->payload)[q_idx];
1406 }
1407 return -1;
1408 }
1409
1410 /**
1411 * @ingroup pbuf
1412 * Put one byte to the specified position in a pbuf
1413 * WARNING: silently ignores offset >= p->tot_len
1414 *
1415 * @param p pbuf to fill
1416 * @param offset offset into p of the byte to write
1417 * @param data byte to write at an offset into p
1418 */
1419 void
1420 pbuf_put_at(struct pbuf *p, u16_t offset, u8_t data)
1421 {
1422 u16_t q_idx;
1423 struct pbuf *q = pbuf_skip(p, offset, &q_idx);
1424
1425 /* write requested data if pbuf is OK */
1426 if ((q != NULL) && (q->len > q_idx)) {
1427 ((u8_t *)q->payload)[q_idx] = data;
1428 }
1429 }
1430
1431 /**
1432 * @ingroup pbuf
1433 * Compare pbuf contents at specified offset with memory s2, both of length n
1434 *
1435 * @param p pbuf to compare
1436 * @param offset offset into p at which to start comparing
1437 * @param s2 buffer to compare
1438 * @param n length of buffer to compare
1439 * @return zero if equal, nonzero otherwise
1440 * (0xffff if p is too short, diffoffset+1 otherwise)
1441 */
1442 u16_t
1443 pbuf_memcmp(const struct pbuf *p, u16_t offset, const void *s2, u16_t n)
1444 {
1445 u16_t start = offset;
1446 const struct pbuf *q = p;
1447 u16_t i;
1448
1449 /* pbuf long enough to perform check? */
1450 if (p->tot_len < (offset + n)) {
1451 return 0xffff;
1452 }
1453
1454 /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. */
1455 while ((q != NULL) && (q->len <= start)) {
1456 start = (u16_t)(start - q->len);
1457 q = q->next;
1458 }
1459
1460 /* return requested data if pbuf is OK */
1461 for (i = 0; i < n; i++) {
1462 /* We know pbuf_get_at() succeeds because of p->tot_len check above. */
1463 u8_t a = pbuf_get_at(q, (u16_t)(start + i));
1464 u8_t b = ((const u8_t *)s2)[i];
1465 if (a != b) {
1466 return (u16_t)LWIP_MIN(i + 1, 0xFFFF);
1467 }
1468 }
1469 return 0;
1470 }
1471
1472 /**
1473 * @ingroup pbuf
1474 * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset
1475 * start_offset.
1476 *
1477 * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1478 * return value 'not found'
1479 * @param mem search for the contents of this buffer
1480 * @param mem_len length of 'mem'
1481 * @param start_offset offset into p at which to start searching
1482 * @return 0xFFFF if substr was not found in p or the index where it was found
1483 */
1484 u16_t
1485 pbuf_memfind(const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset)
1486 {
1487 u16_t i;
1488 u16_t max_cmp_start = (u16_t)(p->tot_len - mem_len);
1489 if (p->tot_len >= mem_len + start_offset) {
1490 for (i = start_offset; i <= max_cmp_start; i++) {
1491 u16_t plus = pbuf_memcmp(p, i, mem, mem_len);
1492 if (plus == 0) {
1493 return i;
1494 }
1495 }
1496 }
1497 return 0xFFFF;
1498 }
1499
1500 /**
1501 * Find occurrence of substr with length substr_len in pbuf p, start at offset
1502 * start_offset
1503 * WARNING: in contrast to strstr(), this one does not stop at the first \0 in
1504 * the pbuf/source string!
1505 *
1506 * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1507 * return value 'not found'
1508 * @param substr string to search for in p, maximum length is 0xFFFE
1509 * @return 0xFFFF if substr was not found in p or the index where it was found
1510 */
1511 u16_t
1512 pbuf_strstr(const struct pbuf *p, const char *substr)
1513 {
1514 size_t substr_len;
1515 if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) {
1516 return 0xFFFF;
1517 }
1518 substr_len = strlen(substr);
1519 if (substr_len >= 0xFFFF) {
1520 return 0xFFFF;
1521 }
1522 return pbuf_memfind(p, substr, (u16_t)substr_len, 0);
1523 }
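
/*
 * Search sketch: locate the end of the header block in an HTTP request held
 * in 'p' (the pattern and protocol are only an example):
 *
 *   u16_t end = pbuf_memfind(p, "\r\n\r\n", 4, 0);
 *   if (end != 0xFFFF) {
 *     ... headers occupy the first end + 4 bytes ...
 *   }
 */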
1524