• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * @file
3  * Packet buffer management
4  */
5 
6 /**
7  * @defgroup pbuf Packet buffers (PBUF)
8  * @ingroup infrastructure
9  *
10  * Packets are built from the pbuf data structure. It supports dynamic
11  * memory allocation for packet contents or can reference externally
12  * managed packet contents both in RAM and ROM. Quick allocation for
13  * incoming packets is provided through pools with fixed sized pbufs.
14  *
15  * A packet may span over multiple pbufs, chained as a singly linked
16  * list. This is called a "pbuf chain".
17  *
18  * Multiple packets may be queued, also using this singly linked list.
19  * This is called a "packet queue".
20  *
21  * So, a packet queue consists of one or more pbuf chains, each of
22  * which consist of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
23  * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
24  *
25  * The differences between a pbuf chain and a packet queue are very
26  * precise but subtle.
27  *
28  * The last pbuf of a packet has a ->tot_len field that equals the
29  * ->len field. It can be found by traversing the list. If the last
30  * pbuf of a packet has a ->next field other than NULL, more packets
31  * are on the queue.
32  *
33  * Therefore, when looping through the pbufs of a single packet, the
34  * loop end condition is (tot_len == p->len), NOT (next == NULL).
35  *
36  * Example of custom pbuf usage: @ref zerocopyrx
37  */
38 
39 /*
40  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
41  * All rights reserved.
42  *
43  * Redistribution and use in source and binary forms, with or without modification,
44  * are permitted provided that the following conditions are met:
45  *
46  * 1. Redistributions of source code must retain the above copyright notice,
47  *    this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright notice,
49  *    this list of conditions and the following disclaimer in the documentation
50  *    and/or other materials provided with the distribution.
51  * 3. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
55  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
56  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
57  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
58  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
59  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
62  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
63  * OF SUCH DAMAGE.
64  *
65  * This file is part of the lwIP TCP/IP stack.
66  *
67  * Author: Adam Dunkels <adam@sics.se>
68  *
69  */
70 
71 #include "lwip/opt.h"
72 
73 #include "lwip/pbuf.h"
74 #include "lwip/stats.h"
75 #include "lwip/def.h"
76 #include "lwip/mem.h"
77 #include "lwip/memp.h"
78 #include "lwip/sys.h"
79 #include "lwip/netif.h"
80 #if LWIP_TCP && TCP_QUEUE_OOSEQ
81 #include "lwip/priv/tcp_priv.h"
82 #endif
83 #if LWIP_CHECKSUM_ON_COPY
84 #include "lwip/inet_chksum.h"
85 #endif
86 
87 #include <string.h>
88 #if LWIP_RIPPLE
89 #include "lwip/ip6.h"
90 #endif
91 
92 #if defined(_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF) && (_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF)
93 #include "oal_net_pkt_rom.h"
94 #include "oal_mem_pool.h"
95 #endif
96 
97 /* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
98    aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
99 #define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)
100 
101 static const struct pbuf *
102 pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset);
103 
104 #if MEM_PBUF_RAM_SIZE_LIMIT
105 u32_t pbuf_ram_size = MEM_SIZE;
106 
107 atomic_t pbuf_ram_using = {0};
108 #endif
109 
110 #if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
111 #define PBUF_POOL_IS_EMPTY()
112 #else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
113 
114 #if !NO_SYS
115 #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
116 #include "lwip/tcpip.h"
117 #define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL()  do { \
118   if (tcpip_try_callback(pbuf_free_ooseq_callback, NULL) != ERR_OK) { \
119       SYS_ARCH_PROTECT(old_level); \
120       pbuf_free_ooseq_pending = 0; \
121       SYS_ARCH_UNPROTECT(old_level); \
122   } } while(0)
123 #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
124 #endif /* !NO_SYS */
125 
126 volatile u8_t pbuf_free_ooseq_pending;
127 #define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()
128 
129 #if PBUF_RX_RATIONING
130 u8_t g_pbuf_ram_shortage_flag;
131 #endif
132 
133 /**
134  * Attempt to reclaim some memory from queued out-of-sequence TCP segments
135  * if we run out of pool pbufs. It's better to give priority to new packets
136  * if we're running out.
137  *
138  * This must be done in the correct thread context therefore this function
139  * can only be used with NO_SYS=0 and through tcpip_callback.
140  */
141 #if !NO_SYS
142 static
143 #endif /* !NO_SYS */
144 void
pbuf_free_ooseq(void)145 pbuf_free_ooseq(void)
146 {
147   struct tcp_pcb *pcb;
148   SYS_ARCH_SET(pbuf_free_ooseq_pending, 0);
149 
150   for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
151     if (pcb->ooseq != NULL) {
152       /** Free the ooseq pbufs of one PCB only */
153       LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
154       tcp_free_ooseq(pcb);
155       return;
156     }
157   }
158 }
159 
160 #if !NO_SYS
161 /**
162  * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
163  */
static void
pbuf_free_ooseq_callback(void *arg)
{
  /* Dispatched via tcpip_try_callback(); runs in tcpip_thread context. */
  LWIP_UNUSED_ARG(arg);
  pbuf_free_ooseq();
}
170 #endif /* !NO_SYS */
171 
172 /** Queue a call to pbuf_free_ooseq if not already queued. */
static void
pbuf_pool_is_empty(void)
{
#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
  /* No queue mechanism configured: just raise the flag; it is polled elsewhere. */
  SYS_ARCH_SET(pbuf_free_ooseq_pending, 1);
#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
  u8_t queued;
  SYS_ARCH_DECL_PROTECT(old_level);
  SYS_ARCH_PROTECT(old_level);
  /* Test-and-set the pending flag under protection so the callback is
     queued at most once, no matter how many allocations fail. */
  queued = pbuf_free_ooseq_pending;
  pbuf_free_ooseq_pending = 1;
  SYS_ARCH_UNPROTECT(old_level);

  if (!queued) {
    /* queue a call to pbuf_free_ooseq if not already queued */
    PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
  }
#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
}
192 #endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
193 
194 #if MEM_PBUF_RAM_SIZE_LIMIT
195 #ifdef LWIP_DEBUG
/* Debug-only helper: report how many bytes are currently charged against
 * the PBUF_RAM budget (pbuf_ram_using). */
void pbuf_ram_display(void)
{
  LWIP_DEBUGF(PBUF_DEBUG, ("totlen: %u bytes\n", atomic_read(&pbuf_ram_using)));
}
200 
201 #else
202 #define pbuf_ram_display()
203 #endif
204 
205 #endif
206 
/* Charge 'malloc_len' bytes against the global PBUF_RAM budget.
 * Returns 0 on success (bytes reserved in pbuf_ram_using), 1 when the
 * budget (pbuf_ram_size) would be exceeded — nothing is reserved then.
 * No-op returning 0 when MEM_PBUF_RAM_SIZE_LIMIT is disabled. */
static int mem_pbuf_check_ram_size_return(u32_t malloc_len)
{
#if MEM_PBUF_RAM_SIZE_LIMIT
  u32_t ram_using = (u32_t)atomic_read(&pbuf_ram_using);
  /* Reject requests that would wrap the u32 accumulator below.
     NOTE(review): this pre-check is not atomic with the add further down;
     a concurrent allocation could in principle still wrap — confirm. */
  if (ram_using > (0xFFFFFFFFU - malloc_len)) {
    pbuf_ram_display();
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
                ("pbuf_alloc: allocated pbuf(PBUF_RAM) fail due to memory size limited\n"));
    return 1;
  }
  /* Atomically reserve, then back the reservation out if over budget. */
  if ((u32_t)atomic_add_return((int)malloc_len, ((atomic_t*)&pbuf_ram_using)) >= pbuf_ram_size) {
    (void)atomic_sub((int)malloc_len, &pbuf_ram_using);
    pbuf_ram_display();
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
                ("pbuf_alloc: allocated pbuf(PBUF_RAM) fail due to memory size limited\n"));
    return 1;
  }
#endif /* MEM_PBUF_RAM_SIZE_LIMIT */
  return 0;
}
227 
228 #if PBUF_RX_RATIONING
/* Returns non-zero while PBUF_RAM allocation is failing and RX traffic is
 * being rationed to the pbuf pool (flag is maintained by pbuf_alloc_for_rx). */
inline u8_t
pbuf_ram_in_shortage(void)
{
  return g_pbuf_ram_shortage_flag;
}
234 #endif
235 
236 #if defined(_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF) && (_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF)
/* Allocate a PBUF_RAM-typed pbuf whose storage comes from the zero-copy
 * packet-buffer pool instead of the lwIP heap.
 * On success '*offset' may be reduced by the hop-by-hop reservation
 * (LWIP_RIPPLE) and '*hbhflag' is advanced from 1 to 2 to record that.
 * Returns NULL when the pool cannot satisfy the request. */
struct pbuf *pbuf_alloc_from_ptk_buf(mem_size_t alloc_len, u16_t len, u16_t *offset
#if LWIP_RIPPLE
    , u8_t *hbhflag
#endif
)
{
  struct pbuf *p = NULL;
  oal_dmac_netbuf_stru *pkt_buf = NULL;
  /* Headroom requested by the caller beyond the fixed zero-copy reserve. */
  u16_t layer = *offset - PBUF_ZERO_COPY_RESERVE;

  pkt_buf = oal_mem_netbuf_alloc(OAL_NORMAL_NETBUF, (len + layer), OAL_NETBUF_PRIORITY_MID);
  if (pkt_buf == NULL) {
    return NULL;
  }

  pkt_buf->pkt_src = PKT_BUF_SRC_LWIP;
  /* Buffer layout: pbuf header + skb + reserved headroom + packet data. */
  p = (struct pbuf *)oal_netbuf_lwip(pkt_buf);

  p->pkt_buf = (void *)pkt_buf;

#if MEM_PBUF_RAM_SIZE_LIMIT
  p->malloc_len = (u16_t)alloc_len;
#endif /* MEM_PBUF_RAM_SIZE_LIMIT */
  p->next = NULL;
  p->list = NULL;

#if LWIP_RIPPLE
  if (*hbhflag == 1) {
    /* 2 : record that the hop-by-hop space was carved out of the offset */
    *hbhflag = 2;
    *offset -= lwip_hbh_len(NULL);
  }
#endif
  p->payload = (void *)((u8_t *)p + LWIP_ZERO_COPY_HDR + *offset);
  p->tot_len = len;
  p->len = len;
  p->type_internal = (u8_t)PBUF_RAM;
  p->flags = 0;
  (void)atomic_set(&p->ref, 1);
  p->if_idx = NETIF_NO_INDEX;
  return p;
}
280 #endif
281 
282 #if (MEM_MALLOC_DMA_ALIGN != 1)
283 
/* Allocate a PBUF_RAM pbuf backed by a DMA-aligned buffer.
 * Layout: [DMA payload (aligned 'len' bytes)][struct pbuf][struct pbuf_dma_info].
 * Returns NULL if the aligned total exceeds MAX_PBUF_RAM_SIZE_TO_ALLOC, the
 * PBUF_RAM budget would be exceeded, or the underlying allocation fails. */
static inline struct pbuf *
pbuf_dma_alloc_ext(u32_t len)
{
  struct pbuf *p = NULL;
  void  *dma = NULL;
  u32_t  dma_len;
  u32_t malloc_len;

  dma_len = LWIP_MEM_DMA_ALIGN_SIZE(len);

  malloc_len = (u32_t)(LWIP_MEM_DMA_ALIGN_SIZE(dma_len + sizeof(struct pbuf) + sizeof(struct pbuf_dma_info)));
  if (malloc_len > MAX_PBUF_RAM_SIZE_TO_ALLOC) {
    LWIP_DEBUGF(PBUF_DEBUG, ("pbuf_dma_alloc: Invalid aligned memory length which is  greater than %d\n",
                MAX_PBUF_RAM_SIZE_TO_ALLOC));
    return NULL;
  }

  if (mem_pbuf_check_ram_size_return(malloc_len) != 0) {
    return NULL;
  }

  /* NOTE(review): malloc_len is narrowed to u16_t here; this relies on the
     MAX_PBUF_RAM_SIZE_TO_ALLOC check above keeping it in range — confirm. */
  dma = sys_align_malloc((u16_t)malloc_len);
  if (dma == NULL) {
#if MEM_PBUF_RAM_SIZE_LIMIT
    /* Release the budget reserved by mem_pbuf_check_ram_size_return(). */
    atomic_sub((int)malloc_len, &pbuf_ram_using);
#endif
    return NULL;
  }

  /* The struct pbuf lives immediately after the aligned payload area. */
  p = (struct pbuf *)((u8_t *)dma + LWIP_MEM_DMA_ALIGN_SIZE(len));

#if MEM_PBUF_RAM_SIZE_LIMIT
  p->malloc_len = (u16_t)malloc_len;
#endif // MEM_PBUF_RAM_SIZE_LIMIT

  p->dma_info = (struct pbuf_dma_info *)((u8_t *)p + sizeof(struct pbuf));
  p->dma_info->dma = dma;
  (void)atomic_set(&p->dma_info->dma_ref, 1);
  p->dma_info->dma_len = (u16_t)dma_len;

  return p;
}
326 
327 
/* Public wrapper around pbuf_dma_alloc_ext() for u16_t payload lengths. */
struct pbuf *
pbuf_dma_alloc(u16_t len)
{
  return pbuf_dma_alloc_ext(len);
}
333 
/* Take an additional reference on a DMA buffer descriptor.
 * Returns ERR_VAL for a NULL descriptor, ERR_OK otherwise. */
err_t
pbuf_dma_ref(struct pbuf_dma_info *dma_info)
{
  if (dma_info == NULL) {
    return ERR_VAL;
  }

  atomic_inc(&dma_info->dma_ref);
  return ERR_OK;
}
344 
/* Drop one reference on a DMA buffer descriptor; when the count reaches
 * zero, return the charged bytes to the PBUF_RAM budget and free the
 * backing allocation. */
void
pbuf_dma_free(struct pbuf_dma_info *dma_info)
{
  LWIP_ERROR("Invalid argument in pbuf_dma_free \n", (dma_info != NULL), return);
  if (atomic_dec_and_test(&dma_info->dma_ref)) {
#if MEM_PBUF_RAM_SIZE_LIMIT
    u32_t malloc_len;
    /* Recompute the size charged at allocation time (see pbuf_dma_alloc_ext). */
    malloc_len = (u32_t)(LWIP_MEM_DMA_ALIGN_SIZE(dma_info->dma_len +
                 sizeof(struct pbuf) + sizeof(struct pbuf_dma_info)));
    LWIP_ASSERT("pbuf_ram_using less than the length to be freed \n",
                (atomic_read(&pbuf_ram_using) >= (int)malloc_len));
    atomic_sub((int)malloc_len, &pbuf_ram_using);
#endif  /* MEM_PBUF_RAM_SIZE_LIMIT */

    sys_align_free(dma_info->dma);
  }
}
362 
363 #endif
364 
365 /* Initialize members of struct pbuf after allocation */
static void
pbuf_init_alloced_pbuf(struct pbuf *p, void *payload, u16_t tot_len, u16_t len, pbuf_type type, u8_t flags)
{
  /* Fresh pbuf: not chained, not queued, single reference, no bound netif. */
  p->next = NULL;
  p->list = NULL;
  p->payload = payload;
  p->tot_len = tot_len;
  p->len = len;
  p->type_internal = (u8_t)type;
  p->flags = flags;
  (void)atomic_set(&p->ref, 1);
  p->if_idx = NETIF_NO_INDEX;
#if defined(_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF) && (_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF)
  /* Not backed by a zero-copy packet buffer unless the caller sets it. */
  p->pkt_buf = NULL;
#endif
#if defined(_PRE_LWIP_SYSCHANNEL_MEM_ALLOC_BUF) && (_PRE_LWIP_SYSCHANNEL_MEM_ALLOC_BUF)
  p->syschannel_buf = NULL;
#endif
}
385 
386 /**
387  * @ingroup pbuf
388  * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
389  *
390  * The actual memory allocated for the pbuf is determined by the
391  * layer at which the pbuf is allocated and the requested size
392  * (from the size parameter).
393  *
394  * @param layer header size
395  * @param length size of the pbuf's payload
396  * @param type this parameter decides how and where the pbuf
397  * should be allocated as follows:
398  *
399  * - PBUF_RAM: buffer memory for pbuf is allocated as one large
400  *             chunk. This includes protocol headers as well.
401  * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
402  *             protocol headers. Additional headers must be prepended
403  *             by allocating another pbuf and chain in to the front of
404  *             the ROM pbuf. It is assumed that the memory used is really
405  *             similar to ROM in that it is immutable and will not be
406  *             changed. Memory which is dynamic should generally not
407  *             be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
408  * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
409  *             protocol headers. It is assumed that the pbuf is only
410  *             being used in a single thread. If the pbuf gets queued,
411  *             then pbuf_take should be called to copy the buffer.
412  * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
413  *              the pbuf pool that is allocated during pbuf_init().
414  *
415  * @return the allocated pbuf. If multiple pbufs where allocated, this
416  * is the first pbuf of a pbuf chain.
417  */
struct pbuf *
pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
{
  struct pbuf *p;
  u16_t offset = (u16_t)layer;
#if LWIP_RIPPLE
  u8_t hbhflag;
#endif
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));
#if LWIP_RIPPLE
  hbhflag = 1;
  /* reserve hop-by-hop ipv6 option for forward and backward space */
  offset += 2 * lwip_hbh_len(NULL);
#endif

#if LWIP_IP6IN4
  /* extra headroom for a possible outer IPv4 header (6in4 encapsulation) */
  offset += PBUF_IP4_HLEN;
#endif

  /* fixed headroom reserved for the zero-copy path */
  offset += PBUF_ZERO_COPY_RESERVE;

  switch (type) {
    case PBUF_REF: /* fall through */
    case PBUF_ROM:
      p = pbuf_alloc_reference(NULL, length, type);
      if (p == NULL) {
        return NULL;
      }
      break;
    case PBUF_POOL: {
      struct pbuf *q, *last;
      u16_t rem_len; /* remaining length */
      p = NULL;
      last = NULL;
      rem_len = length;
      do {
        u16_t qlen;
        q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
        if (q == NULL) {
          PBUF_POOL_IS_EMPTY();
          /* free chain so far allocated */
          if (p) {
            pbuf_free(p);
          }
          /* bail out unsuccessfully */
          return NULL;
        }
        /* only the first pool pbuf of the chain carries the header offset */
        qlen = LWIP_MIN(rem_len, (u16_t)(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)));
        pbuf_init_alloced_pbuf(q, LWIP_MEM_ALIGN((void *)((u8_t *)q + SIZEOF_STRUCT_PBUF + offset)),
                               rem_len, qlen, type, 0);
        LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
                    ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
        LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
                    (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
        if (p == NULL) {
          /* allocated head of pbuf chain (into p) */
          p = q;
        } else {
          /* make previous pbuf point to this pbuf */
          last->next = q;
        }
        last = q;
        rem_len = (u16_t)(rem_len - qlen);
        offset = 0;
      } while (rem_len > 0);
      break;
    }
    case PBUF_RAM: {
#if (MEM_MALLOC_DMA_ALIGN == 1)
      mem_size_t payload_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(offset) + LWIP_MEM_ALIGN_SIZE(length));
      mem_size_t alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF) + payload_len);
      alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(PBUF_ZERO_COPY_TAILROOM) + alloc_len);

      if (alloc_len > MAX_PBUF_RAM_SIZE_TO_ALLOC) {
        LWIP_DEBUGF(PBUF_DEBUG, ("pbuf_dma_alloc: Invalid aligned memory length which is  greater than %d\n",
                                 MAX_PBUF_RAM_SIZE_TO_ALLOC));
        return NULL;
      }

      /* bug #50040: Check for integer overflow when calculating alloc_len */
      if ((payload_len < LWIP_MEM_ALIGN_SIZE(length)) ||
          (alloc_len < LWIP_MEM_ALIGN_SIZE(length))) {
        return NULL;
      }
#if defined(_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF) && (_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF)
      /* Small requests are served from the zero-copy packet-buffer pool
         (presumably 1600 covers the common Ethernet MTU case — TODO confirm). */
      if (length <= 1600) {
        p = pbuf_alloc_from_ptk_buf(alloc_len, length , &offset
#if LWIP_RIPPLE
          , &hbhflag
#endif
        );
        if (p != NULL) {
          /* pool allocation succeeded: leave the switch with p set */
          break;
        } else {
          /* the zero-copy pool could not satisfy the request: no heap
             fallback for small packets — fail */
          return NULL;
        }
      }
#endif

      if (mem_pbuf_check_ram_size_return(alloc_len) != 0) {
        return NULL;
      }
      /* If pbuf is to be allocated in RAM, allocate memory for it. */
      p = (struct pbuf *)mem_malloc(alloc_len);
      if (p == NULL) {
#if MEM_PBUF_RAM_SIZE_LIMIT
        /* release the budget reserved by mem_pbuf_check_ram_size_return() */
        (void)atomic_sub((int)alloc_len, &pbuf_ram_using);
#endif
        LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
                    ("pbuf_alloc: allocated pbuf(PBUF_RAM) fail due to mem_malloc \n"));
        return NULL;
      }
#if MEM_PBUF_RAM_SIZE_LIMIT
      p->malloc_len = (u16_t)alloc_len;
#endif // MEM_PBUF_RAM_SIZE_LIMIT
#if LWIP_RIPPLE
      if (hbhflag == 1) {
        /* 2 : set hop-by-hop flag value we use later */
        hbhflag = 2;
        offset -= lwip_hbh_len(NULL);
      }
#endif

      pbuf_init_alloced_pbuf(p, LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)),
                             length, length, type, 0);
      LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
                  ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
#else
      /* DMA-aligned build: the pbuf struct is placed behind the payload */
      u32_t malloc_len = (u32_t)(length + offset + PBUF_LINK_CHKSUM_LEN);
      p = pbuf_dma_alloc_ext(malloc_len);
      if (p == NULL) {
        return NULL;
      }
#if LWIP_RIPPLE
      if (hbhflag == 1) {
        /* 2 : set hop-by-hop flag value we use later */
        hbhflag = 2;
        offset -= lwip_hbh_len(NULL);
      }
#endif

      pbuf_init_alloced_pbuf(p, (void *)((u8_t *)p->dma_info->dma + offset), length, length, type, 0);
#endif
      break;
    }
    default:
      LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
      return NULL;
  }

#if LWIP_SO_PRIORITY
  /* By Default all pbuf priority will be set to LWIP_PKT_PRIORITY_MIN */
  p->priority = LWIP_PKT_PRIORITY_MIN;
#endif /* LWIP_SO_PRIORITY */

#if LWIP_RIPPLE
  /* 2 : hop-by-hop headroom was reserved above; record it in the flags */
  if (hbhflag == 2) {
    p->flags |= PBUF_FLAG_HBH_SPACE;
  }
#endif

#if LWIP_IP6IN4
  p->ip6in4_ip4 = lwIP_FALSE;
  p->ip6in4_ip6 = lwIP_FALSE;
#endif

  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
  return p;
}
591 
592 /**
593  * @ingroup pbuf
594  * Allocates a pbuf for referenced data.
595  * Referenced data can be volatile (PBUF_REF) or long-lived (PBUF_ROM).
596  *
597  * The actual memory allocated for the pbuf is determined by the
598  * layer at which the pbuf is allocated and the requested size
599  * (from the size parameter).
600  *
601  * @param payload referenced payload
602  * @param length size of the pbuf's payload
603  * @param type this parameter decides how and where the pbuf
604  * should be allocated as follows:
605  *
606  * - PBUF_ROM: It is assumed that the memory used is really
607  *             similar to ROM in that it is immutable and will not be
608  *             changed. Memory which is dynamic should generally not
609  *             be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
610  * - PBUF_REF: It is assumed that the pbuf is only
611  *             being used in a single thread. If the pbuf gets queued,
612  *             then pbuf_take should be called to copy the buffer.
613  *
614  * @return the allocated pbuf.
615  */
616 struct pbuf *
pbuf_alloc_reference(void * payload,u16_t length,pbuf_type type)617 pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type)
618 {
619   struct pbuf *p;
620   LWIP_ASSERT("invalid pbuf_type", (type == PBUF_REF) || (type == PBUF_ROM));
621   /* only allocate memory for the pbuf structure */
622   p = (struct pbuf *)memp_malloc(MEMP_PBUF);
623   if (p == NULL) {
624     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
625                 ("pbuf_alloc_reference: Could not allocate MEMP_PBUF for PBUF_%s.\n",
626                  (type == PBUF_ROM) ? "ROM" : "REF"));
627     return NULL;
628   }
629   pbuf_init_alloced_pbuf(p, payload, length, length, type, 0);
630   return p;
631 }
632 
633 #if PBUF_RX_RATIONING
634 struct pbuf *
pbuf_alloc_for_rx(pbuf_layer layer,u16_t length)635 pbuf_alloc_for_rx(pbuf_layer layer, u16_t length)
636 {
637   struct pbuf *p = pbuf_alloc(layer, length, PBUF_RAM);
638   if (p == NULL) {
639     g_pbuf_ram_shortage_flag = lwIP_TRUE;
640     return pbuf_alloc(layer, length, PBUF_POOL);
641   }
642   g_pbuf_ram_shortage_flag = lwIP_FALSE;
643   return p;
644 }
645 #endif
646 
647 #if LWIP_SUPPORT_CUSTOM_PBUF
648 /**
649  * @ingroup pbuf
650  * Initialize a custom pbuf (already allocated).
651  * Example of custom pbuf usage: @ref zerocopyrx
652  *
653  * @param l header size
654  * @param length size of the pbuf's payload
655  * @param type type of the pbuf (only used to treat the pbuf accordingly, as
656  *        this function allocates no memory)
657  * @param p pointer to the custom pbuf to initialize (already allocated)
658  * @param payload_mem pointer to the buffer that is used for payload and headers,
659  *        must be at least big enough to hold 'length' plus the header size,
660  *        may be NULL if set later.
661  *        ATTENTION: The caller is responsible for correct alignment of this buffer!!
662  * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
663  *        big enough to hold 'length' plus the header size
664  */
665 struct pbuf *
pbuf_alloced_custom(pbuf_layer l,u16_t length,pbuf_type type,struct pbuf_custom * p,void * payload_mem,u16_t payload_mem_len)666 pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
667                     void *payload_mem, u16_t payload_mem_len)
668 {
669   u16_t offset = (u16_t)l;
670   void *payload;
671   LWIP_ERROR("Invalid arguments in pbuf_alloced_custom \n", (p != NULL), return NULL);
672   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
673 
674   if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
675     LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
676     return NULL;
677   }
678 
679   if (payload_mem != NULL) {
680     payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
681   } else {
682     payload = NULL;
683   }
684   pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM);
685   return &p->pbuf;
686 }
687 #endif /* LWIP_SUPPORT_CUSTOM_PBUF */
688 
689 /**
690  * @ingroup pbuf
691  * Shrink a pbuf chain to a desired length.
692  *
693  * @param p pbuf to shrink.
694  * @param new_len desired new length of pbuf chain
695  *
696  * Depending on the desired length, the first few pbufs in a chain might
697  * be skipped and left unchanged. The new last pbuf in the chain will be
698  * resized, and any remaining pbufs will be freed.
699  *
700  * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
701  * @note May not be called on a packet queue.
702  *
703  * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
704  */
void
pbuf_realloc(struct pbuf *p, u16_t new_len)
{
  struct pbuf *q;
  u16_t rem_len; /* remaining length */
  u16_t shrink;
  LWIP_ERROR("Invalid arguments in pbuf_realloc \n", (p != NULL), return);
  /* NOTE(review): redundant with the LWIP_ERROR check above; kept as-is. */
  LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);

  /* desired length larger than current length? */
  if (new_len >= p->tot_len) {
    /* enlarging not yet supported */
    return;
  }

  /* number of bytes the chain shrinks by (new_len < p->tot_len here) */
  shrink = (u16_t)(p->tot_len - new_len);

  /* first, step over any pbufs that should remain in the chain */
  rem_len = new_len;
  q = p;
  /* should this pbuf be kept? */
  while (rem_len > q->len) {
    /* decrease remaining length by pbuf length */
    rem_len = (u16_t)(rem_len - q->len);
    /* decrease total length indicator */
    q->tot_len = (u16_t)(q->tot_len - shrink);
    /* proceed to next pbuf in chain */
    q = q->next;
    LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
  }
  /* we have now reached the new last pbuf (in q) */
  /* rem_len == desired length for pbuf q */

  /* shrink allocated memory for PBUF_RAM */
  /* (other types merely adjust their length fields */
  if (pbuf_match_allocsrc(q, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) && (rem_len != q->len)
#if LWIP_SUPPORT_CUSTOM_PBUF
      && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
     ) {
    /* reallocate and adjust the length of the pbuf that will be split */
    q = (struct pbuf *)mem_trim(q, (mem_size_t)(((u8_t *)q->payload - (u8_t *)q) + rem_len));
    LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
#if !MEM_LIBC_MALLOC
    /* with the lwIP heap, mem_trim can fail; bail out with the chain intact */
    if (q == NULL) {
      return;
    }
#endif
  }
  /* adjust length fields for new last pbuf */
  q->len = rem_len;
  q->tot_len = q->len;
  q->list = NULL;

  /* any remaining pbufs in chain? */
  if (q->next != NULL) {
    /* free remaining pbufs in chain */
    pbuf_free(q->next);
  }
  /* q is last packet in chain */
  q->next = NULL;

}
770 
771 /**
772  * Adjusts the payload pointer to reveal headers in the payload.
773  * @see pbuf_add_header.
774  *
775  * @param p pbuf to change the header size.
776  * @param header_size_increment Number of bytes to increment header size.
777  * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types
778  *
779  * @return non-zero on failure, zero on success.
780  *
781  */
782 static u8_t
pbuf_add_header_impl(struct pbuf * p,size_t header_size_increment,u8_t force)783 pbuf_add_header_impl(struct pbuf *p, size_t header_size_increment, u8_t force)
784 {
785   u16_t type_internal;
786   void *payload;
787   u16_t increment_magnitude;
788 
789   if ((p == NULL) || (header_size_increment > 0xFFFF)) {
790     return 1;
791   }
792   if (header_size_increment == 0) {
793     return 0;
794   }
795 
796   increment_magnitude = (u16_t)header_size_increment;
797   /* Do not allow tot_len to wrap as a result. */
798   if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) {
799     return 1;
800   }
801 
802   type_internal = p->type_internal;
803   /* remember current payload pointer */
804   payload = p->payload;
805 
806 #if (MEM_MALLOC_DMA_ALIGN != 1)
807   if (pbuf_match_allocsrc(p, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP)) {
808     /* set new payload pointer */
809     payload = (u8_t *)p->payload - header_size_increment;
810     /* boundary check fails? */
811     if ((u8_t *)payload < (u8_t *)p->dma_info->dma) {
812       LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
813                   ("pbuf_header: failed as %p < %p (not enough space for new header size)\n",
814                    (void *)p->payload, (void *)(p + 1)));
815       /* bail out unsuccesfully */
816       return 1;
817     }
818   } else
819   /* pbuf types refering to external payloads? */
820 #endif
821   /* pbuf types containing payloads? */
822   if (type_internal & PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS) {
823     /* set new payload pointer */
824     payload = (u8_t *)p->payload - header_size_increment;
825     /* boundary check fails? */
826     if ((u8_t *)payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) {
827       LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE,
828                    ("pbuf_add_header: failed as %p < %p (not enough space for new header size)\n",
829                     (void *)payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF)));
830       /* bail out unsuccessfully */
831       return 1;
832     }
833     /* pbuf types referring to external payloads? */
834   } else {
835     /* hide a header in the payload? */
836     if (force) {
837       payload = (u8_t *)p->payload - header_size_increment;
838     } else {
839       /* cannot expand payload to front (yet!)
840        * bail out unsuccessfully */
841       return 1;
842     }
843   }
844   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_add_header: old %p new %p (%"U16_F")\n",
845               (void *)p->payload, (void *)payload, increment_magnitude));
846 
847   /* modify pbuf fields */
848   p->payload = payload;
849   p->len = (u16_t)(p->len + increment_magnitude);
850   p->tot_len = (u16_t)(p->tot_len + increment_magnitude);
851 
852 
853   return 0;
854 }
855 
856 /**
857  * Adjusts the payload pointer to reveal headers in the payload.
858  *
859  * Adjusts the ->payload pointer so that space for a header
860  * appears in the pbuf payload.
861  *
862  * The ->payload, ->tot_len and ->len fields are adjusted.
863  *
864  * @param p pbuf to change the header size.
865  * @param header_size_increment Number of bytes to increment header size which
866  *          increases the size of the pbuf. New space is on the front.
867  *          If header_size_increment is 0, this function does nothing and returns successful.
868  *
869  * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
870  * the call will fail. A check is made that the increase in header size does
871  * not move the payload pointer in front of the start of the buffer.
872  *
873  * @return non-zero on failure, zero on success.
874  *
875  */
876 u8_t
pbuf_add_header(struct pbuf * p,size_t header_size_increment)877 pbuf_add_header(struct pbuf *p, size_t header_size_increment)
878 {
879   return pbuf_add_header_impl(p, header_size_increment, 0);
880 }
881 
882 /**
883  * Same as @ref pbuf_add_header but does not check if 'header_size > 0' is allowed.
884  * This is used internally only, to allow PBUF_REF for RX.
885  */
886 u8_t
pbuf_add_header_force(struct pbuf * p,size_t header_size_increment)887 pbuf_add_header_force(struct pbuf *p, size_t header_size_increment)
888 {
889   return pbuf_add_header_impl(p, header_size_increment, 1);
890 }
891 
892 /**
893  * Adjusts the payload pointer to hide headers in the payload.
894  *
895  * Adjusts the ->payload pointer so that space for a header
896  * disappears in the pbuf payload.
897  *
898  * The ->payload, ->tot_len and ->len fields are adjusted.
899  *
900  * @param p pbuf to change the header size.
901  * @param header_size_decrement Number of bytes to decrement header size which
902  *          decreases the size of the pbuf.
903  *          If header_size_decrement is 0, this function does nothing and returns successful.
904  * @return non-zero on failure, zero on success.
905  *
906  */
907 u8_t
pbuf_remove_header(struct pbuf * p,size_t header_size_decrement)908 pbuf_remove_header(struct pbuf *p, size_t header_size_decrement)
909 {
910   void *payload;
911   u16_t increment_magnitude;
912 
913   if ((p == NULL) || (header_size_decrement > 0xFFFF)) {
914     return 1;
915   }
916   if (header_size_decrement == 0) {
917     return 0;
918   }
919 
920   increment_magnitude = (u16_t)header_size_decrement;
921   /* Check that we aren't going to move off the end of the pbuf */
922   LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;);
923 
924   /* remember current payload pointer */
925   payload = p->payload;
926   LWIP_UNUSED_ARG(payload); /* only used in LWIP_DEBUGF below */
927 
928   /* increase payload pointer (guarded by length check above) */
929   p->payload = (u8_t *)p->payload + header_size_decrement;
930   /* modify pbuf length fields */
931   p->len = (u16_t)(p->len - increment_magnitude);
932   p->tot_len = (u16_t)(p->tot_len - increment_magnitude);
933 
934   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_remove_header: old %p new %p (%"U16_F")\n",
935               (void *)payload, (void *)p->payload, increment_magnitude));
936 
937   return 0;
938 }
939 
940 static u8_t
pbuf_header_impl(struct pbuf * p,s16_t header_size_increment,u8_t force)941 pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force)
942 {
943   if (header_size_increment < 0) {
944     return pbuf_remove_header(p, (size_t) - header_size_increment);
945   } else {
946     return pbuf_add_header_impl(p, (size_t)header_size_increment, force);
947   }
948 }
949 
950 /**
951  * Adjusts the payload pointer to hide or reveal headers in the payload.
952  *
953  * Adjusts the ->payload pointer so that space for a header
954  * (dis)appears in the pbuf payload.
955  *
956  * The ->payload, ->tot_len and ->len fields are adjusted.
957  *
958  * @param p pbuf to change the header size.
959  * @param header_size_increment Number of bytes to increment header size which
960  * increases the size of the pbuf. New space is on the front.
961  * (Using a negative value decreases the header size.)
962  * If header_size_increment is 0, this function does nothing and returns successful.
963  *
964  * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
965  * the call will fail. A check is made that the increase in header size does
966  * not move the payload pointer in front of the start of the buffer.
967  * @return non-zero on failure, zero on success.
968  *
969  */
970 u8_t
pbuf_header(struct pbuf * p,s16_t header_size_increment)971 pbuf_header(struct pbuf *p, s16_t header_size_increment)
972 {
973   return pbuf_header_impl(p, header_size_increment, 0);
974 }
975 
976 /**
977  * Same as pbuf_header but does not check if 'header_size > 0' is allowed.
978  * This is used internally only, to allow PBUF_REF for RX.
979  */
980 u8_t
pbuf_header_force(struct pbuf * p,s16_t header_size_increment)981 pbuf_header_force(struct pbuf *p, s16_t header_size_increment)
982 {
983   return pbuf_header_impl(p, header_size_increment, 1);
984 }
985 
986 /** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len)
987  *
988  * @param q pbufs to operate on
989  * @param size The number of bytes to remove from the beginning of the pbuf list.
990  *             While size >= p->len, pbufs are freed.
991  *        ATTENTION: this is the opposite direction as @ref pbuf_header, but
992  *                   takes an u16_t not s16_t!
993  * @return the new head pbuf
994  */
995 struct pbuf *
pbuf_free_header(struct pbuf * q,u16_t size)996 pbuf_free_header(struct pbuf *q, u16_t size)
997 {
998   struct pbuf *p = q;
999   u16_t free_left = size;
1000   while (free_left && p) {
1001     if (free_left >= p->len) {
1002       struct pbuf *f = p;
1003       free_left = (u16_t)(free_left - p->len);
1004       p = p->next;
1005       f->next = 0;
1006       pbuf_free(f);
1007     } else {
1008       pbuf_remove_header(p, free_left);
1009       free_left = 0;
1010     }
1011   }
1012   return p;
1013 }
1014 
/**
 * @ingroup pbuf
 * Dereference a pbuf chain or queue and deallocate any no-longer-used
 * pbufs at the head of this chain or queue.
 *
 * Decrements the pbuf reference count. If it reaches zero, the pbuf is
 * deallocated.
 *
 * For a pbuf chain, this is repeated for each pbuf in the chain,
 * up to the first pbuf which has a non-zero reference count after
 * decrementing. So, when all reference counts are one, the whole
 * chain is free'd.
 *
 * @param p The pbuf (chain) to be dereferenced.
 *
 * @return the number of pbufs that were de-allocated
 * from the head of the chain.
 *
 * @note MUST NOT be called on a packet queue (Not verified to work yet).
 * @note the reference counter of a pbuf equals the number of pointers
 * that refer to the pbuf (or into the pbuf).
 *
 * @internal examples:
 *
 * Assuming existing chains a->b->c with the following reference
 * counts, calling pbuf_free(a) results in:
 *
 * 1->2->3 becomes ...1->3
 * 3->3->3 becomes 2->3->3
 * 1->1->2 becomes ......1
 * 2->1->1 becomes 1->1->1
 * 1->1->1 becomes .......
 *
 */
u8_t
pbuf_free(struct pbuf *p)
{
  u8_t alloc_src;
  struct pbuf *q;
  u8_t count;

  /* freeing a NULL pbuf is tolerated but logged as serious */
  if (p == NULL) {
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
                ("pbuf_free(p == NULL) was called.\n"));
    return 0;
  }
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));

  PERF_START;

  count = 0;
  /* de-allocate all consecutive pbufs from the head of the chain that
   * obtain a zero reference count after decrementing*/
  while (p != NULL) {
    /* Since decrementing ref cannot be guaranteed to be a single machine operation
     * we must protect it. We put the new ref into a local variable to prevent
     * further protection. */
    /* all pbufs in a chain are referenced at least once */
    LWIP_ASSERT("pbuf_free: p->ref > 0", atomic_read(&p->ref) > 0);
    /* decrease reference count (number of pointers to pbuf) */
    /* this pbuf is no longer referenced to? (atomic decrement-and-test) */
    if (atomic_dec_and_test(&p->ref)) {
      /* remember next pbuf in chain for next iteration */
      q = p->next;
      LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
      alloc_src = pbuf_get_allocsrc(p);
#if LWIP_SUPPORT_CUSTOM_PBUF
      /* is this a custom pbuf? custom pbufs carry their own free callback */
      if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
        struct pbuf_custom *pc = (struct pbuf_custom *)p;
        LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
        pc->custom_free_function(p);
      } else
#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
      {
        /* is this a pbuf from the pool? */
        if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) {
          memp_free(MEMP_PBUF_POOL, p);
          /* is this a ROM or RAM referencing pbuf? */
        } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF) {
          memp_free(MEMP_PBUF, p);
          /* type == PBUF_RAM */
        } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) {
#if (MEM_MALLOC_DMA_ALIGN != 1)
          /* DMA-aligned heap pbufs are released through the DMA helper */
          pbuf_dma_free(p->dma_info);
#else  /* MEM_MALLOC_DMA_ALIGN */

#if MEM_PBUF_RAM_SIZE_LIMIT
          /* snapshot the accounted size before the pbuf memory is released */
          u16_t malloc_len = p->malloc_len;
#endif
#if defined(_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF) && (_PRE_LWIP_ZERO_COPY_MEM_ALLOC_PKT_BUF)
          if (p->pkt_buf != NULL) {
            /* NOTE(review): the original comment on this line was mojibake; it appears
             * to say that pkt_buf-backed (zero-copy) buffers are not counted against
             * the RAM size limit, hence malloc_len is zeroed -- confirm intent.
             * NOTE(review): 'malloc_len' is only declared when MEM_PBUF_RAM_SIZE_LIMIT
             * is set, yet it is assigned here whenever the zero-copy option is on;
             * that macro combination would not compile -- confirm the config matrix. */
            malloc_len = 0;
            ((oal_dmac_netbuf_stru *)p->pkt_buf)->pkt_src = PKT_BUF_SRC_DMAC;
            oal_mem_netbuf_free((oal_dmac_netbuf_stru *)p->pkt_buf);
          } else {
            mem_free(p);
          }
#else
          mem_free(p);
#endif
#if MEM_PBUF_RAM_SIZE_LIMIT
          /* keep the global RAM usage accounting in sync with the free */
          LWIP_ASSERT("pbuf_ram_using less than the length to be freed \n",
                      (atomic_read(&pbuf_ram_using) >= malloc_len));
          (void)atomic_sub((int)malloc_len, &pbuf_ram_using);
#endif  /* MEM_PBUF_RAM_SIZE_LIMIT */
#endif /* MEM_MALLOC_DMA_ALIGN */
        } else {
          /* @todo: support freeing other types */
          LWIP_ASSERT("invalid pbuf type", 0);
        }
      }
      count++;
      /* proceed to next pbuf */
      p = q;
      /* p->ref > 0, this pbuf is still referenced to */
      /* (and so the remaining pbufs in chain as well) */
    } else {
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n",
                                                (void *)p, atomic_read(&p->ref)));
      /* stop walking through the chain */
#if defined(_PRE_LWIP_SYSCHANNEL_MEM_ALLOC_BUF) && (_PRE_LWIP_SYSCHANNEL_MEM_ALLOC_BUF)
      /* NOTE(review): vendor extension -- the syschannel buffer is pushed back
       * even though the pbuf itself survives; confirm ownership semantics. */
      if (p->syschannel_buf != NULL) {
        syschannel_mem_push(p->syschannel_buf);
      }
#endif
      p = NULL;
    }
  }
  PERF_STOP("pbuf_free");
  /* return number of de-allocated pbufs */
  return count;
}
1148 
1149 /**
1150  * Count number of pbufs in a chain
1151  *
1152  * @param p first pbuf of chain
1153  * @return the number of pbufs in a chain
1154  */
1155 u16_t
pbuf_clen(const struct pbuf * p)1156 pbuf_clen(const struct pbuf *p)
1157 {
1158   u16_t len;
1159 
1160   len = 0;
1161   while (p != NULL) {
1162     ++len;
1163     p = p->next;
1164   }
1165   return len;
1166 }
1167 
1168 /**
1169  * @ingroup pbuf
1170  * Increment the reference count of the pbuf.
1171  *
1172  * @param p pbuf to increase reference counter of
1173  *
1174  */
1175 void
pbuf_ref(struct pbuf * p)1176 pbuf_ref(struct pbuf *p)
1177 {
1178   /* pbuf given? */
1179   if (p != NULL) {
1180     atomic_inc(&p->ref);
1181   }
1182 }
1183 
1184 /**
1185  * @ingroup pbuf
1186  * Concatenate two pbufs (each may be a pbuf chain) and take over
1187  * the caller's reference of the tail pbuf.
1188  *
1189  * @note The caller MAY NOT reference the tail pbuf afterwards.
1190  * Use pbuf_chain() for that purpose.
1191  *
1192  * This function explicitly does not check for tot_len overflow to prevent
1193  * failing to queue too long pbufs. This can produce invalid pbufs, so
1194  * handle with care!
1195  *
1196  * @see pbuf_chain()
1197  */
1198 void
pbuf_cat(struct pbuf * h,struct pbuf * t)1199 pbuf_cat(struct pbuf *h, struct pbuf *t)
1200 {
1201   struct pbuf *p;
1202 
1203   LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)",
1204              ((h != NULL) && (t != NULL)), return;);
1205 
1206   /* proceed to last pbuf of chain */
1207   for (p = h; p->next != NULL; p = p->next) {
1208     /* add total length of second chain to all totals of first chain */
1209     p->tot_len = (u16_t)(p->tot_len + t->tot_len);
1210   }
1211   /* { p is last pbuf of first h chain, p->next == NULL } */
1212   LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
1213   LWIP_ASSERT("p->next == NULL", p->next == NULL);
1214   /* add total length of second chain to last pbuf total of first chain */
1215   p->tot_len = (u16_t)(p->tot_len + t->tot_len);
1216   /* chain last pbuf of head (p) with first of tail (t) */
1217   p->next = t;
1218   /* p->next now references t, but the caller will drop its reference to t,
1219    * so netto there is no change to the reference count of t.
1220    */
1221 }
1222 
1223 /**
1224  * @ingroup pbuf
1225  * Chain two pbufs (or pbuf chains) together.
1226  *
1227  * The caller MUST call pbuf_free(t) once it has stopped
1228  * using it. Use pbuf_cat() instead if you no longer use t.
1229  *
1230  * @param h head pbuf (chain)
1231  * @param t tail pbuf (chain)
1232  * @note The pbufs MUST belong to the same packet.
1233  * @note MAY NOT be called on a packet queue.
1234  *
1235  * The ->tot_len fields of all pbufs of the head chain are adjusted.
1236  * The ->next field of the last pbuf of the head chain is adjusted.
1237  * The ->ref field of the first pbuf of the tail chain is adjusted.
1238  *
1239  */
1240 void
pbuf_chain(struct pbuf * h,struct pbuf * t)1241 pbuf_chain(struct pbuf *h, struct pbuf *t)
1242 {
1243   pbuf_cat(h, t);
1244   /* t is now referenced by h */
1245   pbuf_ref(t);
1246   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
1247 }
1248 
/**
 * Dechains the first pbuf from its succeeding pbufs in the chain.
 *
 * Makes p->tot_len field equal to p->len.
 * @param p pbuf to dechain
 * @return remainder of the pbuf chain, or NULL if it was de-allocated.
 * @note May not be called on a packet queue.
 */
struct pbuf *
pbuf_dechain(struct pbuf *p)
{
  struct pbuf *q;
  /* defaults to 1 so that a chain with no successor also returns NULL below */
  u8_t tail_gone = 1;
  /* tail */
  LWIP_ERROR("Invalid argument in pbuf_dechain \n", (p != NULL), return NULL);

  q = p->next;
  /* pbuf has successor in chain? */
  if (q != NULL) {
    /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */
    LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len);
    /* enforce invariant if assertion is disabled */
    q->tot_len = (u16_t)(p->tot_len - p->len);
    /* decouple pbuf from remainder */
    p->next = NULL;
    /* total length of pbuf p is its own length only */
    p->tot_len = p->len;
    /* q is no longer referenced by p, free it; pbuf_free() returns the number
     * of pbufs it deallocated, so non-zero means the tail is gone */
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q));
    tail_gone = pbuf_free(q);
    if (tail_gone > 0) {
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
                  ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q));
    }
    /* return remaining tail or NULL if deallocated */
  }
  /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */
  LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
  return ((tail_gone > 0) ? NULL : q);
}
1289 
1290 /**
1291  * @ingroup pbuf
1292  * Create PBUF_RAM copies of pbufs.
1293  *
1294  * Used to queue packets on behalf of the lwIP stack, such as
1295  * ARP based queueing.
1296  *
1297  * @note You MUST explicitly use p = pbuf_take(p);
1298  *
1299  * @note Only one packet is copied, no packet queue!
1300  *
1301  * @param p_to pbuf destination of the copy
1302  * @param p_from pbuf source of the copy
1303  *
1304  * @return ERR_OK if pbuf was copied
1305  *         ERR_ARG if one of the pbufs is NULL or p_to is not big
1306  *                 enough to hold p_from
1307  */
1308 err_t
pbuf_copy(struct pbuf * p_to,const struct pbuf * p_from)1309 pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from)
1310 {
1311   LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n",
1312               (const void *)p_to, (const void *)p_from));
1313 
1314   LWIP_ERROR("pbuf_copy: invalid source", p_from != NULL, return ERR_ARG;);
1315   return pbuf_copy_partial_pbuf(p_to, p_from, p_from->tot_len, 0);
1316 }
1317 
/**
 * @ingroup pbuf
 * Copy part or all of one packet buffer into another, to a specified offset.
 *
 * @note Only data in one packet is copied, no packet queue!
 * @note Argument order is shared with pbuf_copy, but different than pbuf_copy_partial.
 *
 * @param p_to pbuf destination of the copy
 * @param p_from pbuf source of the copy
 * @param copy_len number of bytes to copy
 * @param offset offset in destination pbuf where to copy to
 *
 * @return ERR_OK if copy_len bytes were copied
 *         ERR_ARG if one of the pbufs is NULL or p_from is shorter than copy_len
 *                 or p_to is not big enough to hold copy_len at offset
 *         ERR_VAL if any of the pbufs are part of a queue
 */
err_t
pbuf_copy_partial_pbuf(struct pbuf *p_to, const struct pbuf *p_from, u16_t copy_len, u16_t offset)
{
  /* independent cursors into the current destination and source pbufs */
  size_t offset_to = offset, offset_from = 0, len_calc;
  u16_t len;

  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy_partial_pbuf(%p, %p, %"U16_F", %"U16_F")\n",
              (const void *)p_to, (const void *)p_from, copy_len, offset));

  /* is the copy_len in range? */
  LWIP_ERROR("pbuf_copy_partial_pbuf: copy_len bigger than source", ((p_from != NULL) &&
             (p_from->tot_len >= copy_len)), return ERR_ARG;);
  /* is the target big enough to hold the source? */
  LWIP_ERROR("pbuf_copy_partial_pbuf: target not big enough", ((p_to != NULL) &&
             (p_to->tot_len >= (offset + copy_len))), return ERR_ARG;);

  /* iterate through pbuf chain, copying the largest contiguous run that
   * fits in both the current source and current destination pbuf */
  do {
    /* copy one part of the original chain */
    if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
      /* complete current p_from fits into current p_to */
      len_calc = p_from->len - offset_from;
    } else {
      /* current p_from does not fit into current p_to */
      len_calc = p_to->len - offset_to;
    }
    /* never copy more than the bytes still requested */
    len = (u16_t)LWIP_MIN(copy_len, len_calc);
    MEMCPY((u8_t *)p_to->payload + offset_to, (u8_t *)p_from->payload + offset_from, len);
    offset_to += len;
    offset_from += len;
    copy_len -= len;
    LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
    LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
    if (offset_from >= p_from->len) {
      /* on to next p_from (if any) */
      offset_from = 0;
      p_from = p_from->next;
      LWIP_ERROR("p_from != NULL", (p_from != NULL) || (copy_len == 0), return ERR_ARG;);
    }
    if (offset_to == p_to->len) {
      /* on to next p_to (if any) */
      offset_to = 0;
      p_to = p_to->next;
      LWIP_ERROR("p_to != NULL", (p_to != NULL) || (copy_len == 0), return ERR_ARG;);
    }

    /* a pbuf whose len equals tot_len is the last pbuf of its packet;
     * a non-NULL next there would mean a packet queue, which is rejected */
    if ((p_from != NULL) && (p_from->len == p_from->tot_len)) {
      /* don't copy more than one packet! */
      LWIP_ERROR("pbuf_copy_partial_pbuf() does not allow packet queues!",
                 (p_from->next == NULL), return ERR_VAL;);
    }
    if ((p_to != NULL) && (p_to->len == p_to->tot_len)) {
      /* don't copy more than one packet! */
      LWIP_ERROR("pbuf_copy_partial_pbuf() does not allow packet queues!",
                 (p_to->next == NULL), return ERR_VAL;);
    }
  } while (copy_len);
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy_partial_pbuf: copy complete.\n"));
  return ERR_OK;
}
1395 
1396 /**
1397  * @ingroup pbuf
1398  * Copy (part of) the contents of a packet buffer
1399  * to an application supplied buffer.
1400  *
1401  * @param buf the pbuf from which to copy data
1402  * @param dataptr the application supplied buffer
1403  * @param len length of data to copy (dataptr must be big enough). No more
1404  * than buf->tot_len will be copied, irrespective of len
1405  * @param offset offset into the packet buffer from where to begin copying len bytes
1406  * @return the number of bytes copied, or 0 on failure
1407  */
1408 u16_t
pbuf_copy_partial(const struct pbuf * buf,void * dataptr,u16_t len,u16_t offset)1409 pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
1410 {
1411   const struct pbuf *p;
1412   u16_t left = 0;
1413   u16_t buf_copy_len;
1414   u16_t copied_total = 0;
1415 
1416   LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
1417   LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);
1418 
1419   /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1420   for (p = buf; len != 0 && p != NULL; p = p->next) {
1421     if ((offset != 0) && (offset >= p->len)) {
1422       /* don't copy from this buffer -> on to the next */
1423       offset = (u16_t)(offset - p->len);
1424     } else {
1425       /* copy from this buffer. maybe only partially. */
1426       buf_copy_len = (u16_t)(p->len - offset);
1427       if (buf_copy_len > len) {
1428         buf_copy_len = len;
1429       }
1430       /* copy the necessary parts of the buffer */
1431       MEMCPY(&((char *)dataptr)[left], &((char *)p->payload)[offset], buf_copy_len);
1432       copied_total = (u16_t)(copied_total + buf_copy_len);
1433       left = (u16_t)(left + buf_copy_len);
1434       len = (u16_t)(len - buf_copy_len);
1435       offset = 0;
1436     }
1437   }
1438   return copied_total;
1439 }
1440 
1441 /**
1442  * @ingroup pbuf
1443  * Get part of a pbuf's payload as contiguous memory. The returned memory is
1444  * either a pointer into the pbuf's payload or, if split over multiple pbufs,
1445  * a copy into the user-supplied buffer.
1446  *
1447  * @param p the pbuf from which to copy data
1448  * @param buffer the application supplied buffer
1449  * @param bufsize size of the application supplied buffer
1450  * @param len length of data to copy (dataptr must be big enough). No more
1451  * than buf->tot_len will be copied, irrespective of len
1452  * @param offset offset into the packet buffer from where to begin copying len bytes
1453  * @return the number of bytes copied, or 0 on failure
1454  */
1455 void *
pbuf_get_contiguous(const struct pbuf * p,void * buffer,size_t bufsize,u16_t len,u16_t offset)1456 pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset)
1457 {
1458   const struct pbuf *q;
1459   u16_t out_offset;
1460 
1461   LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;);
1462   LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;);
1463   LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;);
1464 
1465   q = pbuf_skip_const(p, offset, &out_offset);
1466   if (q != NULL) {
1467     if (q->len >= (out_offset + len)) {
1468       /* all data in this pbuf, return zero-copy */
1469       return (u8_t *)q->payload + out_offset;
1470     }
1471     /* need to copy */
1472     if (pbuf_copy_partial(q, buffer, len, out_offset) != len) {
1473       /* copying failed: pbuf is too short */
1474       return NULL;
1475     }
1476     return buffer;
1477   }
1478   /* pbuf is too short (offset does not fit in) */
1479   return NULL;
1480 }
1481 
#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
/**
 * This method modifies a 'pbuf chain', so that its total length is
 * smaller than 64K. The remainder of the original pbuf chain is stored
 * in *rest.
 * This function never creates new pbufs, but splits an existing chain
 * in two parts. The tot_len of the modified packet queue will likely be
 * smaller than 64K.
 * 'packet queues' are not supported by this function.
 *
 * @param p the pbuf queue to be split
 * @param rest pointer to store the remainder (after the first 64K)
 */
void pbuf_split_64k(struct pbuf *p, struct pbuf **rest)
{
  LWIP_ERROR("Invalid argument in pbuf_split_64k \n", (rest != NULL), return);

  *rest = NULL;
  if ((p != NULL) && (p->next != NULL)) {
    u16_t tot_len_front = p->len;
    struct pbuf *i = p;
    struct pbuf *r = p->next;

    /* continue until the total length (summed up as u16_t) overflows;
     * on u16_t wrap-around the sum becomes smaller than tot_len_front,
     * which is the loop's exit condition */
    while ((r != NULL) && ((u16_t)(tot_len_front + r->len) >= tot_len_front)) {
      tot_len_front = (u16_t)(tot_len_front + r->len);
      i = r;
      r = r->next;
    }
    /* i now points to last packet of the first segment. Set next
       pointer to NULL */
    i->next = NULL;

    if (r != NULL) {
      /* Update the tot_len field in the first part: every pbuf of the front
       * chain loses the byte count that moved into the rest chain */
      for (i = p; i != NULL; i = i->next) {
        i->tot_len = (u16_t)(i->tot_len - r->tot_len);
        LWIP_ASSERT("tot_len/len mismatch in last pbuf",
                    (i->next != NULL) || (i->tot_len == i->len));
      }
      /* the FIN flag belongs with the final bytes, i.e. the rest chain */
      if (p->flags & PBUF_FLAG_TCP_FIN) {
        r->flags |= PBUF_FLAG_TCP_FIN;
      }

      /* tot_len field in rest does not need modifications */
      /* reference counters do not need modifications */
      *rest = r;
    }
  }
}
#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1533 
1534 /* Actual implementation of pbuf_skip() but returning const pointer... */
1535 static const struct pbuf *
pbuf_skip_const(const struct pbuf * in,u16_t in_offset,u16_t * out_offset)1536 pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset)
1537 {
1538   u16_t offset_left = in_offset;
1539   const struct pbuf *q = in;
1540 
1541   /* get the correct pbuf */
1542   while ((q != NULL) && (q->len <= offset_left)) {
1543     offset_left = (u16_t)(offset_left - q->len);
1544     q = q->next;
1545   }
1546   if (out_offset != NULL) {
1547     *out_offset = offset_left;
1548   }
1549   return q;
1550 }
1551 
1552 /**
1553  * @ingroup pbuf
1554  * Skip a number of bytes at the start of a pbuf
1555  *
1556  * @param in input pbuf
1557  * @param in_offset offset to skip
1558  * @param out_offset resulting offset in the returned pbuf
1559  * @return the pbuf in the queue where the offset is
1560  */
1561 struct pbuf *
pbuf_skip(struct pbuf * in,u16_t in_offset,u16_t * out_offset)1562 pbuf_skip(struct pbuf *in, u16_t in_offset, u16_t *out_offset)
1563 {
1564   const struct pbuf *out = pbuf_skip_const(in, in_offset, out_offset);
1565   return LWIP_CONST_CAST(struct pbuf *, out);
1566 }
1567 
1568 /**
1569  * @ingroup pbuf
1570  * Copy application supplied data into a pbuf.
1571  * This function can only be used to copy the equivalent of buf->tot_len data.
1572  *
1573  * @param buf pbuf to fill with data
1574  * @param dataptr application supplied data buffer
1575  * @param len length of the application supplied data buffer
1576  *
1577  * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
1578  */
1579 err_t
pbuf_take(struct pbuf * buf,const void * dataptr,u16_t len)1580 pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len)
1581 {
1582   struct pbuf *p;
1583   size_t buf_copy_len;
1584   size_t total_copy_len = len;
1585   size_t copied_total = 0;
1586 
1587   LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;);
1588   LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
1589   LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;);
1590 
1591   /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1592   for (p = buf; total_copy_len != 0; p = p->next) {
1593     LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL);
1594     buf_copy_len = total_copy_len;
1595     if (buf_copy_len > p->len) {
1596       /* this pbuf cannot hold all remaining data */
1597       buf_copy_len = p->len;
1598     }
1599     /* copy the necessary parts of the buffer */
1600     MEMCPY(p->payload, &((const char *)dataptr)[copied_total], buf_copy_len);
1601     total_copy_len -= buf_copy_len;
1602     copied_total += buf_copy_len;
1603   }
1604   LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len);
1605   return ERR_OK;
1606 }
1607 
1608 /**
1609  * @ingroup pbuf
1610  * Same as pbuf_take() but puts data at an offset
1611  *
1612  * @param buf pbuf to fill with data
1613  * @param dataptr application supplied data buffer
1614  * @param len length of the application supplied data buffer
1615  * @param offset offset in pbuf where to copy dataptr to
1616  *
1617  * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
1618  */
1619 err_t
pbuf_take_at(struct pbuf * buf,const void * dataptr,u16_t len,u16_t offset)1620 pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset)
1621 {
1622   u16_t target_offset;
1623   struct pbuf *q = NULL;
1624   LWIP_ERROR("Invalid argument in pbuf_take_at \n", (dataptr != NULL), return ERR_ARG);
1625 
1626   q = pbuf_skip(buf, offset, &target_offset);
1627   /* return requested data if pbuf is OK */
1628   if ((q != NULL) && (q->tot_len >= target_offset + len)) {
1629     u16_t remaining_len = len;
1630     const u8_t *src_ptr = (const u8_t *)dataptr;
1631     /* copy the part that goes into the first pbuf */
1632     u16_t first_copy_len;
1633     LWIP_ASSERT("check pbuf_skip result", target_offset < q->len);
1634     first_copy_len = (u16_t)LWIP_MIN(q->len - target_offset, len);
1635     MEMCPY(((u8_t *)q->payload) + target_offset, dataptr, first_copy_len);
1636     remaining_len = (u16_t)(remaining_len - first_copy_len);
1637     src_ptr += first_copy_len;
1638     if (remaining_len > 0) {
1639       return pbuf_take(q->next, src_ptr, remaining_len);
1640     }
1641     return ERR_OK;
1642   }
1643   return ERR_MEM;
1644 }
1645 
1646 /**
1647  * @ingroup pbuf
1648  * Creates a single pbuf out of a queue of pbufs.
1649  *
1650  * @remark: Either the source pbuf 'p' is freed by this function or the original
1651  *          pbuf 'p' is returned, therefore the caller has to check the result!
1652  *
1653  * @param p the source pbuf
1654  * @param layer pbuf_layer of the new pbuf
1655  *
1656  * @return a new, single pbuf (p->next is NULL)
1657  *         or the old pbuf if allocation fails
1658  */
1659 struct pbuf *
pbuf_coalesce(struct pbuf * p,pbuf_layer layer)1660 pbuf_coalesce(struct pbuf *p, pbuf_layer layer)
1661 {
1662   struct pbuf *q;
1663   LWIP_ERROR("Invalid argument in pbuf_coalesce \n", (p != NULL), return NULL);
1664   if (p->next == NULL) {
1665     return p;
1666   }
1667   q = pbuf_clone(layer, PBUF_RAM, p);
1668   if (q == NULL) {
1669     /* @todo: what do we do now? */
1670     return p;
1671   }
1672   pbuf_free(p);
1673   return q;
1674 }
1675 
1676 /**
1677  * @ingroup pbuf
1678  * Allocates a new pbuf of same length (via pbuf_alloc()) and copies the source
1679  * pbuf into this new pbuf (using pbuf_copy()).
1680  *
1681  * @param layer pbuf_layer of the new pbuf
1682  * @param type this parameter decides how and where the pbuf should be allocated
1683  *             (@see pbuf_alloc())
1684  * @param p the source pbuf
1685  *
1686  * @return a new pbuf or NULL if allocation fails
1687  */
1688 struct pbuf *
pbuf_clone(pbuf_layer layer,pbuf_type type,struct pbuf * p)1689 pbuf_clone(pbuf_layer layer, pbuf_type type, struct pbuf *p)
1690 {
1691   struct pbuf *q;
1692   err_t err;
1693   q = pbuf_alloc(layer, p->tot_len, type);
1694   if (q == NULL) {
1695     return NULL;
1696   }
1697   err = pbuf_copy(q, p);
1698   LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */
1699   LWIP_ASSERT("pbuf_copy failed", err == ERR_OK);
1700   return q;
1701 }
1702 
1703 #if LWIP_CHECKSUM_ON_COPY
/**
 * Copies data into a single pbuf (*not* into a pbuf queue!) and updates
 * the checksum while copying
 *
 * @param p the pbuf to copy data into
 * @param start_offset offset of p->payload where to copy the data to
 * @param dataptr data to copy into the pbuf
 * @param len length of data to copy into the pbuf
 * @param chksum pointer to the checksum which is updated
 * @return ERR_OK if successful, another error if the data does not fit
 *         within the (first) pbuf (no pbuf queues!)
 */
err_t
pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr,
                 u16_t len, u16_t *chksum)
{
  u32_t acc;
  u16_t copy_chksum;
  char *dst_ptr;
  LWIP_ASSERT("p != NULL", p != NULL);
  LWIP_ASSERT("dataptr != NULL", dataptr != NULL);
  LWIP_ASSERT("chksum != NULL", chksum != NULL);
  LWIP_ASSERT("len != 0", len != 0);

  /* the [start_offset, start_offset+len) region must lie entirely within
     this single pbuf (p->len, not p->tot_len: no chain traversal here) */
  if ((start_offset >= p->len) || (start_offset + len > p->len)) {
    return ERR_ARG;
  }

  dst_ptr = ((char *)p->payload) + start_offset;
  /* copy and compute the 16-bit sum of the copied bytes in a single pass */
  copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len);
  if ((start_offset & 1) != 0) {
    /* data starts at an odd payload offset: swap the bytes of the partial
       sum so it lines up with the even/odd byte lanes of the running
       one's-complement checksum */
    copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum);
  }
  /* add the partial sum into the caller's checksum and fold the carry
     bits back into 16 bits */
  acc = *chksum;
  acc += copy_chksum;
  *chksum = FOLD_U32T(acc);
  return ERR_OK;
}
1742 #endif /* LWIP_CHECKSUM_ON_COPY */
1743 
1744 /**
1745  * @ingroup pbuf
1746  * Get one byte from the specified position in a pbuf
1747  * WARNING: returns zero for offset >= p->tot_len
1748  *
1749  * @param p pbuf to parse
1750  * @param offset offset into p of the byte to return
1751  * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len
1752  */
1753 u8_t
pbuf_get_at(const struct pbuf * p,u16_t offset)1754 pbuf_get_at(const struct pbuf *p, u16_t offset)
1755 {
1756   int ret = pbuf_try_get_at(p, offset);
1757   if (ret >= 0) {
1758     return (u8_t)ret;
1759   }
1760   return 0;
1761 }
1762 
1763 /**
1764  * @ingroup pbuf
1765  * Get one byte from the specified position in a pbuf
1766  *
1767  * @param p pbuf to parse
1768  * @param offset offset into p of the byte to return
1769  * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len
1770  */
1771 int
pbuf_try_get_at(const struct pbuf * p,u16_t offset)1772 pbuf_try_get_at(const struct pbuf *p, u16_t offset)
1773 {
1774   u16_t q_idx;
1775   const struct pbuf *q = pbuf_skip_const(p, offset, &q_idx);
1776 
1777   /* return requested data if pbuf is OK */
1778   if ((q != NULL) && (q->len > q_idx)) {
1779     return ((u8_t *)q->payload)[q_idx];
1780   }
1781   return -1;
1782 }
1783 
1784 /**
1785  * @ingroup pbuf
1786  * Put one byte to the specified position in a pbuf
1787  * WARNING: silently ignores offset >= p->tot_len
1788  *
1789  * @param p pbuf to fill
1790  * @param offset offset into p of the byte to write
1791  * @param data byte to write at an offset into p
1792  */
1793 void
pbuf_put_at(struct pbuf * p,u16_t offset,u8_t data)1794 pbuf_put_at(struct pbuf *p, u16_t offset, u8_t data)
1795 {
1796   u16_t q_idx;
1797   struct pbuf *q = pbuf_skip(p, offset, &q_idx);
1798 
1799   /* write requested data if pbuf is OK */
1800   if ((q != NULL) && (q->len > q_idx)) {
1801     ((u8_t *)q->payload)[q_idx] = data;
1802   }
1803 }
1804 
1805 /**
1806  * @ingroup pbuf
1807  * Compare pbuf contents at specified offset with memory s2, both of length n
1808  *
1809  * @param p pbuf to compare
1810  * @param offset offset into p at which to start comparing
1811  * @param s2 buffer to compare
1812  * @param n length of buffer to compare
1813  * @return zero if equal, nonzero otherwise
1814  *         (0xffff if p is too short, diffoffset+1 otherwise)
1815  */
1816 u16_t
pbuf_memcmp(const struct pbuf * p,u16_t offset,const void * s2,u16_t n)1817 pbuf_memcmp(const struct pbuf *p, u16_t offset, const void *s2, u16_t n)
1818 {
1819   u16_t start = offset;
1820   const struct pbuf *q = p;
1821   u16_t i;
1822   LWIP_ERROR("Invalid argument in pbuf_memcmp pbuf \n", (p != NULL), return 0xffff);
1823   LWIP_ERROR("Invalid argument in pbuf_memcmp buf to compare \n", (s2 != NULL), return 0xffff);
1824 
1825   /* pbuf long enough to perform check? */
1826   if (p->tot_len < (offset + n)) {
1827     return 0xffff;
1828   }
1829 
1830   /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. */
1831   while ((q != NULL) && (q->len <= start)) {
1832     start = (u16_t)(start - q->len);
1833     q = q->next;
1834   }
1835 
1836   /* return requested data if pbuf is OK */
1837   for (i = 0; i < n; i++) {
1838     /* We know pbuf_get_at() succeeds because of p->tot_len check above. */
1839     u8_t a = pbuf_get_at(q, (u16_t)(start + i));
1840     u8_t b = ((const u8_t *)s2)[i];
1841     if (a != b) {
1842       return (u16_t)LWIP_MIN(i + 1, 0xFFFF);
1843     }
1844   }
1845   return 0;
1846 }
1847 
1848 /**
1849  * @ingroup pbuf
1850  * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset
1851  * start_offset.
1852  *
1853  * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1854  *        return value 'not found'
1855  * @param mem search for the contents of this buffer
1856  * @param mem_len length of 'mem'
1857  * @param start_offset offset into p at which to start searching
1858  * @return 0xFFFF if substr was not found in p or the index where it was found
1859  */
1860 u16_t
pbuf_memfind(const struct pbuf * p,const void * mem,u16_t mem_len,u16_t start_offset)1861 pbuf_memfind(const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset)
1862 {
1863   u16_t i;
1864   u16_t max_cmp_start;
1865   LWIP_ERROR("Invalid argument in pbuf_coalesce pbuf\n", (p != NULL), return 0xFFFF);
1866   LWIP_ERROR("Invalid argument in pbuf_coalesce mem\n", (mem != NULL), return 0xFFFF);
1867 
1868   max_cmp_start = (u16_t)(p->tot_len - mem_len);
1869   if (p->tot_len >= mem_len + start_offset) {
1870     for (i = start_offset; i <= max_cmp_start; i++) {
1871       u16_t plus = pbuf_memcmp(p, i, mem, mem_len);
1872       if (plus == 0) {
1873         return i;
1874       }
1875     }
1876   }
1877   return 0xFFFF;
1878 }
1879 
1880 /**
1881  * Find occurrence of substr with length substr_len in pbuf p, start at offset
1882  * start_offset
1883  * WARNING: in contrast to strstr(), this one does not stop at the first \0 in
1884  * the pbuf/source string!
1885  *
1886  * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1887  *        return value 'not found'
1888  * @param substr string to search for in p, maximum length is 0xFFFE
1889  * @return 0xFFFF if substr was not found in p or the index where it was found
1890  */
1891 u16_t
pbuf_strstr(const struct pbuf * p,const char * substr)1892 pbuf_strstr(const struct pbuf *p, const char *substr)
1893 {
1894   size_t substr_len;
1895   LWIP_ERROR("Invalid argument in pbuf_strstr \n", (p != NULL), return 0xffff);
1896 
1897   if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) {
1898     return 0xFFFF;
1899   }
1900   substr_len = strlen(substr);
1901   if (substr_len >= 0xFFFF) {
1902     return 0xFFFF;
1903   }
1904   return pbuf_memfind(p, substr, (u16_t)substr_len, 0);
1905 }
1906 
1907 #if MEM_PBUF_RAM_SIZE_LIMIT
1908 /* set the max size of pbuf with type PBUF_RAM
1909  *
1910  * @param ram_max_size the setting size.
1911  * @return the orgin size of pbuf with type PBUF_RAM.
1912  */
1913 u32_t
pbuf_ram_size_set(u32_t ram_max_size)1914 pbuf_ram_size_set(u32_t ram_max_size)
1915 {
1916   u32_t pbuf_ram_size_origin;
1917 
1918   if (tcpip_init_finish == 1) {
1919     LWIP_DEBUGF(PBUF_DEBUG, ("LWIP already initialized. Cannot change ram size \n"));
1920     return 0;
1921   }
1922 
1923   if ((ram_max_size < PBUF_RAM_SIZE_MIN) || (ram_max_size > (0xFFFFFFFF>>1))) {
1924     LWIP_DEBUGF(PBUF_DEBUG, ("ram_max_size (%u) less than %d\n", ram_max_size, PBUF_RAM_SIZE_MIN));
1925     return 0;
1926   }
1927 
1928   pbuf_ram_size_origin = pbuf_ram_size;
1929   pbuf_ram_size = ram_max_size;
1930 
1931   return pbuf_ram_size_origin;
1932 }
1933 #endif
1934