• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * @file
3  * This is the IPv4 packet segmentation and reassembly implementation.
4  *
5  */
6 
7 /*
8  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without modification,
12  * are permitted provided that the following conditions are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation
18  *    and/or other materials provided with the distribution.
19  * 3. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
23  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
25  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
27  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
30  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
31  * OF SUCH DAMAGE.
32  *
33  * This file is part of the lwIP TCP/IP stack.
34  *
35  * Author: Jani Monoses <jani@iv.ro>
36  *         Simon Goldschmidt
37  * original reassembly code by Adam Dunkels <adam@sics.se>
38  *
39  */
40 
41 #include "lwip/opt.h"
42 
43 #if LWIP_IPV4
44 
45 #include "lwip/ip4_frag.h"
46 #include "lwip/def.h"
47 #include "lwip/inet_chksum.h"
48 #include "lwip/netif.h"
49 #include "lwip/stats.h"
50 #include "lwip/icmp.h"
51 
52 #include <string.h>
53 
54 #if IP_REASSEMBLY
55 /**
56  * The IP reassembly code currently has the following limitations:
57  * - IP header options are not supported
58  * - fragments must not overlap (e.g. due to different routes),
59  *   currently, overlapping or duplicate fragments are thrown away
60  *   if IP_REASS_CHECK_OVERLAP=1 (the default)!
61  *
62  * @todo: work with IP header options
63  */
64 
65 /** Setting this to 0, you can turn off checking the fragments for overlapping
66  * regions. The code gets a little smaller. Only use this if you know that
67  * overlapping won't occur on your network! */
68 #ifndef IP_REASS_CHECK_OVERLAP
69 #define IP_REASS_CHECK_OVERLAP 1
70 #endif /* IP_REASS_CHECK_OVERLAP */
71 
72 /** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is
73  * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller.
74  * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA
75  * is set to 1, so one datagram can be reassembled at a time, only. */
76 #ifndef IP_REASS_FREE_OLDEST
77 #define IP_REASS_FREE_OLDEST 1
78 #endif /* IP_REASS_FREE_OLDEST */
79 
80 #define IP_REASS_FLAG_LASTFRAG 0x01
81 
82 #define IP_REASS_VALIDATE_TELEGRAM_FINISHED  1
83 #define IP_REASS_VALIDATE_PBUF_QUEUED        0
84 #define IP_REASS_VALIDATE_PBUF_DROPPED       -1
85 
86 /** This is a helper struct which holds the starting
87  * offset and the ending offset of this fragment to
88  * easily chain the fragments.
89  * It has the same packing requirements as the IP header, since it replaces
90  * the IP header in memory in incoming fragments (after copying it) to keep
91  * track of the various fragments. (-> If the IP header doesn't need packing,
92  * this struct doesn't need packing, too.)
93  */
94 #ifdef PACK_STRUCT_USE_INCLUDES
95 #  include "arch/bpstruct.h"
96 #endif
PACK_STRUCT_BEGIN
struct ip_reass_helper {
  /* next fragment pbuf in the queue, kept sorted by ascending 'start' */
  PACK_STRUCT_FIELD(struct pbuf *next_pbuf);
  /* byte offset of this fragment's payload within the full datagram */
  PACK_STRUCT_FIELD(u16_t start);
  /* byte offset one past this fragment's payload (start + payload length) */
  PACK_STRUCT_FIELD(u16_t end);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
104 #ifdef PACK_STRUCT_USE_INCLUDES
105 #  include "arch/epstruct.h"
106 #endif
107 
/* Evaluates to 1 if the two IP headers belong to the same fragmented datagram
 * (same source address, destination address and IP identification field),
 * else 0.
 * Fix: the whole expansion is now parenthesized; previously the trailing
 * '? 1 : 0' was outside the parentheses, so using the macro inside a larger
 * expression (e.g. 'M(a,b) + 1' or 'M(a,b) == x') would misparse. */
#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB)     \
  ((ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) &&   \
    ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \
    (IPH_ID(iphdrA) == IPH_ID(iphdrB))) ? 1 : 0)
112 
113 /* global variables */
114 static struct ip_reassdata *reassdatagrams;
115 static u32_t ip_reass_pbufcount;
116 static inline
117 int ipreass_discard_large_packet(const struct ip_hdr *fraghdr, struct ip_reassdata *ipr,
118                                  struct ip_reassdata *ipr_prev, u16_t offset);
119 
120 /* function prototypes */
121 static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
122 static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
123 
124 /**
125  * Reassembly timer base function
126  * for both NO_SYS == 0 and 1 (!).
127  *
128  * Should be called every 1000 msec (defined by IP_TMR_INTERVAL).
129  */
130 void
ip_reass_tmr(void)131 ip_reass_tmr(void)
132 {
133   struct ip_reassdata *r, *prev = NULL;
134 
135   r = reassdatagrams;
136   while (r != NULL) {
137     /* Decrement the timer. Once it reaches 0,
138      * clean up the incomplete fragment assembly */
139     if (r->timer > 0) {
140       r->timer--;
141       LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n", (u16_t)r->timer));
142       prev = r;
143       r = r->next;
144     } else {
145       /* reassembly timed out */
146       struct ip_reassdata *tmp;
147       LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n"));
148       tmp = r;
149       /* get the next pointer before freeing */
150       r = r->next;
151       /* free the helper struct and all enqueued pbufs */
152       ip_reass_free_complete_datagram(tmp, prev);
153     }
154   }
155 }
156 
157 /**
158  * Free a datagram (struct ip_reassdata) and all its pbufs.
159  * Updates the total count of enqueued pbufs (ip_reass_pbufcount),
160  * SNMP counters and sends an ICMP time exceeded packet.
161  *
162  * @param ipr datagram to free
163  * @param prev the previous datagram in the linked list
164  * @return the number of pbufs freed
165  */
166 static int
ip_reass_free_complete_datagram(struct ip_reassdata * ipr,struct ip_reassdata * prev)167 ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
168 {
169   u16_t pbufs_freed = 0;
170   u16_t clen;
171   struct pbuf *p;
172   struct ip_reass_helper *iprh;
173 
174   LWIP_ASSERT("prev != ipr", prev != ipr);
175   if (prev != NULL) {
176     LWIP_ASSERT("prev->next == ipr", prev->next == ipr);
177   }
178 
179   MIB2_STATS_INC(mib2.ipreasmfails);
180 #if LWIP_ICMP
181   iprh = (struct ip_reass_helper *)ipr->p->payload;
182   if (iprh->start == 0) {
183     /* The first fragment was received, send ICMP time exceeded. */
184     /* First, de-queue the first pbuf from r->p. */
185     p = ipr->p;
186     ipr->p = iprh->next_pbuf;
187     /* Then, copy the original header into it. */
188     SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN);
189     icmp_time_exceeded(p, ICMP_TE_FRAG);
190     clen = pbuf_clen(p);
191     LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
192     pbufs_freed = (u16_t)(pbufs_freed + clen);
193     pbuf_free(p);
194   }
195 #endif /* LWIP_ICMP */
196 
197   /* First, free all received pbufs.  The individual pbufs need to be released
198      separately as they have not yet been chained */
199   p = ipr->p;
200   while (p != NULL) {
201     struct pbuf *pcur;
202     iprh = (struct ip_reass_helper *)p->payload;
203     pcur = p;
204     /* get the next pointer before freeing */
205     p = iprh->next_pbuf;
206     clen = pbuf_clen(pcur);
207     LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
208     pbufs_freed = (u16_t)(pbufs_freed + clen);
209     pbuf_free(pcur);
210   }
211   LWIP_ASSERT("ipr->count >= pbufs_freed", ipr->count >= pbufs_freed);
212   /* Then, unchain the struct ip_reassdata from the list and free it. */
213   ip_reass_dequeue_datagram(ipr, prev);
214   LWIP_ASSERT("ip_reass_pbufcount >= pbufs_freed", ip_reass_pbufcount >= pbufs_freed);
215   ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - pbufs_freed);
216 
217   return pbufs_freed;
218 }
219 
220 #if LWIP_LOWPOWER
221 #include "lwip/lowpower.h"
222 u32_t
ip_reass_tmr_tick(void)223 ip_reass_tmr_tick(void)
224 {
225   struct ip_reassdata *r = NULL;
226   u32_t tick = 0;
227   u32_t val;
228 
229   r = reassdatagrams;
230   while (r != NULL) {
231     val = r->timer + 1;
232     SET_TMR_TICK(tick, val);
233     r = r->next;
234   }
235   LOWPOWER_DEBUG(("%s tmr tick: %u\n", __func__, tick));
236   return tick;
237 }
238 #endif /* LWIP_LOWPOWER */
239 
240 #if IP_REASS_FREE_OLDEST
241 /**
242  * Free the oldest datagram to make room for enqueueing new fragments.
243  * The datagram 'fraghdr' belongs to is not freed!
244  *
245  * @param fraghdr IP header of the current fragment
246  * @param pbufs_needed number of pbufs needed to enqueue
247  *        (used for freeing other datagrams if not enough space)
248  * @return the number of pbufs freed
249  */
250 static int
ip_reass_remove_oldest_datagram(struct ip_hdr * fraghdr,int pbufs_needed)251 ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed)
252 {
253   /* @todo Can't we simply remove the last datagram in the
254    *       linked list behind reassdatagrams?
255    */
256   struct ip_reassdata *r, *oldest, *prev, *oldest_prev;
257   int pbufs_freed = 0, pbufs_freed_current;
258   int other_datagrams;
259 
260   /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs,
261    * but don't free the datagram that 'fraghdr' belongs to! */
262   do {
263     oldest = NULL;
264     prev = NULL;
265     oldest_prev = NULL;
266     other_datagrams = 0;
267     r = reassdatagrams;
268     while (r != NULL) {
269       if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) {
270         /* Not the same datagram as fraghdr */
271         other_datagrams++;
272         if (oldest == NULL) {
273           oldest = r;
274           oldest_prev = prev;
275         } else if (r->timer <= oldest->timer) {
276           /* older than the previous oldest */
277           oldest = r;
278           oldest_prev = prev;
279         }
280       }
281       if (r->next != NULL) {
282         prev = r;
283       }
284       r = r->next;
285     }
286     if (oldest != NULL) {
287       pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev);
288       pbufs_freed += pbufs_freed_current;
289     }
290   } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1));
291   return pbufs_freed;
292 }
293 #endif /* IP_REASS_FREE_OLDEST */
294 
295 /**
296  * Enqueues a new fragment into the fragment queue
297  * @param fraghdr points to the new fragments IP hdr
298  * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space)
299  * @return A pointer to the queue location into which the fragment was enqueued
300  */
301 static struct ip_reassdata *
ip_reass_enqueue_new_datagram(struct ip_hdr * fraghdr,int clen)302 ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen)
303 {
304   struct ip_reassdata *ipr;
305 #if ! IP_REASS_FREE_OLDEST
306   LWIP_UNUSED_ARG(clen);
307 #endif
308 
309   /* No matching previous fragment found, allocate a new reassdata struct */
310   ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
311   if (ipr == NULL) {
312 #if IP_REASS_FREE_OLDEST
313     if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) {
314       ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
315     }
316     if (ipr == NULL)
317 #endif /* IP_REASS_FREE_OLDEST */
318     {
319       IPFRAG_STATS_INC(ip_frag.memerr);
320       LWIP_DEBUGF(IP_REASS_DEBUG, ("Failed to alloc reassdata struct\n"));
321       return NULL;
322     }
323   }
324   memset(ipr, 0, sizeof(struct ip_reassdata));
325   ipr->timer = IP_REASS_MAXAGE;
326 
327   /* enqueue the new structure to the front of the list */
328   ipr->next = reassdatagrams;
329   reassdatagrams = ipr;
330   /* copy the ip header for later tests and input */
331   /* @todo: no ip options supported? */
332   SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN);
333   return ipr;
334 }
335 
336 /**
337  * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs.
338  * @param ipr points to the queue entry to dequeue
339  */
340 static void
ip_reass_dequeue_datagram(struct ip_reassdata * ipr,struct ip_reassdata * prev)341 ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
342 {
343   /* dequeue the reass struct  */
344   if (reassdatagrams == ipr) {
345     /* it was the first in the list */
346     reassdatagrams = ipr->next;
347   } else if (prev != NULL) {
348     prev->next = ipr->next;
349   } else {
350     LWIP_DEBUGF(IP_REASS_DEBUG, ("parameter prev should not be NULL\n"));
351   }
352 
353   /* now we can free the ip_reassdata struct */
354   memp_free(MEMP_REASSDATA, ipr);
355 }
356 
357 /**
358  * Chain a new pbuf into the pbuf list that composes the datagram.  The pbuf list
359  * will grow over time as  new pbufs are rx.
360  * Also checks that the datagram passes basic continuity checks (if the last
361  * fragment was received at least once).
362  * @param ipr points to the reassembly state
363  * @param new_p points to the pbuf for the current fragment
364  * @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet)
365  * @return see IP_REASS_VALIDATE_* defines
366  */
367 static int
ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata * ipr,struct pbuf * new_p,int is_last)368 ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last)
369 {
370   struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL;
371   struct pbuf *q;
372   u16_t offset, len;
373   u8_t hlen;
374   struct ip_hdr *fraghdr;
375   int valid = 1;
376 
377   /* Extract length and fragment offset from current fragment */
378   fraghdr = (struct ip_hdr *)new_p->payload;
379   len = lwip_ntohs(IPH_LEN(fraghdr));
380   hlen = IPH_HL_BYTES(fraghdr);
381   if (hlen > len) {
382     /* invalid datagram */
383     return IP_REASS_VALIDATE_PBUF_DROPPED;
384   }
385   len = (u16_t)(len - hlen);
386   offset = IPH_OFFSET_BYTES(fraghdr);
387 
388   /* overwrite the fragment's ip header from the pbuf with our helper struct,
389    * and setup the embedded helper structure. */
390   /* make sure the struct ip_reass_helper fits into the IP header */
391   LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN",
392               sizeof(struct ip_reass_helper) <= IP_HLEN);
393   iprh = (struct ip_reass_helper *)new_p->payload;
394   iprh->next_pbuf = NULL;
395   iprh->start = offset;
396   iprh->end = (u16_t)(offset + len);
397   if (iprh->end < offset) {
398     /* u16_t overflow, cannot handle this */
399     return IP_REASS_VALIDATE_PBUF_DROPPED;
400   }
401 
402   /* Iterate through until we either get to the end of the list (append),
403    * or we find one with a larger offset (insert). */
404   for (q = ipr->p; q != NULL;) {
405     iprh_tmp = (struct ip_reass_helper *)q->payload;
406     if (iprh->start < iprh_tmp->start) {
407       /* the new pbuf should be inserted before this */
408       iprh->next_pbuf = q;
409       if (iprh_prev != NULL) {
410         /* not the fragment with the lowest offset */
411 #if IP_REASS_CHECK_OVERLAP
412         if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) {
413           /* fragment overlaps with previous or following, throw away */
414           return IP_REASS_VALIDATE_PBUF_DROPPED;
415         }
416 #endif /* IP_REASS_CHECK_OVERLAP */
417         iprh_prev->next_pbuf = new_p;
418         if (iprh_prev->end != iprh->start) {
419           /* There is a fragment missing between the current
420            * and the previous fragment */
421           valid = 0;
422         }
423       } else {
424 #if IP_REASS_CHECK_OVERLAP
425         if (iprh->end > iprh_tmp->start) {
426           /* fragment overlaps with following, throw away */
427           return IP_REASS_VALIDATE_PBUF_DROPPED;
428         }
429 #endif /* IP_REASS_CHECK_OVERLAP */
430         /* fragment with the lowest offset */
431         ipr->p = new_p;
432       }
433       break;
434     } else if (iprh->start == iprh_tmp->start) {
435       /* received the same datagram twice: no need to keep the datagram */
436       return IP_REASS_VALIDATE_PBUF_DROPPED;
437 #if IP_REASS_CHECK_OVERLAP
438     } else if (iprh->start < iprh_tmp->end) {
439       /* overlap: no need to keep the new datagram */
440       return IP_REASS_VALIDATE_PBUF_DROPPED;
441 #endif /* IP_REASS_CHECK_OVERLAP */
442     } else {
443       /* Check if the fragments received so far have no holes. */
444       if (iprh_prev != NULL) {
445         if (iprh_prev->end != iprh_tmp->start) {
446           /* There is a fragment missing between the current
447            * and the previous fragment */
448           valid = 0;
449         }
450       }
451     }
452     q = iprh_tmp->next_pbuf;
453     iprh_prev = iprh_tmp;
454   }
455 
456   /* If q is NULL, then we made it to the end of the list. Determine what to do now */
457   if (q == NULL) {
458     if (iprh_prev != NULL) {
459       /* this is (for now), the fragment with the highest offset:
460        * chain it to the last fragment */
461 #if IP_REASS_CHECK_OVERLAP
462       LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
463 #endif /* IP_REASS_CHECK_OVERLAP */
464       iprh_prev->next_pbuf = new_p;
465       if (iprh_prev->end != iprh->start) {
466         valid = 0;
467       }
468     } else {
469 #if IP_REASS_CHECK_OVERLAP
470       LWIP_ASSERT("no previous fragment, this must be the first fragment!",
471                   ipr->p == NULL);
472 #endif /* IP_REASS_CHECK_OVERLAP */
473       /* this is the first fragment we ever received for this ip datagram */
474       ipr->p = new_p;
475     }
476   }
477 
478   /* At this point, the validation part begins: */
479   /* If we already received the last fragment */
480   if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) {
481     /* and had no holes so far */
482     if (valid) {
483       /* then check if the rest of the fragments is here */
484       /* Check if the queue starts with the first datagram */
485       if ((ipr->p == NULL) || (((struct ip_reass_helper *)ipr->p->payload)->start != 0)) {
486         valid = 0;
487       } else {
488         /* and check that there are no holes after this datagram */
489         iprh_prev = iprh;
490         q = iprh->next_pbuf;
491         while (q != NULL) {
492           iprh = (struct ip_reass_helper *)q->payload;
493           if (iprh_prev->end != iprh->start) {
494             valid = 0;
495             break;
496           }
497           iprh_prev = iprh;
498           q = iprh->next_pbuf;
499         }
500         /* if still valid, all fragments are received
501          * (because to the MF==0 already arrived */
502         if (valid) {
503           LWIP_ASSERT("sanity check", ipr->p != NULL);
504           LWIP_ASSERT("sanity check",
505                       ((struct ip_reass_helper *)ipr->p->payload) != iprh);
506           LWIP_ASSERT("validate_datagram:next_pbuf!=NULL",
507                       iprh->next_pbuf == NULL);
508         }
509       }
510     }
511     /* If valid is 0 here, there are some fragments missing in the middle
512      * (since MF == 0 has already arrived). Such datagrams simply time out if
513      * no more fragments are received... */
514     return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED;
515   }
516   /* If we come here, not all fragments were received, yet! */
517   return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */
518 }
519 
520 /**
521  * As per RFC 791, having an datagram length of 65535 is impractical
522  * for most hosts and networks. So we are not allowing datagrams
523  * greater than 65535.
524  */
ipreass_discard_large_packet(const struct ip_hdr * fraghdr,struct ip_reassdata * ipr,struct ip_reassdata * ipr_prev,u16_t offset)525 static int ipreass_discard_large_packet(const struct ip_hdr *fraghdr, struct ip_reassdata *ipr,
526     struct ip_reassdata *ipr_prev, u16_t offset)
527 {
528   struct pbuf *r = NULL;
529   struct pbuf *pcur = NULL;
530   struct ip_reass_helper *iprh = NULL;
531 
532   if (((u32_t)offset + (u32_t)(ntohs(IPH_LEN(fraghdr)))) > 65535) {
533     /* check if there is any reassembly buffer */
534     if (ipr) {
535       /* get pbuf count contained within the reass_data list, and
536           release the chain of pbufs */
537       r = ipr->p;
538       while (r != NULL) {
539         pcur = r;
540 
541         /* and adjust the number of pbufs currently queued for reassembly. */
542         ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - pbuf_clen(r));
543         iprh = (struct ip_reass_helper*)r->payload;
544         if (iprh == NULL) {
545           pbuf_free(pcur);
546           break;
547         }
548         r = iprh->next_pbuf;
549         pbuf_free(pcur);
550       }
551 
552       /* release the sources allocate for the fragment queue entry */
553       ip_reass_dequeue_datagram(ipr, ipr_prev);
554       LWIP_DEBUGF(IP_REASS_DEBUG,
555         ("ip_reass: discarding datagram of IP packet(id=0x%04"X16_F" len=%"U16_F" offset=%"U16_F") which is greater than 65535\n",
556         ntohs(IPH_ID(fraghdr)), ntohs(IPH_LEN(fraghdr)), (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK)*8));
557     }
558     return 1;
559   }
560   return 0;
561 }
562 
563 /**
564  * Reassembles incoming IP fragments into an IP datagram.
565  *
566  * @param p points to a pbuf chain of the fragment
567  * @return NULL if reassembly is incomplete, ? otherwise
568  */
569 struct pbuf *
ip4_reass(struct pbuf * p)570 ip4_reass(struct pbuf *p)
571 {
572   struct pbuf *r;
573   struct ip_hdr *fraghdr;
574   struct ip_reassdata *ipr;
575   struct ip_reass_helper *iprh;
576   u16_t offset, len, clen;
577   u8_t hlen;
578   int valid;
579   int is_last;
580   struct ip_reassdata *ipr_prev = NULL;
581 
582   IPFRAG_STATS_INC(ip_frag.recv);
583   MIB2_STATS_INC(mib2.ipreasmreqds);
584 
585   fraghdr = (struct ip_hdr *)p->payload;
586 
587   if (IPH_HL_BYTES(fraghdr) != IP_HLEN) {
588     LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: IP options currently not supported!\n"));
589     IPFRAG_STATS_INC(ip_frag.err);
590     goto nullreturn;
591   }
592 
593   offset = IPH_OFFSET_BYTES(fraghdr);
594   len = lwip_ntohs(IPH_LEN(fraghdr));
595   hlen = IPH_HL_BYTES(fraghdr);
596   if (hlen > len) {
597     /* invalid datagram */
598     goto nullreturn;
599   }
600   len = (u16_t)(len - hlen);
601 
602   /* Check if we are allowed to enqueue more datagrams. */
603   clen = pbuf_clen(p);
604   if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
605 #if IP_REASS_FREE_OLDEST
606     if (!ip_reass_remove_oldest_datagram(fraghdr, clen) ||
607         ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS))
608 #endif /* IP_REASS_FREE_OLDEST */
609     {
610       /* No datagram could be freed and still too many pbufs enqueued */
611       LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n",
612                                    ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS));
613       IPFRAG_STATS_INC(ip_frag.memerr);
614       /* @todo: send ICMP time exceeded here? */
615       /* drop this pbuf */
616       goto nullreturn;
617     }
618   }
619 
620   /* Look for the datagram the fragment belongs to in the current datagram queue,
621    * remembering the previous in the queue for later dequeueing. */
622   for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) {
623     /* Check if the incoming fragment matches the one currently present
624        in the reassembly buffer. If so, we proceed with copying the
625        fragment into the buffer. */
626     if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) {
627       LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n",
628                                    lwip_ntohs(IPH_ID(fraghdr))));
629       IPFRAG_STATS_INC(ip_frag.cachehit);
630       break;
631     }
632     ipr_prev = ipr;
633   }
634 
635   if (ipreass_discard_large_packet(fraghdr, ipr, ipr_prev, offset)) {
636     goto nullreturn;
637   }
638 
639   if (ipr == NULL) {
640     /* Enqueue a new datagram into the datagram queue */
641     ipr = ip_reass_enqueue_new_datagram(fraghdr, clen);
642     /* Bail if unable to enqueue */
643     if (ipr == NULL) {
644       goto nullreturn;
645     }
646   } else {
647     /* Number of fragments exeeds MAX */
648     if ((ipr->count + clen) > IP_REASS_MAX_PBUFS_PER_PKT) {
649         LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: Overflow condition: Number of fragments exeeds per packet, "
650                                     "fragment count=%d, clen=%d, MAX per packet=%d\n",
651                                     ipr->count, clen, IP_REASS_MAX_PBUFS_PER_PKT));
652         /* free all fragments */
653         (void)ip_reass_free_complete_datagram(ipr, ipr_prev);
654         IPFRAG_STATS_INC(ip_frag.memerr);
655         /* drop this pbuf */
656         goto nullreturn;
657     }
658     if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) &&
659         ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) {
660       /* ipr->iphdr is not the header from the first fragment, but fraghdr is
661        * -> copy fraghdr into ipr->iphdr since we want to have the header
662        * of the first fragment (for ICMP time exceeded and later, for copying
663        * all options, if supported)*/
664       SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN);
665     }
666   }
667 
668   /* At this point, we have either created a new entry or pointing
669    * to an existing one */
670 
671   /* check for 'no more fragments', and update queue entry*/
672   is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0;
673   if (is_last) {
674     u16_t datagram_len = (u16_t)(offset + len);
675     if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) {
676       /* u16_t overflow, cannot handle this */
677       goto nullreturn_ipr;
678     }
679   }
680   /* find the right place to insert this pbuf */
681   /* @todo: trim pbufs if fragments are overlapping */
682   valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last);
683   if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) {
684     goto nullreturn_ipr;
685   }
686   /* if we come here, the pbuf has been enqueued */
687 
688   /* Track the current number of pbufs current 'in-flight', in order to limit
689      the number of fragments that may be enqueued at any one time
690      (overflow checked by testing against IP_REASS_MAX_PBUFS) */
691   ipr->count += clen;
692   ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen);
693   if (is_last) {
694     u16_t datagram_len = (u16_t)(offset + len);
695     ipr->datagram_len = datagram_len;
696     ipr->flags |= IP_REASS_FLAG_LASTFRAG;
697     LWIP_DEBUGF(IP_REASS_DEBUG,
698                 ("ip4_reass: last fragment seen, total len %"S16_F"\n",
699                  ipr->datagram_len));
700   }
701 
702   if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) {
703     struct ip_reassdata *ipr_prev_tmp;
704     /* the totally last fragment (flag more fragments = 0) was received at least
705      * once AND all fragments are received */
706     u16_t datagram_len = (u16_t)(ipr->datagram_len + IP_HLEN);
707 
708     /* save the second pbuf before copying the header over the pointer */
709     r = ((struct ip_reass_helper *)ipr->p->payload)->next_pbuf;
710 
711     /* copy the original ip header back to the first pbuf */
712     fraghdr = (struct ip_hdr *)(ipr->p->payload);
713     SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN);
714     IPH_LEN_SET(fraghdr, lwip_htons(datagram_len));
715     IPH_OFFSET_SET(fraghdr, 0);
716     IPH_CHKSUM_SET(fraghdr, 0);
717     /* @todo: do we need to set/calculate the correct checksum? */
718 #if CHECKSUM_GEN_IP
719     IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) {
720       IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN));
721     }
722 #endif /* CHECKSUM_GEN_IP */
723 
724     p = ipr->p;
725 
726     /* chain together the pbufs contained within the reass_data list. */
727     while (r != NULL) {
728       iprh = (struct ip_reass_helper *)r->payload;
729       if (iprh == NULL) {
730         goto nullreturn_ipr;
731       }
732       /* hide the ip header for every succeeding fragment */
733       pbuf_remove_header(r, IP_HLEN);
734       pbuf_cat(p, r);
735       r = iprh->next_pbuf;
736     }
737 
738     /* find the previous entry in the linked list */
739     if (ipr == reassdatagrams) {
740       ipr_prev_tmp = NULL;
741     } else {
742       for (ipr_prev_tmp = reassdatagrams; ipr_prev_tmp != NULL; ipr_prev_tmp = ipr_prev_tmp->next) {
743         if (ipr_prev_tmp->next == ipr) {
744           break;
745         }
746       }
747     }
748 
749     /* release the sources allocate for the fragment queue entry */
750     ip_reass_dequeue_datagram(ipr, ipr_prev_tmp);
751 
752     /* and adjust the number of pbufs currently queued for reassembly. */
753     clen = pbuf_clen(p);
754     LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= clen);
755     ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - clen);
756 
757     MIB2_STATS_INC(mib2.ipreasmoks);
758 
759     /* Return the pbuf chain */
760     return p;
761   }
762   /* the datagram is not (yet?) reassembled completely */
763   LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount));
764   return NULL;
765 
766 nullreturn_ipr:
767   LWIP_ASSERT("ipr != NULL", ipr != NULL);
768   if (ipr->p == NULL) {
769     /* dropped pbuf after creating a new datagram entry: remove the entry, too */
770     LWIP_ASSERT("not firstalthough just enqueued", ipr == reassdatagrams);
771     ip_reass_dequeue_datagram(ipr, NULL);
772   }
773 
774 nullreturn:
775   LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: nullreturn\n"));
776   IPFRAG_STATS_INC(ip_frag.drop);
777   pbuf_free(p);
778   return NULL;
779 }
780 #endif /* IP_REASSEMBLY */
781 
782 #if IP_FRAG
783 #if !LWIP_NETIF_TX_SINGLE_PBUF
784 /** Allocate a new struct pbuf_custom_ref */
785 static struct pbuf_custom_ref *
ip_frag_alloc_pbuf_custom_ref(void)786 ip_frag_alloc_pbuf_custom_ref(void)
787 {
788   return (struct pbuf_custom_ref *)memp_malloc(MEMP_FRAG_PBUF);
789 }
790 
/** Free a struct pbuf_custom_ref, returning it to the MEMP_FRAG_PBUF pool.
 * @param p the reference wrapper to free (must not be NULL) */
static void
ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref *p)
{
  LWIP_ASSERT("p != NULL", p != NULL);
  memp_free(MEMP_FRAG_PBUF, p);
}
798 
799 /** Free-callback function to free a 'struct pbuf_custom_ref', called by
800  * pbuf_free. */
801 static void
ipfrag_free_pbuf_custom(struct pbuf * p)802 ipfrag_free_pbuf_custom(struct pbuf *p)
803 {
804   struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref *)p;
805   LWIP_ASSERT("pcr != NULL", pcr != NULL);
806   LWIP_ASSERT("pcr == p", (void *)pcr == (void *)p);
807   if (pcr->original != NULL) {
808     pbuf_free(pcr->original);
809   }
810   ip_frag_free_pbuf_custom_ref(pcr);
811 }
812 #endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
813 
814 /**
815  * Fragment an IP datagram if too large for the netif.
816  *
817  * Chop the datagram in MTU sized chunks and send them in order
818  * by pointing PBUF_REFs into p.
819  *
820  * @param p ip packet to send
821  * @param netif the netif on which to send
822  * @param dest destination ip address to which to send
823  *
824  * @return ERR_OK if sent successfully, err_t otherwise
825  */
826 err_t
ip4_frag(struct pbuf * p,struct netif * netif,const ip4_addr_t * dest)827 ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest)
828 {
829   struct pbuf *rambuf;
830 #if !LWIP_NETIF_TX_SINGLE_PBUF
831   struct pbuf *newpbuf;
832   u16_t newpbuflen = 0;
833   u16_t left_to_copy;
834 #endif
835   struct ip_hdr *original_iphdr;
836   struct ip_hdr *iphdr;
837   const u16_t nfb = (u16_t)((netif->mtu - IP_HLEN) / 8);
838   u16_t left, fragsize;
839   u16_t ofo;
840   int last;
841   u16_t poff = IP_HLEN;
842   u16_t tmp;
843   int mf_set;
844 
845   original_iphdr = (struct ip_hdr *)p->payload;
846   iphdr = original_iphdr;
847   if (IPH_HL_BYTES(iphdr) != IP_HLEN) {
848     /* ip4_frag() does not support IP options */
849     return ERR_VAL;
850   }
851   LWIP_ERROR("ip4_frag(): pbuf too short", p->len >= IP_HLEN, return ERR_VAL);
852 
853   /* Save original offset */
854   tmp = lwip_ntohs(IPH_OFFSET(iphdr));
855   ofo = tmp & IP_OFFMASK;
856   /* already fragmented? if so, the last fragment we create must have MF, too */
857   mf_set = tmp & IP_MF;
858 
859   left = (u16_t)(p->tot_len - IP_HLEN);
860 
861   while (left) {
862     /* Fill this fragment */
863     fragsize = LWIP_MIN(left, (u16_t)(nfb * 8));
864 
865 #if LWIP_NETIF_TX_SINGLE_PBUF
866     rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM);
867     if (rambuf == NULL) {
868       goto memerr;
869     }
870     LWIP_ASSERT("this needs a pbuf in one piece!",
871                 (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
872     poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff);
873     /* make room for the IP header */
874     if (pbuf_add_header(rambuf, IP_HLEN)) {
875       pbuf_free(rambuf);
876       goto memerr;
877     }
878     /* fill in the IP header */
879     SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
880     iphdr = (struct ip_hdr *)rambuf->payload;
881 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
882     /* When not using a static buffer, create a chain of pbufs.
883      * The first will be a PBUF_RAM holding the link and IP header.
884      * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
885      * but limited to the size of an mtu.
886      */
887     rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM);
888     if (rambuf == NULL) {
889       goto memerr;
890     }
891     LWIP_ASSERT("this needs a pbuf in one piece!",
892                 (rambuf->len >= (IP_HLEN)));
893     SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
894     iphdr = (struct ip_hdr *)rambuf->payload;
895 
896     left_to_copy = fragsize;
897     while (left_to_copy) {
898       struct pbuf_custom_ref *pcr;
899       u16_t plen = (u16_t)(p->len - poff);
900       LWIP_ASSERT("p->len >= poff", p->len >= poff);
901       newpbuflen = LWIP_MIN(left_to_copy, plen);
902       /* Is this pbuf already empty? */
903       if (!newpbuflen) {
904         poff = 0;
905         p = p->next;
906         continue;
907       }
908       pcr = ip_frag_alloc_pbuf_custom_ref();
909       if (pcr == NULL) {
910         pbuf_free(rambuf);
911         goto memerr;
912       }
913       /* Mirror this pbuf, although we might not need all of it. */
914       newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc,
915                                     (u8_t *)p->payload + poff, newpbuflen);
916       if (newpbuf == NULL) {
917         ip_frag_free_pbuf_custom_ref(pcr);
918         pbuf_free(rambuf);
919         goto memerr;
920       }
921       pbuf_ref(p);
922       pcr->original = p;
923       pcr->pc.custom_free_function = ipfrag_free_pbuf_custom;
924 
925       /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
926        * so that it is removed when pbuf_dechain is later called on rambuf.
927        */
928       pbuf_cat(rambuf, newpbuf);
929       left_to_copy = (u16_t)(left_to_copy - newpbuflen);
930       if (left_to_copy) {
931         poff = 0;
932         p = p->next;
933       }
934     }
935     poff = (u16_t)(poff + newpbuflen);
936 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
937 
938     /* Correct header */
939     last = (left <= netif->mtu - IP_HLEN);
940 
941     /* Set new offset and MF flag */
942     tmp = (IP_OFFMASK & (ofo));
943     if (!last || mf_set) {
944       /* the last fragment has MF set if the input frame had it */
945       tmp = tmp | IP_MF;
946     }
947     IPH_OFFSET_SET(iphdr, lwip_htons(tmp));
948     IPH_LEN_SET(iphdr, lwip_htons((u16_t)(fragsize + IP_HLEN)));
949     IPH_CHKSUM_SET(iphdr, 0);
950 #if CHECKSUM_GEN_IP
951     IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) {
952       IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));
953     }
954 #endif /* CHECKSUM_GEN_IP */
955 
956     /* No need for separate header pbuf - we allowed room for it in rambuf
957      * when allocated.
958      */
959 #if LWIP_SO_PRIORITY
960     rambuf->priority = p->priority;
961 #endif /* LWIP_SO_PRIORITY */
962 
963     netif->output(netif, rambuf, dest);
964     IPFRAG_STATS_INC(ip_frag.xmit);
965 
966     /* Unfortunately we can't reuse rambuf - the hardware may still be
967      * using the buffer. Instead we free it (and the ensuing chain) and
968      * recreate it next time round the loop. If we're lucky the hardware
969      * will have already sent the packet, the free will really free, and
970      * there will be zero memory penalty.
971      */
972 
973     pbuf_free(rambuf);
974     left = (u16_t)(left - fragsize);
975     ofo = (u16_t)(ofo + nfb);
976   }
977   MIB2_STATS_INC(mib2.ipfragoks);
978   return ERR_OK;
979 memerr:
980   MIB2_STATS_INC(mib2.ipfragfails);
981   return ERR_MEM;
982 }
983 #endif /* IP_FRAG */
984 
985 #endif /* LWIP_IPV4 */
986