1 /*
2  * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  * Description: implementation for NAT64
15  * Author: NA
16  * Create: 2019
17  */
18 #include "lwip/opt.h"
19 
20 #if LWIP_NAT64
21 #include "lwip/pbuf.h"
22 #include "lwip/netif.h"
23 #include "lwip/ip.h"
24 #include "lwip/icmp.h"
25 #include "lwip/dhcp.h"
26 #include "lwip/nd6.h"
27 #include "lwip/nat64.h"
28 #include "lwip/nat64_addr.h"
29 #include "lwip/nat64_dns64.h"
30 #include "lwip/nat64_v4_dhcpc.h"
31 #include "lwip/stats.h"
32 #include "lwip/lwip_rpl.h"
33 #include "lwip/netifapi.h"
34 #include "lwip/ip6in4.h"
35 #include "rpl_common.h"
36 #include "rpl_event_api.h"
37 #include "lwip/udp.h"
38 #include "lwip/tcp.h"
39 #include "lwip/inet_chksum.h"
40 #include "lwip/ip6_frag.h"
41 #include "lwip/ip4_frag.h"
42 #if LWIP_NA_PROXY_UNSOLICITED
43 #include "lwip/prot/nd6.h"
44 #endif /* LWIP_NA_PROXY_UNSOLICITED */
45 #if LWIP_LOWPOWER
46 #include "lwip/lowpower.h"
47 #endif
48 #if RPL_CONF_SWITCH_MBR_BY_AUTOLINK
49 #include "mesh_route_api.h"
50 #endif
51 #define NAT64_NETIF_NAME "wlan"
52 #define NAT64_NETIF_NAME_LEN_MIN 4
53 
54 #define NAT64_WAIT_DHCP_RELEASE_PERIOD 2
55 
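/*
 * Helper used below: report "no route to destination" back to the IPv6 sender,
 * but never in response to an ICMPv6 packet (avoids ICMP-on-ICMP error loops).
 * Compiles to a no-op when LWIP_ICMP6 is disabled.
 */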
56 #if LWIP_ICMP6
57 #define NAT64_ICMP6_NO_ROUTE(iphdr, p)   do {    \
58   if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) {    \
59     icmp6_dest_unreach((p), ICMP6_DUR_NO_ROUTE); \
60   } \
61 } while (0)
62 #else
63 #define NAT64_ICMP6_NO_ROUTE(iphdr, p)
64 #endif /* LWIP_ICMP6 */
65 
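/*
 * Scratch data carried through the IPv6-to-IPv4 translation path: the resolved
 * IPv4 next hop, the IPv4 header being built, the header lengths, the payload
 * length and the upper-layer protocol (next header) value.
 */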
66 typedef struct {
67   ip4_addr_t nhop;
68   struct ip_hdr ip4hdr;
69   u16_t ip4_hdr_len;
70   u16_t ip6_hdr_len;
71   u16_t ip_pbuf_data_len;
72   u8_t nexth;
73 } nat64_ip6_data_t;
74 
75 #if RPL_CONF_SWITCH_MBR_BY_AUTOLINK
76 typedef struct {
77   linklayer_addr_t mac; /* the mg node mac address */
78   u32_t local_ip; /* the ip address of the mbr that sent the msg */
79   u8_t type; /* the event type */
80 } lwip_autolink_event_t;
81 
82 enum lwip_mbr_msg_type_e {
83   MSG_NODE_CHANGE_MBR = 0,
84   MSG_NODE_CHANGE_MBR_ACK,
85   MSG_MAX
86 };
87 #endif
88 
89 /*
90  * the memory MAY be allocated dynamically in the init function,
91  * when we want to support the stateful mode dynamically.
92  */
93 static nat64_entry_t *g_nat64_table = NULL;
94 static struct netif *g_nat64_netif = NULL;
95 
96 static void nat64_dhcp_proxy_stop(nat64_entry_t *entry);
97 static nat64_entry_t *nat64_entry_new(const linklayer_addr_t *lladdr, u8_t dao_sn, u8_t mnid, u32_t lifetime,
98                                       u8_t nat64_sync, u32_t conn_time);
99 
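/*
 * Add an entry received from another MBR (entry synchronization): if an entry
 * with the same MAC already exists, stop any local DHCP proxying for it and
 * update it in place; otherwise allocate a new slot marked as synchronized.
 */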
100 err_t
101 nat64_entry_add_new(nat64_entry_t *entry)
102 {
103   int i;
104   err_t ret;
105   nat64_entry_t *nate_new = NULL;
106   if (g_nat64_table == NULL) {
107     return ERR_VAL;
108   }
109 
110   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
111     if (g_nat64_table[i].state == NAT64_STATE_INIT) {
112       continue;
113     }
114     if (lwip_lladdr_cmp(&g_nat64_table[i].mac, &entry->mac, sizeof(linklayer_addr_t))) {
115       if (!ip4_addr_isany_val(g_nat64_table[i].ip)) {
116         return ERR_OK;
117       }
118       /* when the MG changes to a different MBR, we can't add a nat entry from the other MBR if it has not got an ipv4 addr yet */
119       if (g_nat64_table[i].nat64_sync == lwIP_FALSE) {
120         ret = nat64_dhcp_stop(g_nat64_netif, &g_nat64_table[i].mac, lwIP_FALSE);
121         if (ret != ERR_OK) {
122           LWIP_DEBUGF(NAT64_DEBUG, ("stop nat64 dhcp proxy fail, mac:\n"));
123         }
124       }
125       (void)nat64_entry_update(entry, lwIP_FALSE);
126       return ERR_OK;
127     }
128   }
129 
130   nate_new = nat64_entry_new(&entry->mac, entry->dao_sn, entry->mnid, entry->lifetime, lwIP_TRUE, entry->conn_time);
131   if (nate_new != NULL) {
132     ip4_addr_copy(nate_new->ip, entry->ip);
133     nate_new->orig_mnid = entry->orig_mnid;
134     nate_new->state = entry->state;
135   } else {
136     return ERR_VAL;
137   }
138   return ERR_OK;
139 }
140 
141 
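/* Map a MAC address to its 1-based index in the NAT64 table. */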
142 err_t
143 nat64_entry_mac_to_idx(const u8_t *hwaddr, u8_t hwaddr_len, dhcp_num_t *mac_idx)
144 {
145   u16_t i;
146   nat64_entry_t *entry = NULL;
147   if (g_nat64_table == NULL) {
148     return ERR_VAL;
149   }
150   if ((hwaddr == NULL) || (mac_idx == NULL)) {
151     return ERR_ARG;
152   }
153   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
154     entry = &g_nat64_table[i];
155     if (entry->state == NAT64_STATE_INIT) {
156       continue;
157     }
158     if ((entry->mac.addrlen == hwaddr_len) &&
159         (memcmp(entry->mac.addr, hwaddr, entry->mac.addrlen) == 0)) {
160       *mac_idx = i + 1;
161       return ERR_OK;
162     }
163   }
164 
165   return ERR_VAL;
166 }
167 
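/*
 * Iterator over in-use entries: *state holds the next index to examine and is
 * advanced past the returned entry, so callers can loop until NULL is returned.
 */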
168 nat64_entry_t *
169 nat64_get_next_inuse_entry(int *state)
170 {
171   nat64_entry_t *nate = NULL;
172   uint16_t i;
173   if ((g_nat64_table == NULL) || (state == NULL)) {
174     return NULL;
175   }
176 
177   if (*state < 0) {
178     *state = 0;
179   }
180   i = (uint16_t)*state;
181   for (; i < LWIP_NAT64_ENTRY_SIZE; i++) {
182     nate = &g_nat64_table[i];
183     if (nate->state != NAT64_STATE_INIT) {
184       (*state) = i + 1;
185       return nate;
186     }
187   }
188   return NULL;
189 }
190 
191 u8_t
192 nat64_entry_traverse(rpl_nate_cb cb, void *arg)
193 {
194   int state = 0;
195   nat64_entry_t *nate = NULL;
196   if (cb == NULL) {
197     return ERR_OK;
198   }
199 
200   while ((nate = nat64_get_next_inuse_entry(&state)) != NULL) {
201     if (cb(nate, arg) != ERR_OK) {
202       continue;
203     }
204   }
205 
206   return ERR_OK;
207 }
208 
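/* Reverse of nat64_entry_mac_to_idx(): copy out the MAC stored at the given 1-based index. */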
209 err_t
210 nat64_entry_idx_to_mac(dhcp_num_t mac_idx, u8_t *hwaddr, u8_t *hwaddr_len)
211 {
212   int ret;
213   u16_t i = mac_idx;
214   nat64_entry_t *entry = NULL;
215   /* hwaddr_len should not be less than half of the max len */
216   if ((i < 1) || (i > LWIP_NAT64_ENTRY_SIZE) || (g_nat64_table == NULL) ||
217       (hwaddr == NULL) || (hwaddr_len == NULL) || (*hwaddr_len < (NETIF_MAX_HWADDR_LEN / 2))) {
218     return ERR_ARG;
219   }
220 
221   entry = &g_nat64_table[i - 1];
222   if (entry->state == NAT64_STATE_INIT) {
223     return ERR_VAL;
224   }
225 
226   ret = memcpy_s(hwaddr, *hwaddr_len, entry->mac.addr, entry->mac.addrlen);
227   if (ret != EOK) {
228     return ERR_VAL;
229   }
230 
231   *hwaddr_len = entry->mac.addrlen;
232   return ERR_OK;
233 }
234 
235 err_t
236 nat64_entry_idx_to_ip6addr(dhcp_num_t mac_idx, ip6_addr_t *ip6addr)
237 {
238   u16_t i = mac_idx;
239   nat64_entry_t *entry = NULL;
240   if ((i < 1) || (i > LWIP_NAT64_ENTRY_SIZE) || (ip6addr == NULL) || (g_nat64_table == NULL)) {
241     return ERR_ARG;
242   }
243 
244   entry = &g_nat64_table[i - 1];
245   if (entry->state == NAT64_STATE_INIT) {
246     return ERR_VAL;
247   }
248 
249   if (nat64_entry_to6(entry, ip6addr) != 0) {
250     return ERR_VAL;
251   }
252   return ERR_OK;
253 }
254 
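/*
 * Stateful lookup by IPv4 address. With LWIP_NAT64_MIN_SUBSTITUTE the address
 * is not stored in the entry; it is derived from the table index through the
 * DHCP substitute module before comparing.
 */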
255 static nat64_entry_t *
256 nat64_entry_lookup_by_ip4addr(const ip4_addr_t *ip4addr)
257 {
258   s16_t i;
259 #if LWIP_NAT64_MIN_SUBSTITUTE
260   ip4_addr_t ip;
261 #endif
262   nat64_entry_t *entry = NULL;
263   if ((g_nat64_table == NULL) || (ip4addr == NULL)) {
264     return NULL;
265   }
266 
267 #if LWIP_NAT64_MIN_SUBSTITUTE
268   (void)memset_s(&ip, sizeof(ip4_addr_t), 0, sizeof(ip4_addr_t));
269 #endif
270   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
271     entry = &g_nat64_table[i];
272     if ((entry->state == NAT64_STATE_ESTABLISH) &&
273 #if !LWIP_NAT64_MIN_SUBSTITUTE
274         ip4_addr_cmp(&entry->ip, ip4addr)
275 #elif LWIP_DHCP_SUBSTITUTE
276         (dhcp_substitute_idx_to_ip(g_nat64_netif, i + 1, &ip) == ERR_OK) &&
277         ip4_addr_cmp(&ip, ip4addr)
278 #else
279         (lwIP_FALSE)
280 #endif
281        ) {
282       return entry;
283     }
284   }
285   return NULL;
286 }
287 
288 nat64_entry_t *
289 nat64_entry_lookup_by_mac(const linklayer_addr_t *mac)
290 {
291   s16_t i;
292   nat64_entry_t *entry = NULL;
293   if ((mac == NULL) || (g_nat64_table == NULL)) {
294     return NULL;
295   }
296 
297   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
298     entry = &g_nat64_table[i];
299     if (memcmp(&entry->mac, mac, sizeof(linklayer_addr_t)) == 0) {
300       return entry;
301     }
302   }
303   return NULL;
304 }
305 
306 err_t
307 nat64_entry_remove(nat64_entry_t *entry, u8_t evt_flag)
308 {
309   if ((entry == NULL) || (entry->state == NAT64_STATE_INIT)) {
310     return ERR_ARG;
311   }
312 
313 #if LWIP_MMBR && LWIP_NAT64_CHANGE_MSG
314   if ((evt_flag == lwIP_TRUE) && (entry->nat64_sync == lwIP_FALSE) && (!ip4_addr_isany_val(entry->ip))) {
315     nat64_send_change_entry_msg(entry, RPL_EVT_NODE_NAT64_DEL);
316   }
317 #else
318   (void)evt_flag;
319 #endif /* LWIP_MMBR && LWIP_NAT64_CHANGE_MSG */
320 
321   (void)memset_s(entry, sizeof(nat64_entry_t), 0, sizeof(nat64_entry_t));
322   entry->state = NAT64_STATE_INIT;
323   return ERR_OK;
324 }
325 
326 err_t
327 nat64_entry_update(nat64_entry_t *entry, u8_t evt_flag)
328 {
329   if ((g_nat64_table == NULL) || (entry == NULL)) {
330     return ERR_ARG;
331   }
332 
333   nat64_entry_t *nate_exist = nat64_entry_lookup_by_mac(&entry->mac);
334   if (nate_exist == NULL) {
335     LWIP_DEBUGF(NAT64_DEBUG, ("%s:old nate has disappeared\n", __FUNCTION__));
336     /*
337      * the nat64 entry may be deleted on timeout when periodic synchronization packets are lost,
338      * leading to a nat64 entry inconsistency issue.
339      */
340     return nat64_entry_add_new(entry);
341   }
342 #if !LWIP_NAT64_MIN_SUBSTITUTE
343   ip4_addr_copy(nate_exist->ip, entry->ip);
344 #endif
345   nate_exist->lifetime = entry->lifetime;
346   nate_exist->mnid = entry->mnid;
347   nate_exist->orig_mnid = entry->orig_mnid;
348   nate_exist->state = entry->state;
349   nate_exist->nat64_sync = evt_flag;
350   nate_exist->conn_time = entry->conn_time;
351   nate_exist->dao_sn = entry->dao_sn;
352   return ERR_OK;
353 }
354 
355 static void
356 nat64_table_init(void)
357 {
358   g_nat64_table = (nat64_entry_t *)mem_malloc(sizeof(nat64_entry_t) * LWIP_NAT64_ENTRY_SIZE);
359   if (g_nat64_table == NULL) {
360     return;
361   }
362   (void)memset_s(g_nat64_table, sizeof(nat64_entry_t) * LWIP_NAT64_ENTRY_SIZE,
363                  0, sizeof(nat64_entry_t) * LWIP_NAT64_ENTRY_SIZE);
364 }
365 
366 static void
367 nat64_table_deinit(void)
368 {
369   s16_t i;
370   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
371     if (g_nat64_table[i].state == NAT64_STATE_INIT) {
372       continue;
373     }
374 
375     if (g_nat64_table[i].nat64_sync == lwIP_FALSE) {
376       (void)nat64_dhcp_stop(g_nat64_netif, &(g_nat64_table[i].mac), lwIP_TRUE);
377     }
378     /* when the MBR exits, we just remove the nat64 entry */
379     (void)nat64_entry_remove(&g_nat64_table[i], lwIP_TRUE);
380   }
381   mem_free(g_nat64_table);
382   g_nat64_table = NULL;
383 }
384 
385 void
386 nat64_set_statful_enable(void)
387 {
388   if (g_nat64_table != NULL) {
389     return;
390   }
391   /* init the table */
392   nat64_table_init();
393   lwip_rpl_trigger_global_dao();
394   lwip_rpl_trigger_msta();
395 }
396 
397 void
398 nat64_set_statful_disable(void)
399 {
400   nat64_table_deinit();
401 }
402 
403 static nat64_entry_t *
404 nat64_entry_new(const linklayer_addr_t *lladdr, u8_t dao_sn, u8_t mnid, u32_t lifetime, u8_t nat64_sync,
405                 u32_t conn_time)
406 {
407   s16_t i;
408   if (g_nat64_table == NULL) {
409     return NULL;
410   }
411   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
412     if (g_nat64_table[i].state == NAT64_STATE_INIT) {
413       g_nat64_table[i].mac = *lladdr;
414       g_nat64_table[i].dao_sn = dao_sn;
415       g_nat64_table[i].mnid = mnid;
416       g_nat64_table[i].orig_mnid = mnid;
417       g_nat64_table[i].lifetime = lifetime;
418       g_nat64_table[i].conn_time = conn_time;
419       g_nat64_table[i].nat64_sync = nat64_sync;
420       g_nat64_table[i].state = NAT64_STATE_CREATED;
421       return &g_nat64_table[i];
422     }
423   }
424 
425   LWIP_DEBUGF(NAT64_DEBUG, ("no available entry\r\n"));
426   return NULL;
427 }
428 
429 static err_t
430 nat64_ip4_dest_addr_exist(const ip4_addr_t *ip4addr)
431 {
432   ip6_addr_t ip6addr;
433   nat64_entry_t *entry = NULL;
434 
435   if (ip4addr == NULL) {
436     LWIP_DEBUGF(NAT64_DEBUG, ("%s:ip4addr is NULL\n", __FUNCTION__));
437     return ERR_ARG;
438   }
439   (void)memset_s(&ip6addr, sizeof(ip6_addr_t), 0, sizeof(ip6_addr_t));
440 
441   /* stateful check */
442   entry = nat64_entry_lookup_by_ip4addr(ip4addr);
443   if (entry != NULL) {
444     if (entry->nat64_sync == lwIP_TRUE) {
445       return ERR_VAL;
446     }
447     return ERR_OK;
448   }
449 #ifdef LWIP_ARP_PROXY_BY_ROUTE_ENTRY
450 #if LWIP_RIPPLE
451   if (nat64_stateless_addr_4to6(ip4addr, &ip6addr) != 0) {
452     return ERR_ARG;
453   }
454   /* stateless check */
455   if (lwip_rpl_route_entry_lookup(&ip6addr) != NULL) {
456     return ERR_OK;
457   }
458 #endif
459 #endif
460   return ERR_VAL;
461 }
462 
463 u8_t
464 nat64_arp_ip4_is_proxy(ip4_addr_t sipaddr, ip4_addr_t dipaddr)
465 {
466 #if LWIP_RIPPLE
467   if ((lwip_rpl_is_br()) && !(ip4_addr_isany_val(sipaddr)) && (nat64_ip4_dest_addr_exist(&dipaddr) == ERR_OK)) {
468     return lwIP_TRUE;
469   }
470   if (!(lwip_rpl_is_br()) && lwip_rpl_is_router() && (sipaddr.addr != dipaddr.addr) &&
471       !(ip4_addr_isany_val(sipaddr)) && (nat64_ip4_dest_addr_exist(&dipaddr) == ERR_VAL)) {
472     return lwIP_TRUE;
473   }
474 #endif
475   return lwIP_FALSE;
476 }
477 
478 static int
479 nat64_ip4_dest_addr_check(const struct pbuf *p)
480 {
481   u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr()));
482 
483   if (ip4_addr_islinklocal(ip4_current_dest_addr())) {
484     return 0;
485   }
486 
487   if (p->flags & PBUF_FLAG_LLBCAST) {
488     /* don't route link-layer broadcasts */
489     return 0;
490   }
491 
492   if ((p->flags & PBUF_FLAG_LLMCAST) || IP_MULTICAST(addr)) {
493     /* don't route link-layer multicasts (use LWIP_HOOK_IP4_CANFORWARD instead) */
494     return 0;
495   }
496 
497   if (IP_EXPERIMENTAL(addr)) {
498     return 0;
499   }
500 
501   if (IP_CLASSA(addr)) {
502     u32_t net = addr & IP_CLASSA_NET;
503     if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT))) {
504       /* don't route loopback packets */
505       return 0;
506     }
507   }
508   return 1;
509 }
510 
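/*
 * Plain IPv4 forwarding fallback used when the destination is itself an IPv4
 * STA or the default route applies: decrement the TTL, patch the header
 * checksum incrementally and transmit on the chosen interface.
 */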
511 static u8_t
512 nat64_ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp)
513 {
514   /* decrement TTL */
515   if (IPH_TTL(iphdr)) {
516     IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1);
517   }
518 
519   /* send ICMP if TTL == 0 */
520   if (IPH_TTL(iphdr) == 0) {
521     MIB2_STATS_INC(mib2.ipinhdrerrors);
522 #if LWIP_ICMP
523     /* Don't send ICMP messages in response to ICMP error messages */
524     if (IPH_PROTO(iphdr) == IP_PROTO_ICMP) {
525       (void)pbuf_header(p, -IP_HLEN);
526       if ((((u8_t *)p->payload)[0] == ICMP_ER) || (((u8_t *)p->payload)[0] == ICMP_ECHO) ||
527           (((u8_t *)p->payload)[0] == ICMP_TS) || (((u8_t *)p->payload)[0] == ICMP_TSR)) {
528         (void)pbuf_header(p, IP_HLEN);
529         icmp_time_exceeded(p, ICMP_TE_TTL);
530       } else {
531         (void)pbuf_header(p, IP_HLEN);
532       }
533     } else {
534       icmp_time_exceeded(p, ICMP_TE_TTL);
535     }
536 #endif /* LWIP_ICMP */
537     return 1;
538   }
539 
540   /* Incrementally update the IP checksum. */
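  /*
   * Decrementing the TTL lowers the checksummed 16-bit word by 0x0100, so the
   * one's-complement checksum must be raised by 0x0100; the extra +1 folds the
   * carry when the addition wraps (RFC 1624 style incremental update).
   */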
541   if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100)) {
542     IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1));
543   } else {
544     IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100)));
545   }
546 
547   LWIP_DEBUGF(NAT64_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
548                             ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()),
549                             ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr())));
550 
551   IP_STATS_INC(ip.fw);
552   MIB2_STATS_INC(mib2.ipforwdatagrams);
553   IP_STATS_INC(ip.xmit);
554 
555   /* transmit pbuf on chosen interface */
556   (void)inp->output(inp, p, ip4_current_dest_addr());
557   return 1;
558 }
559 
560 int
561 nat64_status_check(void)
562 {
563   /* nat64 may need to be started dynamically */
564   if (g_nat64_table == NULL) {
565     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_status_check: nat64 is not init\n"));
566     return NAT64_RET_FAIL;
567   }
568 #if LWIP_RIPPLE
569   /* the node must be mesh node */
570   if (lwip_rpl_is_router() == lwIP_FALSE) {
571     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_status_check: not mesh node\n"));
572     return NAT64_RET_FAIL;
573   }
574 #endif
575   return NAT64_RET_OK;
576 }
577 static int
578 nat64_ip4_packet_check(struct pbuf *p, const struct ip_hdr *iphdr, const struct netif *inp)
579 {
580   /* do not handle the broadcast packet */
581   if (ip4_addr_isbroadcast(ip4_current_dest_addr(), inp)) {
582     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip4_packet_check: broadcast address\n"));
583     return NAT64_RET_FAIL;
584   }
585   if (nat64_ip4_dest_addr_check(p) == 0) {
586     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip4_packet_check: nat64_ip4_dest_addr_check fail\n"));
587     return NAT64_RET_FAIL;
588   }
589   if ((IPH_TTL(iphdr) == 0) || (IPH_TTL(iphdr) == 1)) {
590 #if LWIP_ICMP
591     /* Don't send ICMP messages in response to ICMP error messages */
592     if (IPH_PROTO(iphdr) == IP_PROTO_ICMP) {
593       (void)pbuf_header(p, -IP_HLEN);
594       if ((((u8_t *)p->payload)[0] == ICMP_ER) || (((u8_t *)p->payload)[0] == ICMP_ECHO) ||
595           (((u8_t *)p->payload)[0] == ICMP_TS) || (((u8_t *)p->payload)[0] == ICMP_TSR)) {
596         (void)pbuf_header(p, IP_HLEN);
597         icmp_time_exceeded(p, ICMP_TE_TTL);
598       } else {
599         (void)pbuf_header(p, IP_HLEN);
600       }
601     } else {
602       icmp_time_exceeded(p, ICMP_TE_TTL);
603     }
604 #endif /* LWIP_ICMP */
605     IP_STATS_INC(ip.natdrop);
606     LWIP_DEBUGF(NAT64_DEBUG, ("ttl exceeded\n"));
607     return NAT64_RET_ERR;
608   }
609   return NAT64_RET_OK;
610 }
611 
612 static int
613 nat64_ip4_no_route(struct pbuf *p, struct ip_hdr *iphdr,
614                    const struct netif *inp, struct netif *outp)
615 {
616   if (lwip_rpl_is_rpl_netif(inp) == lwIP_TRUE) {
617     /* use the default router */
618     if (lwip_rpl_is_br() == lwIP_TRUE) {
619       (void)nat64_ip4_forward(p, iphdr, outp);
620       LWIP_DEBUGF(NAT64_DEBUG, ("%s:%d: ip6_route default forward.\n", __FUNCTION__, __LINE__));
621       return NAT64_RET_ERR;
622     }
623   } else {
624     /* the pkt should be drop */
625     IP_STATS_INC(ip.natdrop);
626     LWIP_DEBUGF(NAT64_DEBUG, ("%s:%d: ip6_route drop pkt.\n", __FUNCTION__, __LINE__));
627     return NAT64_RET_ERR;
628   }
629 
630   return NAT64_RET_OK;
631 }
632 
633 static int
634 nat64_ip4_translate_to_ip6src(ip6_addr_t *ip6src, const struct netif *inp)
635 {
636   int ret;
637   nat64_entry_t *entry = NULL;
638   if ((lwip_rpl_is_br() == lwIP_TRUE) &&
639       (lwip_rpl_is_rpl_netif(inp) == lwIP_FALSE)) {
640     entry = nat64_entry_lookup_by_ip4addr(ip4_current_src_addr());
641     if ((entry != NULL) && (entry->nat64_sync == lwIP_TRUE)) {
642       ret = nat64_entry_to6(entry, ip6src);
643     } else {
644       ret = nat64_stateless_addr_4to6(ip4_current_src_addr(), ip6src);
645     }
646   } else {
647     ret = nat64_stateless_addr_4to6(ip4_current_src_addr(), ip6src);
648   }
649   return ret;
650 }
651 
652 static int
653 nat64_ip4_translate_to_ip6dst(ip6_addr_t *ip6dest, const struct netif *inp)
654 {
655   int ret;
656   nat64_entry_t *entry = NULL;
657   if ((lwip_rpl_is_br() == lwIP_TRUE) &&
658       (lwip_rpl_is_rpl_netif(inp) == lwIP_FALSE)) {
659     entry = nat64_entry_lookup_by_ip4addr(ip4_current_dest_addr());
660     if (entry != NULL) {
661       ret = nat64_entry_to6(entry, ip6dest);
662     } else {
663       /* just assume stateless, maybe drop the pkt later */
664       ret = nat64_stateless_addr_4to6(ip4_current_dest_addr(), ip6dest);
665     }
666   } else {
667     /* stateless ip address convert */
668     ret = nat64_stateless_addr_4to6(ip4_current_dest_addr(), ip6dest);
669   }
670   return ret;
671 }
672 
673 
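/*
 * Translate the source and destination to IPv6 and pick the outgoing netif.
 * Packets whose destination resolves to another IPv4 STA (or to the default
 * route on the border router) are forwarded as plain IPv4 instead of being
 * translated.
 */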
674 static int
675 nat64_ip4_route(struct pbuf *p, struct ip_hdr *iphdr, const struct netif *inp,
676                 ip6_addr_t *ip6src, ip6_addr_t *ip6dest, struct netif **outp)
677 {
678   void *route = NULL;
679   int ret;
680   /* handle two ipv4 STAs connected to the same MG */
681   /* here we convert the addresses */
682   ret = nat64_ip4_translate_to_ip6src(ip6src, inp);
683   if (ret != 0) {
684     IP_STATS_INC(ip.natdrop);
685     return NAT64_RET_ERR;
686   }
687   ret = nat64_ip4_translate_to_ip6dst(ip6dest, inp);
688   if (ret != 0) {
689     IP_STATS_INC(ip.natdrop);
690     return NAT64_RET_ERR;
691   }
692   /* now we can look up the route with the converted addresses */
693   *outp = ip6_route(ip6src, ip6dest);
694   if (*outp == NULL) {
695     IP_STATS_INC(ip.natdrop);
696 #if LWIP_ICMP
697     icmp_dest_unreach(p, ICMP_DUR_SR);
698 #endif
699     LWIP_DEBUGF(NAT64_DEBUG, ("%s:%d: ip6_route is null\n", __FUNCTION__, __LINE__));
700     return NAT64_RET_ERR;
701   }
702 
703   route = lwip_rpl_route_entry_lookup(ip6dest);
704   if (route != NULL) {
705     if (lwip_rpl_route_is_ipv4sta(route) == 1) {
706       /* ipv4 forward the packet */
707       LWIP_DEBUGF(NAT64_DEBUG, ("%s:%d: ip6_route forward.\n", __FUNCTION__, __LINE__));
708       (void)nat64_ip4_forward(p, iphdr, *outp);
709       return NAT64_RET_ERR;
710     }
711   } else {
712     return nat64_ip4_no_route(p, iphdr, inp, *outp);
713   }
714   return NAT64_RET_OK;
715 }
716 
717 static int
718 nat64_ip4_reass_check(struct pbuf **p, struct ip_hdr **iphdr, const struct netif *inp,
719                       const ip6_addr_t *nexthop, const struct netif *outp)
720 {
721   u16_t ip4_hdr_len;
722   u16_t ip6_hdr_len;
723   u16_t ip_pbuf_data_len;
724   if ((IPH_OFFSET(*iphdr) & lwip_htons(IP_OFFMASK | IP_MF)) != 0) {
725 #if IP_REASSEMBLY /* packet fragment reassembly code present? */
726     /* reassemble the packet */
727     *p = ip4_reass(*p);
728     /* packet not fully reassembled yet? */
729     if (*p == NULL) {
730       LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip4_input:fragment is not fully reassembled yet\n"));
731       /* note: the pkt has been freed in ip4_reass */
732       return NAT64_RET_ASSEMBLE;
733     }
734     *iphdr = (struct ip_hdr *)(*p)->payload;
735 #else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */
736     (void)p;
737     LWIP_DEBUGF(NAT64_DEBUG, ("IP packet dropped since it was fragmented (0x%"X16_F
738                               ") (while IP_REASSEMBLY == 0).\n", lwip_ntohs(IPH_OFFSET(*iphdr))));
739     return NAT64_RET_FAIL;
740 #endif /* IP_REASSEMBLY */
741   }
742 
743   /*
744    * we MAY convert the ipv4 header options into ipv6 options; then ip_pbuf_data_len should include the options.
745    * for now we just drop the options.
746    */
747   ip6_hdr_len = IP6_HLEN;
748   ip4_hdr_len = IPH_HL(*iphdr) << 2; /* ipv4 header len is equal to HL * 4 */
749   ip_pbuf_data_len = (u16_t)(lwip_ntohs(IPH_LEN(*iphdr)) - ip4_hdr_len);
750 #if !LWIP_IPV6_FRAG
751   /*
752    * we can calculate the total len and compare it with the mtu.
753    * for now we do not support ip fragmentation, so just drop the packet.
754    */
755   if (netif_mtu6(outp) && ((ip6_hdr_len + ip_pbuf_data_len) > nd6_get_destination_mtu(nexthop, outp))) {
756     IP_STATS_INC(ip.natdrop);
757     return NAT64_RET_ERR;
758   }
759 #else
760   (void)ip6_hdr_len;
761   (void)ip4_hdr_len;
762   (void)ip_pbuf_data_len;
763   (void)nexthop;
764   (void)outp;
765 #endif
766   (void)inp;
767   return NAT64_RET_OK;
768 }
769 #if LWIP_DNS64
770 static u16_t
771 nat64_dns64_extra_size(struct pbuf *p, const struct ip_hdr *iphdr)
772 {
773   struct udp_hdr *udphdr = NULL;
774   u16_t ip4_hdr_len;
775   u16_t count = 0;
776 
777   if (IPH_PROTO(iphdr) != IP_PROTO_UDP) {
778     return 0;
779   }
780 
781   /* ipv4 header len is equal to HL * 4 */
782   ip4_hdr_len = IPH_HL(iphdr) << 2;
783   (void)pbuf_header(p, -((s16_t)(ip4_hdr_len)));
784   udphdr = (struct udp_hdr *)p->payload;
785   if (udphdr->src != lwip_ntohs(NAT64_DNS_PORT)) {
786     (void)pbuf_header(p, (s16_t)(ip4_hdr_len));
787     return 0;
788   }
789 
790   (void)pbuf_header(p, -((s16_t)(sizeof(struct udp_hdr))));
791 
792   if (nat64_dns64_extra_count(p, &count) != ERR_OK) {
793     (void)pbuf_header(p, (s16_t)(sizeof(struct udp_hdr)));
794     (void)pbuf_header(p, (s16_t)(ip4_hdr_len));
795     return 0;
796   }
797 
798   (void)pbuf_header(p, (s16_t)(sizeof(struct udp_hdr)));
799   (void)pbuf_header(p, (s16_t)(ip4_hdr_len));
800   return count * (sizeof(ip6_addr_t) - sizeof(ip4_addr_t) + DNS_MAX_NAME_LENGTH);
801 }
802 #endif
803 static int
804 nat64_ip4_send(struct pbuf *new_buf, const struct ip6_hdr *ip6hdr,
805                struct netif *outp, const ip6_addr_t *ip6dest)
806 {
807   int ret;
808   u8_t *ip6hdr_payload = NULL;
809   const ip6_addr_t *nexthop = ip6dest;
810 
811   (void)pbuf_header(new_buf, IP6_HLEN);
812 
813   ip6hdr_payload = (u8_t *)new_buf->payload;
814   /* copy the prepared ipv6 header (fields already in network byte order) */
815   ret = memcpy_s(ip6hdr_payload, sizeof(struct ip6_hdr), ip6hdr, sizeof(struct ip6_hdr));
816   if (ret != EOK) {
817     LWIP_DEBUGF(NAT64_DEBUG, ("%s:memcpy_s fail(%d)\n", __FUNCTION__, ret));
818     (void)pbuf_free(new_buf);
819     return NAT64_PKT_HANDLED;
820   }
821 #if LWIP_RIPPLE
822   if (new_buf->flags & PBUF_FLAG_HBH_SPACE) {
823     struct pbuf *new_p = NULL;
824     /*
825      * add Hop by Hop header for rpl. If space for HBH is not allocated then pbuf
826      * will be expanded.
827      */
828     if (lwip_get_pkt_route_status()) {
829       new_buf->pkt_up = lwIP_TRUE;
830     } else {
831       new_buf->pkt_up = lwIP_FALSE;
832     }
833     new_p = lwip_add_rpi_hdr(new_buf, IP6H_NEXTH(ip6hdr), lwip_hbh_len(new_buf), 0);
834     if (new_p == NULL) {
835       LWIP_ERROR("Could not add HBH header.\n", 0, ;);
836       (void)pbuf_free(new_buf);
837       return NAT64_PKT_HANDLED;
838     } else {
839       new_buf = new_p;
840     }
841   }
842 #endif
843 
844 #if LWIP_IPV6_FRAG
845   if (netif_mtu6(outp) && (new_buf->tot_len > nd6_get_destination_mtu(nexthop, outp))) {
846     (void)ip6_frag(new_buf, outp, nexthop);
847     (void)pbuf_free(new_buf);
848     return NAT64_PKT_HANDLED;
849   }
850 #endif
851 
852   outp->output_ip6(outp, new_buf, ip6dest);
853   IP6_STATS_INC(ip.natfw);
854   IP6_STATS_INC(ip6.xmit);
855   (void)pbuf_free(new_buf);
856   return NAT64_PKT_HANDLED;
857 }
858 
859 static int
860 nat64_ip4_translate_udp(struct pbuf *p, struct pbuf *new_buf, struct ip6_hdr *ip6hdr,
861                         const ip6_addr_t *ip6src, const ip6_addr_t *ip6dest)
862 {
863   struct udp_hdr *udphdr = (struct udp_hdr *)new_buf->payload;
864 #if LWIP_DNS64
865   if (udphdr->src == lwip_ntohs(NAT64_DNS_PORT)) {
866     (void)pbuf_header(p, -((s16_t)(sizeof(struct udp_hdr))));
867     (void)pbuf_header(new_buf, -((s16_t)(sizeof(struct udp_hdr))));
868     if (nat64_dns64_4to6(p, new_buf) != ERR_OK) {
869       IP_STATS_INC(ip.natdrop);
870 #if LWIP_ICMP
871       icmp_dest_unreach(p, ICMP_DUR_SR);
872 #endif
873       (void)pbuf_free(new_buf);
874       return NAT64_RET_ERR;
875     }
876     udphdr->len = lwip_htons(new_buf->tot_len);
877     (void)pbuf_header(p, (s16_t)(sizeof(struct udp_hdr)));
878     (void)pbuf_header(new_buf, (s16_t)(sizeof(struct udp_hdr)));
879     IP6H_PLEN_SET(ip6hdr, new_buf->tot_len);
880   }
881 #else
882   (void)p;
883   (void)ip6hdr;
884 #endif
885   udphdr->chksum = 0;
886   udphdr->chksum = ip6_chksum_pseudo(new_buf, IP_PROTO_UDP, new_buf->tot_len,
887                                      ip6src, ip6dest);
888   return NAT64_RET_OK;
889 }
890 
891 static void
892 nat64_ip4_translate_tcp(struct pbuf *new_buf, const ip6_addr_t *ip6src, const ip6_addr_t *ip6dest)
893 {
894   struct tcp_hdr *tcphdr = (struct tcp_hdr *)new_buf->payload;
895   tcphdr->chksum = 0;
896   tcphdr->chksum = ip6_chksum_pseudo(new_buf, IP_PROTO_TCP, new_buf->tot_len, ip6src, ip6dest);
897 }
898 
899 static int
900 nat64_ip4_translate_icmp(struct pbuf *new_buf, const ip6_addr_t *ip6src, const ip6_addr_t *ip6dest)
901 {
902   struct icmp_echo_hdr *icmp4hdr = (struct icmp_echo_hdr *)new_buf->payload;
903   struct icmpv6_hdr *icmp6hdr = (struct icmpv6_hdr *)new_buf->payload;
904   /* just handle the echo icmp */
905   if (ICMPH_TYPE(icmp4hdr) == ICMP_ECHO) {
906     icmp6hdr->type = ICMP6_TYPE_EREQ;
907   } else if (ICMPH_TYPE(icmp4hdr) == ICMP_ER) {
908     icmp6hdr->type = ICMP6_TYPE_EREP;
909   } else {
910     LWIP_DEBUGF(NAT64_DEBUG, ("icmp4 not handle %x\n", ICMPH_TYPE(icmp4hdr)));
911     (void)pbuf_free(new_buf);
912     IP_STATS_INC(ip.natdrop);
913     return NAT64_RET_ERR;
914   }
915 
916   icmp6hdr->chksum = 0;
917 #if CHECKSUM_GEN_ICMP6
918   IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
919     icmp6hdr->chksum = ip6_chksum_pseudo(new_buf, IP6_NEXTH_ICMP6, new_buf->tot_len,
920                                          ip6src, ip6dest);
921   }
922 #endif /* CHECKSUM_GEN_ICMP6 */
923   return NAT64_RET_OK;
924 }
925 
926 static int
927 nat64_ip4_translate_default(struct pbuf *p, struct pbuf *new_buf,
928                             const struct netif *inp, u16_t ip4_hdr_len)
929 {
930 #if LWIP_ICMP
931   /* send ICMP destination protocol unreachable unless it was a broadcast */
932   if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp) &&
933       !ip4_addr_ismulticast(ip4_current_dest_addr())) {
934     (void)pbuf_header_force(p, (s16_t)(ip4_hdr_len));
935     icmp_dest_unreach(p, ICMP_DUR_PROTO);
936   }
937 #endif /* LWIP_ICMP */
938   LWIP_DEBUGF(NAT64_DEBUG, ("pkt not handle\n"));
939   (void)pbuf_free(new_buf);
940   IP_STATS_INC(ip.natdrop);
941   return NAT64_PKT_HANDLED;
942 }
943 
944 static struct pbuf *
945 nat64_ip4_new_buf(struct pbuf *p, const struct ip_hdr *iphdr, struct ip6_hdr *ip6hdr, u16_t *ip4_hdr_len)
946 {
947   u16_t ip_pbuf_data_len;
948   u16_t dns64_extra_size;
949   struct pbuf *new_buf = NULL;
950   /* ipv4 header len is equal to HL * 4 */
951   *ip4_hdr_len = IPH_HL(iphdr) << 2;
952   ip_pbuf_data_len = (u16_t)(p->tot_len - *ip4_hdr_len);
953 #if LWIP_DNS64
954   dns64_extra_size = nat64_dns64_extra_size(p, iphdr);
955 #else
956   dns64_extra_size = 0;
957 #endif
958   ip_pbuf_data_len += dns64_extra_size;
959   /* here PBUF_LINK should be used */
960   new_buf = pbuf_alloc(PBUF_IP, ip_pbuf_data_len, PBUF_RAM);
961   if (new_buf == NULL) {
962     LWIP_DEBUGF(NAT64_DEBUG, ("pbuf alloc fail:no MEMORY\r\n"));
963     return NULL;
964   }
965 
966 #if defined(LWIP_NAT64_PRIORITY_KEEP) && LWIP_NAT64_PRIORITY_KEEP
967   IP6H_VTCFL_SET(ip6hdr, IP_PROTO_VERSION_6, IPH_TOS(iphdr), 0);
968 #else
969   IP6H_VTCFL_SET(ip6hdr, IP_PROTO_VERSION_6, 0, 0);
970 #endif
971 
972   IP6H_PLEN_SET(ip6hdr, ip_pbuf_data_len);
973   IP6H_HOPLIM_SET(ip6hdr, (IPH_TTL(iphdr) - 1));
974 
975   return new_buf;
976 }
977 
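/*
 * Build the IPv6 packet: allocate a new pbuf, copy the payload, rewrite the
 * transport checksums (the pseudo-header changes) and map ICMP echo types to
 * their ICMPv6 equivalents, then hand the result to nat64_ip4_send().
 */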
978 static int
979 nat64_ip4_translate(struct pbuf *p, const struct ip_hdr *iphdr, const struct netif *inp,
980                     const ip6_addr_t *ip6src, const ip6_addr_t *ip6dest, struct netif *outp)
981 {
982   int ret;
983   struct ip6_hdr ip6hdr;
984   u16_t ip4_hdr_len;
985   struct pbuf *new_buf = NULL;
986 
987   (void)memset_s(&ip6hdr, sizeof(ip6hdr), 0, sizeof(ip6hdr));
988 
989   new_buf = nat64_ip4_new_buf(p, iphdr, &ip6hdr, &ip4_hdr_len);
990   if (new_buf == NULL) {
991     return NAT64_PKT_NOT_HANDLED;
992   }
993 
994   ip6_addr_copy_to_packed(ip6hdr.dest, *ip6dest);
995   ip6_addr_copy_to_packed(ip6hdr.src, *ip6src);
996 
997   (void)pbuf_header(p, -((s16_t)(ip4_hdr_len)));
998   if (pbuf_copy(new_buf, p) != ERR_OK) {
999     (void)pbuf_free(new_buf);
1000     return NAT64_PKT_HANDLED;
1001   }
1002 
1003   switch (IPH_PROTO(iphdr)) {
1004 #if LWIP_UDP
1005     case IP_PROTO_UDP:
1006       IP6H_NEXTH_SET(&ip6hdr, IP6_NEXTH_UDP);
1007       ret = nat64_ip4_translate_udp(p, new_buf, &ip6hdr, ip6src, ip6dest);
1008       if (ret != NAT64_RET_OK) {
1009         return NAT64_PKT_HANDLED;
1010       }
1011       break;
1012 #endif /* LWIP_UDP */
1013 #if LWIP_TCP
1014     case IP_PROTO_TCP:
1015       IP6H_NEXTH_SET(&ip6hdr, IP6_NEXTH_TCP);
1016       nat64_ip4_translate_tcp(new_buf, ip6src, ip6dest);
1017       break;
1018 #endif /* LWIP_TCP */
1019 #if LWIP_ICMP
1020     case IP_PROTO_ICMP:
1021       IP6H_NEXTH_SET(&ip6hdr, IP6_NEXTH_ICMP6);
1022       ret = nat64_ip4_translate_icmp(new_buf, ip6src, ip6dest);
1023       if (ret != NAT64_RET_OK) {
1024         return NAT64_PKT_HANDLED;
1025       }
1026       break;
1027 #endif /* LWIP_ICMP */
1028     default:
1029       return nat64_ip4_translate_default(p, new_buf, inp, ip4_hdr_len);
1030   }
1031 
1032   return nat64_ip4_send(new_buf, &ip6hdr, outp, ip6dest);
1033 }
1034 
1035 /* we should handle both the stateless and the stateful cases */
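/*
 * Entry point for IPv4 packets: status/packet checks, route decision,
 * reassembly check, then translation to IPv6. Returns NAT64_PKT_NOT_HANDLED
 * to let the normal IPv4 input path continue.
 */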
1036 int
1037 nat64_ip4_input(struct pbuf *p, struct ip_hdr *iphdr, const struct netif *inp)
1038 {
1039   struct netif *outp = NULL;
1040   ip6_addr_t ip6src;
1041   ip6_addr_t ip6dest;
1042   int ret;
1043 
1044   (void)memset_s(&ip6src, sizeof(ip6src), 0, sizeof(ip6src));
1045   (void)memset_s(&ip6dest, sizeof(ip6dest), 0, sizeof(ip6dest));
1046 
1047   if (nat64_status_check() != NAT64_RET_OK) {
1048     return NAT64_PKT_NOT_HANDLED;
1049   }
1050 
1051   ret = nat64_ip4_packet_check(p, iphdr, inp);
1052   if (ret == NAT64_RET_FAIL) {
1053     return NAT64_PKT_NOT_HANDLED;
1054   } else if (ret == NAT64_RET_ERR) {
1055     (void)pbuf_free(p);
1056     return NAT64_PKT_HANDLED;
1057   }
1058 
1059   ret = nat64_ip4_route(p, iphdr, inp, &ip6src, &ip6dest, &outp);
1060   if (ret == NAT64_RET_FAIL) {
1061     return NAT64_PKT_NOT_HANDLED;
1062   } else if (ret == NAT64_RET_ERR) {
1063     (void)pbuf_free(p);
1064     return NAT64_PKT_HANDLED;
1065   }
1066 
1067   if (outp == NULL) {
1068     (void)pbuf_free(p);
1069     return NAT64_PKT_HANDLED;
1070   }
1071 
1072   ret = nat64_ip4_reass_check(&p, &iphdr, inp, &ip6dest, outp);
1073   if (ret == NAT64_RET_FAIL) {
1074     return NAT64_PKT_NOT_HANDLED;
1075   } else if (ret == NAT64_RET_ERR) {
1076     (void)pbuf_free(p);
1077     return NAT64_PKT_HANDLED;
1078   } else if (ret == NAT64_RET_ASSEMBLE) {
1079     return NAT64_PKT_HANDLED;
1080   }
1081 
1082   /* the function should never return NAT64_RET_FAIL, but the return value must still be checked */
1083   ret = nat64_ip4_translate(p, iphdr, inp, &ip6src, &ip6dest, outp);
1084   if (ret == NAT64_RET_FAIL) {
1085     return NAT64_PKT_NOT_HANDLED;
1086   }
1087 
1088   (void)pbuf_free(p);
1089   return NAT64_PKT_HANDLED;
1090 }
1091 
1092 static int
1093 nat64_ip6_addr_convert(struct pbuf *p, const struct ip6_hdr *iphdr, struct ip_hdr *ip4hdr)
1094 {
1095   int ip4src;
1096   int ip4dst;
1097 
1098   /* here we convert the address */
1099   if ((nat64_addr_6to4(ip6_current_dest_addr(), (ip4_addr_t *)&ip4hdr->dest) != 0) ||
1100       (nat64_addr_6to4(ip6_current_src_addr(), (ip4_addr_t *)&ip4hdr->src) != 0)) {
1101     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input: the ip6 address is not ip4-converted.\n"));
1102     ip4dst = nat64_addr_is_ip4(ip6_current_dest_addr());
1103     ip4src = nat64_addr_is_ip4(ip6_current_src_addr());
1104     if ((ip4src == lwIP_TRUE) || (ip4dst == lwIP_TRUE)) {
1105       /* the pkt should be drop */
1106       IP_STATS_INC(ip6.natdrop);
1107       NAT64_ICMP6_NO_ROUTE(iphdr, p);
1108       (void)pbuf_free(p);
1109       return NAT64_RET_ERR;
1110     }
1111 
1112     return NAT64_RET_FAIL;
1113   }
1114 
1115   return NAT64_RET_OK;
1116 }
1117 
1118 static int
1119 nat64_ip6_packet_check(struct pbuf *p, const struct ip6_hdr *iphdr)
1120 {
1121   /* do not handle the linklocal/loopback/multicast packet */
1122   if (ip6_addr_islinklocal(ip6_current_dest_addr()) ||
1123       ip6_addr_isloopback(ip6_current_dest_addr()) ||
1124       ip6_addr_ismulticast(ip6_current_dest_addr())) {
1125     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input:pkt not handle\n"));
1126     return NAT64_RET_FAIL;
1127   }
1128 
1129   /* send ICMP6 if HL == 0 */
1130   if ((IP6H_HOPLIM(iphdr) == 0) || (IP6H_HOPLIM(iphdr) == 1)) {
1131 #if LWIP_ICMP6
1132     /* Don't send ICMP messages in response to ICMP messages */
1133     if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) {
1134       icmp6_time_exceeded(p, ICMP6_TE_HL);
1135     }
1136 #endif /* LWIP_ICMP6 */
1137     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input:hop limit drop\n"));
1138     IP6_STATS_INC(ip6.natdrop);
1139     (void)pbuf_free(p);
1140     return NAT64_RET_ERR;
1141   }
1142 
1143   return NAT64_RET_OK;
1144 }
1145 
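/*
 * Decide whether this IPv6 packet belongs to the 6-to-4 path: consult the RPL
 * routing table (IPv4 STA / synced entries, optional ip6in4 next hop), convert
 * the addresses and select the outgoing IPv4 netif.
 */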
1146 static int
1147 nat64_ip6_route(struct pbuf *p, const struct ip6_hdr *iphdr, const struct netif *inp,
1148                 nat64_ip6_data_t *data, struct netif **outp)
1149 {
1150   int ret;
1151   struct ip_hdr *ip4hdr = &data->ip4hdr;
1152 #if LWIP_IP6IN4 && LWIP_NAT64_IP6IN4
1153   ip6_addr_t nhop = {0};
1154 #endif
1155   void *route = NULL;
1156   route = lwip_rpl_route_entry_lookup(ip6_current_dest_addr());
1157   if (route != NULL) {
1158     if ((lwip_rpl_route_is_sync(route) == 0) && (lwip_rpl_route_is_ipv4sta(route) == 0)) {
1159       /* ipv6 forward the packet */
1160       return NAT64_RET_FAIL;
1161     }
1162 #if LWIP_IP6IN4 && LWIP_NAT64_IP6IN4
1163     ret = lwip_rpl_route_nhop(route, &nhop);
1164     if (ret == ERR_OK) {
1165       (void)ip6in4_entry_ip4_get(&nhop, &data->nhop);
1166     }
1167 #endif
1168   } else {
1169     if (lwip_rpl_is_rpl_netif(inp) == lwIP_TRUE) {
1170       /* connect to the ipv4 router */
1171       if (lwip_rpl_is_br() == lwIP_FALSE) {
1172         LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input:no netif\n"));
1173         return NAT64_RET_FAIL;
1174       }
1175     } else {
1176       /* the pkt should be drop */
1177       IP_STATS_INC(ip6.natdrop);
1178       NAT64_ICMP6_NO_ROUTE(iphdr, p);
1179       LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input:no router\n"));
1180       (void)pbuf_free(p);
1181       return NAT64_RET_ERR;
1182     }
1183   }
1184 
1185   ret = nat64_ip6_addr_convert(p, iphdr, ip4hdr);
1186   if (ret != NAT64_RET_OK) {
1187     return ret;
1188   }
1189 
1190   *outp = ip4_route((ip4_addr_t *)&ip4hdr->dest);
1191   if (*outp == NULL) {
1192     IP_STATS_INC(ip6.natdrop);
1193     NAT64_ICMP6_NO_ROUTE(iphdr, p);
1194     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input:no netif\n"));
1195     (void)pbuf_free(p);
1196     return NAT64_RET_ERR;
1197   }
1198 
1199   return NAT64_RET_OK;
1200 }
1201 
1202 static void
1203 nat64_ip6_assemble(struct pbuf *p, struct ip6_hdr **iphdr, nat64_ip6_data_t *data)
1204 {
1205   /*
1206    * The returned p points to the IPv6 header.
1207    * Update all our variables and pointers and continue.
1208    */
1209   *iphdr = (struct ip6_hdr *)p->payload;
1210   data->nexth = IP6H_NEXTH(*iphdr);
1211   data->ip6_hdr_len = 0;
1212   data->ip_pbuf_data_len = IP6H_PLEN(*iphdr);
1213   (void)pbuf_header(p, -IP6_HLEN);
1214   data->ip6_hdr_len = IP6_HLEN;
1215 }
1216 
1217 static int
1218 nat64_ip6_option_frag(struct pbuf **q, struct ip6_hdr **iphdr, nat64_ip6_data_t *data)
1219 {
1220   u16_t optlen;
1221   struct pbuf *p = *q;
1222   struct ip6_frag_hdr *frag_hdr = (struct ip6_frag_hdr *)p->payload;
1223 
1224   if (p->len < IP6_EXTENSION_HEADER_MIN_LEN) {
1225     LWIP_DEBUGF(NAT64_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
1226                 ("IP6_NEXTH_FRAGMENT:(pbuf len %"U16_F" is less than 2), IPv6 packet dropped.\n", p->len));
1227     IP6_STATS_INC(ip6.natdrop);
1228     (void)pbuf_free(p);
1229     return NAT64_RET_ERR;
1230   }
1231 
1232   /* Get next header type. */
1233   data->nexth = frag_hdr->_nexth;
1234 
1235   /* 8 : Fragment Header length. */
1236   optlen = 8;
1237   data->ip6_hdr_len += optlen;
1238 
1239   /* Make sure this header fits in current pbuf. */
1240   if (optlen > p->len) {
1241     LWIP_DEBUGF(NAT64_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
1242                 ("IPv6 opt header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n",
1243                  optlen, p->len));
1244     IP6_STATS_INC(ip6.natdrop);
1245     (void)pbuf_free(p);
1246     return NAT64_RET_ERR;
1247   }
1248 
1249   /* check that the payload length is a multiple of 8 octets when the M bit is set */
1250   if (IP6_FRAG_MBIT(frag_hdr) && (IP6H_PLEN(*iphdr) & 0x7)) {
1251     /* ipv6 payload length is not multiple of 8 octets */
1252     icmp6_param_problem(p, ICMP6_PP_FIELD, (u8_t *)(*iphdr) + (u32_t)(data->ip6_hdr_len - optlen));
1253     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input: packet with invalid payload length dropped\n"));
1254     IP6_STATS_INC(ip6.natdrop);
1255     (void)pbuf_free(p);
1256     return NAT64_RET_ERR;
1257   }
1258 
1259   /* Offset == 0 and more_fragments == 0? */
1260   if ((frag_hdr->_fragment_offset &
1261        PP_HTONS(IP6_FRAG_OFFSET_MASK | IP6_FRAG_MORE_FLAG)) == 0) {
1262     /*
1263      * This is a 1-fragment packet, usually a packet that we have
1264      * already reassembled. Skip this header and continue.
1265      */
1266     (void)pbuf_header(p, -(s16_t)(optlen));
1267   } else {
1268 #if LWIP_IPV6_REASS
1269     /* reassemble the packet */
1270     p = ip6_reass(p);
1271     *q = p;
1272     /* packet not fully reassembled yet? */
1273     LWIP_ERROR("nat64_ip6_input:fragment is not fully reassembled yet\n", (p != NULL), return NAT64_RET_ASSEMBLE);
1274     nat64_ip6_assemble(p, iphdr, data);
1275 #else /* LWIP_IPV6_REASS */
1276     /* free (drop) packet pbufs */
1277     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input: packet with Fragment header dropped.\n"));
1278     IP6_STATS_INC(ip6.natdrop);
1279     (void)pbuf_free(p);
1280     return NAT64_RET_ERR;
1281 #endif /* LWIP_IPV6_REASS */
1282   }
1283   return NAT64_RET_OK;
1284 }
1285 
1286 static int
1287 nat64_ip6_option_normal(struct pbuf *p, nat64_ip6_data_t *data)
1288 {
1289   u16_t optlen;
1290   /* Get next header type. */
1291   data->nexth = *((u8_t *)p->payload);
1292 
1293   if (p->len < IP6_EXTENSION_HEADER_MIN_LEN) {
1294     LWIP_DEBUGF(NAT64_DEBUG, ("pbuf (len %"U16_F") is less than 2.\n", p->len));
1295     IP_STATS_INC(ip6.natdrop);
1296     (void)pbuf_free(p);
1297     return NAT64_RET_ERR;
1298   }
1299   /* Get the header length. */
1300   if ((1 + *((u8_t *)p->payload + 1)) > (p->len / IP6_HDR_EXT_LEN_UNIT)) {
1301     LWIP_DEBUGF(NAT64_DEBUG, ("pbuf option (len %"U8_F") is too big.\n", *((u8_t *)p->payload + 1)));
1302     IP_STATS_INC(ip6.natdrop);
1303     (void)pbuf_free(p);
1304     return NAT64_RET_ERR;
1305   }
1306   optlen = (u16_t)(IP6_HDR_EXT_LEN_UNIT * (1 + *((u8_t *)p->payload + 1)));
1307   /* Skip over this header. */
1308   if (optlen > p->len) {
1309     LWIP_DEBUGF(NAT64_DEBUG,
1310                 ("IPv6 opt header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n",
1311                  optlen, p->len));
1312     IP_STATS_INC(ip6.natdrop);
1313     (void)pbuf_free(p);
1314     return NAT64_RET_ERR;
1315   }
1316 
1317   data->ip6_hdr_len += optlen;
1318   (void)pbuf_header(p, -((s16_t)(optlen)));
1319   return NAT64_RET_OK;
1320 }
1321 
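/*
 * Walk the IPv6 extension headers until a transport header (TCP/UDP/ICMPv6)
 * is found, reassembling fragments on the way; the extension headers
 * themselves are dropped rather than translated into IPv4 options.
 */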
1322 static int
1323 nat64_ip6_option(struct pbuf **q, struct ip6_hdr **iphdr, nat64_ip6_data_t *data)
1324 {
1325   int ret;
1326   u8_t flags;
1327   struct pbuf *p = *q;
1328   /*
1329    * we could convert the ipv6 extension headers into ipv4 options; then ip_pbuf_data_len should include them.
1330    * for now we just drop the options.
1331    */
1332   data->nexth = IP6H_NEXTH(*iphdr);
1333   data->ip4_hdr_len = IP_HLEN;
1334   data->ip6_hdr_len = IP6_HLEN;
1335   data->ip_pbuf_data_len = IP6H_PLEN(*iphdr);
1336 
1337   /* find the data in ip6 pkt */
1338   /* Move to payload. */
1339   (void)pbuf_header(p, -IP6_HLEN);
1340 
1341   flags = 0;
1342   /* Process option extension headers, if present. */
1343   while (data->nexth != IP6_NEXTH_NONE) {
1344     if ((data->nexth == IP6_NEXTH_TCP) || (data->nexth == IP6_NEXTH_UDP) ||
1345         (data->nexth == IP6_NEXTH_ICMP6) || (flags != 0)) {
1346       break;
1347     }
1348 
1349     /* maybe the pbuf assembled */
1350     if (p != *q) {
1351       p = *q;
1352     }
1353 
1354     switch (data->nexth) {
1355       case IP6_NEXTH_HOPBYHOP:
1356       case IP6_NEXTH_ENCAPS:
1357       case IP6_NEXTH_ROUTING:
1358       case IP6_NEXTH_DESTOPTS:
1359         ret = nat64_ip6_option_normal(p, data);
1360         if (ret != NAT64_RET_OK) {
1361           return ret;
1362         }
1363         break;
1364 
1365       case IP6_NEXTH_FRAGMENT:
1366         ret = nat64_ip6_option_frag(q, iphdr, data);
1367         if (ret != NAT64_RET_OK) {
1368           return ret;
1369         }
1370         break;
1371       default:
1372         flags = 1;
1373         break;
1374     }
1375   }
1376   return NAT64_RET_OK;
1377 }
1378 
1379 static int
1380 nat64_ip6_send(struct pbuf *p, struct pbuf *new_buf,
1381                struct ip_hdr *ip4hdr, struct netif *outp, const ip4_addr_t *nhop)
1382 {
1383   u16_t chksum = 0;
1384   struct ip_hdr *ip4hdr_payload = NULL;
1385   const ip4_addr_t *target = NULL;
1386 
1387   (void)ip4_iphdr_chksum(ip4hdr, outp, &chksum);
1388 
1389   IF__NETIF_CHECKSUM_ENABLED(outp, NETIF_CHECKSUM_GEN_IP) {
1390     IPH_CHKSUM_SET(ip4hdr, chksum);
1391   }
1392 #if LWIP_CHECKSUM_CTRL_PER_NETIF
1393   else {
1394     IPH_CHKSUM_SET(ip4hdr, 0);
1395   }
1396 #endif
1397   ip4hdr_payload = new_buf->payload;
1398   *ip4hdr_payload = *ip4hdr;
1399 
1400 #if IP_FRAG
1401   if ((outp->mtu != 0) && (new_buf->tot_len > outp->mtu)) {
1402     (void)ip4_frag(new_buf, outp, (ip4_addr_t *)&ip4hdr->dest);
1403     (void)pbuf_free(p);
1404     (void)pbuf_free(new_buf);
1405     return NAT64_RET_OK;
1406   }
1407 #endif /* IP_FRAG */
1408 
1409   if (nhop == NULL) {
1410     target = (ip4_addr_t *)&ip4hdr->dest;
1411   } else {
1412     target = nhop;
1413   }
1414 
1415   LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input ipv4 output\n"));
1416   outp->output(outp, new_buf, target);
1417   IP6_STATS_INC(ip6.natfw);
1418   IP6_STATS_INC(ip.xmit);
1419   (void)pbuf_free(p);
1420   (void)pbuf_free(new_buf);
1421   return NAT64_RET_OK;
1422 }
1423 
1424 static int
1425 nat64_ip6_translate_udp(struct pbuf *p, const struct ip6_hdr *iphdr,
1426                         struct pbuf *new_buf, const struct ip_hdr *ip4hdr)
1427 {
1428   struct udp_hdr *udphdr = (struct udp_hdr *)new_buf->payload;
1429 #if LWIP_DNS64
1430   if (udphdr->dest == lwip_ntohs(NAT64_DNS_PORT)) {
1431     (void)pbuf_header(p, -(s16_t)(sizeof(struct udp_hdr)));
1432     (void)pbuf_header(new_buf, -(s16_t)(sizeof(struct udp_hdr)));
1433     if (nat64_dns64_6to4(new_buf) != ERR_OK) {
1434       IP_STATS_INC(ip6.natdrop);
1435 #if LWIP_ICMP6
1436       LWIP_ERROR("send icmp\n", (IP6H_NEXTH(iphdr) == IP6_NEXTH_ICMP6),
1437                  icmp6_dest_unreach(p, ICMP6_DUR_PORT));
1438 #endif /* LWIP_ICMP6 */
1439       (void)pbuf_free(p);
1440       (void)pbuf_free(new_buf);
1441       return NAT64_RET_ERR;
1442     }
1443     (void)pbuf_header(p, (s16_t)(sizeof(struct udp_hdr)));
1444     (void)pbuf_header(new_buf, (s16_t)(sizeof(struct udp_hdr)));
1445   }
1446 #else
1447   (void)p;
1448   (void)iphdr;
1449 #endif
1450   udphdr->chksum = 0;
1451   udphdr->chksum = inet_chksum_pseudo(new_buf, IP_PROTO_UDP, new_buf->tot_len,
1452                                       (ip4_addr_t *)&ip4hdr->src, (ip4_addr_t *)&ip4hdr->dest);
1453   return NAT64_RET_OK;
1454 }
1455 
1456 static void
1457 nat64_ip6_translate_tcp(struct pbuf *new_buf, const struct ip_hdr *ip4hdr)
1458 {
1459   struct tcp_hdr *tcphdr = (struct tcp_hdr *)new_buf->payload;
1460   tcphdr->chksum = 0;
1461   tcphdr->chksum = inet_chksum_pseudo(new_buf, IP_PROTO_TCP, new_buf->tot_len,
1462                                       (ip4_addr_t *)&ip4hdr->src, (ip4_addr_t *)&ip4hdr->dest);
1463 }
1464 
1465 static int
1466 nat64_ip6_translate_icmp(struct pbuf *p, struct netif *outp, struct pbuf *new_buf)
1467 {
1468   struct icmp_echo_hdr *icmp4hdr = (struct icmp_echo_hdr *)new_buf->payload;
1469   struct icmpv6_hdr *icmp6hdr = (struct icmpv6_hdr *)new_buf->payload;
1470   /* just handle the echo icmp */
1471   if (icmp6hdr->type == ICMP6_TYPE_EREP) {
1472     ICMPH_TYPE_SET(icmp4hdr, ICMP_ER);
1473   } else if (icmp6hdr->type == ICMP6_TYPE_EREQ) {
1474     ICMPH_TYPE_SET(icmp4hdr, ICMP_ECHO);
1475   } else {
1476     LWIP_DEBUGF(NAT64_DEBUG, ("icmp6 not handle\n"));
1477     (void)pbuf_free(new_buf);
1478     IP_STATS_INC(ip6.natdrop);
1479     (void)pbuf_free(p);
1480     return NAT64_RET_ERR;
1481   }
1482 
1483   icmp4hdr->chksum = 0;
1484 #if CHECKSUM_GEN_ICMP
1485   IF__NETIF_CHECKSUM_ENABLED(outp, NETIF_CHECKSUM_GEN_ICMP) {
1486     icmp4hdr->chksum = inet_chksum(icmp4hdr, new_buf->len);
1487   }
1488 #endif
1489   LWIP_UNUSED_ARG(outp);
1490   return NAT64_RET_OK;
1491 }
1492 
1493 static int
1494 nat64_ip6_translate_default(struct pbuf *p, const struct ip6_hdr *iphdr, struct pbuf *new_buf)
1495 {
1496 #if LWIP_ICMP6
1497   /* send ICMP destination unreachable unless it was a multicast or ICMPv6 */
1498   if ((!ip6_addr_ismulticast(ip6_current_dest_addr())) &&
1499       (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6)) {
1500     icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE);
1501   }
1502 #endif /* LWIP_ICMP6 */
1503   LWIP_DEBUGF(NAT64_DEBUG, ("pkt not handle\n"));
1504   (void)pbuf_free(new_buf);
1505   IP_STATS_INC(ip6.natdrop);
1506   (void)pbuf_free(p);
1507   return NAT64_RET_ERR;
1508 }
1509 
1510 static void
1511 nat64_ip6_set_ip4hdr(const struct ip6_hdr *iphdr, const nat64_ip6_data_t *data,
1512                      struct ip_hdr *ip4hdr)
1513 {
1514   /* ipv4 version and len */
1515   IPH_VHL_SET(ip4hdr, 4, data->ip4_hdr_len >> 2);
1516 
1517 #if defined(LWIP_NAT64_PRIORITY_KEEP) && LWIP_NAT64_PRIORITY_KEEP
1518   IPH_TOS_SET(ip4hdr, IP6H_TC(iphdr));
1519 #else
1520   IPH_TOS_SET(ip4hdr, 0);
1521 #endif
1522 
1523   IPH_LEN_SET(ip4hdr, lwip_htons(data->ip4_hdr_len + data->ip_pbuf_data_len));
1524   IPH_ID_SET(ip4hdr, lwip_htons(ip4_get_ip_id()));
1525   IPH_OFFSET_SET(ip4hdr, 0);
1526   ip4_inc_ip_id();
1527 
1528   IPH_TTL_SET(ip4hdr, (IP6H_HOPLIM(iphdr) - 1));
1529 }
1530 
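/*
 * Mirror of nat64_ip4_translate() for the 6-to-4 direction: copy the payload
 * into a fresh pbuf, fix up the UDP/TCP pseudo-header checksums and ICMP
 * types, prepend the prepared IPv4 header and send.
 */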
1531 static int
1532 nat64_ip6_translate(struct pbuf *p, const struct ip6_hdr *iphdr, const nat64_ip6_data_t *data,
1533                     struct ip_hdr *ip4hdr, struct netif *outp)
1534 {
1535   int ret;
1536   struct pbuf *new_buf = NULL;
1537 
1538   /* PBUF_LINK MAY be more appropriate here */
1539   new_buf = pbuf_alloc(PBUF_IP, data->ip_pbuf_data_len, PBUF_RAM);
1540   if (new_buf == NULL) {
1541     LWIP_DEBUGF(NAT64_DEBUG, ("pbuf alloc fail:no MEMORY\r\n"));
1542     (void)pbuf_header(p, (s16_t)(data->ip6_hdr_len));
1543     return NAT64_RET_FAIL;
1544   }
1545 
1546   nat64_ip6_set_ip4hdr(iphdr, data, ip4hdr);
1547 
1548   if (pbuf_copy(new_buf, p) != ERR_OK) {
1549     (void)pbuf_free(p);
1550     (void)pbuf_free(new_buf);
1551     return NAT64_RET_ERR;
1552   }
1553   LWIP_DEBUGF(NAT64_DEBUG, ("%s:%d\n", __FUNCTION__, __LINE__));
1554   switch (data->nexth) {
1555 #if LWIP_UDP
1556     case IP6_NEXTH_UDP:
1557       IPH_PROTO_SET(ip4hdr, IP_PROTO_UDP);
1558       ret = nat64_ip6_translate_udp(p, iphdr, new_buf, ip4hdr);
1559       if (ret != NAT64_RET_OK) {
1560         return NAT64_RET_ERR;
1561       }
1562       break;
1563 #endif /* LWIP_UDP */
1564 #if LWIP_TCP
1565     case IP6_NEXTH_TCP:
1566       IPH_PROTO_SET(ip4hdr, IP_PROTO_TCP);
1567       nat64_ip6_translate_tcp(new_buf, ip4hdr);
1568       break;
1569 #endif /* LWIP_TCP */
1570 #if LWIP_ICMP
1571     case IP6_NEXTH_ICMP6:
1572       IPH_PROTO_SET(ip4hdr, IP_PROTO_ICMP);
1573       ret = nat64_ip6_translate_icmp(p, outp, new_buf);
1574       if (ret != NAT64_RET_OK) {
1575         return NAT64_RET_ERR;
1576       }
1577       break;
1578 #endif /* LWIP_ICMP */
1579     default:
1580       return nat64_ip6_translate_default(p, iphdr, new_buf);
1581   }
1582 
1583   (void)pbuf_header(new_buf, (s16_t)data->ip4_hdr_len);
1584 
1585   return nat64_ip6_send(p, new_buf, ip4hdr, outp, ip4_addr_isany(&data->nhop) ? NULL : &data->nhop);
1586 }
1587 
1588 static int
1589 nat64_ip6_frag(struct pbuf *p, const struct netif *outp, nat64_ip6_data_t *data)
1590 {
1591   data->ip_pbuf_data_len -= (data->ip6_hdr_len - IP6_HLEN);
1592 #if !IP_FRAG
1593   /*
1594    * the total length is compared against the mtu here.
1595    * ip fragmentation is not supported in this path, so an oversized packet is simply dropped.
1596    */
1597   if ((outp->mtu != 0) && ((data->ip4_hdr_len + data->ip_pbuf_data_len) > outp->mtu)) {
1598     IP_STATS_INC(ip6.natdrop);
1599     (void)pbuf_free(p);
1600     return NAT64_RET_ERR;
1601   }
1602 #else
1603   LWIP_UNUSED_ARG(p);
1604   LWIP_UNUSED_ARG(outp);
1605 #endif
1606   return NAT64_RET_OK;
1607 }
1608 
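/*
 * NAT64 hook for inbound IPv6 packets. Returns NAT64_PKT_NOT_HANDLED when the
 * packet should continue through normal IPv6 input; otherwise the pbuf is
 * consumed here (translated and forwarded, or dropped) and NAT64_PKT_HANDLED
 * is returned.
 */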
1609 int
1610 nat64_ip6_input(struct pbuf *p, struct ip6_hdr *iphdr, const struct netif *inp)
1611 {
1612   int ret;
1613   struct netif *outp = NULL;
1614   nat64_ip6_data_t data;
1615 
1616   (void)memset_s(&data, sizeof(data), 0, sizeof(data));
1617 
1618   if (nat64_status_check() != NAT64_RET_OK) {
1619     return NAT64_PKT_NOT_HANDLED;
1620   }
1621 
1622   ret = nat64_ip6_packet_check(p, iphdr);
1623   if (ret == NAT64_RET_FAIL) {
1624     return NAT64_PKT_NOT_HANDLED;
1625   } else if (ret == NAT64_RET_ERR) {
1626     return NAT64_PKT_HANDLED;
1627   }
1628 
1629   ret = nat64_ip6_route(p, iphdr, inp, &data, &outp);
1630   if (ret == NAT64_RET_FAIL) {
1631     return NAT64_PKT_NOT_HANDLED;
1632   } else if (ret == NAT64_RET_ERR) {
1633     return NAT64_PKT_HANDLED;
1634   }
1635 
1636   ret = nat64_ip6_option(&p, &iphdr, &data);
1637   if (ret == NAT64_RET_ERR) {
1638     return NAT64_PKT_HANDLED;
1639   } else if (ret == NAT64_RET_ASSEMBLE) {
1640     return NAT64_PKT_HANDLED;
1641   }
1642 
1643   if ((data.nexth != IP6_NEXTH_TCP) && (data.nexth != IP6_NEXTH_UDP) &&
1644       (data.nexth != IP6_NEXTH_ICMP6)) {
1645     LWIP_DEBUGF(NAT64_DEBUG, ("nat64_ip6_input: nexth is not correct.\n"));
1646     (void)pbuf_free(p);
1647     return NAT64_PKT_HANDLED;
1648   }
1649 
1650   ret = nat64_ip6_frag(p, outp, &data);
1651   if (ret != NAT64_RET_OK) {
1652     return NAT64_PKT_HANDLED;
1653   }
1654 
1655   ret = nat64_ip6_translate(p, iphdr, &data, &data.ip4hdr, outp);
1656   if (ret == NAT64_RET_FAIL) {
1657     (void)pbuf_free(p);
1658     return NAT64_PKT_HANDLED;
1659   }
1660 
1661   return NAT64_PKT_HANDLED;
1662 }
1663 
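/*
 * Start the proxied DHCPv4 request for this entry's MAC address, unless a
 * request is already pending or the entry is a synchronized one (nat64_sync).
 */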
1664 static void
1665 nat64_dhcp_proxy_start(nat64_entry_t *entry)
1666 {
1667   err_t ret;
1668   /* stateful nat64 is started dynamically, so make sure it is initialized */
1669   if (g_nat64_table == NULL) {
1670     LWIP_DEBUGF(NAT64_DEBUG, ("nat64 stateful is not init\n"));
1671     return;
1672   }
1673   if ((entry->state == NAT64_STATE_DHCP_REQUEST) || (entry->nat64_sync == lwIP_TRUE)) {
1674     return;
1675   }
1676   ret = nat64_dhcp_request_ip(g_nat64_netif, &(entry->mac), &(entry->ip));
1677   if (ret != ERR_OK) {
1678     entry->state = NAT64_STATE_DHCP_FAIL;
1679     LWIP_DEBUGF(NAT64_DEBUG, ("start nat64 dhcp proxy fail, mac:\n"));
1680     return;
1681   }
1682 
1683   LWIP_DEBUGF(NAT64_DEBUG, ("start nat64 dhcp proxy.\n"));
1684   entry->state = NAT64_STATE_DHCP_REQUEST;
1685   return;
1686 }
1687 
1688 #define NAT64_DELAY_RELEASE_PERIOD 10 /* the maximum time allowed for an mg that switches mbr to complete the join */
1689 
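/*
 * Move an entry towards release. With LWIP_MMBR an established entry is put
 * into delayed release first (giving an mg that switches mbr time to rejoin);
 * the other active states are released directly by the timer.
 */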
1690 static void
1691 nat64_dhcp_proxy_stop(nat64_entry_t *entry)
1692 {
1693   /* stateful nat64 is started dynamically, so make sure it is initialized */
1694   if ((g_nat64_table == NULL) || (entry == NULL)) {
1695     LWIP_DEBUGF(NAT64_DEBUG, ("nat64 stateful is not init\n"));
1696     return;
1697   }
1698   switch (entry->state) {
1699     case NAT64_STATE_ESTABLISH:
1700 #if LWIP_MMBR
1701       entry->state = NAT64_STATE_DELAY_RELEASE;
1702       entry->lifetime = NAT64_DELAY_RELEASE_PERIOD;
1703       break;
1704 #endif
1705     case NAT64_STATE_CREATED:
1706     case NAT64_STATE_DHCP_REQUEST:
1707     case NAT64_STATE_DHCP_FAIL:
1708       entry->state = NAT64_STATE_DIRECT_RELEASE;
1709       entry->lifetime = 0;
1710       break;
1711     /*
1712      * the remaining states are not handled here:
1713      * handled by the timer : NAT64_STATE_DELAY_RELEASE / NAT64_STATE_DIRECT_RELEASE
1714      * handled by dao & linkdao : NAT64_STATE_TIMEOUT_FOR_MBR
1715      */
1716     default:
1717       break;
1718   }
1719   return;
1720 }
1721 
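/*
 * Invoked from nat64_tmr() when an entry's lifetime expires: stop the proxied
 * DHCP lease where this node still owns it and remove the entry, or step a
 * delayed release on towards direct release.
 */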
1722 static void
1723 handle_dhcp_and_nat64_timeout(nat64_entry_t *entry)
1724 {
1725   err_t ret;
1726   /* stateful nat64 is started dynamically, so make sure it is initialized */
1727   if (g_nat64_table == NULL) {
1728     LWIP_DEBUGF(NAT64_DEBUG, ("nat64 stateful is not init\n"));
1729     return;
1730   }
1731 
1732   switch (entry->state) {
1733     case NAT64_STATE_ESTABLISH:
1734 #if LWIP_MMBR
1735       entry->state = NAT64_STATE_DELAY_RELEASE;
1736       entry->lifetime = NAT64_DELAY_RELEASE_PERIOD;
1737       break;
1738 #endif
1739     case NAT64_STATE_CREATED:
1740     case NAT64_STATE_DHCP_REQUEST:
1741     case NAT64_STATE_DIRECT_RELEASE:
1742       if (entry->nat64_sync == lwIP_FALSE) {
1743         ret = nat64_dhcp_stop(g_nat64_netif, &(entry->mac), lwIP_FALSE);
1744         if (ret == ERR_OK) {
1745           LWIP_DEBUGF(NAT64_DEBUG, ("nat64 stop ok\n"));
1746         }
1747       }
1748       (void)nat64_entry_remove(entry, lwIP_TRUE);
1749       break;
1750 #if LWIP_MMBR
1751     case NAT64_STATE_DELAY_RELEASE:
1752       LWIP_DEBUGF(NAT64_DEBUG, ("stop nat64 dhcp proxy.\n"));
1753       ret = nat64_dhcp_stop(g_nat64_netif, &(entry->mac), lwIP_FALSE);
1754       if (ret != ERR_OK) {
1755         LWIP_DEBUGF(NAT64_DEBUG, ("stop nat64 dhcp proxy fail, mac:\n"));
1756         return;
1757       }
1758       entry->lifetime = NAT64_WAIT_DHCP_RELEASE_PERIOD;
1759       entry->state = NAT64_STATE_DIRECT_RELEASE;
1760       break;
1761 #endif
1762     default:
1763       LWIP_DEBUGF(NAT64_DEBUG, ("unexpected state = %d.\n", (int)(entry->state)));
1764       break;
1765   }
1766 }
1767 
1768 int
1769 nat64_delete_entry_by_mnid(u8_t mnid)
1770 {
1771   s16_t i;
1772   nat64_entry_t *entry = NULL;
1773 
1774   if (g_nat64_table == NULL) {
1775     LWIP_DEBUGF(NAT64_DEBUG, ("nat64 is not init\n"));
1776     return ERR_OK;
1777   }
1778 
1779   if (mnid == 0) {
1780     return ERR_ARG;
1781   }
1782 
1783   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
1784     if (g_nat64_table[i].mnid != mnid) {
1785       continue;
1786     }
1787 
1788     entry = &g_nat64_table[i];
1789     if (entry->nat64_sync == lwIP_FALSE) {
1790       nat64_dhcp_proxy_stop(entry);
1791     }
1792   }
1793 
1794   return ERR_OK;
1795 }
1796 
1797 int
1798 nat64_delete_ipv4_addr(linklayer_addr_t *lladdr, u8_t mnid)
1799 {
1800   s16_t i;
1801   nat64_entry_t *entry = NULL;
1802 
1803   if (g_nat64_table == NULL) {
1804     LWIP_DEBUGF(NAT64_DEBUG, ("nat64 is not init\n"));
1805     return ERR_OK;
1806   }
1807 
1808   if (lladdr == NULL) {
1809     return ERR_ARG;
1810   }
1811 
1812   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
1813     if (lwip_lladdr_cmp(&g_nat64_table[i].mac, lladdr, sizeof(linklayer_addr_t)) &&
1814         ((mnid == 0) || (g_nat64_table[i].mnid == 0) || (g_nat64_table[i].mnid == mnid))) {
1815       break;
1816     }
1817   }
1818 
1819   if (i == LWIP_NAT64_ENTRY_SIZE) {
1820     return ERR_OK;
1821   }
1822 
1823   entry = &g_nat64_table[i];
1824   nat64_dhcp_proxy_stop(entry);
1825   return ERR_OK;
1826 }
1827 
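/*
 * Copy the IPv4 address of an established entry into *ipv4. With
 * LWIP_NAT64_MIN_SUBSTITUTE the address is derived from the entry index via
 * dhcp_substitute_idx_to_ip() instead of being stored in the entry itself.
 */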
1828 static int
1829 nat64_get_ipv4_addr(s16_t index, uint32_t *ipv4)
1830 {
1831 #if LWIP_NAT64_MIN_SUBSTITUTE
1832   ip4_addr_t ip;
1833   (void)memset_s(&ip, sizeof(ip4_addr_t), 0, sizeof(ip4_addr_t));
1834 #endif
1835   if (g_nat64_table == NULL) {
1836     return NAT64_RET_ERR;
1837   }
1838   if ((g_nat64_table[index].state == NAT64_STATE_ESTABLISH) &&
1839 #if !LWIP_NAT64_MIN_SUBSTITUTE
1840       (!ip4_addr_isany_val(g_nat64_table[index].ip))
1841 #elif LWIP_DHCP_SUBSTITUTE
1842       (dhcp_substitute_idx_to_ip(g_nat64_netif, index + 1, &ip) == ERR_OK) &&
1843       (!ip4_addr_isany_val(ip))
1844 #else
1845       (lwIP_FALSE)
1846 #endif
1847      ) {
1848 #if !LWIP_NAT64_MIN_SUBSTITUTE
1849     *ipv4 = g_nat64_table[index].ip.addr;
1850 #elif LWIP_DHCP_SUBSTITUTE
1851     *ipv4 = ip.addr;
1852 #else
1853     *ipv4 = 0;
1854 #endif
1855     LWIP_DEBUGF(NAT64_DEBUG, ("nat64: the ipv4 is ready. ip: %u\n", *ipv4));
1856     return NAT64_RET_OK;
1857   }
1858   LWIP_DEBUGF(NAT64_DEBUG, ("nat64: the ipv4 is not ready. ip is null\n"));
1859   return NAT64_RET_ERR;
1860 }
1861 
1862 #if LWIP_NA_PROXY_UNSOLICITED
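/*
 * Send an unsolicited neighbor advertisement on behalf of a proxied node and
 * schedule the next retransmission (na_to is counted in nat64 timer ticks).
 */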
1863 static void
1864 nat64_send_unsolicited_na(nat64_entry_t *entry)
1865 {
1866   ip6_addr_t addr;
1867   int ret;
1868   u16_t msecs;
1869 
1870   msecs = ND6_RETRANS_TIMER;
1871   entry->na_to = (u8_t)((msecs + NAT64_TMR_INTERVAL - 1) / NAT64_TMR_INTERVAL);
1872   if (g_nat64_netif == NULL) {
1873     LWIP_DEBUGF(NAT64_DEBUG, ("uns na nul if\n"));
1874     return;
1875   }
1876 
1877   ret = nat64_entry_to6((const nat64_entry_t *)entry, &addr);
1878   if (ret != 0) {
1879     LWIP_DEBUGF(NAT64_DEBUG, ("uns na addr fail\n"));
1880     return;
1881   }
1882 
1883   nd6_send_na(g_nat64_netif, (const ip6_addr_t *)&addr, ND6_FLAG_OVERRIDE | ND6_SEND_FLAG_ALLNODES_DEST);
1884   if (entry->na_tries < ND6_MAX_NEIGHBOR_ADVERTISEMENT) {
1885     entry->na_tries++;
1886   }
1887 }
1888 
1889 void
1890 nat64_proxy_unsolicited_na(const struct netif *netif)
1891 {
1892   int i;
1893   nat64_entry_t *entry = NULL;
1894 
1895   if (g_nat64_table == NULL) {
1896     LWIP_DEBUGF(NAT64_DEBUG, ("proxy na: nat64 stateful is not init\n"));
1897     return;
1898   }
1899   if (g_nat64_netif != netif) {
1900     LWIP_DEBUGF(NAT64_DEBUG, ("proxy na: no nat64 netif\n"));
1901     return;
1902   }
1903   if (!(netif_is_up(netif) && netif_is_link_up(netif))) {
1904     return;
1905   }
1906 
1907   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
1908     entry = &g_nat64_table[i];
1909     if (entry->state == NAT64_STATE_INIT) {
1910       continue;
1911     }
1912     entry->na_tries = 0;
1913     nat64_send_unsolicited_na(entry);
1914   }
1915 
1916   return;
1917 }
1918 #endif /* LWIP_NA_PROXY_UNSOLICITED */
1919 
1920 #if LWIP_MMBR && LWIP_NAT64_CHANGE_MSG
1921 void
1922 nat64_send_change_entry_msg(nat64_entry_t *entry, u8_t type)
1923 {
1924   nat64_entry_t *entry_transmit = NULL;
1925   if (entry->nat64_sync == lwIP_TRUE) {
1926     return;
1927   }
1928   entry_transmit = (nat64_entry_t *)mem_malloc(sizeof(nat64_entry_t));
1929   if (entry_transmit == NULL) {
1930     LWIP_DEBUGF(NAT64_DEBUG, ("mem_malloc fail!\n"));
1931     return;
1932   }
1933 
1934   (void)memcpy_s(entry_transmit, sizeof(nat64_entry_t), entry, sizeof(nat64_entry_t));
1935   entry_transmit->mnid = rpl_get_mbr_mnid();
1936   entry_transmit->nat64_sync = lwIP_TRUE;
1937 
1938   rpl_event_indicate(type, 0, entry_transmit);
1939 }
1940 #endif /* LWIP_MMBR && LWIP_NAT64_CHANGE_MSG */
1941 
1942 #if RPL_CONF_SWITCH_MBR_BY_AUTOLINK
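/*
 * Executed in the tcpip thread. MSG_NODE_CHANGE_MBR: ack the peer mbr and
 * stop the local dhcp proxy for the node, marking the entry as synced.
 * MSG_NODE_CHANGE_MBR_ACK: restart the dhcp proxy for an entry that was
 * waiting in NAT64_STATE_TIMEOUT_FOR_MBR.
 */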
1943 static err_t
1944 lwip_mmbr_autolink_event_do(struct tcpip_api_call_data *m)
1945 {
1946   nat64_entry_t *entry = NULL;
1947   lwip_autolink_event_t *reply_ack_msg = NULL;
1948   err_t ret;
1949   struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
1950   lwip_autolink_event_t *autolink_event = (lwip_autolink_event_t *)(msg->msg.arg_cb.arg);
1951   entry = nat64_entry_lookup_by_mac(&(autolink_event->mac));
1952   if (entry == NULL) {
1953     return ERR_ARG;
1954   }
1955   switch (autolink_event->type) {
1956     case MSG_NODE_CHANGE_MBR:
1957       reply_ack_msg = (lwip_autolink_event_t *)mem_malloc(sizeof(lwip_autolink_event_t));
1958       if (reply_ack_msg == NULL) {
1959         LWIP_DEBUGF(NAT64_DEBUG, ("mem_malloc fail!\n"));
1960         return ERR_MEM;
1961       }
1962       (void)memset_s(reply_ack_msg, sizeof(lwip_autolink_event_t), 0, sizeof(lwip_autolink_event_t));
1963       reply_ack_msg->type = MSG_NODE_CHANGE_MBR_ACK;
1964       reply_ack_msg->mac = autolink_event->mac;
1965       /* here we should send this ack info to the peer MBR */
1966       mesh_lwip_send_msg(autolink_event->local_ip, (u8_t *)reply_ack_msg, sizeof(lwip_autolink_event_t));
1967       mem_free(reply_ack_msg);
1968       /* when receive the MSG_NODE_CHANGE_MBR, we should stop dhcp and release nat64 anyway */
1969       if ((entry->nat64_sync == lwIP_FALSE) && ((entry->state == NAT64_STATE_DHCP_REQUEST) ||
1970           (entry->state == NAT64_STATE_ESTABLISH))) {
1971         ret = nat64_dhcp_stop(nat64_netif_get(), &(entry->mac), lwIP_FALSE);
1972         if (ret != ERR_OK) {
1973           LWIP_DEBUGF(NAT64_DEBUG, ("stop nat64 dhcp proxy fail, mac:\n"));
1974           return ERR_ARG;
1975         }
1976         entry->nat64_sync = lwIP_TRUE;
1977       }
1978       break;
1979     case MSG_NODE_CHANGE_MBR_ACK:
1980       entry->timeouts = 0;
1981       if ((entry->nat64_sync == lwIP_FALSE) && (entry->state == NAT64_STATE_TIMEOUT_FOR_MBR)) {
1982         nat64_dhcp_proxy_start(entry);
1983       }
1984       break;
1985     default:
1986       break;
1987   }
1988   return ERR_OK;
1989 }
1990 
1991 static void
1992 lwip_mmbr_autolink_event(const u8_t *data, u32_t data_lenth)
1993 {
1994   if ((data == NULL) || (data_lenth < sizeof(lwip_autolink_event_t))) {
1995     LWIP_DEBUGF(NAT64_DEBUG, ("invalid param!\n"));
    return;
1996   }
1997   lwip_autolink_event_t *autolink_event = (lwip_autolink_event_t *)data;
1998   LWIP_API_VAR_DECLARE(msg);
1999   LWIP_API_VAR_ALLOC(msg);
2000   LWIP_API_VAR_REF(msg).netif = NULL;
2001   LWIP_API_VAR_REF(msg).msg.arg_cb.argfunc = NULL;
2002   LWIP_API_VAR_REF(msg).msg.arg_cb.arg = (void *)autolink_event;
2003   (void)tcpip_linklayer_event_call(lwip_mmbr_autolink_event_do, &API_VAR_REF(msg).call);
2004   LWIP_API_VAR_FREE(msg);
2005 }
2006 
2007 static void
2008 lwip_send_autolink_event_othermbr(nat64_entry_t *entry, u32_t peer_mbr_ipv4)
2009 {
2010   struct netif *netif = nat64_netif_get();
2011   if (netif == NULL) {
2012     return;
2013   }
2014   lwip_autolink_event_t *autolink_event = (lwip_autolink_event_t *)mem_malloc(sizeof(lwip_autolink_event_t));
2015   if (autolink_event == NULL) {
2016     LWIP_DEBUGF(NAT64_DEBUG, ("mem_malloc fail!\n"));
2017     return;
2018   }
2019   (void)memcpy_s(&(autolink_event->mac), sizeof(linklayer_addr_t), &(entry->mac), sizeof(linklayer_addr_t));
2020   (void)netif_get_addr(netif, (ip4_addr_t *)&autolink_event->local_ip, NULL, NULL);
2021   autolink_event->type = MSG_NODE_CHANGE_MBR;
2022   mesh_lwip_send_msg(peer_mbr_ipv4, (u8_t *)(autolink_event), sizeof(lwip_autolink_event_t));
2023   entry->state = NAT64_STATE_TIMEOUT_FOR_MBR;
2024   mem_free(autolink_event);
2025 }
2026 
2027 uint8_t
2028 lwip_mmbr_autolink_event_init(void)
2029 {
2030   mesh_set_lwip_msg_call_back(lwip_mmbr_autolink_event);
2031   return RPL_OK;
2032 }
2033 
2034 void
2035 lwip_mmbr_autolink_event_deinit(void)
2036 {
2037   mesh_set_lwip_msg_call_back(NULL);
2038   return;
2039 }
2040 #endif
2041 
2042 static void
2043 update_entry_from_dao(nat64_entry_t *nate, const nat64_ipv4_rqst_t *ipv4_rqst)
2044 {
2045   /* the dao_sn has to be renewed even when the ip is not ready */
2046   nate->dao_sn = ipv4_rqst->dao_sn;
2047   nate->mnid = ipv4_rqst->mnid;
2048   nate->orig_mnid = ipv4_rqst->mnid;
2049   nate->lifetime = ipv4_rqst->lifetime;
2050   nate->conn_time = ipv4_rqst->conn_time;
2051   nate->nat64_sync = lwIP_FALSE;
2052 }
2053 
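/*
 * Handle an IPv4 address request derived from a DAO: depending on the state
 * of any existing entry for the link-layer address, return the established
 * address, refresh the entry, or create a new entry and start the dhcp proxy.
 */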
2054 int
2055 nat64_request_ipv4_addr(const nat64_ipv4_rqst_t *ipv4_rqst)
2056 {
2057   s16_t i;
2058   u32_t pref_ipv4 = 0;
2059 #if RPL_CONF_SWITCH_MBR_BY_AUTOLINK
2060   u32_t peer_mbr_ipv4 = 0;
2061 #endif
2062   nat64_entry_t *entry = NULL;
2063 #if LWIP_NAT64_MIN_SUBSTITUTE
2064   ip4_addr_t ip;
2065 #endif
2066 
2067   if ((ipv4_rqst == NULL) || (ipv4_rqst->lladdr == NULL) || (ipv4_rqst->ipv4 == NULL) || (g_nat64_table == NULL)) {
2068     return NAT64_RET_FAIL;
2069   }
2070 
2071   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
2072     if (lwip_lladdr_cmp(&g_nat64_table[i].mac, ipv4_rqst->lladdr, sizeof(linklayer_addr_t))) {
2073       break;
2074     }
2075   }
2076 
2077   /* find exist nat64 */
2078   if (i != LWIP_NAT64_ENTRY_SIZE) {
2079     switch (g_nat64_table[i].state) {
2080       case NAT64_STATE_ESTABLISH:
2081         if (g_nat64_table[i].nat64_sync == lwIP_FALSE) {
2082           if ((ipv4_rqst->mnid == g_nat64_table[i].mnid) ||
2083               (g_nat64_table[i].lifetime == NAT64_TMR_INFINITY)) {
2084 #if !LWIP_NAT64_MIN_SUBSTITUTE
2085             /* an entry whose lifetime is NAT64_TMR_INFINITY (a sta behind the mbr) must not send an update event */
2086             if (!ip4_addr_isany_val(g_nat64_table[i].ip) && (g_nat64_table[i].lifetime != NAT64_TMR_INFINITY)) {
2087 #endif
2088               update_entry_from_dao(&g_nat64_table[i], ipv4_rqst);
2089 #if LWIP_MMBR && LWIP_NAT64_CHANGE_MSG
2090               nat64_send_change_entry_msg(&g_nat64_table[i], RPL_EVT_NODE_NAT64_UPDATE);
2091 #endif /* LWIP_MMBR && LWIP_NAT64_CHANGE_MSG */
2092               return nat64_get_ipv4_addr(i, ipv4_rqst->ipv4);
2093             }
2094           }
2095           /* handle a sta switching between different mgs under the same mbr */
2096           if ((ipv4_rqst->mnid != g_nat64_table[i].mnid) && (g_nat64_table[i].mnid != 0) && (ipv4_rqst->mnid != 0)) {
2097 #if !LWIP_NAT64_MIN_SUBSTITUTE
2098             if (!ip4_addr_isany_val(g_nat64_table[i].ip)) {
2099               /* remove old dao proxy in one mbr */
2100               if (ipv4_rqst->conn_time > g_nat64_table[i].conn_time) {
2101                 return NAT64_RET_ERR;
2102               }
2103               update_entry_from_dao(&g_nat64_table[i], ipv4_rqst);
2104 #if LWIP_MMBR && LWIP_NAT64_CHANGE_MSG
2105               nat64_send_change_entry_msg(&g_nat64_table[i], RPL_EVT_NODE_NAT64_ADD);
2106 #endif /* LWIP_MMBR && LWIP_NAT64_CHANGE_MSG */
2107               return nat64_get_ipv4_addr(i, ipv4_rqst->ipv4);
2108             }
2109 #endif
2110           }
2111         } else if (g_nat64_table[i].nat64_sync == lwIP_TRUE) {
2112           if (ipv4_rqst->conn_time > g_nat64_table[i].conn_time) {
2113             return NAT64_RET_ERR;
2114           }
2115 #if !LWIP_NAT64_MIN_SUBSTITUTE
2116           if (!ip4_addr_isany_val(g_nat64_table[i].ip)) {
2117             pref_ipv4 = g_nat64_table[i].ip.addr;
2118           }
2119 #endif
2120 
2121 #if RPL_CONF_SWITCH_MBR_BY_AUTOLINK
2122             (void)mesh_get_mbr_ip(g_nat64_table[i].mnid, &peer_mbr_ipv4);
2123 #endif
2124           /*
2125            * in the sync state no dhcp proxy exists; reaching this point usually means frequent
2126            * switching, so stop dhcp, remove the synced nat64 entry, then recreate it and restart dhcp.
2127            */
2128           (void)nat64_dhcp_stop(nat64_netif_get(), &(g_nat64_table[i].mac), lwIP_FALSE);
2129           update_entry_from_dao(&g_nat64_table[i], ipv4_rqst);
2130           if (pref_ipv4 != 0) {
2131 #if RPL_CONF_SWITCH_MBR_BY_AUTOLINK
2132             lwip_send_autolink_event_othermbr(&g_nat64_table[i], peer_mbr_ipv4);
2133 #endif
2134           }
2135           return NAT64_RET_OK;
2136         }
2137         break;
2138 #if LWIP_MMBR
2139       case NAT64_STATE_DELAY_RELEASE:
2140         if (g_nat64_table[i].nat64_sync == lwIP_FALSE) {
2141           update_entry_from_dao(&g_nat64_table[i], ipv4_rqst);
2142           g_nat64_table[i].state = NAT64_STATE_ESTABLISH;
2143 #if LWIP_NAT64_CHANGE_MSG
2144           if (!ip4_addr_isany_val(g_nat64_table[i].ip)) {
2145             nat64_send_change_entry_msg(&g_nat64_table[i], RPL_EVT_NODE_NAT64_ADD);
2146           }
2147 #endif
2148           return nat64_get_ipv4_addr(i, ipv4_rqst->ipv4);
2149         } else {
2150           return NAT64_RET_ERR;
2151         }
2152 #endif
2153       case NAT64_STATE_CREATED:
2154       case NAT64_STATE_DHCP_FAIL:
2155         if ((g_nat64_table[i].state == NAT64_STATE_CREATED) && (g_nat64_table[i].nat64_sync == lwIP_TRUE)) {
2156           /*
2157            * Normally we expect a synced nat64 entry not to have an active dhcp proxy.
2158            * Reaching here implies the MG is switching from one MBR to another frequently,
2159            * so we restart the dhcp proxy to avoid inconsistency between dhcp and nat64 tables.
2160            */
2161           (void)nat64_dhcp_stop(nat64_netif_get(), &(g_nat64_table[i].mac), lwIP_FALSE);
2162         }
2163         if (g_nat64_table[i].nat64_sync == lwIP_FALSE || (g_nat64_table[i].state == NAT64_STATE_CREATED)) {
2164           update_entry_from_dao(&g_nat64_table[i], ipv4_rqst);
2165           nat64_dhcp_proxy_start(&g_nat64_table[i]);
2166         }
2167         return NAT64_RET_ERR;
2168       case NAT64_STATE_DHCP_REQUEST:
2169         return NAT64_RET_ERR;
2170       case NAT64_STATE_DIRECT_RELEASE:
2171         /*
2172          * when a sta's parent switches from an mg to the mbr, the sta's nat64 state at the
2173          * moment it connects to the mbr is NAT64_STATE_DIRECT_RELEASE, so its entry would
2174          * be deleted on timeout.
2175          * in that case, update the dao info and start dhcp instead.
2176          */
2177         if ((ipv4_rqst->lifetime == MAX_UINT32) && (g_nat64_table[i].nat64_sync == lwIP_FALSE)) {
2178           update_entry_from_dao(&g_nat64_table[i], ipv4_rqst);
2179           nat64_dhcp_proxy_start(&g_nat64_table[i]);
2180           return NAT64_RET_OK;
2181         }
2182         return NAT64_RET_ERR;
2183       default:
2184         return NAT64_RET_ERR;
2185     }
2186   }
2187 
2188   /* no existing nat64 entry was found, or the mg moved from one mbr to another */
2189   if (i == LWIP_NAT64_ENTRY_SIZE) {
2190     entry = nat64_entry_new(ipv4_rqst->lladdr, ipv4_rqst->dao_sn, ipv4_rqst->mnid, ipv4_rqst->lifetime, lwIP_FALSE,
2191                             ipv4_rqst->conn_time);
2192     if (entry != NULL) {
2193 #if LWIP_NA_PROXY_UNSOLICITED
2194       nat64_send_unsolicited_na(entry);
2195 #endif /* LWIP_NA_PROXY_UNSOLICITED */
2196       /* trigger dhcp to obtain an ip address */
2197       LWIP_DEBUGF(NAT64_DEBUG, ("nat64_dhcp_proxy_start\n"));
2198       nat64_dhcp_proxy_start(entry); /* the state changes from NAT64_STATE_INIT to NAT64_STATE_DHCP_REQUEST */
2199       return NAT64_RET_ERR;
2200     }
2201   }
2202   return NAT64_RET_FAIL;
2203 }
2204 
2205 #if LWIP_LOWPOWER
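/*
 * Low-power support: derive the next tick from the pending NA retransmission
 * timers and entry lifetimes, so the nat64 timer can be skipped until then.
 */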
2206 u32_t
2207 nat64_tmr_tick(void)
2208 {
2209   s16_t i;
2210   u32_t tick = 0;
2211   nat64_entry_t *entry = NULL;
2212 
2213   if (g_nat64_table == NULL) {
2214     LOWPOWER_DEBUG(("%s tmr tick: 0\n", __func__));
2215     return 0;
2216   }
2217 
2218   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
2219     entry = &g_nat64_table[i];
2220     if (entry->state == NAT64_STATE_INIT) {
2221       continue;
2222     }
2223 #if LWIP_NA_PROXY_UNSOLICITED
2224     SET_TMR_TICK(tick, entry->na_to);
2225 #endif /* LWIP_NA_PROXY_UNSOLICITED */
2226 
2227     if (entry->lifetime == NAT64_TMR_INFINITY) {
2228       continue;
2229     }
2230     SET_TMR_TICK(tick, entry->lifetime);
2231   }
2232 
2233   LOWPOWER_DEBUG(("%s tmr tick: %u\n", __func__, tick));
2234   return tick;
2235 }
2236 #endif
2237 
2238 #if LWIP_NA_PROXY_UNSOLICITED
2239 static void
2240 nat64_na_tmr(nat64_entry_t *entry)
2241 {
2242   if (entry->na_to > 1) {
2243     entry->na_to--;
2244   } else if (entry->na_to == 1) {
2245     entry->na_to--;
2246     if (entry->na_tries < ND6_MAX_NEIGHBOR_ADVERTISEMENT) {
2247       nat64_send_unsolicited_na(entry);
2248     } else {
2249       entry->na_tries = 0;
2250     }
2251   } else {
2252     /* do nothing if na_to is 0 */
2253   }
2254 }
2255 #endif /* LWIP_NA_PROXY_UNSOLICITED */
2256 
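/*
 * Periodic nat64 timer: advances conn_time for every active entry, drives
 * unsolicited-NA retransmission and the mbr-switch ack timeout, retries a
 * failed dhcp start for infinite-lifetime entries and ages out the rest.
 */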
2257 void
2258 nat64_tmr(void)
2259 {
2260   int i;
2261   nat64_entry_t *entry = NULL;
2262   if (g_nat64_table == NULL) {
2263     return;
2264   }
2265   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
2266     entry = &g_nat64_table[i];
2267     if (entry->state == NAT64_STATE_INIT) {
2268       continue;
2269     }
2270     /* the mbr increases the conn_time of every active sta/mg entry */
2271     entry->conn_time++; /* the counter wraps only after roughly 136 years */
2272 #if RPL_CONF_SWITCH_MBR_BY_AUTOLINK
2273     if (entry->state == NAT64_STATE_TIMEOUT_FOR_MBR) {
2274       entry->timeouts++;
2275       if (entry->timeouts == NAT64_MBR_EXIT_WAIT_ACK_TIME) {
2276         nat64_dhcp_proxy_start(entry);
2277         entry->timeouts = 0;
2278       }
2279     }
2280 #endif
2281 #if LWIP_NA_PROXY_UNSOLICITED
2282     nat64_na_tmr(entry);
2283 #endif /* LWIP_NA_PROXY_UNSOLICITED */
2284 
2285     if (entry->lifetime == NAT64_TMR_INFINITY) {
2286       /* for a sta connected to the MBR, restart dhcp if the previous attempt failed */
2287       if (entry->state == NAT64_STATE_DHCP_FAIL) {
2288         nat64_dhcp_proxy_start(entry);
2289       }
2290       continue;
2291     }
2292 
2293     if (entry->lifetime <= (NAT64_TMR_INTERVAL / MS_PER_SECOND)) {
2294       handle_dhcp_and_nat64_timeout(entry);
2295     } else {
2296       entry->lifetime -= (NAT64_TMR_INTERVAL / MS_PER_SECOND);
2297     }
2298   }
2299 }
2300 
2301 struct netif *
2302 nat64_netif_get(void)
2303 {
2304   return g_nat64_netif;
2305 }
2306 
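/*
 * Bind nat64 to a netif: with a netif the stateful mode is enabled, without
 * one only stateless translation is available. The autolink event callback is
 * registered when RPL_CONF_SWITCH_MBR_BY_AUTOLINK is set.
 */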
2307 err_t
2308 nat64_init(struct netif *ntf)
2309 {
2310   if (g_nat64_table != NULL) {
2311     LWIP_DEBUGF(NAT64_DEBUG, ("nat64 already inited.\n"));
2312     return ERR_OK;
2313   }
2314   if (ntf != NULL) {
2315     g_nat64_netif = ntf;
2316     nat64_set_statful_enable();
2317     LWIP_DEBUGF(NAT64_DEBUG, ("nat64 stateful..\n"));
2318   } else {
2319     LWIP_DEBUGF(NAT64_DEBUG, ("nat64 stateless..\n"));
2320   }
2321 #if RPL_CONF_SWITCH_MBR_BY_AUTOLINK
2322   if (lwip_mmbr_autolink_event_init() != ERR_OK) {
2323     LWIP_DEBUGF(NAT64_DEBUG, ("register mmbr autolink event fail!\n"));
2324   }
2325 #endif
2326   return ERR_OK;
2327 }
2328 
2329 err_t
2330 nat64_deinit(void)
2331 {
2332   if (g_nat64_netif != NULL) {
2333     nat64_set_statful_disable();
2334     g_nat64_netif = NULL;
2335   }
2336 #if RPL_CONF_SWITCH_MBR_BY_AUTOLINK
2337   lwip_mmbr_autolink_event_deinit();
2338 #endif
2339   return ERR_OK;
2340 }
2341 
2342 err_t
2343 nat64_deinit_netif(const struct netif *ntf)
2344 {
2345   if ((ntf == NULL) || (g_nat64_table == NULL) || (ntf != g_nat64_netif)) {
2346     return ERR_ARG;
2347   }
2348 
2349   return nat64_deinit();
2350 }
2351 
2352 static err_t
2353 nat64_init_with_name(struct netif *nif, void *data)
2354 {
2355   nif = netif_find((const char *)data);
2356   if (nif == NULL) {
2357     return ERR_ARG;
2358   }
2359 
2360   return nat64_init(nif);
2361 }
2362 
2363 err_t
2364 nat64_entry_remove_by_mnid(struct netif *nif, void *arg)
2365 {
2366   int i;
2367   uint8_t *mnid = (uint8_t *)arg;
2368   if ((*mnid == 0) || (g_nat64_table == NULL)) {
2369     return ERR_VAL;
2370   }
2371   for (i = 0; i < LWIP_NAT64_ENTRY_SIZE; i++) {
2372     if ((g_nat64_table[i].mnid == *mnid) && (g_nat64_table[i].nat64_sync == lwIP_TRUE)) {
2373       /* delete all this mbr nat64 sync when mbr leave */
2374       (void)nat64_entry_remove(&g_nat64_table[i], lwIP_FALSE);
2375     }
2376   }
2377   LWIP_UNUSED_ARG(nif);
2378   return ERR_OK;
2379 }
2380 
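/*
 * Public init entry: only netif names starting with "wlan" (NAT64_NETIF_NAME)
 * are accepted, and the actual initialization runs in the tcpip thread via
 * netifapi_call_argcb().
 */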
2381 int
2382 lwip_nat64_init(const char *name, uint8_t len)
2383 {
2384   err_t ret;
2385 
2386   if ((name == NULL) || (len >= NETIF_NAMESIZE) || (len < NAT64_NETIF_NAME_LEN_MIN) ||
2387       (strncmp(name, NAT64_NETIF_NAME, NAT64_NETIF_NAME_LEN_MIN) != 0)) {
2388     return -1;
2389   }
2390 
2391   ret = netifapi_call_argcb(nat64_init_with_name, (void *)name);
2392   if (ret != ERR_OK) {
2393     return -1;
2394   }
2395 
2396   return 0;
2397 }
2398 
2399 static err_t
2400 nat64_exit(struct netif *nif)
2401 {
2402   LWIP_UNUSED_ARG(nif);
2403   return nat64_deinit();
2404 }
2405 
2406 int
2407 lwip_nat64_deinit(void)
2408 {
2409   err_t ret;
2410   ret = netifapi_netif_common(NULL, NULL, nat64_exit);
2411   if (ret != ERR_OK) {
2412     return -1;
2413   }
2414 
2415   return 0;
2416 }
2417 #endif /* LWIP_NAT64 */
2418