// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

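/* TCP connection teardown is handed back to the slow path: if this segment
 * carries FIN or RST, release the flow so conntrack can follow the shutdown.
 */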
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

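/* Adjust the L4 checksum after rewriting an IPv4 address. UDP checksums are
 * optional, so leave an all-zero checksum alone unless the checksum is being
 * computed later (CHECKSUM_PARTIAL), and map a newly-zero result to
 * CSUM_MANGLED_0.
 */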
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

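/* SNAT rewrites the source address on the original direction and the
 * destination address on replies, using the mapped address stored in the
 * opposite tuple; the IP and L4 checksums are fixed up accordingly.
 */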
static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

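/* DNAT is the mirror image: rewrite the destination on the original
 * direction and the source on replies.
 */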
static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

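/* Apply the NAT mangling recorded on this flow: ports first, then
 * addresses, for whichever of SNAT/DNAT is flagged.
 */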
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  unsigned int thoff, enum flow_offload_tuple_dir dir,
			  struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

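/* Record the packet's encapsulation in the lookup tuple: a hardware VLAN
 * tag first, if present, then an in-payload VLAN or PPPoE session header.
 */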
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

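/* Fill a lookup tuple from an IPv4 packet. Fragments, IP options,
 * non-TCP/UDP protocols, expiring TTLs and truncated headers return -1 so
 * the packet stays on the regular forwarding path.
 */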
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple, u32 *hdrsize,
			    u32 offset)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += offset;

	switch (iph->protocol) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr	= iph->saddr;
	tuple->dst_v4.s_addr	= iph->daddr;
	tuple->src_port		= ports->source;
	tuple->dst_port		= ports->dest;
	tuple->l3proto		= AF_INET;
	tuple->l4proto		= iph->protocol;
	tuple->iifidx		= dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

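/* Only the neighbour and XFRM transmit paths rely on a cached route; for
 * those, validate the cached dst so stale flows can be torn down.
 */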
static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
		return true;

	return dst_check(tuple->dst_cache, tuple->dst_cookie);
}

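/* Hand the packet over to the xfrm output path. dst_output() consumes the
 * skb, hence NF_STOLEN.
 */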
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

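/* Return true if the encapsulated payload matches the given protocol,
 * bumping *offset past the VLAN or PPPoE header so the caller can parse
 * the inner packet.
 */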
static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

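/* Strip the encapsulation headers noted in the tuple (hardware VLAN tag,
 * in-payload VLAN, PPPoE session header) before forwarding.
 */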
static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

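/* Direct transmission: rebuild the Ethernet header from the cached
 * addresses and queue the packet on the cached output device.
 */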
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

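/* IPv4 fast path. On a flow table hit, apply NAT, decrement the TTL and
 * transmit the packet directly via the neighbour, xfrm or direct path;
 * lookup misses and torn-down flows fall through to the classic forwarding
 * path with NF_ACCEPT.
 */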
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	u32 hdrsize, offset = 0;
	unsigned int thoff, mtu;
	struct rtable *rt;
	struct iphdr *iph;
	__be32 nexthop;
	int ret;

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4) + offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return NF_ACCEPT;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

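/* IPv6 variants of the NAT helpers above. IPv6 has no header checksum, so
 * only the TCP/UDP checksums need fixing after an address rewrite.
 */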
static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

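/* IPv6 counterpart of nf_flow_tuple_ip(). Extension headers are not
 * parsed; anything but plain TCP or UDP in nexthdr stays on the slow path.
 */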
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple, u32 *hdrsize,
			      u32 offset)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	thoff = sizeof(*ip6h) + offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6		= ip6h->saddr;
	tuple->dst_v6		= ip6h->daddr;
	tuple->src_port		= ports->source;
	tuple->dst_port		= ports->dest;
	tuple->l3proto		= AF_INET6;
	tuple->l4proto		= ip6h->nexthdr;
	tuple->iifidx		= dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

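/* IPv6 fast path, mirroring nf_flow_offload_ip_hook(): on a hit, apply
 * NAT, decrement the hop limit and transmit via the neighbour, xfrm or
 * direct path.
 */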
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	unsigned int thoff, mtu;
	u32 hdrsize, offset = 0;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;
	int ret;

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	thoff = sizeof(*ip6h) + offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return NF_ACCEPT;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);