Lines Matching refs:skb

58 static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
60 static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr) in ipv6_select_ident() argument
72 int __ip6_local_out(struct sk_buff *skb) in __ip6_local_out() argument
76 len = skb->len - sizeof(struct ipv6hdr); in __ip6_local_out()
79 ipv6_hdr(skb)->payload_len = htons(len); in __ip6_local_out()
81 return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev, in __ip6_local_out()
85 int ip6_local_out(struct sk_buff *skb) in ip6_local_out() argument
89 err = __ip6_local_out(skb); in ip6_local_out()
91 err = dst_output(skb); in ip6_local_out()
97 static int ip6_output_finish(struct sk_buff *skb) in ip6_output_finish() argument
99 struct dst_entry *dst = skb->dst; in ip6_output_finish()
102 return neigh_hh_output(dst->hh, skb); in ip6_output_finish()
104 return dst->neighbour->output(skb); in ip6_output_finish()
108 kfree_skb(skb); in ip6_output_finish()
127 static int ip6_output2(struct sk_buff *skb) in ip6_output2() argument
129 struct dst_entry *dst = skb->dst; in ip6_output2()
132 skb->protocol = htons(ETH_P_IPV6); in ip6_output2()
133 skb->dev = dev; in ip6_output2()
135 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { in ip6_output2()
136 struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL; in ip6_output2()
137 struct inet6_dev *idev = ip6_dst_idev(skb->dst); in ip6_output2()
141 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || in ip6_output2()
142 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, in ip6_output2()
143 &ipv6_hdr(skb)->saddr))) { in ip6_output2()
144 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); in ip6_output2()
154 if (ipv6_hdr(skb)->hop_limit == 0) { in ip6_output2()
157 kfree_skb(skb); in ip6_output2()
165 return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev, in ip6_output2()
169 static inline int ip6_skb_dst_mtu(struct sk_buff *skb) in ip6_skb_dst_mtu() argument
171 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; in ip6_skb_dst_mtu()
174 skb->dst->dev->mtu : dst_mtu(skb->dst); in ip6_skb_dst_mtu()
177 int ip6_output(struct sk_buff *skb) in ip6_output() argument
179 struct inet6_dev *idev = ip6_dst_idev(skb->dst); in ip6_output()
181 IP6_INC_STATS(dev_net(skb->dst->dev), idev, in ip6_output()
183 kfree_skb(skb); in ip6_output()
187 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || in ip6_output()
188 dst_allfrag(skb->dst)) in ip6_output()
189 return ip6_fragment(skb, ip6_output2); in ip6_output()
191 return ip6_output2(skb); in ip6_output()
198 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, in ip6_xmit() argument
204 struct dst_entry *dst = skb->dst; in ip6_xmit()
207 int seg_len = skb->len; in ip6_xmit()
221 if (skb_headroom(skb) < head_room) { in ip6_xmit()
222 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); in ip6_xmit()
224 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), in ip6_xmit()
226 kfree_skb(skb); in ip6_xmit()
229 kfree_skb(skb); in ip6_xmit()
230 skb = skb2; in ip6_xmit()
232 skb_set_owner_w(skb, sk); in ip6_xmit()
235 ipv6_push_frag_opts(skb, opt, &proto); in ip6_xmit()
237 ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop); in ip6_xmit()
240 skb_push(skb, sizeof(struct ipv6hdr)); in ip6_xmit()
241 skb_reset_network_header(skb); in ip6_xmit()
242 hdr = ipv6_hdr(skb); in ip6_xmit()
246 skb->local_df = 1; in ip6_xmit()
273 skb->priority = sk->sk_priority; in ip6_xmit()
274 skb->mark = sk->sk_mark; in ip6_xmit()
277 if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { in ip6_xmit()
278 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), in ip6_xmit()
280 return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, in ip6_xmit()
286 skb->dev = dst->dev; in ip6_xmit()
287 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); in ip6_xmit()
288 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); in ip6_xmit()
289 kfree_skb(skb); in ip6_xmit()
302 int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, in ip6_nd_hdr() argument
310 skb->protocol = htons(ETH_P_IPV6); in ip6_nd_hdr()
311 skb->dev = dev; in ip6_nd_hdr()
315 skb_reset_network_header(skb); in ip6_nd_hdr()
316 skb_put(skb, sizeof(struct ipv6hdr)); in ip6_nd_hdr()
317 hdr = ipv6_hdr(skb); in ip6_nd_hdr()
331 static int ip6_call_ra_chain(struct sk_buff *skb, int sel) in ip6_call_ra_chain() argument
341 sk->sk_bound_dev_if == skb->dev->ifindex)) { in ip6_call_ra_chain()
343 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip6_call_ra_chain()
352 rawv6_rcv(last, skb); in ip6_call_ra_chain()
360 static int ip6_forward_proxy_check(struct sk_buff *skb) in ip6_forward_proxy_check() argument
362 struct ipv6hdr *hdr = ipv6_hdr(skb); in ip6_forward_proxy_check()
367 offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr); in ip6_forward_proxy_check()
376 if (!pskb_may_pull(skb, (skb_network_header(skb) + in ip6_forward_proxy_check()
377 offset + 1 - skb->data))) in ip6_forward_proxy_check()
380 icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset); in ip6_forward_proxy_check()
404 dst_link_failure(skb); in ip6_forward_proxy_check()
411 static inline int ip6_forward_finish(struct sk_buff *skb) in ip6_forward_finish() argument
413 return dst_output(skb); in ip6_forward_finish()
416 int ip6_forward(struct sk_buff *skb) in ip6_forward() argument
418 struct dst_entry *dst = skb->dst; in ip6_forward()
419 struct ipv6hdr *hdr = ipv6_hdr(skb); in ip6_forward()
420 struct inet6_skb_parm *opt = IP6CB(skb); in ip6_forward()
426 if (skb_warn_if_lro(skb)) in ip6_forward()
429 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { in ip6_forward()
434 skb_forward_csum(skb); in ip6_forward()
450 u8 *ptr = skb_network_header(skb) + opt->ra; in ip6_forward()
451 if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3])) in ip6_forward()
460 skb->dev = dst->dev; in ip6_forward()
461 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, in ip6_forward()
462 0, skb->dev); in ip6_forward()
466 kfree_skb(skb); in ip6_forward()
472 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { in ip6_forward()
473 int proxied = ip6_forward_proxy_check(skb); in ip6_forward()
475 return ip6_input(skb); in ip6_forward()
483 if (!xfrm6_route_forward(skb)) { in ip6_forward()
487 dst = skb->dst; in ip6_forward()
493 if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 && in ip6_forward()
494 !skb_sec_path(skb)) { in ip6_forward()
514 ndisc_send_redirect(skb, n, target); in ip6_forward()
523 icmpv6_send(skb, ICMPV6_DEST_UNREACH, in ip6_forward()
524 ICMPV6_NOT_NEIGHBOUR, 0, skb->dev); in ip6_forward()
529 if (skb->len > dst_mtu(dst)) { in ip6_forward()
531 skb->dev = dst->dev; in ip6_forward()
532 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev); in ip6_forward()
537 kfree_skb(skb); in ip6_forward()
541 if (skb_cow(skb, dst->dev->hard_header_len)) { in ip6_forward()
546 hdr = ipv6_hdr(skb); in ip6_forward()
553 return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dst->dev, in ip6_forward()
559 kfree_skb(skb); in ip6_forward()
584 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) in ip6_find_1stfragopt() argument
588 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); in ip6_find_1stfragopt()
589 unsigned int packet_len = skb->tail - skb->network_header; in ip6_find_1stfragopt()
591 *nexthdr = &ipv6_hdr(skb)->nexthdr; in ip6_find_1stfragopt()
604 if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) in ip6_find_1stfragopt()
616 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + in ip6_find_1stfragopt()
623 static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) in ip6_fragment() argument
626 struct rt6_info *rt = (struct rt6_info*)skb->dst; in ip6_fragment()
627 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; in ip6_fragment()
634 struct net *net = dev_net(skb->dst->dev); in ip6_fragment()
636 hlen = ip6_find_1stfragopt(skb, &prevhdr); in ip6_fragment()
639 mtu = ip6_skb_dst_mtu(skb); in ip6_fragment()
645 if (!skb->local_df) { in ip6_fragment()
646 skb->dev = skb->dst->dev; in ip6_fragment()
647 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); in ip6_fragment()
648 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), in ip6_fragment()
650 kfree_skb(skb); in ip6_fragment()
660 if (skb_shinfo(skb)->frag_list) { in ip6_fragment()
661 int first_len = skb_pagelen(skb); in ip6_fragment()
666 skb_cloned(skb)) in ip6_fragment()
669 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { in ip6_fragment()
681 if (skb->sk) { in ip6_fragment()
682 sock_hold(skb->sk); in ip6_fragment()
683 frag->sk = skb->sk; in ip6_fragment()
691 frag = skb_shinfo(skb)->frag_list; in ip6_fragment()
692 skb_shinfo(skb)->frag_list = NULL; in ip6_fragment()
696 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); in ip6_fragment()
698 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), in ip6_fragment()
703 __skb_pull(skb, hlen); in ip6_fragment()
704 fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); in ip6_fragment()
705 __skb_push(skb, hlen); in ip6_fragment()
706 skb_reset_network_header(skb); in ip6_fragment()
707 memcpy(skb_network_header(skb), tmp_hdr, hlen); in ip6_fragment()
709 ipv6_select_ident(skb, fh); in ip6_fragment()
715 first_len = skb_pagelen(skb); in ip6_fragment()
716 skb->data_len = first_len - skb_headlen(skb); in ip6_fragment()
717 skb->truesize -= truesizes; in ip6_fragment()
718 skb->len = first_len; in ip6_fragment()
719 ipv6_hdr(skb)->payload_len = htons(first_len - in ip6_fragment()
735 offset += skb->len - hlen - sizeof(struct frag_hdr); in ip6_fragment()
745 ip6_copy_metadata(frag, skb); in ip6_fragment()
748 err = output(skb); in ip6_fragment()
756 skb = frag; in ip6_fragment()
757 frag = skb->next; in ip6_fragment()
758 skb->next = NULL; in ip6_fragment()
771 skb = frag->next; in ip6_fragment()
773 frag = skb; in ip6_fragment()
783 left = skb->len - hlen; /* Space per frame */ in ip6_fragment()
811 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), in ip6_fragment()
821 ip6_copy_metadata(frag, skb); in ip6_fragment()
833 if (skb->sk) in ip6_fragment()
834 skb_set_owner_w(frag, skb->sk); in ip6_fragment()
839 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); in ip6_fragment()
847 ipv6_select_ident(skb, fh); in ip6_fragment()
855 if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len)) in ip6_fragment()
875 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), in ip6_fragment()
878 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), in ip6_fragment()
880 kfree_skb(skb); in ip6_fragment()
884 IP6_INC_STATS(net, ip6_dst_idev(skb->dst), in ip6_fragment()
886 kfree_skb(skb); in ip6_fragment()
1049 int odd, struct sk_buff *skb), in ip6_ufo_append_data() argument
1054 struct sk_buff *skb; in ip6_ufo_append_data() local
1061 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { in ip6_ufo_append_data()
1062 skb = sock_alloc_send_skb(sk, in ip6_ufo_append_data()
1065 if (skb == NULL) in ip6_ufo_append_data()
1069 skb_reserve(skb, hh_len); in ip6_ufo_append_data()
1072 skb_put(skb,fragheaderlen + transhdrlen); in ip6_ufo_append_data()
1075 skb_reset_network_header(skb); in ip6_ufo_append_data()
1078 skb->transport_header = skb->network_header + fragheaderlen; in ip6_ufo_append_data()
1080 skb->ip_summed = CHECKSUM_PARTIAL; in ip6_ufo_append_data()
1081 skb->csum = 0; in ip6_ufo_append_data()
1085 err = skb_append_datato_frags(sk,skb, getfrag, from, in ip6_ufo_append_data()
1091 skb_shinfo(skb)->gso_size = mtu - fragheaderlen - in ip6_ufo_append_data()
1093 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; in ip6_ufo_append_data()
1094 ipv6_select_ident(skb, &fhdr); in ip6_ufo_append_data()
1095 skb_shinfo(skb)->ip6_frag_id = fhdr.identification; in ip6_ufo_append_data()
1096 __skb_queue_tail(&sk->sk_write_queue, skb); in ip6_ufo_append_data()
1103 kfree_skb(skb); in ip6_ufo_append_data()
1121 int offset, int len, int odd, struct sk_buff *skb), in ip6_append_data() argument
1128 struct sk_buff *skb; in ip6_append_data() local
1249 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) in ip6_append_data()
1254 …= (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len; in ip6_append_data()
1256 copy = maxfraglen - skb->len; in ip6_append_data()
1266 skb_prev = skb; in ip6_append_data()
1305 skb = sock_alloc_send_skb(sk, in ip6_append_data()
1309 skb = NULL; in ip6_append_data()
1312 skb = sock_wmalloc(sk, in ip6_append_data()
1315 if (unlikely(skb == NULL)) in ip6_append_data()
1318 if (skb == NULL) in ip6_append_data()
1323 skb->ip_summed = csummode; in ip6_append_data()
1324 skb->csum = 0; in ip6_append_data()
1326 skb_reserve(skb, hh_len+sizeof(struct frag_hdr)); in ip6_append_data()
1331 data = skb_put(skb, fraglen); in ip6_append_data()
1332 skb_set_network_header(skb, exthdrlen); in ip6_append_data()
1334 skb->transport_header = (skb->network_header + in ip6_append_data()
1337 skb->csum = skb_copy_and_csum_bits( in ip6_append_data()
1341 skb->csum); in ip6_append_data()
1348 kfree_skb(skb); in ip6_append_data()
1350 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { in ip6_append_data()
1352 kfree_skb(skb); in ip6_append_data()
1365 __skb_queue_tail(&sk->sk_write_queue, skb); in ip6_append_data()
1375 off = skb->len; in ip6_append_data()
1376 if (getfrag(from, skb_put(skb, copy), in ip6_append_data()
1377 offset, copy, off, skb) < 0) { in ip6_append_data()
1378 __skb_trim(skb, off); in ip6_append_data()
1383 int i = skb_shinfo(skb)->nr_frags; in ip6_append_data()
1384 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1]; in ip6_append_data()
1398 skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0); in ip6_append_data()
1399 frag = &skb_shinfo(skb)->frags[i]; in ip6_append_data()
1412 skb_fill_page_desc(skb, i, page, 0, 0); in ip6_append_data()
1413 frag = &skb_shinfo(skb)->frags[i]; in ip6_append_data()
1418 …ag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) { in ip6_append_data()
1424 skb->len += copy; in ip6_append_data()
1425 skb->data_len += copy; in ip6_append_data()
1426 skb->truesize += copy; in ip6_append_data()
1460 struct sk_buff *skb, *tmp_skb; in ip6_push_pending_frames() local
1473 if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL) in ip6_push_pending_frames()
1475 tail_skb = &(skb_shinfo(skb)->frag_list); in ip6_push_pending_frames()
1478 if (skb->data < skb_network_header(skb)) in ip6_push_pending_frames()
1479 __skb_pull(skb, skb_network_offset(skb)); in ip6_push_pending_frames()
1481 __skb_pull(tmp_skb, skb_network_header_len(skb)); in ip6_push_pending_frames()
1484 skb->len += tmp_skb->len; in ip6_push_pending_frames()
1485 skb->data_len += tmp_skb->len; in ip6_push_pending_frames()
1486 skb->truesize += tmp_skb->truesize; in ip6_push_pending_frames()
1494 skb->local_df = 1; in ip6_push_pending_frames()
1497 __skb_pull(skb, skb_network_header_len(skb)); in ip6_push_pending_frames()
1499 ipv6_push_frag_opts(skb, opt, &proto); in ip6_push_pending_frames()
1501 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst); in ip6_push_pending_frames()
1503 skb_push(skb, sizeof(struct ipv6hdr)); in ip6_push_pending_frames()
1504 skb_reset_network_header(skb); in ip6_push_pending_frames()
1505 hdr = ipv6_hdr(skb); in ip6_push_pending_frames()
1515 skb->priority = sk->sk_priority; in ip6_push_pending_frames()
1516 skb->mark = sk->sk_mark; in ip6_push_pending_frames()
1518 skb->dst = dst_clone(&rt->u.dst); in ip6_push_pending_frames()
1521 struct inet6_dev *idev = ip6_dst_idev(skb->dst); in ip6_push_pending_frames()
1523 ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type); in ip6_push_pending_frames()
1527 err = ip6_local_out(skb); in ip6_push_pending_frames()
1544 struct sk_buff *skb; in ip6_flush_pending_frames() local
1546 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { in ip6_flush_pending_frames()
1547 if (skb->dst) in ip6_flush_pending_frames()
1548 IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb->dst), in ip6_flush_pending_frames()
1550 kfree_skb(skb); in ip6_flush_pending_frames()
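
The matches at lines 187-189 and 623-886 above cover the decision to fragment on output and the per-fragment sizing done inside ip6_fragment(). As a hedged, standalone illustration only (plain userspace C, not kernel code; every name below is hypothetical), the same arithmetic can be sketched as:

    /* Hedged sketch, not kernel code: mirrors the test at lines 187-189
     * ((skb->len > mtu && !skb_is_gso(skb)) || dst_allfrag(dst)) and the
     * 8-byte-aligned per-fragment sizing used by the fragmentation path.
     * All identifiers here are hypothetical. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool needs_fragmentation(unsigned int pkt_len, unsigned int mtu,
                                    bool is_gso, bool allfrag)
    {
            return (pkt_len > mtu && !is_gso) || allfrag;
    }

    /* Largest per-fragment payload that fits in the MTU after the IPv6
     * header (hlen) and the fragment header, rounded down to a multiple
     * of 8 as required by the fragment-offset encoding. */
    static unsigned int frag_payload(unsigned int mtu, unsigned int hlen,
                                     unsigned int fraghdr_len)
    {
            return (mtu - hlen - fraghdr_len) & ~7U;
    }

    int main(void)
    {
            unsigned int mtu = 1280, hlen = 40, len = 3000, offset;

            if (!needs_fragmentation(len, mtu, false, false))
                    return 0;

            for (offset = 0; offset < len - hlen; offset += frag_payload(mtu, hlen, 8))
                    printf("fragment payload starts at offset %u\n", offset);
            return 0;
    }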
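
Similarly, the ip6_forward() matches around lines 460-537 show the two error paths hit before a packet is forwarded: hop limit exhausted (ICMPV6_TIME_EXCEED) and packet larger than the path MTU (ICMPV6_PKT_TOOBIG). A minimal, hypothetical userspace mirror of that ordering, for illustration only:

    /* Hedged sketch, not kernel code: mirrors the order of the hop-limit
     * and MTU checks visible in the ip6_forward() matches above. The
     * verdict names are hypothetical. */
    #include <stdio.h>

    enum fwd_verdict { FWD_OK, FWD_TIME_EXCEEDED, FWD_PKT_TOO_BIG };

    static enum fwd_verdict forward_check(unsigned int hop_limit,
                                          unsigned int pkt_len,
                                          unsigned int dst_mtu)
    {
            if (hop_limit <= 1)      /* would drop and send ICMPV6_TIME_EXCEED */
                    return FWD_TIME_EXCEEDED;
            if (pkt_len > dst_mtu)   /* would drop and send ICMPV6_PKT_TOOBIG */
                    return FWD_PKT_TOO_BIG;
            return FWD_OK;           /* hop limit decremented, packet forwarded */
    }

    int main(void)
    {
            /* 1400-byte packet toward a 1280-byte MTU: prints 2 (too big). */
            printf("%d\n", forward_check(64, 1400, 1280));
            return 0;
    }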