
Searched refs:skb2 (Results 1 – 25 of 29) sorted by relevance


/net/sched/
act_mirred.c  151 struct sk_buff *skb2 = NULL; in tcf_mirred() local
164 if (skb2 != NULL) in tcf_mirred()
165 kfree_skb(skb2); in tcf_mirred()
176 skb2 = skb_act_clone(skb, GFP_ATOMIC); in tcf_mirred()
177 if (skb2 == NULL) in tcf_mirred()
187 m->tcf_bstats.bytes += qdisc_pkt_len(skb2); in tcf_mirred()
191 skb_push(skb2, skb2->dev->hard_header_len); in tcf_mirred()
195 skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at); in tcf_mirred()
197 skb2->dev = dev; in tcf_mirred()
198 skb2->iif = skb->dev->ifindex; in tcf_mirred()
[all …]
sch_netem.c  159 struct sk_buff *skb2; in netem_enqueue() local
186 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { in netem_enqueue()
191 qdisc_enqueue_root(skb2, rootq); in netem_enqueue()
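
Most of these hits follow the same duplication pattern: the packet is copied with skb_clone() under GFP_ATOMIC before being handed to a second consumer, and the copy is silently skipped if the allocation fails. A minimal sketch of that pattern, not taken from any file listed here (deliver_copy() is a hypothetical consumer used only for illustration):

    /* Sketch of the clone-and-dispatch pattern common to these results. */
    #include <linux/skbuff.h>

    static void mirror_to_second_consumer(struct sk_buff *skb,
                                          void (*deliver_copy)(struct sk_buff *))
    {
            /* GFP_ATOMIC: these call sites run in packet-processing context */
            struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

            if (skb2)               /* on failure the duplicate is simply dropped */
                    deliver_copy(skb2);
    }
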
/net/bridge/
br_input.c  44 struct sk_buff *skb2; in br_handle_frame_finish() local
57 skb2 = NULL; in br_handle_frame_finish()
60 skb2 = skb; in br_handle_frame_finish()
66 skb2 = skb; in br_handle_frame_finish()
68 skb2 = skb; in br_handle_frame_finish()
73 if (skb2 == skb) in br_handle_frame_finish()
74 skb2 = skb_clone(skb, GFP_ATOMIC); in br_handle_frame_finish()
76 if (skb2) in br_handle_frame_finish()
77 br_pass_frame_up(br, skb2); in br_handle_frame_finish()
br_forward.c  118 struct sk_buff *skb2; in br_flood() local
120 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) { in br_flood()
126 __packet_hook(prev, skb2); in br_flood()
/net/ipv6/
ip6_tunnel.c  498 struct sk_buff *skb2; in ip4ip6_err() local
531 skb2 = skb_clone(skb, GFP_ATOMIC); in ip4ip6_err()
532 if (!skb2) in ip4ip6_err()
535 dst_release(skb2->dst); in ip4ip6_err()
536 skb2->dst = NULL; in ip4ip6_err()
537 skb_pull(skb2, offset); in ip4ip6_err()
538 skb_reset_network_header(skb2); in ip4ip6_err()
539 eiph = ip_hdr(skb2); in ip4ip6_err()
549 skb2->dev = rt->u.dst.dev; in ip4ip6_err()
563 skb2->dst = (struct dst_entry *)rt; in ip4ip6_err()
[all …]
ip6_input.c  264 struct sk_buff *skb2; in ip6_mc_input() local
313 skb2 = skb_clone(skb, GFP_ATOMIC); in ip6_mc_input()
315 skb2 = skb; in ip6_mc_input()
319 if (skb2) { in ip6_mc_input()
320 ip6_mr_input(skb2); in ip6_mc_input()
ip6mr.c  1627 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip6_mr_forward() local
1628 if (skb2) in ip6_mr_forward()
1629 ip6mr_forward2(skb2, cache, psend); in ip6_mr_forward()
1731 struct sk_buff *skb2; in ip6mr_get_route() local
1748 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC); in ip6mr_get_route()
1749 if (!skb2) { in ip6mr_get_route()
1754 skb_reset_transport_header(skb2); in ip6mr_get_route()
1756 skb_put(skb2, sizeof(struct ipv6hdr)); in ip6mr_get_route()
1757 skb_reset_network_header(skb2); in ip6mr_get_route()
1759 iph = ipv6_hdr(skb2); in ip6mr_get_route()
[all …]
ip6_output.c  222 struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); in ip6_xmit() local
223 if (skb2 == NULL) { in ip6_xmit()
230 skb = skb2; in ip6_xmit()
343 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip6_call_ra_chain() local
344 if (skb2) in ip6_call_ra_chain()
345 rawv6_rcv(last, skb2); in ip6_call_ra_chain()
/net/decnet/netfilter/
dn_rtmsg.c  66 struct sk_buff *skb2; in dnrmg_send_peer() local
82 skb2 = dnrmg_build_message(skb, &status); in dnrmg_send_peer()
83 if (skb2 == NULL) in dnrmg_send_peer()
85 NETLINK_CB(skb2).dst_group = group; in dnrmg_send_peer()
86 netlink_broadcast(dnrmg, skb2, 0, group, GFP_ATOMIC); in dnrmg_send_peer()
/net/decnet/
dn_nsp_out.c  216 struct sk_buff *skb2; in dn_nsp_clone_and_send() local
219 if ((skb2 = skb_clone(skb, gfp)) != NULL) { in dn_nsp_clone_and_send()
223 skb2->sk = skb->sk; in dn_nsp_clone_and_send()
224 dn_nsp_send(skb2); in dn_nsp_clone_and_send()
385 struct sk_buff *skb2, *list, *ack = NULL; in dn_nsp_check_xmit_queue() local
393 skb2 = q->next; in dn_nsp_check_xmit_queue()
395 while(list != skb2) { in dn_nsp_check_xmit_queue()
396 struct dn_skb_cb *cb2 = DN_SKB_CB(skb2); in dn_nsp_check_xmit_queue()
399 ack = skb2; in dn_nsp_check_xmit_queue()
403 skb2 = skb2->next; in dn_nsp_check_xmit_queue()
dn_neigh.c  237 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); in dn_long_output() local
238 if (skb2 == NULL) { in dn_long_output()
245 skb = skb2; in dn_long_output()
283 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); in dn_short_output() local
284 if (skb2 == NULL) { in dn_short_output()
291 skb = skb2; in dn_short_output()
325 struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); in dn_phase3_output() local
326 if (skb2 == NULL) { in dn_phase3_output()
333 skb = skb2; in dn_phase3_output()
/net/ipv4/
ip_output.c  191 struct sk_buff *skb2; in ip_finish_output2() local
193 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); in ip_finish_output2()
194 if (skb2 == NULL) { in ip_finish_output2()
199 skb_set_owner_w(skb2, skb->sk); in ip_finish_output2()
201 skb = skb2; in ip_finish_output2()
439 struct sk_buff *skb2; in ip_fragment() local
605 if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) { in ip_fragment()
615 ip_copy_metadata(skb2, skb); in ip_fragment()
616 skb_reserve(skb2, ll_rs); in ip_fragment()
617 skb_put(skb2, len + hlen); in ip_fragment()
[all …]
ipmr.c  1376 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_mr_forward() local
1377 if (skb2) in ip_mr_forward()
1378 ipmr_queue_xmit(skb2, cache, psend); in ip_mr_forward()
1385 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_mr_forward() local
1386 if (skb2) in ip_mr_forward()
1387 ipmr_queue_xmit(skb2, cache, psend); in ip_mr_forward()
1448 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_mr_input() local
1450 if (skb2 == NULL) { in ip_mr_input()
1454 skb = skb2; in ip_mr_input()
1626 struct sk_buff *skb2; in ipmr_get_route() local
[all …]
ip_input.c  175 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_call_ra_chain() local
176 if (skb2) in ip_call_ra_chain()
177 raw_rcv(last, skb2); in ip_call_ra_chain()
/net/atm/
lec.c  143 struct sk_buff *skb2; in lec_handle_bridge() local
146 skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); in lec_handle_bridge()
147 if (skb2 == NULL) in lec_handle_bridge()
149 skb2->len = sizeof(struct atmlec_msg); in lec_handle_bridge()
150 mesg = (struct atmlec_msg *)skb2->data; in lec_handle_bridge()
156 atm_force_charge(priv->lecd, skb2->truesize); in lec_handle_bridge()
158 skb_queue_tail(&sk->sk_receive_queue, skb2); in lec_handle_bridge()
159 sk->sk_data_ready(sk, skb2->len); in lec_handle_bridge()
254 struct sk_buff *skb2; in lec_start_xmit() local
290 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); in lec_start_xmit()
[all …]
br2684.c  158 struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom); in br2684_xmit_vcc() local
161 if (skb2 == NULL) { in br2684_xmit_vcc()
165 skb = skb2; in br2684_xmit_vcc()
/net/ipx/
af_ipx.c  471 struct sk_buff *skb1 = NULL, *skb2 = NULL; in ipxitf_demux_socket() local
529 skb2 = skb_clone(skb1, GFP_ATOMIC); in ipxitf_demux_socket()
531 skb2 = skb1; in ipxitf_demux_socket()
536 if (!skb2) in ipxitf_demux_socket()
540 ipxitf_def_skb_handler(sock2, skb2); in ipxitf_demux_socket()
556 struct sk_buff *skb2; in ipxitf_adjust_skbuff() local
567 skb2 = alloc_skb(len, GFP_ATOMIC); in ipxitf_adjust_skbuff()
568 if (skb2) { in ipxitf_adjust_skbuff()
569 skb_reserve(skb2, out_offset); in ipxitf_adjust_skbuff()
570 skb_reset_network_header(skb2); in ipxitf_adjust_skbuff()
[all …]
/net/netlink/
af_netlink.c  956 struct sk_buff *skb, *skb2; member
981 if (p->skb2 == NULL) { in do_one_broadcast()
983 p->skb2 = skb_clone(p->skb, p->allocation); in do_one_broadcast()
985 p->skb2 = skb_get(p->skb); in do_one_broadcast()
990 skb_orphan(p->skb2); in do_one_broadcast()
993 if (p->skb2 == NULL) { in do_one_broadcast()
997 } else if (sk_filter(sk, p->skb2)) { in do_one_broadcast()
998 kfree_skb(p->skb2); in do_one_broadcast()
999 p->skb2 = NULL; in do_one_broadcast()
1000 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) { in do_one_broadcast()
[all …]
/net/netfilter/
nf_conntrack_netlink.c  770 struct sk_buff *skb2 = NULL; in ctnetlink_get_conntrack() local
796 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); in ctnetlink_get_conntrack()
797 if (!skb2) { in ctnetlink_get_conntrack()
803 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, in ctnetlink_get_conntrack()
810 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); in ctnetlink_get_conntrack()
817 kfree_skb(skb2); in ctnetlink_get_conntrack()
1603 struct sk_buff *skb2; in ctnetlink_get_expect() local
1635 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); in ctnetlink_get_expect()
1636 if (!skb2) in ctnetlink_get_expect()
1640 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, in ctnetlink_get_expect()
[all …]
/net/core/
skbuff.c  821 struct sk_buff *skb2; in skb_realloc_headroom() local
825 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
827 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
828 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, in skb_realloc_headroom()
830 kfree_skb(skb2); in skb_realloc_headroom()
831 skb2 = NULL; in skb_realloc_headroom()
834 return skb2; in skb_realloc_headroom()
2828 struct sk_buff *skb2; in skb_cow_data() local
2832 skb2 = skb_copy(skb1, GFP_ATOMIC); in skb_cow_data()
2834 skb2 = skb_copy_expand(skb1, in skb_cow_data()
[all …]
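
Several of the callers listed here (ip6_xmit(), dn_long_output(), ip_finish_output2(), br2684_xmit_vcc(), lec_start_xmit()) use skb_realloc_headroom() in the same way: build a copy with enough headroom, drop the packet if the copy fails, otherwise free the original and continue with the copy. A rough sketch of that caller pattern (ensure_headroom() and needed_headroom are illustrative names, not from the files above):

    #include <linux/skbuff.h>

    /* Return an skb with at least needed_headroom bytes of headroom, or NULL
     * if the reallocation failed; the original skb is consumed whenever a
     * copy is made. */
    static struct sk_buff *ensure_headroom(struct sk_buff *skb,
                                           unsigned int needed_headroom)
    {
            if (skb_headroom(skb) < needed_headroom) {
                    struct sk_buff *skb2 = skb_realloc_headroom(skb, needed_headroom);

                    if (!skb2) {
                            kfree_skb(skb);         /* copy failed: drop the packet */
                            return NULL;
                    }
                    if (skb->sk)
                            skb_set_owner_w(skb2, skb->sk); /* as ip_finish_output2() does */
                    kfree_skb(skb);                 /* original no longer needed */
                    skb = skb2;
            }
            return skb;
    }
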
dev.c  1349 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC); in dev_queue_xmit_nit() local
1350 if (!skb2) in dev_queue_xmit_nit()
1357 skb_reset_mac_header(skb2); in dev_queue_xmit_nit()
1359 if (skb_network_header(skb2) < skb2->data || in dev_queue_xmit_nit()
1360 skb2->network_header > skb2->tail) { in dev_queue_xmit_nit()
1364 skb2->protocol, dev->name); in dev_queue_xmit_nit()
1365 skb_reset_network_header(skb2); in dev_queue_xmit_nit()
1368 skb2->transport_header = skb2->network_header; in dev_queue_xmit_nit()
1369 skb2->pkt_type = PACKET_OUTGOING; in dev_queue_xmit_nit()
1370 ptype->func(skb2, skb->dev, ptype, skb->dev); in dev_queue_xmit_nit()
/net/mac80211/
main.c  473 struct sk_buff *skb2; in ieee80211_tx_status() local
646 skb2 = skb_clone(skb, GFP_ATOMIC); in ieee80211_tx_status()
647 if (skb2) { in ieee80211_tx_status()
648 skb2->dev = prev_dev; in ieee80211_tx_status()
649 netif_rx(skb2); in ieee80211_tx_status()
rx.c  234 struct sk_buff *skb, *skb2; in ieee80211_rx_monitor() local
317 skb2 = skb_clone(skb, GFP_ATOMIC); in ieee80211_rx_monitor()
318 if (skb2) { in ieee80211_rx_monitor()
319 skb2->dev = prev_dev; in ieee80211_rx_monitor()
320 netif_rx(skb2); in ieee80211_rx_monitor()
1693 struct sk_buff *skb = rx->skb, *skb2; in ieee80211_rx_cooked_monitor() local
1736 skb2 = skb_clone(skb, GFP_ATOMIC); in ieee80211_rx_cooked_monitor()
1737 if (skb2) { in ieee80211_rx_cooked_monitor()
1738 skb2->dev = prev_dev; in ieee80211_rx_cooked_monitor()
1739 netif_rx(skb2); in ieee80211_rx_cooked_monitor()
/net/key/
af_key.c  230 static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, in pfkey_broadcast_one() argument
236 if (*skb2 == NULL) { in pfkey_broadcast_one()
238 *skb2 = skb_clone(skb, allocation); in pfkey_broadcast_one()
240 *skb2 = skb; in pfkey_broadcast_one()
244 if (*skb2 != NULL) { in pfkey_broadcast_one()
246 skb_orphan(*skb2); in pfkey_broadcast_one()
247 skb_set_owner_r(*skb2, sk); in pfkey_broadcast_one()
248 skb_queue_tail(&sk->sk_receive_queue, *skb2); in pfkey_broadcast_one()
249 sk->sk_data_ready(sk, (*skb2)->len); in pfkey_broadcast_one()
250 *skb2 = NULL; in pfkey_broadcast_one()
[all …]
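
pfkey_broadcast_one() above and do_one_broadcast() in af_netlink.c share another variant of the pattern: the clone lives in a pointer that outlasts a single receiver, is created lazily, and is kept for reuse if the current receiver does not consume it. A hedged sketch of that idea (broadcast_one() and deliver_to() are illustrative names, not the kernel functions themselves):

    #include <linux/skbuff.h>

    /* Hand a copy of skb to one receiver; *pskb2 caches the clone so an
     * unconsumed copy can be reused for the next receiver in the loop. */
    static void broadcast_one(struct sk_buff *skb, struct sk_buff **pskb2,
                              bool (*deliver_to)(struct sk_buff *))
    {
            if (*pskb2 == NULL)
                    *pskb2 = skb_clone(skb, GFP_ATOMIC);
            if (*pskb2 == NULL)
                    return;                 /* allocation failed: skip this receiver */
            if (deliver_to(*pskb2))
                    *pskb2 = NULL;          /* consumed; the next receiver clones again */
            /* otherwise keep the clone and offer it to the next receiver */
    }
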
/net/llc/
llc_conn.c  359 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in llc_conn_send_pdus() local
362 if (!skb2) in llc_conn_send_pdus()
364 skb = skb2; in llc_conn_send_pdus()
