Searched refs:nskb (Results 1 – 25 of 55) sorted by relevance

/net/ipv4/netfilter/
nf_reject_ipv4.c
45 struct sk_buff *nskb; in nf_reject_skb_v4_tcp_reset() local
56 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + in nf_reject_skb_v4_tcp_reset()
58 if (!nskb) in nf_reject_skb_v4_tcp_reset()
61 nskb->dev = (struct net_device *)dev; in nf_reject_skb_v4_tcp_reset()
63 skb_reserve(nskb, LL_MAX_HEADER); in nf_reject_skb_v4_tcp_reset()
64 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, in nf_reject_skb_v4_tcp_reset()
66 nf_reject_ip_tcphdr_put(nskb, oldskb, oth); in nf_reject_skb_v4_tcp_reset()
67 niph->tot_len = htons(nskb->len); in nf_reject_skb_v4_tcp_reset()
70 return nskb; in nf_reject_skb_v4_tcp_reset()
79 struct sk_buff *nskb; in nf_reject_skb_v4_unreach() local
[all …]
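
The nf_reject_ipv4.c hits above share one shape: allocate a fresh nskb sized for the reply, reserve LL_MAX_HEADER of headroom, append the new headers, then fix up the IP total length once the buffer is complete. A condensed, illustrative sketch of that shape (not kernel source; build_reset_skb() is a hypothetical name):

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/ip.h>
    #include <linux/tcp.h>

    static struct sk_buff *build_reset_skb(struct net_device *dev)
    {
            struct sk_buff *nskb;
            struct iphdr *niph;
            struct tcphdr *nth;

            /* Room for the largest link-layer header plus IP + TCP. */
            nskb = alloc_skb(LL_MAX_HEADER + sizeof(*niph) + sizeof(*nth),
                             GFP_ATOMIC);
            if (!nskb)
                    return NULL;

            nskb->dev = dev;
            skb_reserve(nskb, LL_MAX_HEADER);       /* headroom for the L2 header */

            niph = skb_put(nskb, sizeof(*niph));    /* append the IP header */
            nth  = skb_put(nskb, sizeof(*nth));     /* append the TCP header */
            /* ... fill niph/nth from the packet being rejected ... */
            nth->rst = 1;                           /* e.g. mark the reply as a reset */

            niph->tot_len = htons(nskb->len);       /* length is only known now */
            return nskb;
    }
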
/net/llc/
llc_c_ac.c
202 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); in llc_conn_ac_send_disc_cmd_p_set_x() local
204 if (nskb) { in llc_conn_ac_send_disc_cmd_p_set_x()
207 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, in llc_conn_ac_send_disc_cmd_p_set_x()
209 llc_pdu_init_as_disc_cmd(nskb, 1); in llc_conn_ac_send_disc_cmd_p_set_x()
210 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); in llc_conn_ac_send_disc_cmd_p_set_x()
213 llc_conn_send_pdu(sk, nskb); in llc_conn_ac_send_disc_cmd_p_set_x()
219 kfree_skb(nskb); in llc_conn_ac_send_disc_cmd_p_set_x()
227 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); in llc_conn_ac_send_dm_rsp_f_set_p() local
229 if (nskb) { in llc_conn_ac_send_dm_rsp_f_set_p()
234 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, in llc_conn_ac_send_dm_rsp_f_set_p()
[all …]
llc_station.c
52 struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, in llc_station_ac_send_xid_r() local
55 if (!nskb) in llc_station_ac_send_xid_r()
59 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); in llc_station_ac_send_xid_r()
60 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127); in llc_station_ac_send_xid_r()
61 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); in llc_station_ac_send_xid_r()
64 dev_queue_xmit(nskb); in llc_station_ac_send_xid_r()
68 kfree_skb(nskb); in llc_station_ac_send_xid_r()
77 struct sk_buff *nskb; in llc_station_ac_send_test_r() local
84 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); in llc_station_ac_send_test_r()
86 if (!nskb) in llc_station_ac_send_test_r()
[all …]
llc_s_ac.c
105 struct sk_buff *nskb; in llc_sap_action_send_xid_r() local
110 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, in llc_sap_action_send_xid_r()
112 if (!nskb) in llc_sap_action_send_xid_r()
114 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, in llc_sap_action_send_xid_r()
116 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0); in llc_sap_action_send_xid_r()
117 rc = llc_mac_hdr_init(nskb, mac_sa, mac_da); in llc_sap_action_send_xid_r()
119 rc = dev_queue_xmit(nskb); in llc_sap_action_send_xid_r()
152 struct sk_buff *nskb; in llc_sap_action_send_test_r() local
165 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); in llc_sap_action_send_test_r()
166 if (!nskb) in llc_sap_action_send_test_r()
[all …]
/net/ipv6/netfilter/
nf_reject_ipv6.c
64 struct sk_buff *nskb; in nf_reject_skb_v6_tcp_reset() local
77 nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + in nf_reject_skb_v6_tcp_reset()
79 if (!nskb) in nf_reject_skb_v6_tcp_reset()
82 nskb->dev = (struct net_device *)dev; in nf_reject_skb_v6_tcp_reset()
84 skb_reserve(nskb, LL_MAX_HEADER); in nf_reject_skb_v6_tcp_reset()
85 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, in nf_reject_skb_v6_tcp_reset()
87 nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen); in nf_reject_skb_v6_tcp_reset()
88 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); in nf_reject_skb_v6_tcp_reset()
90 return nskb; in nf_reject_skb_v6_tcp_reset()
99 struct sk_buff *nskb; in nf_reject_skb_v6_unreach() local
[all …]
/net/bridge/netfilter/
nft_reject_bridge.c
24 struct sk_buff *nskb) in nft_reject_br_push_etherhdr() argument
28 eth = skb_push(nskb, ETH_HLEN); in nft_reject_br_push_etherhdr()
29 skb_reset_mac_header(nskb); in nft_reject_br_push_etherhdr()
33 skb_pull(nskb, ETH_HLEN); in nft_reject_br_push_etherhdr()
38 __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); in nft_reject_br_push_etherhdr()
50 struct sk_buff *nskb; in nft_reject_br_send_v4_tcp_reset() local
52 nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, NULL, hook); in nft_reject_br_send_v4_tcp_reset()
53 if (!nskb) in nft_reject_br_send_v4_tcp_reset()
56 nft_reject_br_push_etherhdr(oldskb, nskb); in nft_reject_br_send_v4_tcp_reset()
58 br_forward(br_port_get_rcu(dev), nskb, false, true); in nft_reject_br_send_v4_tcp_reset()
[all …]
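
In nft_reject_bridge.c the generated reject nskb gets its Ethernet header rebuilt by hand before being forwarded on the bridge port, with source and destination taken from the frame being rejected. A rough sketch of that step (illustrative only; restore_ether_header() is a hypothetical name):

    #include <linux/skbuff.h>
    #include <linux/etherdevice.h>

    static void restore_ether_header(const struct sk_buff *oldskb,
                                     struct sk_buff *nskb)
    {
            struct ethhdr *eth;

            /* Prepend ETH_HLEN bytes and mark them as the MAC header. */
            eth = skb_push(nskb, ETH_HLEN);
            skb_reset_mac_header(nskb);

            /* The reply goes back the way the original frame came in. */
            ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
            ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
            eth->h_proto = eth_hdr(oldskb)->h_proto;
    }
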
/net/netfilter/
nf_synproxy_core.c
419 const struct sk_buff *skb, struct sk_buff *nskb, in synproxy_send_tcp() argument
425 nskb->ip_summed = CHECKSUM_PARTIAL; in synproxy_send_tcp()
426 nskb->csum_start = (unsigned char *)nth - nskb->head; in synproxy_send_tcp()
427 nskb->csum_offset = offsetof(struct tcphdr, check); in synproxy_send_tcp()
429 skb_dst_set_noref(nskb, skb_dst(skb)); in synproxy_send_tcp()
430 nskb->protocol = htons(ETH_P_IP); in synproxy_send_tcp()
431 if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) in synproxy_send_tcp()
435 nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo); in synproxy_send_tcp()
439 ip_local_out(net, nskb->sk, nskb); in synproxy_send_tcp()
443 kfree_skb(nskb); in synproxy_send_tcp()
[all …]
nft_reject_netdev.c
19 static void nft_reject_queue_xmit(struct sk_buff *nskb, struct sk_buff *oldskb) in nft_reject_queue_xmit() argument
21 dev_hard_header(nskb, nskb->dev, ntohs(oldskb->protocol), in nft_reject_queue_xmit()
23 nskb->len); in nft_reject_queue_xmit()
24 dev_queue_xmit(nskb); in nft_reject_queue_xmit()
32 struct sk_buff *nskb; in nft_reject_netdev_send_v4_tcp_reset() local
34 nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook); in nft_reject_netdev_send_v4_tcp_reset()
35 if (!nskb) in nft_reject_netdev_send_v4_tcp_reset()
38 nft_reject_queue_xmit(nskb, oldskb); in nft_reject_netdev_send_v4_tcp_reset()
46 struct sk_buff *nskb; in nft_reject_netdev_send_v4_unreach() local
48 nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code); in nft_reject_netdev_send_v4_unreach()
[all …]
/net/tls/
tls_device_fallback.c
207 static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) in complete_skb() argument
212 skb_copy_header(nskb, skb); in complete_skb()
214 skb_put(nskb, skb->len); in complete_skb()
215 memcpy(nskb->data, skb->data, headln); in complete_skb()
217 nskb->destructor = skb->destructor; in complete_skb()
218 nskb->sk = sk; in complete_skb()
222 update_chksum(nskb, headln); in complete_skb()
225 if (nskb->destructor == sock_efree) in complete_skb()
228 delta = nskb->truesize - skb->truesize; in complete_skb()
306 struct sk_buff *nskb, in fill_sg_out() argument
[all …]
tls_strp.c
283 struct sk_buff *nskb, *first, *last; in tls_strp_copyin_skb() local
294 nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk); in tls_strp_copyin_skb()
295 if (!nskb) in tls_strp_copyin_skb()
300 shinfo->frag_list = nskb; in tls_strp_copyin_skb()
301 nskb->prev = nskb; in tls_strp_copyin_skb()
305 last->next = nskb; in tls_strp_copyin_skb()
306 first->prev = nskb; in tls_strp_copyin_skb()
324 __pskb_trim(nskb, nskb->len - over); in tls_strp_copyin_skb()
/net/bluetooth/cmtp/
core.c
109 struct sk_buff *skb = session->reassembly[id], *nskb; in cmtp_add_msgpart() local
116 nskb = alloc_skb(size, GFP_ATOMIC); in cmtp_add_msgpart()
117 if (!nskb) { in cmtp_add_msgpart()
123 skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len); in cmtp_add_msgpart()
125 skb_put_data(nskb, buf, count); in cmtp_add_msgpart()
127 session->reassembly[id] = nskb; in cmtp_add_msgpart()
211 struct sk_buff *skb, *nskb; in cmtp_process_transmit() local
217 nskb = alloc_skb(session->mtu, GFP_ATOMIC); in cmtp_process_transmit()
218 if (!nskb) { in cmtp_process_transmit()
226 tail = session->mtu - nskb->len; in cmtp_process_transmit()
[all …]
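
cmtp_add_msgpart() grows its reassembly buffer by allocating a larger nskb, copying the data collected so far and appending the new fragment. A condensed sketch of that reallocation pattern (illustrative only; grow_and_append() is a hypothetical name, and the NULL and overflow checks of the real code are omitted):

    #include <linux/skbuff.h>

    static struct sk_buff *grow_and_append(struct sk_buff *old,
                                           const void *buf, unsigned int count)
    {
            struct sk_buff *nskb;

            nskb = alloc_skb(old->len + count, GFP_ATOMIC);
            if (!nskb)
                    return NULL;

            /* Copy what was already reassembled, then append the new part. */
            skb_copy_from_linear_data(old, skb_put(nskb, old->len), old->len);
            skb_put_data(nskb, buf, count);

            kfree_skb(old);         /* the old, too-small buffer is gone */
            return nskb;
    }
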
/net/bluetooth/bnep/
core.c
301 struct sk_buff *nskb; in bnep_rx_frame() local
366 nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); in bnep_rx_frame()
367 if (!nskb) { in bnep_rx_frame()
372 skb_reserve(nskb, 2); in bnep_rx_frame()
377 __skb_put_data(nskb, &s->eh, ETH_HLEN); in bnep_rx_frame()
381 __skb_put_data(nskb, s->eh.h_dest, ETH_ALEN); in bnep_rx_frame()
382 __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); in bnep_rx_frame()
383 put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); in bnep_rx_frame()
387 __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); in bnep_rx_frame()
388 __skb_put_data(nskb, s->eh.h_source, ETH_ALEN); in bnep_rx_frame()
[all …]
/net/dsa/
tag.c
59 struct sk_buff *nskb = NULL; in dsa_switch_rcv() local
81 nskb = skb; in dsa_switch_rcv()
84 nskb = cpu_dp->rcv(skb, dev); in dsa_switch_rcv()
87 if (!nskb) { in dsa_switch_rcv()
92 skb = nskb; in dsa_switch_rcv()
109 nskb = dsa_untag_bridge_pvid(skb); in dsa_switch_rcv()
110 if (!nskb) { in dsa_switch_rcv()
114 skb = nskb; in dsa_switch_rcv()
tag_brcm.c
191 struct sk_buff *nskb; in brcm_tag_rcv() local
194 nskb = brcm_tag_rcv_ll(skb, dev, 2); in brcm_tag_rcv()
195 if (!nskb) in brcm_tag_rcv()
196 return nskb; in brcm_tag_rcv()
200 return nskb; in brcm_tag_rcv()
/net/sched/
sch_etf.c
75 static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb) in is_packet_valid() argument
78 ktime_t txtime = nskb->tstamp; in is_packet_valid()
79 struct sock *sk = nskb->sk; in is_packet_valid()
162 static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, in etf_enqueue_timesortedlist() argument
167 ktime_t txtime = nskb->tstamp; in etf_enqueue_timesortedlist()
170 if (!is_packet_valid(sch, nskb)) { in etf_enqueue_timesortedlist()
171 report_sock_error(nskb, EINVAL, in etf_enqueue_timesortedlist()
173 return qdisc_drop(nskb, sch, to_free); in etf_enqueue_timesortedlist()
188 rb_link_node(&nskb->rbnode, parent, p); in etf_enqueue_timesortedlist()
189 rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost); in etf_enqueue_timesortedlist()
[all …]
/net/core/
skbuff.c
1767 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, in skb_zerocopy_clone() argument
1771 if (skb_zcopy(nskb)) { in skb_zerocopy_clone()
1777 if (skb_uarg(nskb) == skb_uarg(orig)) in skb_zerocopy_clone()
1779 if (skb_copy_ubufs(nskb, GFP_ATOMIC)) in skb_zerocopy_clone()
1782 skb_zcopy_set(nskb, skb_uarg(orig), NULL); in skb_zerocopy_clone()
2267 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in skb_expand_head() local
2269 if (unlikely(!nskb)) in skb_expand_head()
2273 skb_set_owner_w(nskb, sk); in skb_expand_head()
2275 skb = nskb; in skb_expand_head()
4352 struct sk_buff *nskb, *tmp; in skb_segment_list() local
[all …]
datagram.c
138 struct sk_buff *nskb; in skb_set_peeked() local
147 nskb = skb_clone(skb, GFP_ATOMIC); in skb_set_peeked()
148 if (!nskb) in skb_set_peeked()
151 skb->prev->next = nskb; in skb_set_peeked()
152 skb->next->prev = nskb; in skb_set_peeked()
153 nskb->prev = skb->prev; in skb_set_peeked()
154 nskb->next = skb->next; in skb_set_peeked()
157 skb = nskb; in skb_set_peeked()
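
skb_set_peeked() above swaps a shared skb in the receive queue for a private clone: the clone is spliced into the doubly linked list exactly where the original sat, and the reference to the original is dropped. A stripped-down sketch (illustrative only; replace_with_clone() is a hypothetical name):

    #include <linux/skbuff.h>

    static struct sk_buff *replace_with_clone(struct sk_buff *skb)
    {
            struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

            if (!nskb)
                    return NULL;

            /* Splice the clone into the list in place of the original. */
            skb->prev->next = nskb;
            skb->next->prev = nskb;
            nskb->prev = skb->prev;
            nskb->next = skb->next;

            consume_skb(skb);       /* drop our reference to the shared original */
            return nskb;
    }
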
drop_monitor.c
499 struct sk_buff *nskb; in net_dm_packet_trace_kfree_skb_hit() local
505 nskb = skb_clone(skb, GFP_ATOMIC); in net_dm_packet_trace_kfree_skb_hit()
506 if (!nskb) in net_dm_packet_trace_kfree_skb_hit()
509 cb = NET_DM_SKB_CB(nskb); in net_dm_packet_trace_kfree_skb_hit()
515 nskb->tstamp = tstamp; in net_dm_packet_trace_kfree_skb_hit()
521 __skb_queue_tail(&data->drop_queue, nskb); in net_dm_packet_trace_kfree_skb_hit()
535 consume_skb(nskb); in net_dm_packet_trace_kfree_skb_hit()
968 struct sk_buff *nskb; in net_dm_hw_trap_packet_probe() local
977 nskb = skb_clone(skb, GFP_ATOMIC); in net_dm_hw_trap_packet_probe()
978 if (!nskb) in net_dm_hw_trap_packet_probe()
[all …]
/net/vmw_vsock/
af_vsock_tap.c
64 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in __vsock_deliver_tap_skb() local
66 if (nskb) { in __vsock_deliver_tap_skb()
69 nskb->dev = dev; in __vsock_deliver_tap_skb()
70 ret = dev_queue_xmit(nskb); in __vsock_deliver_tap_skb()
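
The vsock tap path clones every delivered skb and transmits the clone on the monitoring device, so the original continues down its normal path untouched. A minimal sketch of that pattern (illustrative only; deliver_to_tap() is a hypothetical name):

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>

    static void deliver_to_tap(struct sk_buff *skb, struct net_device *tap_dev)
    {
            struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

            if (!nskb)
                    return;

            nskb->dev = tap_dev;    /* retarget the clone at the tap device */
            dev_queue_xmit(nskb);   /* consumes nskb on success and on error */
    }
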
/net/can/
isotp.c
215 struct sk_buff *nskb; in isotp_send_fc() local
220 nskb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv), gfp_any()); in isotp_send_fc()
221 if (!nskb) in isotp_send_fc()
226 kfree_skb(nskb); in isotp_send_fc()
230 can_skb_reserve(nskb); in isotp_send_fc()
231 can_skb_prv(nskb)->ifindex = dev->ifindex; in isotp_send_fc()
232 can_skb_prv(nskb)->skbcnt = 0; in isotp_send_fc()
234 nskb->dev = dev; in isotp_send_fc()
235 can_skb_set_owner(nskb, sk); in isotp_send_fc()
236 ncf = (struct canfd_frame *)nskb->data; in isotp_send_fc()
[all …]
/net/x25/
x25_dev.c
97 struct sk_buff *nskb; in x25_lapb_receive_frame() local
103 nskb = skb_copy(skb, GFP_ATOMIC); in x25_lapb_receive_frame()
104 if (!nskb) in x25_lapb_receive_frame()
107 skb = nskb; in x25_lapb_receive_frame()
/net/mac802154/
tx.c
60 struct sk_buff *nskb; in ieee802154_tx() local
64 nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN, in ieee802154_tx()
66 if (likely(nskb)) { in ieee802154_tx()
68 skb = nskb; in ieee802154_tx()
/net/sctp/
output.c
444 struct sk_buff *nskb; in sctp_packet_pack() local
451 nskb = head; in sctp_packet_pack()
472 nskb = alloc_skb(pkt_size + MAX_HEADER, gfp); in sctp_packet_pack()
473 if (!nskb) in sctp_packet_pack()
475 skb_reserve(nskb, packet->overhead + MAX_HEADER); in sctp_packet_pack()
498 skb_tail_pointer(nskb); in sctp_packet_pack()
500 skb_put_data(nskb, chunk->skb->data, chunk->skb->len); in sctp_packet_pack()
520 sctp_auth_calculate_hmac(tp->asoc, nskb, auth, in sctp_packet_pack()
531 sctp_packet_gso_append(head, nskb); in sctp_packet_pack()
/net/ipv4/
tcp_output.c
2398 struct sk_buff *skb, *nskb, *next; in tcp_mtu_probe() local
2461 nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, false); in tcp_mtu_probe()
2462 if (!nskb) in tcp_mtu_probe()
2466 if (tcp_clone_payload(sk, nskb, probe_size)) { in tcp_mtu_probe()
2467 tcp_skb_tsorted_anchor_cleanup(nskb); in tcp_mtu_probe()
2468 consume_skb(nskb); in tcp_mtu_probe()
2471 sk_wmem_queued_add(sk, nskb->truesize); in tcp_mtu_probe()
2472 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
2475 skb_copy_decrypted(nskb, skb); in tcp_mtu_probe()
2476 mptcp_skb_ext_copy(nskb, skb); in tcp_mtu_probe()
[all …]
/net/nfc/
rawsock.c
362 struct sk_buff *skb_copy = NULL, *nskb; in nfc_send_to_raw_sock() local
382 nskb = skb_clone(skb_copy, GFP_ATOMIC); in nfc_send_to_raw_sock()
383 if (!nskb) in nfc_send_to_raw_sock()
386 if (sock_queue_rcv_skb(sk, nskb)) in nfc_send_to_raw_sock()
387 kfree_skb(nskb); in nfc_send_to_raw_sock()
