Searched refs:nskb (Results 1 – 25 of 31) sorted by relevance

/net/llc/
llc_c_ac.c 202 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); in llc_conn_ac_send_disc_cmd_p_set_x() local
204 if (nskb) { in llc_conn_ac_send_disc_cmd_p_set_x()
207 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, in llc_conn_ac_send_disc_cmd_p_set_x()
209 llc_pdu_init_as_disc_cmd(nskb, 1); in llc_conn_ac_send_disc_cmd_p_set_x()
210 rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); in llc_conn_ac_send_disc_cmd_p_set_x()
213 llc_conn_send_pdu(sk, nskb); in llc_conn_ac_send_disc_cmd_p_set_x()
219 kfree_skb(nskb); in llc_conn_ac_send_disc_cmd_p_set_x()
227 struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); in llc_conn_ac_send_dm_rsp_f_set_p() local
229 if (nskb) { in llc_conn_ac_send_dm_rsp_f_set_p()
234 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, in llc_conn_ac_send_dm_rsp_f_set_p()
[all …]
llc_station.c 52 struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, in llc_station_ac_send_xid_r() local
55 if (!nskb) in llc_station_ac_send_xid_r()
60 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); in llc_station_ac_send_xid_r()
61 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127); in llc_station_ac_send_xid_r()
62 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); in llc_station_ac_send_xid_r()
65 dev_queue_xmit(nskb); in llc_station_ac_send_xid_r()
69 kfree_skb(nskb); in llc_station_ac_send_xid_r()
78 struct sk_buff *nskb; in llc_station_ac_send_test_r() local
82 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); in llc_station_ac_send_test_r()
84 if (!nskb) in llc_station_ac_send_test_r()
[all …]
llc_s_ac.c 101 struct sk_buff *nskb; in llc_sap_action_send_xid_r() local
106 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, in llc_sap_action_send_xid_r()
108 if (!nskb) in llc_sap_action_send_xid_r()
110 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, in llc_sap_action_send_xid_r()
112 llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0); in llc_sap_action_send_xid_r()
113 rc = llc_mac_hdr_init(nskb, mac_sa, mac_da); in llc_sap_action_send_xid_r()
115 rc = dev_queue_xmit(nskb); in llc_sap_action_send_xid_r()
146 struct sk_buff *nskb; in llc_sap_action_send_test_r() local
156 nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); in llc_sap_action_send_test_r()
157 if (!nskb) in llc_sap_action_send_test_r()
[all …]
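
Every hit in /net/llc/ above follows the same shape: allocate a fresh frame with llc_alloc_frame(), fill in the LLC PDU header, fill in the MAC header, then either hand the frame to the device or free it on error. The sketch below reassembles that shape from the excerpts; the function name, error labels, and include list are illustrative rather than copied from any one kernel function.

/*
 * Send-an-LLC-response pattern, reconstructed from the llc_station.c and
 * llc_s_ac.c excerpts above. nskb is freed only on the error path; on
 * success dev_queue_xmit() takes ownership.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/llc_pdu.h>
#include <net/llc_sap.h>

static int llc_send_xid_rsp_sketch(struct sk_buff *skb, u8 dsap,
                                   const u8 *mac_da)
{
        struct sk_buff *nskb;
        int rc = -ENOBUFS;

        /* Fresh frame for the response; the received skb is left untouched. */
        nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, 0);
        if (!nskb)
                goto out;

        /* LLC header, XID-specific fields, then the MAC header. */
        llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP);
        llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127);
        rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
        if (rc)
                goto free;

        return dev_queue_xmit(nskb);
free:
        kfree_skb(nskb);
out:
        return rc;
}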
/net/ipv4/netfilter/
ipt_REJECT.c 38 struct sk_buff *nskb; in send_reset() local
65 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + in send_reset()
67 if (!nskb) in send_reset()
70 skb_reserve(nskb, LL_MAX_HEADER); in send_reset()
72 skb_reset_network_header(nskb); in send_reset()
73 niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); in send_reset()
84 skb_reset_transport_header(nskb); in send_reset()
85 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); in send_reset()
103 nskb->ip_summed = CHECKSUM_PARTIAL; in send_reset()
104 nskb->csum_start = (unsigned char *)tcph - nskb->head; in send_reset()
[all …]
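
The ipt_REJECT.c hits show a reply packet being built from nothing: reserve link-layer headroom, lay down the IP and TCP headers with skb_put(), and leave the TCP checksum to be finished later via CHECKSUM_PARTIAL. A minimal sketch of just those buffer-layout steps, with the header field fill-in elided and assuming the same kernel era as the excerpt:

/*
 * Skeleton of send_reset()'s buffer construction. Addresses, ports,
 * sequence numbers and the RST flag would be filled into niph/tcph.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/stddef.h>

static struct sk_buff *build_rst_skb_sketch(void)
{
        struct sk_buff *nskb;
        struct iphdr *niph;
        struct tcphdr *tcph;

        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
                         LL_MAX_HEADER, GFP_ATOMIC);
        if (!nskb)
                return NULL;

        /* Leave room in front for whatever link-layer header gets added. */
        skb_reserve(nskb, LL_MAX_HEADER);

        skb_reset_network_header(nskb);
        niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));

        skb_reset_transport_header(nskb);
        tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));

        /* ... fill in *niph and *tcph here ... */
        (void)niph;

        /* Let checksum offload (or checksum_help) finish the TCP checksum. */
        nskb->ip_summed = CHECKSUM_PARTIAL;
        nskb->csum_start = (unsigned char *)tcph - nskb->head;
        nskb->csum_offset = offsetof(struct tcphdr, check);

        return nskb;
}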
/net/dsa/
tag_trailer.c 20 struct sk_buff *nskb; in trailer_xmit() local
37 nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC); in trailer_xmit()
38 if (nskb == NULL) { in trailer_xmit()
42 skb_reserve(nskb, NET_IP_ALIGN); in trailer_xmit()
44 skb_reset_mac_header(nskb); in trailer_xmit()
45 skb_set_network_header(nskb, skb_network_header(skb) - skb->head); in trailer_xmit()
46 skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head); in trailer_xmit()
47 skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len)); in trailer_xmit()
51 u8 *pad = skb_put(nskb, padlen); in trailer_xmit()
55 trailer = skb_put(nskb, 4); in trailer_xmit()
[all …]
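
tag_trailer.c copies the outgoing frame into a larger nskb so that padding and a 4-byte switch trailer can be appended behind the payload. The sketch below keeps only that copy-pad-append sequence; the padlen rule and the trailer byte values are device-specific and shown here as assumptions.

/*
 * DSA trailer-tag transmit path, reduced to the nskb handling seen above.
 */
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

static struct sk_buff *dsa_add_trailer_sketch(struct sk_buff *skb)
{
        unsigned int padlen = 0;
        struct sk_buff *nskb;
        u8 *trailer;

        /* Frames below the Ethernet minimum must be padded before tagging. */
        if (skb->len < ETH_ZLEN)
                padlen = ETH_ZLEN - skb->len;

        nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
        if (!nskb)
                return NULL;
        skb_reserve(nskb, NET_IP_ALIGN);

        /* Preserve the original frame's header offsets in the copy. */
        skb_reset_mac_header(nskb);
        skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
        skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);

        /* Copy the payload and resolve any pending checksum in one pass. */
        skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));

        if (padlen)
                memset(skb_put(nskb, padlen), 0, padlen);

        /* Switch-specific trailer at the very end; values illustrative. */
        trailer = skb_put(nskb, 4);
        trailer[0] = 0x80;
        trailer[1] = trailer[2] = trailer[3] = 0x00;

        return nskb;
}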
/net/bluetooth/cmtp/
core.c 108 struct sk_buff *skb = session->reassembly[id], *nskb; in cmtp_add_msgpart() local
115 nskb = alloc_skb(size, GFP_ATOMIC); in cmtp_add_msgpart()
116 if (!nskb) { in cmtp_add_msgpart()
122 skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len); in cmtp_add_msgpart()
124 memcpy(skb_put(nskb, count), buf, count); in cmtp_add_msgpart()
126 session->reassembly[id] = nskb; in cmtp_add_msgpart()
211 struct sk_buff *skb, *nskb; in cmtp_process_transmit() local
217 nskb = alloc_skb(session->mtu, GFP_ATOMIC); in cmtp_process_transmit()
218 if (!nskb) { in cmtp_process_transmit()
226 tail = session->mtu - nskb->len; in cmtp_process_transmit()
[all …]
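
cmtp_add_msgpart() grows the per-id reassembly buffer by copying: a new skb large enough for the old data plus the new fragment is allocated, both parts are copied in, and the new skb replaces the old one. A self-contained sketch of that pattern, with the CMTP session reduced to the one field it needs:

/*
 * Grow-by-copy reassembly. slot->skb may be NULL when the first fragment
 * of a message arrives.
 */
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/errno.h>

struct reasm_slot {
        struct sk_buff *skb;            /* message collected so far */
};

static int reasm_add_part_sketch(struct reasm_slot *slot,
                                 const unsigned char *buf, int count)
{
        struct sk_buff *skb = slot->skb, *nskb;
        int size = count + (skb ? skb->len : 0);

        nskb = alloc_skb(size, GFP_ATOMIC);
        if (!nskb)
                return -ENOMEM;

        /* Carry over what has been collected so far ... */
        if (skb)
                skb_copy_from_linear_data(skb, skb_put(nskb, skb->len),
                                          skb->len);
        /* ... then append the new fragment. */
        memcpy(skb_put(nskb, count), buf, count);

        kfree_skb(skb);                 /* kfree_skb(NULL) is a no-op */
        slot->skb = nskb;
        return 0;
}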
/net/ipv6/netfilter/
ip6t_REJECT.c 44 struct sk_buff *nskb; in send_reset() local
114 nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) in send_reset()
118 if (!nskb) { in send_reset()
124 skb_dst_set(nskb, dst); in send_reset()
126 skb_reserve(nskb, hh_len + dst->header_len); in send_reset()
128 skb_put(nskb, sizeof(struct ipv6hdr)); in send_reset()
129 skb_reset_network_header(nskb); in send_reset()
130 ip6h = ipv6_hdr(nskb); in send_reset()
137 skb_reset_transport_header(nskb); in send_reset()
138 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); in send_reset()
[all …]
/net/sctp/
output.c 395 struct sk_buff *nskb; in sctp_packet_transmit() local
416 nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC); in sctp_packet_transmit()
417 if (!nskb) in sctp_packet_transmit()
421 skb_reserve(nskb, packet->overhead + LL_MAX_HEADER); in sctp_packet_transmit()
426 sctp_packet_set_owner_w(nskb, sk); in sctp_packet_transmit()
435 skb_dst_set(nskb, dst); in sctp_packet_transmit()
440 sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); in sctp_packet_transmit()
441 skb_reset_transport_header(nskb); in sctp_packet_transmit()
502 auth = skb_tail_pointer(nskb); in sctp_packet_transmit()
505 memcpy(skb_put(nskb, chunk->skb->len), in sctp_packet_transmit()
[all …]
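
sctp_packet_transmit() bundles every queued chunk into a single nskb: the buffer is sized for the whole packet, headroom is reserved for the lower-layer headers, the common SCTP header is pushed, and each chunk is copied in behind it. The sketch below shows only that layout; the chunk list type, sizes, and header fill-in are simplified assumptions.

/*
 * total is the full SCTP packet size including overhead; overhead covers
 * the IP + SCTP headers that sit in front of the chunks.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/sctp.h>
#include <linux/string.h>

static struct sk_buff *sctp_bundle_sketch(struct sk_buff **chunks, int n,
                                          size_t total, size_t overhead)
{
        struct sk_buff *nskb;
        struct sctphdr *sh;
        int i;

        nskb = alloc_skb(total + LL_MAX_HEADER, GFP_ATOMIC);
        if (!nskb)
                return NULL;
        skb_reserve(nskb, overhead + LL_MAX_HEADER);

        /* Common header goes into the reserved overhead region ... */
        sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
        skb_reset_transport_header(nskb);
        memset(sh, 0, sizeof(*sh));     /* ports, vtag, checksum set later */

        /* ... and the chunks are copied back to back behind it. */
        for (i = 0; i < n; i++)
                memcpy(skb_put(nskb, chunks[i]->len), chunks[i]->data,
                       chunks[i]->len);

        return nskb;
}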
/net/bluetooth/bnep/
core.c 293 struct sk_buff *nskb; in bnep_rx_frame() local
332 nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); in bnep_rx_frame()
333 if (!nskb) { in bnep_rx_frame()
338 skb_reserve(nskb, 2); in bnep_rx_frame()
343 memcpy(__skb_put(nskb, ETH_HLEN), &s->eh, ETH_HLEN); in bnep_rx_frame()
347 memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN); in bnep_rx_frame()
348 memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), ETH_ALEN); in bnep_rx_frame()
349 put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); in bnep_rx_frame()
353 memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), in bnep_rx_frame()
355 memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source, in bnep_rx_frame()
[all …]
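
bnep_rx_frame() rebuilds the Ethernet header that BNEP compresses on the wire: a new skb gets two bytes of alignment padding, then a full 14-byte header assembled from the cached session header and/or the received bytes, then the payload. A sketch of the fully-compressed case, with the session reduced to the cached header:

/*
 * Rebuild a full Ethernet header in front of the received payload.
 */
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

struct bnep_hdr_cache {
        struct ethhdr eh;               /* header remembered at session setup */
};

static struct sk_buff *bnep_rebuild_eth_sketch(struct bnep_hdr_cache *s,
                                               struct sk_buff *skb)
{
        struct sk_buff *nskb;

        nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
        if (!nskb)
                return NULL;

        /* 2 bytes of padding so the IP header after ETH_HLEN is aligned. */
        skb_reserve(nskb, 2);

        /* Fully compressed case: reuse the whole cached Ethernet header. */
        memcpy(__skb_put(nskb, ETH_HLEN), &s->eh, ETH_HLEN);

        /* Payload follows the rebuilt header. */
        memcpy(__skb_put(nskb, skb->len), skb->data, skb->len);

        return nskb;
}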
/net/core/
skbuff.c 2791 struct sk_buff *nskb; in skb_segment() local
2810 nskb = skb_clone(fskb, GFP_ATOMIC); in skb_segment()
2813 if (unlikely(!nskb)) in skb_segment()
2816 hsize = skb_end_offset(nskb); in skb_segment()
2817 if (skb_cow_head(nskb, doffset + headroom)) { in skb_segment()
2818 kfree_skb(nskb); in skb_segment()
2822 nskb->truesize += skb_end_offset(nskb) - hsize; in skb_segment()
2823 skb_release_head_state(nskb); in skb_segment()
2824 __skb_push(nskb, doffset); in skb_segment()
2826 nskb = __alloc_skb(hsize + doffset + headroom, in skb_segment()
[all …]
dev.c 2416 struct sk_buff *nskb = skb->next; in dev_gso_skb_destructor() local
2418 skb->next = nskb->next; in dev_gso_skb_destructor()
2419 nskb->next = NULL; in dev_gso_skb_destructor()
2420 kfree_skb(nskb); in dev_gso_skb_destructor()
2589 struct sk_buff *nskb = skb->next; in dev_hard_start_xmit() local
2591 skb->next = nskb->next; in dev_hard_start_xmit()
2592 nskb->next = NULL; in dev_hard_start_xmit()
2595 dev_queue_xmit_nit(nskb, dev); in dev_hard_start_xmit()
2597 skb_len = nskb->len; in dev_hard_start_xmit()
2598 rc = ops->ndo_start_xmit(nskb, dev); in dev_hard_start_xmit()
[all …]
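
Both dev.c hits walk a GSO segment list hanging off skb->next: each nskb is unlinked before it is passed on (to the taps and the driver, or to kfree_skb in the destructor), because the receiver must never see the rest of the chain. A sketch of that walk with the transmit call abstracted out:

/*
 * Unlink-then-transmit loop over a GSO segment list. xmit_one() stands in
 * for dev_queue_xmit_nit() + ndo_start_xmit() in the excerpt.
 */
#include <linux/skbuff.h>

static int xmit_segments_sketch(struct sk_buff *skb,
                                int (*xmit_one)(struct sk_buff *))
{
        int rc = 0;

        do {
                struct sk_buff *nskb = skb->next;

                /* Detach this segment before handing it over. */
                skb->next = nskb->next;
                nskb->next = NULL;

                rc = xmit_one(nskb);
                if (rc) {
                        /* Relink the segment so the caller can requeue it. */
                        nskb->next = skb->next;
                        skb->next = nskb;
                        break;
                }
        } while (skb->next);

        return rc;
}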
/net/bluetooth/
hci_sock.c 81 struct sk_buff *nskb; in hci_send_to_sock() local
127 nskb = skb_clone(skb_copy, GFP_ATOMIC); in hci_send_to_sock()
128 if (!nskb) in hci_send_to_sock()
131 if (sock_queue_rcv_skb(sk, nskb)) in hci_send_to_sock()
132 kfree_skb(nskb); in hci_send_to_sock()
150 struct sk_buff *nskb; in hci_send_to_control() local
162 nskb = skb_clone(skb, GFP_ATOMIC); in hci_send_to_control()
163 if (!nskb) in hci_send_to_control()
166 if (sock_queue_rcv_skb(sk, nskb)) in hci_send_to_control()
167 kfree_skb(nskb); in hci_send_to_control()
[all …]
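
hci_send_to_sock() and hci_send_to_control() deliver the same event to every listening socket by cloning it once per receiver: a clone shares the packet data, and it is freed only if sock_queue_rcv_skb() refuses it (on success the socket owns it). The per-socket step, with the socket iteration elided:

/*
 * Deliver one clone of skb to one listening socket.
 */
#include <linux/skbuff.h>
#include <net/sock.h>

static void deliver_clone_sketch(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff *nskb;

        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return;

        /* sock_queue_rcv_skb() takes ownership on success (returns 0). */
        if (sock_queue_rcv_skb(sk, nskb))
                kfree_skb(nskb);
}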
/net/x25/
x25_dev.c 100 struct sk_buff *nskb; in x25_lapb_receive_frame() local
106 nskb = skb_copy(skb, GFP_ATOMIC); in x25_lapb_receive_frame()
107 if (!nskb) in x25_lapb_receive_frame()
110 skb = nskb; in x25_lapb_receive_frame()
/net/ipv4/
tcp_output.c 1678 struct sk_buff *skb, *nskb, *next; in tcp_mtu_probe() local
1723 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) in tcp_mtu_probe()
1725 sk->sk_wmem_queued += nskb->truesize; in tcp_mtu_probe()
1726 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
1730 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; in tcp_mtu_probe()
1731 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; in tcp_mtu_probe()
1732 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; in tcp_mtu_probe()
1733 TCP_SKB_CB(nskb)->sacked = 0; in tcp_mtu_probe()
1734 nskb->csum = 0; in tcp_mtu_probe()
1735 nskb->ip_summed = skb->ip_summed; in tcp_mtu_probe()
[all …]
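
tcp_mtu_probe() allocates one larger nskb for the probe, charges it to the socket's write memory, and seeds its control block from the first skb it will absorb; the payload of the following skbs is then copied or spliced in (elided here). A sketch of just the lines visible above:

/*
 * Allocate and label an MTU-probe skb covering probe_size bytes starting
 * at skb's sequence number.
 */
#include <net/tcp.h>

static struct sk_buff *mtu_probe_alloc_sketch(struct sock *sk,
                                              struct sk_buff *skb,
                                              int probe_size)
{
        struct sk_buff *nskb;

        nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC);
        if (!nskb)
                return NULL;

        /* Account the new buffer against the socket's send buffer. */
        sk->sk_wmem_queued += nskb->truesize;
        sk_mem_charge(sk, nskb->truesize);

        TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
        TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
        TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
        TCP_SKB_CB(nskb)->sacked = 0;
        nskb->csum = 0;
        nskb->ip_summed = skb->ip_summed;

        return nskb;
}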
ip_output.c 1468 struct sk_buff *nskb; in ip_send_unicast_reply() local
1511 nskb = skb_peek(&sk->sk_write_queue); in ip_send_unicast_reply()
1512 if (nskb) { in ip_send_unicast_reply()
1514 *((__sum16 *)skb_transport_header(nskb) + in ip_send_unicast_reply()
1515 arg->csumoffset) = csum_fold(csum_add(nskb->csum, in ip_send_unicast_reply()
1517 nskb->ip_summed = CHECKSUM_NONE; in ip_send_unicast_reply()
1518 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); in ip_send_unicast_reply()
tcp_input.c 4499 struct sk_buff *nskb; in tcp_collapse() local
4508 nskb = alloc_skb(copy + header, GFP_ATOMIC); in tcp_collapse()
4509 if (!nskb) in tcp_collapse()
4512 skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); in tcp_collapse()
4513 skb_set_network_header(nskb, (skb_network_header(skb) - in tcp_collapse()
4515 skb_set_transport_header(nskb, (skb_transport_header(skb) - in tcp_collapse()
4517 skb_reserve(nskb, header); in tcp_collapse()
4518 memcpy(nskb->head, skb->head, header); in tcp_collapse()
4519 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); in tcp_collapse()
4520 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; in tcp_collapse()
[all …]
/net/xfrm/
xfrm_output.c 166 struct sk_buff *nskb = segs->next; in xfrm_output_gso() local
173 while ((segs = nskb)) { in xfrm_output_gso()
174 nskb = segs->next; in xfrm_output_gso()
181 segs = nskb; in xfrm_output_gso()
/net/iucv/
af_iucv.c 321 struct sk_buff *nskb; in afiucv_hs_send() local
367 nskb = skb_clone(skb, GFP_ATOMIC); in afiucv_hs_send()
368 if (!nskb) in afiucv_hs_send()
370 skb_queue_tail(&iucv->send_skb_q, nskb); in afiucv_hs_send()
373 skb_unlink(nskb, &iucv->send_skb_q); in afiucv_hs_send()
374 kfree_skb(nskb); in afiucv_hs_send()
1207 struct sk_buff *nskb; in iucv_fragment_skb() local
1216 nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA); in iucv_fragment_skb()
1217 if (!nskb) in iucv_fragment_skb()
1221 IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class; in iucv_fragment_skb()
[all …]
/net/netfilter/
nfnetlink_queue_core.c 500 struct sk_buff *nskb; in __nfqnl_enqueue_packet() local
505 nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr); in __nfqnl_enqueue_packet()
506 if (nskb == NULL) { in __nfqnl_enqueue_packet()
531 err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); in __nfqnl_enqueue_packet()
543 kfree_skb(nskb); in __nfqnl_enqueue_packet()
665 struct sk_buff *nskb = segs->next; in nfqnl_enqueue_packet() local
673 segs = nskb; in nfqnl_enqueue_packet()
690 struct sk_buff *nskb; in nfqnl_mangle() local
699 nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), in nfqnl_mangle()
701 if (!nskb) { in nfqnl_mangle()
[all …]
/net/can/
gw.c 352 struct sk_buff *nskb; in can_can_gw_rcv() local
394 nskb = skb_copy(skb, GFP_ATOMIC); in can_can_gw_rcv()
396 nskb = skb_clone(skb, GFP_ATOMIC); in can_can_gw_rcv()
398 if (!nskb) { in can_can_gw_rcv()
404 cgw_hops(nskb) = cgw_hops(skb) + 1; in can_can_gw_rcv()
405 nskb->dev = gwj->dst.dev; in can_can_gw_rcv()
408 cf = (struct can_frame *)nskb->data; in can_can_gw_rcv()
425 nskb->tstamp.tv64 = 0; in can_can_gw_rcv()
428 if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO)) in can_can_gw_rcv()
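
can_can_gw_rcv() picks between skb_copy() and skb_clone() depending on whether the gateway job will modify the CAN frame: a copy gives private data that can be rewritten, while a clone is cheaper when the frame is forwarded unchanged. A sketch of that decision with the modification rules reduced to a flag:

/*
 * Forward a CAN frame to another device, copying only when necessary.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/core.h>

static int gw_forward_sketch(struct sk_buff *skb, struct net_device *dst,
                             bool will_modify)
{
        struct sk_buff *nskb;

        if (will_modify)
                nskb = skb_copy(skb, GFP_ATOMIC);       /* private data */
        else
                nskb = skb_clone(skb, GFP_ATOMIC);      /* shared data */
        if (!nskb)
                return -ENOMEM;

        /* Retarget the frame at the output device. */
        nskb->dev = dst;

        if (will_modify) {
                struct can_frame *cf = (struct can_frame *)nskb->data;
                /* ... apply the configured modifications to *cf ... */
                (void)cf;
        }

        return can_send(nskb, 0);
}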
/net/openvswitch/
datapath.c 322 struct sk_buff *segs, *nskb; in queue_gso_packets() local
353 nskb = skb->next; in queue_gso_packets()
358 } while ((skb = nskb)); in queue_gso_packets()
396 struct sk_buff *nskb = NULL; in queue_userspace_packet() local
402 nskb = skb_clone(skb, GFP_ATOMIC); in queue_userspace_packet()
403 if (!nskb) in queue_userspace_packet()
406 nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb)); in queue_userspace_packet()
407 if (!nskb) in queue_userspace_packet()
410 nskb->vlan_tci = 0; in queue_userspace_packet()
411 skb = nskb; in queue_userspace_packet()
[all …]
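
queue_userspace_packet() never untags the skb it was given: when a hardware-accelerated VLAN tag is present, it clones the packet first and pushes the tag back into the clone's data with __vlan_put_tag(), so the fast-path skb stays untouched. A sketch of that step, assuming the same VLAN helper names as in the excerpt's kernel version:

/*
 * Produce an skb whose VLAN tag (if any) is in the packet data, without
 * modifying the original.
 */
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

static struct sk_buff *untag_for_upcall_sketch(struct sk_buff *skb)
{
        struct sk_buff *nskb = skb;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return NULL;

                /* __vlan_put_tag() frees the skb itself on failure. */
                nskb = __vlan_put_tag(nskb, nskb->vlan_proto,
                                      vlan_tx_tag_get(nskb));
                if (!nskb)
                        return NULL;

                nskb->vlan_tci = 0;
        }

        return nskb;
}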
/net/sched/
sch_choke.c 250 struct sk_buff *nskb, in choke_match_random() argument
260 return choke_get_classid(nskb) == choke_get_classid(oskb); in choke_match_random()
262 return choke_match_flow(oskb, nskb); in choke_match_random()
sch_netem.c 334 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) in tfifo_enqueue() argument
337 psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; in tfifo_enqueue()
342 return __skb_queue_tail(list, nskb); in tfifo_enqueue()
349 __skb_queue_after(list, skb, nskb); in tfifo_enqueue()
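
tfifo_enqueue() keeps netem's delayed packets ordered by their time_to_send: the common case appends at the tail, otherwise the queue is walked backwards until an earlier-or-equal entry is found. A sketch of that insert with the per-skb timestamp accessor passed in instead of netem_skb_cb():

/*
 * Time-ordered insert into an sk_buff_head, newest-last.
 */
#include <linux/skbuff.h>

static void tfifo_insert_sketch(struct sk_buff_head *list,
                                struct sk_buff *nskb,
                                u64 (*time_of)(const struct sk_buff *))
{
        struct sk_buff *skb = skb_peek_tail(list);
        u64 tnext = time_of(nskb);

        /* Fast path: later than everything already queued. */
        if (!skb || tnext >= time_of(skb)) {
                __skb_queue_tail(list, nskb);
                return;
        }

        /* Walk back until an entry with an earlier-or-equal send time. */
        skb_queue_reverse_walk(list, skb) {
                if (tnext >= time_of(skb)) {
                        __skb_queue_after(list, skb, nskb);
                        return;
                }
        }

        /* Earlier than everything: new head of the queue. */
        __skb_queue_head(list, nskb);
}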
/net/nfc/
llcp_core.c 672 struct sk_buff *skb_copy = NULL, *nskb; in nfc_llcp_send_to_raw_sock() local
695 nskb = skb_clone(skb_copy, GFP_ATOMIC); in nfc_llcp_send_to_raw_sock()
696 if (!nskb) in nfc_llcp_send_to_raw_sock()
699 if (sock_queue_rcv_skb(sk, nskb)) in nfc_llcp_send_to_raw_sock()
700 kfree_skb(nskb); in nfc_llcp_send_to_raw_sock()
/net/mac80211/
rx.c 2642 struct sk_buff *nskb; in ieee80211_rx_h_action_return() local
2671 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, in ieee80211_rx_h_action_return()
2673 if (nskb) { in ieee80211_rx_h_action_return()
2674 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; in ieee80211_rx_h_action_return()
2680 memset(nskb->cb, 0, sizeof(nskb->cb)); in ieee80211_rx_h_action_return()
2683 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); in ieee80211_rx_h_action_return()
2693 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, in ieee80211_rx_h_action_return()
