Searched refs:skb (Results 1–25 of 781), sorted by relevance

/net/bridge/
br_netfilter.c
   45  #define skb_origaddr(skb) (((struct bridge_skb_cb *) \
   46  (skb->nf_bridge->data))->daddr.ipv4)
   47  #define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr)
   48  #define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr)
   67  #define IS_IP(skb) \
   68  (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
   70  #define IS_IPV6(skb) \
   71  (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
   73  #define IS_ARP(skb) \
   74  (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
[all …]
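
For orientation, here is a minimal userspace sketch of the EtherType test that the IS_IP()/IS_IPV6()/IS_ARP() macros above perform on untagged frames. It works on a flat byte buffer rather than an sk_buff, and classify_ethertype() is an illustrative name, not kernel API.

#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP    0x0800
#define ETH_P_IPV6  0x86DD
#define ETH_P_ARP   0x0806

static const char *classify_ethertype(const uint8_t *frame)
{
	/* The EtherType of an untagged frame sits at offset 12, right after
	 * the destination and source MAC addresses, in network byte order. */
	uint16_t proto = (uint16_t)((frame[12] << 8) | frame[13]);

	switch (proto) {
	case ETH_P_IP:   return "IPv4";
	case ETH_P_IPV6: return "IPv6";
	case ETH_P_ARP:  return "ARP";
	default:         return "other";
	}
}

int main(void)
{
	uint8_t frame[14] = { 0 };

	frame[12] = 0x08;	/* 0x0800: IPv4 */
	frame[13] = 0x00;
	printf("%s\n", classify_ethertype(frame));	/* prints "IPv4" */
	return 0;
}
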
br_forward.c
   25  struct sk_buff *skb,
   27  struct sk_buff *skb));
   31  const struct sk_buff *skb)
   33  return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
   34  br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
   38  static inline unsigned int packet_length(const struct sk_buff *skb)
   40  return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
   43  int br_dev_queue_push_xmit(struct sk_buff *skb)
   46  if (nf_bridge_maybe_copy_header(skb) ||
   47  (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))) {
[all …]
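
The packet_length() hit above subtracts the 4-byte 802.1Q tag before comparing against the device MTU. A minimal sketch of the same arithmetic, assuming a hypothetical frame descriptor in place of the sk_buff (struct frame and needs_fragmenting() are illustrative names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_HLEN 4	/* 802.1Q tag: TPID + TCI */

struct frame {			/* stand-in for the sk_buff fields used above */
	unsigned int len;	/* total frame length, incl. any VLAN tag     */
	uint16_t ethertype;	/* host byte order in this sketch             */
	bool is_gso;		/* segmentation offload: skip the MTU test    */
};

static unsigned int packet_length(const struct frame *f)
{
	/* A tagged frame may legitimately exceed the MTU by the tag size. */
	return f->len - (f->ethertype == 0x8100 ? VLAN_HLEN : 0);
}

static bool needs_fragmenting(const struct frame *f, unsigned int mtu)
{
	return packet_length(f) > mtu && !f->is_gso;
}

int main(void)
{
	struct frame f = { .len = 1504, .ethertype = 0x8100, .is_gso = false };

	printf("%s\n", needs_fragmenting(&f, 1500) ? "too big" : "fits");
	/* prints "fits": 1504 - 4 == 1500 <= MTU */
	return 0;
}
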
br_input.c
   27  static int br_pass_frame_up(struct sk_buff *skb)
   29  struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
   35  brstats->rx_bytes += skb->len;
   43  !br_allowed_egress(br, br_get_vlan_info(br), skb)) {
   44  kfree_skb(skb);
   48  skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
   49  if (!skb)
   52  indev = skb->dev;
   53  skb->dev = brdev;
   55  return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
[all …]
/net/openvswitch/
actions.c
   38  static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
   41  static int make_writable(struct sk_buff *skb, int write_len)
   43  if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
   46  return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
   50  static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
   55  err = make_writable(skb, VLAN_ETH_HLEN);
   59  if (skb->ip_summed == CHECKSUM_COMPLETE)
   60  skb->csum = csum_sub(skb->csum, csum_partial(skb->data
   63  vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
   66  memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
[all …]
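
The memmove at actions.c:66 is the heart of popping a VLAN tag: the two MAC addresses slide over the tag and the data pointer is then pulled forward. A rough userspace equivalent on a flat buffer, with pop_vlan() as an illustrative name (the kernel also has to adjust CHECKSUM_COMPLETE, as line 60 shows):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN   6
#define ETH_HLEN   14		/* dst MAC + src MAC + EtherType */
#define VLAN_HLEN  4		/* 802.1Q TPID + TCI             */

/* Removes the tag in place; returns the new frame length. The end result is
 * the same as the kernel's memmove() + skb_pull(VLAN_HLEN) sequence. */
static size_t pop_vlan(uint8_t *frame, size_t len)
{
	if (len < ETH_HLEN + VLAN_HLEN)
		return len;		/* no tag to pop */

	/* Shift the inner EtherType and payload down over the 4-byte tag. */
	memmove(frame + 2 * ETH_ALEN,
		frame + 2 * ETH_ALEN + VLAN_HLEN,
		len - 2 * ETH_ALEN - VLAN_HLEN);
	return len - VLAN_HLEN;
}

int main(void)
{
	/* dst, src, 0x8100 tag (TCI 0x0001), inner EtherType 0x0800, payload */
	uint8_t frame[] = {
		1, 2, 3, 4, 5, 6,   7, 8, 9, 10, 11, 12,
		0x81, 0x00, 0x00, 0x01, 0x08, 0x00, 0xde, 0xad,
	};
	size_t len = pop_vlan(frame, sizeof(frame));

	printf("len=%zu ethertype=%02x%02x\n", len, frame[12], frame[13]);
	/* prints: len=16 ethertype=0800 */
	return 0;
}
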
/net/ipv6/
exthdrs.c
   62  bool (*func)(struct sk_buff *skb, int offset);
   71  static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
   73  switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
   84  if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
   87  icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
   91  kfree_skb(skb);
   97  static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
  100  const unsigned char *nh = skb_network_header(skb);
  101  int off = skb_network_header_len(skb);
  102  int len = (skb_transport_header(skb)[1] + 1) << 3;
[all …]
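
Two small calculations in the exthdrs.c hits are worth spelling out: the extension-header length at line 102 ((hdrlen + 1) * 8 octets) and the unknown-option action at line 73 (top two bits of the option type). A hedged userspace sketch of both, with illustrative names and plain buffers in place of the sk_buff:

#include <stdint.h>
#include <stdio.h>

/* RFC 8200: Hdr Ext Len counts 8-octet units, not including the first 8. */
static int ext_hdr_len(const uint8_t *exthdr)
{
	return (exthdr[1] + 1) << 3;
}

/* Action for an unrecognised option, encoded in the option type's top bits:
 * 00 skip, 01 discard, 10 discard + ICMP, 11 discard + ICMP unless mcast. */
static const char *unknown_opt_action(uint8_t opt_type)
{
	switch ((opt_type & 0xC0) >> 6) {
	case 0:  return "skip over option";
	case 1:  return "discard packet";
	case 2:  return "discard + ICMP Parameter Problem";
	default: return "discard + ICMP unless destination is multicast";
	}
}

int main(void)
{
	uint8_t hbh[8] = { 59 /* next header: none */, 0 /* hdrlen */ };

	printf("%d bytes\n", ext_hdr_len(hbh));		/* 8 bytes */
	printf("%s\n", unknown_opt_action(0x80));	/* case 2  */
	return 0;
}
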
xfrm6_output.c
   23  int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
   26  return ip6_find_1stfragopt(skb, prevhdr);
   31  static int xfrm6_local_dontfrag(struct sk_buff *skb)
   34  struct sock *sk = skb->sk;
   46  static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
   49  struct sock *sk = skb->sk;
   52  fl6.daddr = ipv6_hdr(skb)->daddr;
   57  static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
   60  struct sock *sk = skb->sk;
   63  fl6.daddr = ipv6_hdr(skb)->daddr;
[all …]
ip6_input.c
   50  int ip6_rcv_finish(struct sk_buff *skb)
   52  if (sysctl_ip_early_demux && !skb_dst(skb)) {
   55  ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
   57  ipprot->early_demux(skb);
   59  if (!skb_dst(skb))
   60  ip6_route_input(skb);
   62  return dst_input(skb);
   65  int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device…
   70  struct net *net = dev_net(skb->dev);
   72  if (skb->pkt_type == PACKET_OTHERHOST) {
[all …]
ip6_output.c
   59  int __ip6_local_out(struct sk_buff *skb)
   63  len = skb->len - sizeof(struct ipv6hdr);
   66  ipv6_hdr(skb)->payload_len = htons(len);
   68  return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
   69  skb_dst(skb)->dev, dst_output);
   72  int ip6_local_out(struct sk_buff *skb)
   76  err = __ip6_local_out(skb);
   78  err = dst_output(skb);
   84  static int ip6_finish_output2(struct sk_buff *skb)
   86  struct dst_entry *dst = skb_dst(skb);
[all …]
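
__ip6_local_out() at ip6_output.c:63-66 simply rewrites payload_len as the total length minus the fixed 40-byte IPv6 header, in network byte order. A tiny flat-buffer sketch of that fix-up (set_payload_len() is an illustrative name, not kernel API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IPV6_HDR_LEN 40

static void set_payload_len(uint8_t *pkt, size_t total_len)
{
	uint16_t payload = (uint16_t)(total_len - IPV6_HDR_LEN);

	/* payload_len lives at offsets 4-5 of the IPv6 header, big-endian */
	pkt[4] = (uint8_t)(payload >> 8);
	pkt[5] = (uint8_t)(payload & 0xFF);
}

int main(void)
{
	uint8_t pkt[IPV6_HDR_LEN + 8] = { 0x60 };	/* version 6, 8-byte payload */

	set_payload_len(pkt, sizeof(pkt));
	printf("payload_len = %u\n", (pkt[4] << 8) | pkt[5]);	/* prints 8 */
	return 0;
}
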
ip6_offload.c
   22  static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
   40  if (unlikely(!pskb_may_pull(skb, 8)))
   43  opth = (void *)skb->data;
   46  if (unlikely(!pskb_may_pull(skb, len)))
   50  __skb_pull(skb, len);
   56  static int ipv6_gso_send_check(struct sk_buff *skb)
   62  if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
   65  ipv6h = ipv6_hdr(skb);
   66  __skb_pull(skb, sizeof(*ipv6h));
   71  ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
[all …]
udp_offload.c
   19  static int udp6_ufo_send_check(struct sk_buff *skb)
   25  if (skb->encapsulation)
   28  if (!pskb_may_pull(skb, sizeof(*uh)))
   31  ipv6h = ipv6_hdr(skb);
   32  uh = udp_hdr(skb);
   34  uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
   36  skb->csum_start = skb_transport_header(skb) - skb->head;
   37  skb->csum_offset = offsetof(struct udphdr, check);
   38  skb->ip_summed = CHECKSUM_PARTIAL;
   42  static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
[all …]
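
The UFO path above seeds the UDP check field with the complement of the IPv6 pseudo-header sum (csum_ipv6_magic()) and leaves the payload portion for later (CHECKSUM_PARTIAL, csum_start/csum_offset). A conceptual userspace sketch of that pseudo-header sum, assuming plain byte arrays; csum_add(), csum_fold() and ipv6_pseudo_sum() are illustrative helpers, not the kernel's checksum API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum of a byte buffer (length may be odd). */
static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(p[i] << 8) | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)sum;
}

/* Pseudo-header: saddr(16) + daddr(16) + upper-layer length(4) + proto(4). */
static uint16_t ipv6_pseudo_sum(const uint8_t saddr[16], const uint8_t daddr[16],
				uint32_t len, uint8_t proto)
{
	uint8_t tail[8] = {
		(uint8_t)(len >> 24), (uint8_t)(len >> 16),
		(uint8_t)(len >> 8),  (uint8_t)len,
		0, 0, 0, proto,
	};
	uint32_t sum = 0;

	sum = csum_add(sum, saddr, 16);
	sum = csum_add(sum, daddr, 16);
	sum = csum_add(sum, tail, 8);
	return csum_fold(sum);
}

int main(void)
{
	uint8_t s[16] = { 0 }, d[16] = { 0 };

	s[15] = 1;			/* ::1 -> ::1, UDP (17), length 8 */
	d[15] = 1;
	printf("seed = 0x%04x\n",
	       (unsigned)(~ipv6_pseudo_sum(s, d, 8, 17) & 0xFFFF));	/* 0xffe4 */
	return 0;
}
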
/net/core/
skbuff.c
  119  static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
  123  msg, addr, skb->len, sz, skb->head, skb->data,
  124  (unsigned long)skb->tail, (unsigned long)skb->end,
  125  skb->dev ? skb->dev->name : "<NULL>");
  129  static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
  131  skb_panic(skb, sz, addr, __func__);
  134  static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
  136  skb_panic(skb, sz, addr, __func__);
  184  struct sk_buff *skb;
  187  skb = kmem_cache_alloc_node(skbuff_head_cache,
[all …]
/net/lapb/
lapb_subr.c
   53  struct sk_buff *skb;
   63  skb = skb_dequeue(&lapb->ack_queue);
   64  kfree_skb(skb);
   71  struct sk_buff *skb, *skb_prev = NULL;
   78  while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL) {
   80  skb_queue_head(&lapb->write_queue, skb);
   82  skb_append(skb_prev, skb, &lapb->write_queue);
   83  skb_prev = skb;
  111  int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
  118  skb->data[0], skb->data[1], skb->data[2]);
[all …]
/net/ipv4/
ip_input.c
  152  bool ip_call_ra_chain(struct sk_buff *skb)
  155  u8 protocol = ip_hdr(skb)->protocol;
  157  struct net_device *dev = skb->dev;
  169  if (ip_is_fragment(ip_hdr(skb))) {
  170  if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
  174  struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
  183  raw_rcv(last, skb);
  189  static int ip_local_deliver_finish(struct sk_buff *skb)
  191  struct net *net = dev_net(skb->dev);
  193  __skb_pull(skb, ip_hdrlen(skb));
[all …]
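
ip_local_deliver_finish() at ip_input.c:193 pulls ip_hdrlen(skb) bytes to expose the transport header; that length is just IHL * 4, taken from the low nibble of the first header byte. A minimal sketch, with ip_hdrlen() reimplemented over a flat buffer purely for illustration:

#include <stdint.h>
#include <stdio.h>

static unsigned int ip_hdrlen(const uint8_t *iph)
{
	return (iph[0] & 0x0F) * 4;	/* IHL is counted in 32-bit words */
}

int main(void)
{
	uint8_t pkt[24] = { 0x46 };	/* version 4, IHL 6 -> 24-byte header */
	const uint8_t *transport = pkt + ip_hdrlen(pkt);

	printf("l4 offset = %ld\n", (long)(transport - pkt));	/* prints 24 */
	return 0;
}
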
ip_output.c
   94  int __ip_local_out(struct sk_buff *skb)
   96  struct iphdr *iph = ip_hdr(skb);
   98  iph->tot_len = htons(skb->len);
  100  return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
  101  skb_dst(skb)->dev, dst_output);
  104  int ip_local_out(struct sk_buff *skb)
  108  err = __ip_local_out(skb);
  110  err = dst_output(skb);
  129  int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
  133  struct rtable *rt = skb_rtable(skb);
[all …]
tcp_output.c
   72  static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
   78  tcp_advance_send_head(sk, skb);
   79  tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
   81  tp->packets_out += tcp_skb_pcount(skb);
  299  static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
  301  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
  303  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
  307  static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
  313  TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
  328  static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
[all …]
netfilter.c
   20  int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
   22  struct net *net = dev_net(skb_dst(skb)->dev);
   23  const struct iphdr *iph = ip_hdr(skb);
   27  __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
   43  fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
   44  fl4.flowi4_mark = skb->mark;
   51  skb_dst_drop(skb);
   52  skb_dst_set(skb, &rt->dst);
   54  if (skb_dst(skb)->error)
   55  return skb_dst(skb)->error;
[all …]
xfrm4_output.c
   21  static int xfrm4_tunnel_check_size(struct sk_buff *skb)
   26  if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
   29  if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
   32  dst = skb_dst(skb);
   34  if (skb->len > mtu) {
   35  if (skb->sk)
   36  ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
   37  inet_sk(skb->sk)->inet_dport, mtu);
   39  icmp_send(skb, ICMP_DEST_UNREACH,
   47  int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
[all …]
ip_forward.c
   42  static int ip_forward_finish(struct sk_buff *skb)
   44  struct ip_options *opt = &(IPCB(skb)->opt);
   46  IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
   47  IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
   50  ip_forward_options(skb);
   52  return dst_output(skb);
   55  int ip_forward(struct sk_buff *skb)
   59  struct ip_options *opt = &(IPCB(skb)->opt);
   61  if (skb_warn_if_lro(skb))
   64  if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
[all …]
/net/ax25/
ax25_in.c
   38  static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
   43  if (!(*skb->data & AX25_SEG_FIRST)) {
   44  if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
   46  ax25->fragno = *skb->data & AX25_SEG_REM;
   47  skb_pull(skb, 1);	/* skip fragno */
   48  ax25->fraglen += skb->len;
   49  skb_queue_tail(&ax25->frag_queue, skb);
   86  if (*skb->data & AX25_SEG_FIRST) {
   88  ax25->fragno = *skb->data & AX25_SEG_REM;
   89  skb_pull(skb, 1);	/* skip fragno */
[all …]
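
ax25_rx_fragment() above keys everything off the first data byte: one bit marks the first segment of a train, the low bits count the segments still to come. A small sketch of that decode, assuming the usual AX25_SEG_FIRST/AX25_SEG_REM layout (0x80 / 0x7F); the constants and names here are assumptions for illustration, not taken from the snippet:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEG_FIRST 0x80	/* assumed: high bit = first segment        */
#define SEG_REM   0x7F	/* assumed: low 7 bits = segments remaining */

struct seg_info {
	bool first;
	unsigned int remaining;
};

static struct seg_info decode_seg_byte(uint8_t b)
{
	struct seg_info s = {
		.first     = (b & SEG_FIRST) != 0,
		.remaining = b & SEG_REM,
	};
	return s;
}

int main(void)
{
	struct seg_info s = decode_seg_byte(0x83);

	printf("first=%d remaining=%u\n", s.first, s.remaining);
	/* first=1 remaining=3: a new fragment train, 3 more segments follow */
	return 0;
}
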
/net/netfilter/ipvs/
ip_vs_xmit.c
  107  __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
  109  if (IP6CB(skb)->frag_max_size) {
  113  if (IP6CB(skb)->frag_max_size > mtu)
  116  else if (skb->len > mtu && !skb_is_gso(skb)) {
  161  __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
  164  struct net *net = dev_net(skb_dst(skb)->dev);
  227  iph = ip_hdr(skb);
  236  ort = skb_rtable(skb);
  254  struct sock *sk = skb->sk;
  261  ort = skb_rtable(skb);
[all …]
/net/llc/
llc_c_ev.c
   78  struct sk_buff *skb;
   88  skb = skb_peek(&llc->pdu_unack_q);
   89  pdu = llc_pdu_sn_hdr(skb);
   91  skb = skb_peek_tail(&llc->pdu_unack_q);
   92  pdu = llc_pdu_sn_hdr(skb);
   99  int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb)
  101  const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
  107  int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb)
  109  const struct llc_conn_state_ev *ev = llc_conn_ev(skb);
  115  int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb)
[all …]
/net/decnet/
dn_nsp_in.c
   81  static void dn_log_martian(struct sk_buff *skb, const char *msg)
   84  char *devname = skb->dev ? skb->dev->name : "???";
   85  struct dn_skb_cb *cb = DN_SKB_CB(skb);
  100  static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
  110  wakeup |= dn_nsp_check_xmit_queue(sk, skb,
  120  wakeup |= dn_nsp_check_xmit_queue(sk, skb,
  136  static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth)
  138  __le16 *ptr = (__le16 *)skb->data;
  142  if (skb->len < 2)
  146  skb_pull(skb, 2);
[all …]
/net/x25/
x25_dev.c
   29  static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
   35  if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
   38  frametype = skb->data[2];
   39  lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
   46  x25_link_control(skb, nb, frametype);
   56  skb_reset_transport_header(skb);
   59  queued = x25_process_rx_frame(sk, skb);
   61  queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
   72  return x25_rx_call_request(skb, nb, lci);
   79  if (x25_forward_data(lci, nb, skb)) {
[all …]
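
x25_receive_data() at x25_dev.c:38-39 extracts the 12-bit logical channel identifier from the low nibble of byte 0 plus byte 1, and the frame type from byte 2. A flat-buffer sketch of the same decode (x25_lci() is an illustrative name; the sample values are hypothetical):

#include <stdint.h>
#include <stdio.h>

static unsigned int x25_lci(const uint8_t *hdr)
{
	/* high nibble of byte 0 is the GFI; the LCI is the remaining 12 bits */
	return ((hdr[0] << 8) & 0xF00) | hdr[1];
}

int main(void)
{
	uint8_t hdr[3] = { 0x12, 0x34, 0x0B };	/* GFI 0x1, LCI 0x234, type 0x0B */

	printf("lci=0x%03x type=0x%02x\n", x25_lci(hdr), (unsigned)hdr[2]);
	/* prints: lci=0x234 type=0x0b */
	return 0;
}
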
/net/dsa/
tag_edsa.c
   20  netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
   26  dev->stats.tx_bytes += skb->len;
   34  if (skb->protocol == htons(ETH_P_8021Q)) {
   35  if (skb_cow_head(skb, DSA_HLEN) < 0)
   37  skb_push(skb, DSA_HLEN);
   39  memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
   44  edsa_header = skb->data + 2 * ETH_ALEN;
   60  if (skb_cow_head(skb, EDSA_HLEN) < 0)
   62  skb_push(skb, EDSA_HLEN);
   64  memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN);
[all …]
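
edsa_xmit() above makes room for the switch tag by pushing headroom and sliding the two MAC addresses back to the new start of the frame; the freed bytes after them become the DSA/EDSA header. A rough userspace sketch of that pattern, assuming the caller guarantees enough headroom; insert_tag() is an illustrative name, not the kernel API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* 'frame' points at the current start of the frame inside a larger buffer
 * with at least 'hlen' unused bytes in front of it. Returns the new frame
 * start, with a zeroed hlen-byte tag sitting right after the MAC addresses. */
static uint8_t *insert_tag(uint8_t *frame, size_t hlen)
{
	uint8_t *new_start = frame - hlen;		/* like skb_push()   */

	memmove(new_start, frame, 2 * ETH_ALEN);	/* MACs to the front */
	memset(new_start + 2 * ETH_ALEN, 0, hlen);	/* room for the tag  */
	return new_start;
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	uint8_t *frame = buf + 8;			/* 8 bytes of headroom */

	memset(frame, 0xAA, 2 * ETH_ALEN);		/* fake MAC addresses  */
	frame = insert_tag(frame, 4);			/* 4-byte DSA-style tag */
	printf("headroom left: %ld\n", (long)(frame - buf));	/* prints 4 */
	return 0;
}
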
/net/mac802154/
wpan.c
   38  static inline int mac802154_fetch_skb_u8(struct sk_buff *skb, u8 *val)
   40  if (unlikely(!pskb_may_pull(skb, 1)))
   43  *val = skb->data[0];
   44  skb_pull(skb, 1);
   49  static inline int mac802154_fetch_skb_u16(struct sk_buff *skb, u16 *val)
   51  if (unlikely(!pskb_may_pull(skb, 2)))
   54  *val = skb->data[0] | (skb->data[1] << 8);
   55  skb_pull(skb, 2);
  128  static int mac802154_header_create(struct sk_buff *skb,
  146  head[pos++] = mac_cb(skb)->seq;	/* DSN/BSN */
[all …]
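
The mac802154 fetch helpers above amount to a bounds check, a little-endian read, and a pull of the consumed bytes. A minimal sketch over a hypothetical cursor struct (fetch_u16_le() and struct cursor are illustrative names, not kernel API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cursor {			/* stand-in for the skb data/len pair */
	const uint8_t *data;
	size_t len;
};

static int fetch_u16_le(struct cursor *c, uint16_t *val)
{
	if (c->len < 2)
		return -1;				/* not enough data left */

	*val = (uint16_t)(c->data[0] | (c->data[1] << 8));	/* little-endian */
	c->data += 2;					/* "pull" the two bytes */
	c->len -= 2;
	return 0;
}

int main(void)
{
	const uint8_t frame[] = { 0x34, 0x12 };
	struct cursor c = { frame, sizeof(frame) };
	uint16_t v;

	if (!fetch_u16_le(&c, &v))
		printf("0x%04x\n", v);			/* prints 0x1234 */
	return 0;
}
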
