
Searched refs:skb (Results 1 – 25 of 1081) sorted by relevance

/net/xfrm/
xfrm_output.c
27 static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
28 static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
30 static int xfrm_skb_check_space(struct sk_buff *skb) in xfrm_skb_check_space() argument
32 struct dst_entry *dst = skb_dst(skb); in xfrm_skb_check_space()
34 - skb_headroom(skb); in xfrm_skb_check_space()
35 int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); in xfrm_skb_check_space()
44 return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); in xfrm_skb_check_space()
51 static struct dst_entry *skb_dst_pop(struct sk_buff *skb) in skb_dst_pop() argument
53 struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb))); in skb_dst_pop()
55 skb_dst_drop(skb); in skb_dst_pop()
[all …]
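
The xfrm_skb_check_space() hits above show the standard pattern for making room before header insertion: compare the egress device's needed_headroom/needed_tailroom against skb_headroom()/skb_tailroom() and reallocate with pskb_expand_head() only on shortfall. A minimal sketch of that check, under a hypothetical helper name (not code from this file):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical: grow the skb's head/tail room to what the device needs.
 * GFP_ATOMIC because the transmit path cannot sleep. */
static int example_check_space(struct sk_buff *skb, const struct net_device *dev)
{
        int nhead = dev->needed_headroom - skb_headroom(skb);
        int ntail = dev->needed_tailroom - skb_tailroom(skb);

        if (nhead <= 0 && ntail <= 0)
                return 0;               /* already enough room */

        return pskb_expand_head(skb, max(nhead, 0), max(ntail, 0), GFP_ATOMIC);
}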
xfrm_input.c
39 int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
101 static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol, in xfrm_rcv_cb() argument
112 ret = afinfo->callback(skb, protocol, err); in xfrm_rcv_cb()
118 struct sec_path *secpath_set(struct sk_buff *skb) in secpath_set() argument
120 struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH); in secpath_set()
122 sp = skb_ext_add(skb, SKB_EXT_SEC_PATH); in secpath_set()
141 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) in xfrm_parse_spi() argument
158 if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr))) in xfrm_parse_spi()
160 *spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2))); in xfrm_parse_spi()
167 if (!pskb_may_pull(skb, hlen)) in xfrm_parse_spi()
[all …]
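
xfrm_parse_spi() guards every header access with pskb_may_pull(), which makes sure the first N bytes of the packet are linear (they may otherwise live in paged fragments) before they are dereferenced. The same idiom in isolation, as a hypothetical peek that assumes skb->data already points at an ESP header:

#include <linux/ip.h>           /* struct ip_esp_hdr */
#include <linux/skbuff.h>

static int example_peek_spi(struct sk_buff *skb, __be32 *spi)
{
        const struct ip_esp_hdr *esph;

        if (!pskb_may_pull(skb, sizeof(*esph)))
                return -EINVAL;         /* packet too short */

        esph = (const struct ip_esp_hdr *)skb->data;
        *spi = esph->spi;               /* safe: bytes are now linear */
        return 0;
}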
/net/bridge/
br_netfilter_hooks.c
66 #define IS_IP(skb) \ argument
67 (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
69 #define IS_IPV6(skb) \ argument
70 (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
72 #define IS_ARP(skb) \ argument
73 (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
75 static inline __be16 vlan_proto(const struct sk_buff *skb) in vlan_proto() argument
77 if (skb_vlan_tag_present(skb)) in vlan_proto()
78 return skb->protocol; in vlan_proto()
79 else if (skb->protocol == htons(ETH_P_8021Q)) in vlan_proto()
[all …]
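
The IS_IP()/IS_IPV6()/IS_ARP() hits deliberately exclude VLAN-tagged frames; for tagged traffic, vlan_proto() recovers the encapsulated protocol either from skb->protocol (hardware-accelerated tag) or from the in-band 802.1Q header. A hypothetical inline equivalent of IS_IP():

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static inline bool example_is_plain_ipv4(const struct sk_buff *skb)
{
        /* Match only untagged IPv4; tagged frames take a different path. */
        return !skb_vlan_tag_present(skb) &&
               skb->protocol == htons(ETH_P_IP);
}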
br_forward.c
22 const struct sk_buff *skb) in should_deliver() argument
27 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && in should_deliver()
28 p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) && in should_deliver()
29 nbp_switchdev_allowed_egress(p, skb) && in should_deliver()
30 !br_skb_isolated(p, skb); in should_deliver()
33 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) in br_dev_queue_push_xmit() argument
35 skb_push(skb, ETH_HLEN); in br_dev_queue_push_xmit()
36 if (!is_skb_forwardable(skb->dev, skb)) in br_dev_queue_push_xmit()
39 br_drop_fake_rtable(skb); in br_dev_queue_push_xmit()
41 if (skb->ip_summed == CHECKSUM_PARTIAL && in br_dev_queue_push_xmit()
[all …]
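
br_dev_queue_push_xmit() is the egress mirror image of receive parsing: skb_push(skb, ETH_HLEN) makes the Ethernet header part of the data again (input processing had pulled it off), and is_skb_forwardable() rejects frames too large for the egress device. Roughly, as a hypothetical helper:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_push_xmit(struct sk_buff *skb)
{
        skb_push(skb, ETH_HLEN);        /* re-expose the MAC header */

        if (!is_skb_forwardable(skb->dev, skb)) {
                kfree_skb(skb);         /* would not fit the egress device */
                return -EMSGSIZE;
        }
        return dev_queue_xmit(skb);     /* hand off to the qdisc layer */
}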
br_input.c
27 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) in br_netif_receive_skb() argument
29 br_drop_fake_rtable(skb); in br_netif_receive_skb()
30 return netif_receive_skb(skb); in br_netif_receive_skb()
33 static int br_pass_frame_up(struct sk_buff *skb) in br_pass_frame_up() argument
35 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; in br_pass_frame_up()
39 dev_sw_netstats_rx_add(brdev, skb->len); in br_pass_frame_up()
47 br_switchdev_frame_unmark(skb); in br_pass_frame_up()
54 !br_allowed_egress(vg, skb)) { in br_pass_frame_up()
55 kfree_skb(skb); in br_pass_frame_up()
59 indev = skb->dev; in br_pass_frame_up()
[all …]
br_netfilter_ipv6.c
46 static int br_nf_check_hbh_len(struct sk_buff *skb) in br_nf_check_hbh_len() argument
48 unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1); in br_nf_check_hbh_len()
50 const unsigned char *nh = skb_network_header(skb); in br_nf_check_hbh_len()
54 if ((raw + len) - skb->data > skb_headlen(skb)) in br_nf_check_hbh_len()
76 ipv6_hdr(skb)->payload_len) in br_nf_check_hbh_len()
78 if (pkt_len > skb->len - sizeof(struct ipv6hdr)) in br_nf_check_hbh_len()
80 if (pskb_trim_rcsum(skb, in br_nf_check_hbh_len()
83 nh = skb_network_header(skb); in br_nf_check_hbh_len()
99 int br_validate_ipv6(struct net *net, struct sk_buff *skb) in br_validate_ipv6() argument
102 struct inet6_dev *idev = __in6_dev_get(skb->dev); in br_validate_ipv6()
[all …]
/net/ipv6/
exthdrs.c
64 static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff, in ip6_tlvopt_unknown() argument
78 switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { in ip6_tlvopt_unknown()
89 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) in ip6_tlvopt_unknown()
93 icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); in ip6_tlvopt_unknown()
98 kfree_skb(skb); in ip6_tlvopt_unknown()
102 static bool ipv6_hop_ra(struct sk_buff *skb, int optoff);
103 static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff);
104 static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff);
105 static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff);
107 static bool ipv6_dest_hao(struct sk_buff *skb, int optoff);
[all …]
ip6_input.c
49 struct sk_buff *skb) in ip6_rcv_finish_core() argument
52 !skb_dst(skb) && !skb->sk) { in ip6_rcv_finish_core()
53 switch (ipv6_hdr(skb)->nexthdr) { in ip6_rcv_finish_core()
56 tcp_v6_early_demux(skb); in ip6_rcv_finish_core()
60 udp_v6_early_demux(skb); in ip6_rcv_finish_core()
65 if (!skb_valid_dst(skb)) in ip6_rcv_finish_core()
66 ip6_route_input(skb); in ip6_rcv_finish_core()
69 int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) in ip6_rcv_finish() argument
74 skb = l3mdev_ip6_rcv(skb); in ip6_rcv_finish()
75 if (!skb) in ip6_rcv_finish()
[all …]
ip6_offload.c
32 #define indirect_call_gro_receive_l4(f2, f1, cb, head, skb) \ argument
34 unlikely(gro_recursion_inc_test(skb)) ? \
35 NAPI_GRO_CB(skb)->flush |= 1, NULL : \
36 INDIRECT_CALL_L4(cb, f2, f1, head, skb); \
39 static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) in ipv6_gso_pull_exthdrs() argument
57 if (unlikely(!pskb_may_pull(skb, 8))) in ipv6_gso_pull_exthdrs()
60 opth = (void *)skb->data; in ipv6_gso_pull_exthdrs()
63 if (unlikely(!pskb_may_pull(skb, len))) in ipv6_gso_pull_exthdrs()
66 opth = (void *)skb->data; in ipv6_gso_pull_exthdrs()
68 __skb_pull(skb, len); in ipv6_gso_pull_exthdrs()
[all …]
udp_offload.c
17 static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, in udp6_ufo_fragment() argument
31 if (skb->encapsulation && skb_shinfo(skb)->gso_type & in udp6_ufo_fragment()
33 segs = skb_udp_tunnel_segment(skb, features, true); in udp6_ufo_fragment()
38 if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4))) in udp6_ufo_fragment()
41 if (!pskb_may_pull(skb, sizeof(struct udphdr))) in udp6_ufo_fragment()
44 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in udp6_ufo_fragment()
45 return __udp_gso_segment(skb, features, true); in udp6_ufo_fragment()
47 mss = skb_shinfo(skb)->gso_size; in udp6_ufo_fragment()
48 if (unlikely(skb->len <= mss)) in udp6_ufo_fragment()
55 uh = udp_hdr(skb); in udp6_ufo_fragment()
[all …]
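
udp6_ufo_fragment() dispatches on skb_shinfo(skb)->gso_type before doing any work: only packets marked as UDP GSO at creation are segmented here. The guard reduced to a hypothetical predicate:

#include <linux/skbuff.h>

static bool example_is_udp_gso(const struct sk_buff *skb)
{
        return skb_is_gso(skb) &&
               (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4));
}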
esp6_offload.c
50 struct sk_buff *skb) in esp6_gro_receive() argument
52 int offset = skb_gro_offset(skb); in esp6_gro_receive()
60 if (!pskb_pull(skb, offset)) in esp6_gro_receive()
63 if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) in esp6_gro_receive()
66 xo = xfrm_offload(skb); in esp6_gro_receive()
68 struct sec_path *sp = secpath_set(skb); in esp6_gro_receive()
76 x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, in esp6_gro_receive()
77 (xfrm_address_t *)&ipv6_hdr(skb)->daddr, in esp6_gro_receive()
82 skb->mark = xfrm_smark_get(skb->mark, x); in esp6_gro_receive()
87 xo = xfrm_offload(skb); in esp6_gro_receive()
[all …]
/net/core/
skbuff.c
106 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
110 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
111 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
112 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
116 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
118 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
121 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
123 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
176 struct sk_buff *skb; in napi_skb_cache_get() local
186 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
[all …]
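
skbuff.c implements the allocators the rest of these results keep calling into. For orientation, the basic lifecycle from a caller's point of view, as a hypothetical builder (GFP_KERNEL is only valid in process context; softirq callers would pass GFP_ATOMIC):

#include <linux/skbuff.h>

static struct sk_buff *example_build(const void *data, unsigned int len)
{
        struct sk_buff *skb = alloc_skb(len + NET_SKB_PAD, GFP_KERNEL);

        if (!skb)
                return NULL;
        skb_reserve(skb, NET_SKB_PAD);  /* headroom for later skb_push() */
        skb_put_data(skb, data, len);   /* append the payload at the tail */
        return skb;                     /* caller releases with kfree_skb() */
}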
/net/ipv4/
udp_offload.c
14 static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, in __skb_udp_tunnel_segment() argument
16 struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, in __skb_udp_tunnel_segment() argument
20 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); in __skb_udp_tunnel_segment()
23 struct udphdr *uh = udp_hdr(skb); in __skb_udp_tunnel_segment()
24 u16 mac_offset = skb->mac_header; in __skb_udp_tunnel_segment()
25 __be16 protocol = skb->protocol; in __skb_udp_tunnel_segment()
26 u16 mac_len = skb->mac_len; in __skb_udp_tunnel_segment()
31 if (unlikely(!pskb_may_pull(skb, tnl_hlen))) in __skb_udp_tunnel_segment()
40 if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) in __skb_udp_tunnel_segment()
43 partial = (__force __wsum)htonl(skb->len); in __skb_udp_tunnel_segment()
[all …]
ip_input.c
148 bool ip_call_ra_chain(struct sk_buff *skb) in ip_call_ra_chain() argument
151 u8 protocol = ip_hdr(skb)->protocol; in ip_call_ra_chain()
153 struct net_device *dev = skb->dev; in ip_call_ra_chain()
165 if (ip_is_fragment(ip_hdr(skb))) { in ip_call_ra_chain()
166 if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN)) in ip_call_ra_chain()
170 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_call_ra_chain()
179 raw_rcv(last, skb); in ip_call_ra_chain()
187 void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol) in ip_protocol_deliver_rcu() argument
193 raw = raw_local_deliver(skb, protocol); in ip_protocol_deliver_rcu()
198 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { in ip_protocol_deliver_rcu()
[all …]
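
ip_call_ra_chain() has to deliver one packet to several Router Alert listeners, so each intermediate listener receives a clone and only the last one the original. The clone-and-deliver idiom, as a hypothetical wrapper (raw_rcv() consumes the buffer it is handed):

#include <linux/skbuff.h>
#include <net/raw.h>

static void example_deliver_copy(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

        if (skb2)
                raw_rcv(sk, skb2);      /* the clone is consumed here */
}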
gre_offload.c
14 static struct sk_buff *gre_gso_segment(struct sk_buff *skb, in gre_gso_segment() argument
17 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); in gre_gso_segment()
20 u16 mac_offset = skb->mac_header; in gre_gso_segment()
21 __be16 protocol = skb->protocol; in gre_gso_segment()
22 u16 mac_len = skb->mac_len; in gre_gso_segment()
25 if (!skb->encapsulation) in gre_gso_segment()
31 if (unlikely(!pskb_may_pull(skb, tnl_hlen))) in gre_gso_segment()
35 skb->encapsulation = 0; in gre_gso_segment()
36 SKB_GSO_CB(skb)->encap_level = 0; in gre_gso_segment()
37 __skb_pull(skb, tnl_hlen); in gre_gso_segment()
[all …]
tcp_offload.c
14 static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, in tcp_gso_tstamp() argument
17 while (skb) { in tcp_gso_tstamp()
19 skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP; in tcp_gso_tstamp()
20 skb_shinfo(skb)->tskey = ts_seq; in tcp_gso_tstamp()
24 skb = skb->next; in tcp_gso_tstamp()
29 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, in tcp4_gso_segment() argument
32 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)) in tcp4_gso_segment()
35 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) in tcp4_gso_segment()
38 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { in tcp4_gso_segment()
39 const struct iphdr *iph = ip_hdr(skb); in tcp4_gso_segment()
[all …]
esp4_offload.c
29 struct sk_buff *skb) in esp4_gro_receive() argument
31 int offset = skb_gro_offset(skb); in esp4_gro_receive()
37 if (!pskb_pull(skb, offset)) in esp4_gro_receive()
40 if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0) in esp4_gro_receive()
43 xo = xfrm_offload(skb); in esp4_gro_receive()
45 struct sec_path *sp = secpath_set(skb); in esp4_gro_receive()
53 x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, in esp4_gro_receive()
54 (xfrm_address_t *)&ip_hdr(skb)->daddr, in esp4_gro_receive()
59 skb->mark = xfrm_smark_get(skb->mark, x); in esp4_gro_receive()
64 xo = xfrm_offload(skb); in esp4_gro_receive()
[all …]
/net/bridge/netfilter/
nf_conntrack_bridge.c
28 struct sk_buff *skb, in nf_br_ip_fragment() argument
34 int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; in nf_br_ip_fragment()
36 ktime_t tstamp = skb->tstamp; in nf_br_ip_fragment()
42 if (skb->ip_summed == CHECKSUM_PARTIAL && in nf_br_ip_fragment()
43 (err = skb_checksum_help(skb))) in nf_br_ip_fragment()
46 iph = ip_hdr(skb); in nf_br_ip_fragment()
54 ll_rs = LL_RESERVED_SPACE(skb->dev); in nf_br_ip_fragment()
55 mtu = skb->dev->mtu; in nf_br_ip_fragment()
57 if (skb_has_frag_list(skb)) { in nf_br_ip_fragment()
58 unsigned int first_len = skb_pagelen(skb); in nf_br_ip_fragment()
[all …]
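
nf_br_ip_fragment() starts by resolving any deferred hardware checksum: a CHECKSUM_PARTIAL skb carries only a partial sum that the NIC was expected to finish, which becomes meaningless once the packet is split in software. The guard in isolation, as a hypothetical helper:

#include <linux/skbuff.h>

static int example_finish_csum(struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                return skb_checksum_help(skb);  /* compute the sum in software */
        return 0;
}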
/net/sched/
sch_frag.c
17 int (*xmit)(struct sk_buff *skb);
22 static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) in sch_frag_xmit() argument
26 if (skb_cow_head(skb, data->l2_len) < 0) { in sch_frag_xmit()
27 kfree_skb(skb); in sch_frag_xmit()
31 __skb_dst_copy(skb, data->dst); in sch_frag_xmit()
32 *qdisc_skb_cb(skb) = data->cb; in sch_frag_xmit()
33 skb->inner_protocol = data->inner_protocol; in sch_frag_xmit()
35 __vlan_hwaccel_put_tag(skb, data->vlan_proto, in sch_frag_xmit()
38 __vlan_hwaccel_clear_tag(skb); in sch_frag_xmit()
41 skb_push(skb, data->l2_len); in sch_frag_xmit()
[all …]
/net/openvswitch/
actions.c
36 struct sk_buff *skb; member
123 static struct deferred_action *add_deferred_actions(struct sk_buff *skb, in add_deferred_actions() argument
134 da->skb = skb; in add_deferred_actions()
153 static int clone_execute(struct datapath *dp, struct sk_buff *skb,
159 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
163 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, in push_mpls() argument
168 err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len); in push_mpls()
179 static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, in pop_mpls() argument
184 err = skb_mpls_pop(skb, ethertype, skb->mac_len, in pop_mpls()
196 static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key, in set_mpls() argument
[all …]
/net/netfilter/ipvs/
ip_vs_xmit.c
107 __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu) in __mtu_check_toobig_v6() argument
109 if (IP6CB(skb)->frag_max_size) { in __mtu_check_toobig_v6()
113 if (IP6CB(skb)->frag_max_size > mtu) in __mtu_check_toobig_v6()
116 else if (skb->len > mtu && !skb_is_gso(skb)) { in __mtu_check_toobig_v6()
165 static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb, in crosses_local_route_boundary() argument
177 int addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); in crosses_local_route_boundary()
180 (!skb->dev || skb->dev->flags & IFF_LOOPBACK) && in crosses_local_route_boundary()
183 (struct rt6_info *)skb_dst(skb)); in crosses_local_route_boundary()
187 source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr); in crosses_local_route_boundary()
188 old_rt_is_local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; in crosses_local_route_boundary()
[all …]
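
__mtu_check_toobig_v6() encodes the canonical "packet too big" test: oversize is fatal only for non-GSO packets, because GSO buffers are segmented into MTU-sized pieces later on the transmit path. As a hypothetical predicate:

#include <linux/skbuff.h>

static bool example_mtu_exceeded(const struct sk_buff *skb, u32 mtu)
{
        return skb->len > mtu && !skb_is_gso(skb);
}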
/net/ieee802154/6lowpan/
rx.c
21 static int lowpan_give_skb_to_device(struct sk_buff *skb) in lowpan_give_skb_to_device() argument
23 skb->protocol = htons(ETH_P_IPV6); in lowpan_give_skb_to_device()
24 skb->dev->stats.rx_packets++; in lowpan_give_skb_to_device()
25 skb->dev->stats.rx_bytes += skb->len; in lowpan_give_skb_to_device()
27 return netif_rx(skb); in lowpan_give_skb_to_device()
30 static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res) in lowpan_rx_handlers_result() argument
40 kfree_skb(skb); in lowpan_rx_handlers_result()
46 return lowpan_give_skb_to_device(skb); in lowpan_rx_handlers_result()
64 static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb) in lowpan_rx_h_frag() argument
68 if (!(lowpan_is_frag1(*skb_network_header(skb)) || in lowpan_rx_h_frag()
[all …]
/net/netfilter/
nf_flow_table_ip.c
22 struct sk_buff *skb, unsigned int thoff) in nf_flow_state_check() argument
29 tcph = (void *)(skb_network_header(skb) + thoff); in nf_flow_state_check()
38 static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff, in nf_flow_nat_ip_tcp() argument
43 tcph = (void *)(skb_network_header(skb) + thoff); in nf_flow_nat_ip_tcp()
44 inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true); in nf_flow_nat_ip_tcp()
47 static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff, in nf_flow_nat_ip_udp() argument
52 udph = (void *)(skb_network_header(skb) + thoff); in nf_flow_nat_ip_udp()
53 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { in nf_flow_nat_ip_udp()
54 inet_proto_csum_replace4(&udph->check, skb, addr, in nf_flow_nat_ip_udp()
61 static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph, in nf_flow_nat_ip_l4proto() argument
[all …]
/net/lapb/
lapb_subr.c
48 struct sk_buff *skb; in lapb_frames_acked() local
58 skb = skb_dequeue(&lapb->ack_queue); in lapb_frames_acked()
59 kfree_skb(skb); in lapb_frames_acked()
66 struct sk_buff *skb, *skb_prev = NULL; in lapb_requeue_frames() local
73 while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL) { in lapb_requeue_frames()
75 skb_queue_head(&lapb->write_queue, skb); in lapb_requeue_frames()
77 skb_append(skb_prev, skb, &lapb->write_queue); in lapb_requeue_frames()
78 skb_prev = skb; in lapb_requeue_frames()
106 int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, in lapb_decode() argument
111 lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data); in lapb_decode()
[all …]
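
lapb_frames_acked() drains acknowledged frames with the usual dequeue-and-free loop, sketched below under a hypothetical name; note the kernel also offers skb_queue_purge(), which does exactly this.

#include <linux/skbuff.h>

static void example_drain(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(list)) != NULL)
                kfree_skb(skb);
}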
/net/ax25/
ax25_in.c
34 static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) in ax25_rx_fragment() argument
39 if (!(*skb->data & AX25_SEG_FIRST)) { in ax25_rx_fragment()
40 if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) { in ax25_rx_fragment()
42 ax25->fragno = *skb->data & AX25_SEG_REM; in ax25_rx_fragment()
43 skb_pull(skb, 1); /* skip fragno */ in ax25_rx_fragment()
44 ax25->fraglen += skb->len; in ax25_rx_fragment()
45 skb_queue_tail(&ax25->frag_queue, skb); in ax25_rx_fragment()
82 if (*skb->data & AX25_SEG_FIRST) { in ax25_rx_fragment()
84 ax25->fragno = *skb->data & AX25_SEG_REM; in ax25_rx_fragment()
85 skb_pull(skb, 1); /* skip fragno */ in ax25_rx_fragment()
[all …]
