/net/sched/ |
D | act_csum.c |
    101  unsigned int ihl, unsigned int ipl,  in tcf_csum_skb_nextlayer() argument
    105  int hl = ihl + jhl;  in tcf_csum_skb_nextlayer()
    111  return (void *)(skb_network_header(skb) + ihl);  in tcf_csum_skb_nextlayer()
    115  unsigned int ihl, unsigned int ipl)  in tcf_csum_ipv4_icmp() argument
    119  icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));  in tcf_csum_ipv4_icmp()
    124  skb->csum = csum_partial(icmph, ipl - ihl, 0);  in tcf_csum_ipv4_icmp()
    133  unsigned int ihl, unsigned int ipl)  in tcf_csum_ipv4_igmp() argument
    137  igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));  in tcf_csum_ipv4_igmp()
    142  skb->csum = csum_partial(igmph, ipl - ihl, 0);  in tcf_csum_ipv4_igmp()
    151  unsigned int ihl, unsigned int ipl)  in tcf_csum_ipv6_icmp() argument
    [all …]
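The act_csum matches above share one piece of arithmetic: the next-layer header begins ihl bytes into the IPv4 packet, and its checksum is taken over ipl - ihl bytes (total length minus header length). A minimal userspace sketch of that arithmetic follows; the packet bytes and the inet_csum() helper are invented for illustration and are not the kernel's csum_partial().

/*
 * Hedged sketch, not kernel code: recompute a next-layer (here ICMP)
 * checksum the way the act_csum matches above do, i.e. over ipl - ihl
 * bytes starting ihl bytes past the start of the IPv4 header.  The
 * packet bytes and inet_csum() are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

/* RFC 1071 Internet checksum over len bytes. */
static uint16_t inet_csum(const uint8_t *data, size_t len)
{
    uint32_t sum = 0;

    while (len > 1) {
        sum += (uint32_t)data[0] << 8 | data[1];
        data += 2;
        len -= 2;
    }
    if (len)                    /* odd trailing byte */
        sum += (uint32_t)data[0] << 8;
    while (sum >> 16)           /* fold carries */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    /* 20-byte IPv4 header followed by an 8-byte ICMP echo request. */
    uint8_t pkt[] = {
        0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
        0x40, 0x01, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x01,
        0x0a, 0x00, 0x00, 0x02,
        0x08, 0x00, 0x00, 0x00, 0x12, 0x34, 0x00, 0x01,
    };
    unsigned int ihl = (pkt[0] & 0x0f) * 4;          /* header length in bytes */
    unsigned int ipl = (pkt[2] << 8) | pkt[3];       /* total packet length */
    uint16_t csum = inet_csum(pkt + ihl, ipl - ihl); /* ICMP covers ipl - ihl bytes */

    pkt[ihl + 2] = csum >> 8;                        /* patch the ICMP checksum field */
    pkt[ihl + 3] = csum & 0xff;
    printf("ihl=%u ipl=%u icmp csum=0x%04x\n", ihl, ipl, (unsigned int)csum);
    return 0;
}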
|
D | act_nat.c |
    98   int ihl;  in tcf_nat() local
    148  ihl = iph->ihl * 4;  in tcf_nat()
    156  if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||  in tcf_nat()
    157  skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))  in tcf_nat()
    160  tcph = (void *)(skb_network_header(skb) + ihl);  in tcf_nat()
    169  if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||  in tcf_nat()
    170  skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))  in tcf_nat()
    173  udph = (void *)(skb_network_header(skb) + ihl);  in tcf_nat()
    186  if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))  in tcf_nat()
    189  icmph = (void *)(skb_network_header(skb) + ihl);  in tcf_nat()
    [all …]
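tcf_nat() uses ihl = iph->ihl * 4 twice: as the bound that pskb_may_pull()/skb_try_make_writable() must satisfy before the transport header is touched, and as the offset at which that header is then addressed. Below is a compact sketch of the same two steps on a flat buffer; l4_offset() and the sample packet are made up for the sketch, not kernel interfaces.

/*
 * Not kernel code: the two uses of ihl seen in tcf_nat(), reduced to a
 * flat buffer.  First check that ihl plus the piece of the transport
 * header we need is really present (the pskb_may_pull() /
 * skb_try_make_writable() step), only then address network header + ihl.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Return the offset of the transport header, or 0 if the packet is too
 * short to hold the IPv4 header plus "need" transport-header bytes. */
static size_t l4_offset(const uint8_t *pkt, size_t len, size_t need)
{
    size_t ihl;

    if (len < 20)                      /* not even a base IPv4 header */
        return 0;
    ihl = (pkt[0] & 0x0f) * 4;         /* ihl = iph->ihl * 4 */
    if (ihl < 20 || len < ihl + need)  /* would overrun the buffer */
        return 0;
    return ihl;                        /* transport header starts here */
}

int main(void)
{
    /* 20-byte IPv4 header followed by 4 bytes of TCP ports. */
    uint8_t pkt[24] = { 0x45, [22] = 0x00, [23] = 0x50 }; /* dest port 80 */
    size_t off = l4_offset(pkt, sizeof(pkt), 4);

    if (off)
        printf("l4 at offset %zu, dest port %u\n", off,
               (unsigned int)pkt[off + 2] << 8 | pkt[off + 3]);
    return 0;
}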
|
/net/ipv4/ |
D | ah4.c |
    81   int l = iph->ihl*4 - sizeof(struct iphdr);  in ip_clear_mutable_options()
    127  int ihl = ip_hdrlen(skb);  in ah_output_done() local
    130  icv = ah_tmp_icv(ahp->ahash, iph, ihl);  in ah_output_done()
    136  if (top_iph->ihl != 5) {  in ah_output_done()
    138  memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));  in ah_output_done()
    149  int ihl;  in ah_output() local
    172  ihl = ip_hdrlen(skb);  in ah_output()
    179  iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);  in ah_output()
    182  seqhi = (__be32 *)((char *)iph + ihl);  in ah_output()
    196  if (top_iph->ihl != 5) {  in ah_output()
    [all …]
|
D | xfrm4_mode_transport.c |
    24  int ihl = iph->ihl * 4;  in xfrm4_transport_output() local
    29  skb->transport_header = skb->network_header + ihl;  in xfrm4_transport_output()
    30  __skb_pull(skb, ihl);  in xfrm4_transport_output()
    31  memmove(skb_network_header(skb), iph, ihl);  in xfrm4_transport_output()
    45  int ihl = skb->data - skb_transport_header(skb);  in xfrm4_transport_input() local
    49  skb_network_header(skb), ihl);  in xfrm4_transport_input()
    52  ip_hdr(skb)->tot_len = htons(skb->len + ihl);  in xfrm4_transport_input()
|
D | xfrm4_mode_beet.c |
    24   iph->ihl = 5;  in xfrm4_beet_make_header()
    51   hdrlen + (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));  in xfrm4_beet_output()
    61   __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);  in xfrm4_beet_output()
    75   top_iph->ihl = sizeof(struct iphdr) / 4;  in xfrm4_beet_output()
    119  iph->ihl += optlen / 4;  in xfrm4_beet_input()
    124  iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);  in xfrm4_beet_input()
|
D | xfrm4_policy.c |
    115  int ihl = iph->ihl;  in _decode_session4() local
    116  u8 *xprth = skb_network_header(skb) + ihl * 4;  in _decode_session4()
    143  xprth = skb_network_header(skb) + ihl * 4;  in _decode_session4()
    156  xprth = skb_network_header(skb) + ihl * 4;  in _decode_session4()
    169  xprth = skb_network_header(skb) + ihl * 4;  in _decode_session4()
    181  xprth = skb_network_header(skb) + ihl * 4;  in _decode_session4()
    193  xprth = skb_network_header(skb) + ihl * 4;  in _decode_session4()
    206  xprth = skb_network_header(skb) + ihl * 4;  in _decode_session4()
|
D | ip_input.c |
    282  opt->optlen = iph->ihl*4 - sizeof(struct iphdr);  in ip_rcv_options()
    356  if (iph->ihl > 5 && ip_rcv_options(skb))  in ip_rcv_finish()
    413  if (iph->ihl < 5 || iph->version != 4)  in ip_rcv()
    423  if (!pskb_may_pull(skb, iph->ihl*4))  in ip_rcv()
    428  if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))  in ip_rcv()
    435  } else if (len < (iph->ihl*4))  in ip_rcv()
    448  skb->transport_header = skb->network_header + iph->ihl*4;  in ip_rcv()
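The ip_rcv() matches show the canonical receive-side sanity checks: version 4, ihl at least 5, the full ihl*4-byte header actually present, a header checksum that verifies (ip_fast_csum() over ihl 32-bit words returns 0 for an intact header), and a total length no smaller than ihl*4. The sketch below condenses those checks for a flat buffer; verify_iphdr() is an invented stand-in, not the kernel function.

/*
 * Sketch only, not ip_rcv(): what the checks in the matches above amount
 * to for a flat buffer.  ip_fast_csum(iph, iph->ihl) sums ihl 32-bit
 * words and yields 0 for an intact header.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool verify_iphdr(const uint8_t *h, size_t caplen, size_t framelen)
{
    unsigned int ihl, tot_len;
    uint32_t sum = 0;

    if (caplen < 20)
        return false;                      /* cannot even read the base header */
    if ((h[0] >> 4) != 4 || (h[0] & 0x0f) < 5)
        return false;                      /* iph->ihl < 5 || iph->version != 4 */
    ihl = (h[0] & 0x0f) * 4;
    if (caplen < ihl)
        return false;                      /* pskb_may_pull(skb, iph->ihl*4) */
    for (unsigned int i = 0; i < ihl; i += 2)
        sum += (uint32_t)h[i] << 8 | h[i + 1];   /* checksum the whole header */
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    if ((uint16_t)~sum != 0)
        return false;                      /* ip_fast_csum() must come out 0 */
    tot_len = (h[2] << 8) | h[3];
    if (tot_len < ihl || framelen < tot_len)
        return false;                      /* len < iph->ihl*4, or truncated */
    return true;                           /* transport header sits at h + ihl */
}

int main(void)
{
    uint8_t hdr[20] = { 0x45, 0, 0, 20, 0, 0, 0, 0, 64, 6,
                        0x66, 0xe2, 10, 0, 0, 1, 10, 0, 0, 2 };

    printf("valid: %d\n", verify_iphdr(hdr, sizeof(hdr), sizeof(hdr)));
    return 0;
}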
|
D | xfrm4_state.c |
    61  XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);  in xfrm4_extract_header()
    66  XFRM_MODE_SKB_CB(skb)->optlen = iph->ihl * 4 - sizeof(*iph);  in xfrm4_extract_header()
|
D | ip_fragment.c |
    350  int ihl, end, skb1_run_end;  in ip_frag_queue() local
    372  ihl = ip_hdrlen(skb);  in ip_frag_queue()
    375  end = offset + skb->len - skb_network_offset(skb) - ihl;  in ip_frag_queue()
    405  if (!pskb_pull(skb, skb_network_offset(skb) + ihl))  in ip_frag_queue()
    481  fragsize = skb->len + ihl;  in ip_frag_queue()
    719  if (iph.ihl < 5 || iph.version != 4)  in ip_check_defrag()
    723  if (skb->len < netoff + len || len < (iph.ihl * 4))  in ip_check_defrag()
    729  if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {  in ip_check_defrag()
|
D | esp4.c |
    316  int ihl;  in esp_input_done2() local
    336  ihl = iph->ihl * 4;  in esp_input_done2()
    340  struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);  in esp_input_done2()
    380  skb_set_transport_header(skb, -ihl);  in esp_input_done2()
    531  struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));  in esp4_err()
|
D | ip_vti.c |
    340  esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));  in vti4_err()
    344  ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));  in vti4_err()
    348  ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));  in vti4_err()
    390  p.iph.ihl != 5)  in vti_tunnel_ioctl()
    456  iph->ihl = 5;  in vti_fb_tunnel_init()
|
D | af_inet.c |
    1226  int ihl;  in inet_gso_segment() local
    1251  ihl = iph->ihl * 4;  in inet_gso_segment()
    1252  if (ihl < sizeof(*iph))  in inet_gso_segment()
    1259  if (unlikely(!pskb_may_pull(skb, ihl)))  in inet_gso_segment()
    1261  __skb_pull(skb, ihl);  in inet_gso_segment()
    1266  SKB_GSO_CB(skb)->encap_level += ihl;  in inet_gso_segment()
    1293  offset += skb->len - nhoff - ihl;  in inet_gso_segment()
|
D | gre_demux.c |
    95  u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;  in gre_err()
|
D | inet_lro.c |
    39  #define IP_HDR_LEN(iph) (iph->ihl << 2)
    65  if (iph->ihl != IPH_LEN_WO_OPTIONS)  in lro_tcp_ip_check()
|
D | ip_output.c |
    96    iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);  in ip_send_check()
    152   iph->ihl = 5;  in ip_build_and_send_pkt()
    174   iph->ihl += opt->opt.optlen>>2;  in ip_build_and_send_pkt()
    459   iph->ihl += inet_opt->opt.optlen >> 2;  in ip_queue_xmit()
    571   hlen = iph->ihl * 4;  in ip_do_fragment()
    1432  iph->ihl = 5;  in __ip_make_skb()
    1441  iph->ihl += opt->optlen>>2;  in __ip_make_skb()
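On the transmit side the pattern runs the other way: the ip_output.c matches build headers with ihl = 5, bump ihl by optlen >> 2 when IP options are attached, and ip_send_check() then recomputes the checksum over those ihl 32-bit words. A standalone sketch of that construction follows; build_iphdr(), ip_hdr_csum() and the option bytes are invented, and optlen is assumed to be a multiple of 4, as real IP option blocks are.

/*
 * Sketch, not the kernel's ip_build_and_send_pkt(): assemble an IPv4
 * header the same way the matches above do, then checksum the ihl * 4
 * header bytes as ip_send_check() does.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t ip_hdr_csum(const uint8_t *h, unsigned int ihl)
{
    uint32_t sum = 0;

    for (unsigned int i = 0; i < ihl * 4; i += 2)  /* ihl is in 32-bit words */
        sum += (uint32_t)h[i] << 8 | h[i + 1];
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

/* Build a header into out[]; returns its length in bytes. */
static unsigned int build_iphdr(uint8_t *out, const uint8_t *opt,
                                unsigned int optlen, uint16_t payload_len)
{
    unsigned int ihl = 5 + (optlen >> 2);   /* iph->ihl = 5; iph->ihl += optlen >> 2 */
    uint16_t tot_len = ihl * 4 + payload_len;
    uint16_t csum;

    memset(out, 0, ihl * 4);
    out[0] = 0x40 | ihl;                    /* version 4, header length in words */
    out[2] = tot_len >> 8;
    out[3] = tot_len & 0xff;
    out[8] = 64;                            /* TTL */
    out[9] = 17;                            /* protocol: UDP, arbitrary here */
    if (optlen)
        memcpy(out + 20, opt, optlen);      /* options follow the fixed header */
    csum = ip_hdr_csum(out, ihl);           /* the ip_send_check() step */
    out[10] = csum >> 8;
    out[11] = csum & 0xff;
    return ihl * 4;
}

int main(void)
{
    uint8_t hdr[60];
    static const uint8_t nop_opts[4] = { 0x01, 0x01, 0x01, 0x00 }; /* NOP, NOP, NOP, EOL */
    unsigned int len = build_iphdr(hdr, nop_opts, sizeof(nop_opts), 100);

    printf("header length %u bytes, ihl field %u\n", len,
           (unsigned int)(hdr[0] & 0x0f));
    return 0;
}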
|
/net/ipv4/netfilter/ |
D | nf_log_ipv4.c |
    80   ih->ihl * 4 > sizeof(struct iphdr)) {  in dump_ipv4_packet()
    85   optsize = ih->ihl * 4 - sizeof(struct iphdr);  in dump_ipv4_packet()
    104  iphoff+ih->ihl*4, logflags))  in dump_ipv4_packet()
    111  iphoff+ih->ihl*4))  in dump_ipv4_packet()
    142  ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,  in dump_ipv4_packet()
    146  skb->len - iphoff - ih->ihl*4);  in dump_ipv4_packet()
    156  skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {  in dump_ipv4_packet()
    158  skb->len - iphoff - ih->ihl*4);  in dump_ipv4_packet()
    187  iphoff + ih->ihl*4+sizeof(_icmph));  in dump_ipv4_packet()
    212  ah = skb_header_pointer(skb, iphoff+ih->ihl*4,  in dump_ipv4_packet()
    [all …]
|
D | nf_tables_ipv4.c |
    37  ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {  in nft_ipv4_output()
|
/net/ipv6/ |
D | xfrm6_mode_transport.c |
    51  int ihl = skb->data - skb_transport_header(skb);  in xfrm6_transport_input() local
    55  skb_network_header(skb), ihl);  in xfrm6_transport_input()
    58  ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -  in xfrm6_transport_input()
|
/net/bridge/netfilter/ |
D | nf_tables_bridge.c |
    31  if (iph->ihl < 5 || iph->version != 4)  in nft_bridge_iphdr_validate()
    37  else if (len < (iph->ihl*4))  in nft_bridge_iphdr_validate()
    40  if (!pskb_may_pull(skb, iph->ihl*4))  in nft_bridge_iphdr_validate()
|
/net/netfilter/ipvs/ |
D | ip_vs_core.c |
    735   unsigned int icmp_offset = iph->ihl*4;  in ip_vs_nat_icmp()
    755   __be16 *ports = (void *)ciph + ciph->ihl*4;  in ip_vs_nat_icmp()
    843   unsigned int offset, unsigned int ihl,  in handle_response_icmp() argument
    852   if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {  in handle_response_icmp()
    907   unsigned int offset, ihl;  in ip_vs_out_icmp() local
    919   offset = ihl = iph->ihl * 4;  in ip_vs_out_icmp()
    969   pp, ciph.len, ihl, hooknum);  in ip_vs_out_icmp()
    1406  unsigned int offset, offset2, ihl, verdict;  in ip_vs_in_icmp() local
    1418  offset = ihl = iph->ihl * 4;  in ip_vs_in_icmp()
    1455  offset += cih->ihl * 4;  in ip_vs_in_icmp()
    [all …]
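The IPVS ICMP paths apply ihl twice: once on the outer header to find the ICMP message (offset = ihl = iph->ihl * 4), and again on the cited IPv4 header embedded in the ICMP error payload (offset += cih->ihl * 4) to reach the original ports. A sketch of that double hop over a flat buffer follows; the sample packet is fabricated and this is not the kernel's ip_vs_in_icmp().

/*
 * Sketch of the nested-header walk the IPVS ICMP matches above perform:
 * outer IPv4 header, then the 8-byte ICMP error header, then the cited
 * IPv4 header embedded in the ICMP payload, then the original transport
 * ports.  Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Outer IPv4 (20 B) + ICMP dest-unreachable (8 B) +
     * embedded IPv4 (20 B) + first 8 bytes of the original UDP header. */
    uint8_t pkt[56] = { 0 };
    unsigned int offset, ihl, ports_off;

    pkt[0]  = 0x45;                     /* outer: version 4, ihl 5 */
    pkt[9]  = 1;                        /* outer protocol: ICMP */
    pkt[20] = 3;                        /* ICMP type 3, destination unreachable */
    pkt[28] = 0x45;                     /* cited header: version 4, ihl 5 */
    pkt[37] = 17;                       /* cited protocol: UDP */
    pkt[48] = 0x30;                     /* original source port 12345... */
    pkt[49] = 0x39;

    offset = ihl = (pkt[0] & 0x0f) * 4; /* offset = ihl = iph->ihl * 4 */
    offset += 8;                        /* skip the ICMP header; cited header follows */
    ports_off = offset + (pkt[offset] & 0x0f) * 4;  /* offset += cih->ihl * 4 */

    printf("icmp at %u, cited iph at %u, ports at %u, sport %u\n",
           ihl, offset, ports_off,
           (unsigned int)pkt[ports_off] << 8 | pkt[ports_off + 1]);
    return 0;
}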
|
D | ip_vs_ftp.c |
    205  th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]);  in ip_vs_ftp_out()
    272  iph->ihl * 4,  in ip_vs_ftp_out()
    345  th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]);  in ip_vs_ftp_in()
|
D | ip_vs_proto_sctp.c |
    384  int ihl, cofs;  in set_sctp_state() local
    387  ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);  in set_sctp_state()
    389  ihl = ip_hdrlen(skb);  in set_sctp_state()
    392  cofs = ihl + sizeof(sctp_sctphdr_t);  in set_sctp_state()
|
/net/netfilter/ |
D | nf_tables_netdev.c |
    34  if (iph->ihl < 5 || iph->version != 4)  in nft_netdev_set_pktinfo_ipv4()
    38  thoff = iph->ihl * 4;  in nft_netdev_set_pktinfo_ipv4()
|
D | xt_HMARK.c |
    264  if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) {  in hmark_pkt_set_htuple_ipv4()
    287  hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);  in hmark_pkt_set_htuple_ipv4()
|
/net/bridge/ |
D | br_netfilter_hooks.c |
    201  if (iph->ihl < 5 || iph->version != 4)  in br_validate_ipv4()
    204  if (!pskb_may_pull(skb, iph->ihl*4))  in br_validate_ipv4()
    208  if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))  in br_validate_ipv4()
    215  } else if (len < (iph->ihl*4))  in br_validate_ipv4()
    510  skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;  in br_nf_pre_routing()
|