
Searched refs:ip_hdr (Results 1 – 25 of 127) sorted by relevance
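For context: ip_hdr() (include/linux/ip.h) returns the IPv4 header of an sk_buff via skb_network_header(). A minimal, illustrative sketch of the usage pattern that recurs in the hits below (the function name is hypothetical, not taken from any of the files listed):

/*
 * Sketch only: fetch the IPv4 header of a received skb and inspect its
 * fields, assuming the network header offset has already been set (as it
 * is on the RX paths shown in these results).
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/ip.h>

static bool example_is_fragmented_tcp(const struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);	/* header at skb->network_header */

	if (iph->protocol != IPPROTO_TCP)	/* L4 protocol number */
		return false;

	return ip_is_fragment(iph);		/* MF set or nonzero fragment offset */
}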


/net/ipv4/
ip_input.c
157 u8 protocol = ip_hdr(skb)->protocol; in ip_call_ra_chain()
172 if (ip_is_fragment(ip_hdr(skb))) { in ip_call_ra_chain()
198 int protocol = ip_hdr(skb)->protocol; in ip_local_deliver_finish()
252 if (ip_is_fragment(ip_hdr(skb))) { in ip_local_deliver()
280 iph = ip_hdr(skb); in ip_rcv_options()
313 const struct iphdr *iph = ip_hdr(skb); in ip_rcv_finish()
335 iph = ip_hdr(skb); in ip_rcv_finish()
431 iph = ip_hdr(skb); in ip_rcv()
457 iph = ip_hdr(skb); in ip_rcv()
xfrm4_input.c
29 const struct iphdr *iph = ip_hdr(skb); in xfrm4_rcv_encap_finish()
43 struct iphdr *iph = ip_hdr(skb); in xfrm4_transport_finish()
132 iph = ip_hdr(skb); in xfrm4_udp_encap_rcv()
157 return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0); in xfrm4_rcv()
netfilter.c
22 const struct iphdr *iph = ip_hdr(skb); in ip_route_me_harder()
101 const struct iphdr *iph = ip_hdr(skb); in nf_ip_saveroute()
116 const struct iphdr *iph = ip_hdr(skb); in nf_ip_reroute()
130 const struct iphdr *iph = ip_hdr(skb); in nf_ip_checksum()
162 const struct iphdr *iph = ip_hdr(skb); in nf_ip_checksum_partial()
tcp_ipv4.c
100 return secure_tcp_sequence_number(ip_hdr(skb)->daddr, in tcp_v4_init_sequence()
101 ip_hdr(skb)->saddr, in tcp_v4_init_sequence()
643 &ip_hdr(skb)->saddr, AF_INET); in tcp_v4_send_reset()
653 ip_hdr(skb)->saddr, in tcp_v4_send_reset()
654 th->source, ip_hdr(skb)->daddr, in tcp_v4_send_reset()
661 &ip_hdr(skb)->saddr, AF_INET); in tcp_v4_send_reset()
682 key, ip_hdr(skb)->saddr, in tcp_v4_send_reset()
683 ip_hdr(skb)->daddr, &rep.th); in tcp_v4_send_reset()
686 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, in tcp_v4_send_reset()
687 ip_hdr(skb)->saddr, /* XXX */ in tcp_v4_send_reset()
[all …]
xfrm4_output.c
28 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df) in xfrm4_tunnel_check_size()
54 XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol; in xfrm4_extract_output()
111 hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); in xfrm4_local_error()
ip_forward.c
47 if (unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)) in ip_exceeds_mtu()
109 if (ip_hdr(skb)->ttl <= 1) in ip_forward()
132 iph = ip_hdr(skb); in ip_forward()
xfrm4_mode_beet.c
22 struct iphdr *iph = ip_hdr(skb); in xfrm4_beet_make_header()
63 top_iph = ip_hdr(skb); in xfrm4_beet_output()
117 iph = ip_hdr(skb); in xfrm4_beet_input()
xfrm4_mode_transport.c
23 struct iphdr *iph = ip_hdr(skb); in xfrm4_transport_output()
52 ip_hdr(skb)->tot_len = htons(skb->len + ihl); in xfrm4_transport_input()
xfrm4_tunnel.c
23 return ip_hdr(skb)->protocol; in ipip_xfrm_rcv()
55 return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr); in xfrm_tunnel_rcv()
syncookies.c
194 const struct iphdr *iph = ip_hdr(skb); in cookie_v4_init_sequence()
313 mss = __cookie_v4_check(ip_hdr(skb), th, cookie); in cookie_v4_check()
341 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); in cookie_v4_check()
342 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); in cookie_v4_check()
ip_output.c
99 struct iphdr *iph = ip_hdr(skb); in __ip_local_out()
154 iph = ip_hdr(skb); in ip_build_and_send_pkt()
220 nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr); in ip_finish_output2()
372 if (ip_hdr(skb)->ttl == 0) { in ip_mc_output()
476 iph = ip_hdr(skb); in ip_queue_xmit()
539 struct iphdr *iph = ip_hdr(skb); in ip_fragment()
585 iph = ip_hdr(skb); in ip_do_fragment()
656 iph = ip_hdr(frag); in ip_do_fragment()
705 iph = ip_hdr(skb); in ip_do_fragment()
775 iph = ip_hdr(skb2); in ip_do_fragment()
[all …]
ipmr.c
648 if (ip_hdr(skb)->version == 0) { in ipmr_destroy_unres()
922 if (ip_hdr(skb)->version == 0) { in ipmr_cache_resolve()
981 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; in ipmr_cache_report()
982 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + in ipmr_cache_report()
990 ip_hdr(skb)->protocol = 0; in ipmr_cache_report()
999 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ in ipmr_cache_report()
1029 const struct iphdr *iph = ip_hdr(skb); in ipmr_cache_unresolved()
1637 const struct iphdr *old_iph = ip_hdr(skb); in ip_encap()
1642 iph = ip_hdr(skb); in ip_encap()
1679 const struct iphdr *iph = ip_hdr(skb); in ipmr_queue_xmit()
[all …]
/net/ipv4/netfilter/
nf_reject_ipv4.c
24 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) in nf_reject_ip_tcphdr_get()
27 if (ip_hdr(oldskb)->protocol != IPPROTO_TCP) in nf_reject_ip_tcphdr_get()
51 struct iphdr *niph, *oiph = ip_hdr(oldskb); in nf_reject_iphdr_put()
75 struct iphdr *niph = ip_hdr(nskb); in nf_reject_ip_tcphdr_put()
119 oiph = ip_hdr(oldskb); in nf_send_reset()
175 struct iphdr *iph = ip_hdr(skb_in); in nf_send_unreach()
ipt_ECN.c
31 struct iphdr *iph = ip_hdr(skb); in set_ect_ip()
37 iph = ip_hdr(skb); in set_ect_ip()
66 tcph = (void *)ip_hdr(skb) + ip_hdrlen(skb); in set_ect_tcp()
89 ip_hdr(skb)->protocol == IPPROTO_TCP) in ecn_tg()
nft_chain_route_ipv4.c
44 iph = ip_hdr(skb); in nf_route_table_hook()
51 iph = ip_hdr(skb); in nf_route_table_hook()
nf_dup_ipv4.c
29 const struct iphdr *iph = ip_hdr(skb); in nf_dup_ipv4_route()
85 iph = ip_hdr(skb); in nf_dup_ipv4()
ipt_SYNPROXY.c
83 iph = ip_hdr(skb); in synproxy_send_client_synack()
125 iph = ip_hdr(skb); in synproxy_send_server_syn()
170 iph = ip_hdr(skb); in synproxy_send_server_ack()
208 iph = ip_hdr(skb); in synproxy_send_client_ack()
245 mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1); in synproxy_recv_client_ack()
iptable_mangle.c
59 iph = ip_hdr(skb); in ipt_mangle_out()
67 iph = ip_hdr(skb); in ipt_mangle_out()
/net/netfilter/
xt_DSCP.c
34 u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; in dscp_tg()
40 ipv4_change_dsfield(ip_hdr(skb), in dscp_tg()
80 struct iphdr *iph = ip_hdr(skb); in tos_tg()
89 iph = ip_hdr(skb); in tos_tg()
xt_dscp.c
31 u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; in dscp_mt()
62 return ((ip_hdr(skb)->tos & info->tos_mask) == in tos_mt()
/net/bridge/netfilter/
nft_reject_bridge.c
47 iph = ip_hdr(skb); in nft_bridge_iphdr_validate()
118 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) in nft_reject_br_send_v4_unreach()
127 if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len))) in nft_reject_br_send_v4_unreach()
130 if (ip_hdr(oldskb)->protocol == IPPROTO_TCP || in nft_reject_br_send_v4_unreach()
131 ip_hdr(oldskb)->protocol == IPPROTO_UDP) in nft_reject_br_send_v4_unreach()
132 proto = ip_hdr(oldskb)->protocol; in nft_reject_br_send_v4_unreach()
/net/batman-adv/
main.c
358 struct iphdr ip_hdr_tmp, *ip_hdr; in batadv_skb_set_priority() local
382 ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr), in batadv_skb_set_priority()
383 sizeof(*ip_hdr), &ip_hdr_tmp); in batadv_skb_set_priority()
384 if (!ip_hdr) in batadv_skb_set_priority()
386 prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5; in batadv_skb_set_priority()
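The batman-adv hit above differs from the rest: ip_hdr there is a local variable filled by skb_header_pointer() rather than the ip_hdr() helper, so the header is copied out even when the skb is non-linear. A brief sketch of that pattern (the function name and offset handling are illustrative assumptions, not batman-adv's code):

/*
 * Sketch only: safely read the IPv4 header from a possibly non-linear
 * skb and return its DSCP/TOS byte.  'offset' is assumed to point at
 * the start of the IP header inside the frame.
 */
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/dsfield.h>

static u8 example_read_dsfield(struct sk_buff *skb, unsigned int offset)
{
	struct iphdr ip_hdr_tmp;
	const struct iphdr *iph;

	iph = skb_header_pointer(skb, offset, sizeof(*iph), &ip_hdr_tmp);
	if (!iph)			/* header lies outside the packet data */
		return 0;

	return ipv4_get_dsfield(iph);	/* TOS/DSCP byte */
}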
/net/dccp/
ipv4.c
382 return secure_dccp_sequence_number(ip_hdr(skb)->daddr, in dccp_v4_init_sequence()
383 ip_hdr(skb)->saddr, in dccp_v4_init_sequence()
419 newinet->mc_ttl = ip_hdr(skb)->ttl; in dccp_v4_request_recv_sock()
457 const struct iphdr *iph = ip_hdr(skb); in dccp_v4_route_skb()
531 rxiph = ip_hdr(rxskb); in dccp_v4_ctl_send_reset()
621 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); in dccp_v4_conn_request()
622 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); in dccp_v4_conn_request()
787 iph = ip_hdr(skb); in dccp_v4_rcv()
/net/netfilter/ipvs/
ip_vs_core.c
702 ip_send_check(ip_hdr(skb)); in ip_vs_gather_frags()
738 struct iphdr *iph = ip_hdr(skb); in ip_vs_nat_icmp()
917 if (ip_is_fragment(ip_hdr(skb))) { in ip_vs_out_icmp()
922 iph = ip_hdr(skb); in ip_vs_out_icmp()
1269 ip_hdr(skb)->saddr = cp->vaddr.ip; in handle_response()
1270 ip_send_check(ip_hdr(skb)); in handle_response()
1375 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) { in ip_vs_out()
1571 if (ip_is_fragment(ip_hdr(skb))) { in ip_vs_in_icmp()
1576 iph = ip_hdr(skb); in ip_vs_in_icmp()
1682 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); in ip_vs_in_icmp()
[all …]
ip_vs_xmit.c
188 source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr); in crosses_local_route_boundary()
243 if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) && in ensure_mtu_is_adequate()
249 &ip_hdr(skb)->saddr); in ensure_mtu_is_adequate()
623 struct iphdr *iph = ip_hdr(skb); in ip_vs_bypass_xmit()
751 ip_hdr(skb)->daddr = cp->daddr.ip; in ip_vs_nat_xmit()
752 ip_send_check(ip_hdr(skb)); in ip_vs_nat_xmit()
913 old_iph = ip_hdr(skb); in ip_vs_prepare_tunneled_skb()
1025 iph = ip_hdr(skb); in ip_vs_tunnel_xmit()
1174 ip_send_check(ip_hdr(skb)); in ip_vs_dr_xmit()