Searched refs:ipv4 (Results 1 – 25 of 92) sorted by relevance

/net/ipv4/
sysctl_net_ipv4.c
53 write_seqlock_bh(&net->ipv4.ip_local_ports.lock); in set_local_port_range()
54 if (same_parity && !net->ipv4.ip_local_ports.warned) { in set_local_port_range()
55 net->ipv4.ip_local_ports.warned = true; in set_local_port_range()
58 net->ipv4.ip_local_ports.range[0] = range[0]; in set_local_port_range()
59 net->ipv4.ip_local_ports.range[1] = range[1]; in set_local_port_range()
60 write_sequnlock_bh(&net->ipv4.ip_local_ports.lock); in set_local_port_range()
68 container_of(table->data, struct net, ipv4.ip_local_ports.range); in ipv4_local_port_range()
89 (range[0] < READ_ONCE(net->ipv4.sysctl_ip_prot_sock))) in ipv4_local_port_range()
103 ipv4.sysctl_ip_prot_sock); in ipv4_privileged_ports()
115 pports = READ_ONCE(net->ipv4.sysctl_ip_prot_sock); in ipv4_privileged_ports()
[all …]
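
[Aside, not part of the search output: the ip_local_ports range that sysctl_net_ipv4.c updates under a seqlock above is the ephemeral port range userspace sees as /proc/sys/net/ipv4/ip_local_port_range. A minimal sketch of reading it, assuming a Linux host; only the procfs path and its two-integer format come from the kernel interface, the rest is illustrative.]

#include <stdio.h>

int main(void)
{
        /* Two integers, low then high, as set_local_port_range() stores them. */
        FILE *f = fopen("/proc/sys/net/ipv4/ip_local_port_range", "r");
        unsigned int lo, hi;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%u %u", &lo, &hi) == 2)
                printf("ephemeral port range: %u-%u\n", lo, hi);
        fclose(f);
        return 0;
}
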
tcp_ipv4.c
111 int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse); in tcp_twsk_unique()
250 tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; in tcp_v4_connect()
488 sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, in tcp_v4_err()
754 sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo, in tcp_v4_send_reset()
1027 tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ? in tcp_v4_send_synack()
1547 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)) in tcp_v4_syn_recv_sock()
1745 sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, in tcp_v4_early_demux()
1974 sk = __inet_lookup_skb(net->ipv4.tcp_death_row.hashinfo, in tcp_v4_rcv()
2158 net->ipv4.tcp_death_row.hashinfo, in tcp_v4_rcv()
2309 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo; in listening_get_first()
[all …]
fib_rules.c
93 err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg); in __fib_lookup()
270 atomic_inc(&net->ipv4.fib_num_tclassid_users); in fib4_rule_configure()
275 net->ipv4.fib_rules_require_fldissect++; in fib4_rule_configure()
282 net->ipv4.fib_has_custom_rules = true; in fib4_rule_configure()
301 atomic_dec(&net->ipv4.fib_num_tclassid_users); in fib4_rule_delete()
303 net->ipv4.fib_has_custom_rules = true; in fib4_rule_delete()
305 if (net->ipv4.fib_rules_require_fldissect && in fib4_rule_delete()
307 net->ipv4.fib_rules_require_fldissect--; in fib4_rule_delete()
422 net->ipv4.rules_ops = ops; in fib4_rules_init()
423 net->ipv4.fib_has_custom_rules = false; in fib4_rules_init()
[all …]
ip_fragment.c
92 inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) : in ip4_frag_init()
217 q = inet_frag_find(net->ipv4.fqdir, &key); in ip_find()
604 table[0].data = &net->ipv4.fqdir->high_thresh; in ip4_frags_ns_ctl_register()
605 table[0].extra1 = &net->ipv4.fqdir->low_thresh; in ip4_frags_ns_ctl_register()
606 table[1].data = &net->ipv4.fqdir->low_thresh; in ip4_frags_ns_ctl_register()
607 table[1].extra2 = &net->ipv4.fqdir->high_thresh; in ip4_frags_ns_ctl_register()
608 table[2].data = &net->ipv4.fqdir->timeout; in ip4_frags_ns_ctl_register()
609 table[3].data = &net->ipv4.fqdir->max_dist; in ip4_frags_ns_ctl_register()
615 net->ipv4.frags_hdr = hdr; in ip4_frags_ns_ctl_register()
629 table = net->ipv4.frags_hdr->ctl_table_arg; in ip4_frags_ns_ctl_unregister()
[all …]
tcp_fastopen.c
13 ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx); in tcp_fastopen_init_key_once()
52 ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL); in tcp_fastopen_ctx_destroy()
85 octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx); in tcp_fastopen_reset_cipher()
104 ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx); in tcp_fastopen_get_cipher()
338 return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) || in tcp_fastopen_no_cookie()
353 int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen); in tcp_try_fastopen()
495 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)) in tcp_fastopen_active_disable()
499 WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies); in tcp_fastopen_active_disable()
505 atomic_inc(&net->ipv4.tfo_active_disable_times); in tcp_fastopen_active_disable()
517 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout); in tcp_fastopen_active_should_disable()
[all …]
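
[Aside, not part of the search output: sysctl_tcp_fastopen consulted in tcp_try_fastopen() above is the net.ipv4.tcp_fastopen policy knob; per socket, a server opts in with the standard TCP_FASTOPEN option. A hedged sketch of such a listener; port 8080 and the queue length of 16 are arbitrary, and kernels or libcs without Fast Open support will simply fail the setsockopt().]

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in addr;
        int qlen = 16;                  /* arbitrary limit on pending TFO requests */
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(8080);    /* arbitrary example port */

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0 ||
            listen(fd, 128) < 0) {
                perror("bind/setsockopt/listen");
                close(fd);
                return 1;
        }

        printf("listening with TCP Fast Open, queue length %d\n", qlen);
        close(fd);
        return 0;
}
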
fib_notifier.c
25 net->ipv4.fib_seq++; in call_fib4_notifiers()
33 return net->ipv4.fib_seq + fib4_rules_seq_read(net); in fib4_seq_read()
59 net->ipv4.fib_seq = 0; in fib4_notifier_init()
64 net->ipv4.notifier_ops = ops; in fib4_notifier_init()
71 fib_notifier_ops_unregister(net->ipv4.notifier_ops); in fib4_notifier_exit()
fib_frontend.c
65 &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]); in fib4_rules_init()
67 &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]); in fib4_rules_init()
87 if (id == RT_TABLE_LOCAL && !net->ipv4.fib_has_custom_rules) in fib_new_table()
96 rcu_assign_pointer(net->ipv4.fib_main, tb); in fib_new_table()
99 rcu_assign_pointer(net->ipv4.fib_default, tb); in fib_new_table()
106 hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]); in fib_new_table()
122 head = &net->ipv4.fib_table_hash[h]; in fib_get_table()
138 rcu_assign_pointer(net->ipv4.fib_main, new); in fib_replace_table()
141 rcu_assign_pointer(net->ipv4.fib_default, new); in fib_replace_table()
190 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; in fib_flush()
[all …]
tcp_timer.c
146 int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */ in tcp_orphan_retries()
166 if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing)) in tcp_mtu_probing()
174 mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss); in tcp_mtu_probing()
175 mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor)); in tcp_mtu_probing()
176 mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss)); in tcp_mtu_probing()
243 READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); in tcp_write_timeout()
246 if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) { in tcp_write_timeout()
253 retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2); in tcp_write_timeout()
388 max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); in tcp_probe_timer()
421 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1; in tcp_fastopen_synack_timer()
[all …]
af_inet.c
220 tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen); in inet_listen()
341 if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) in inet_create()
1276 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) { in inet_sk_reselect_saddr()
1329 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) || in inet_sk_rebuild_header()
1840 seqlock_init(&net->ipv4.ip_local_ports.lock); in inet_init_net()
1841 net->ipv4.ip_local_ports.range[0] = 32768; in inet_init_net()
1842 net->ipv4.ip_local_ports.range[1] = 60999; in inet_init_net()
1844 seqlock_init(&net->ipv4.ping_group_range.lock); in inet_init_net()
1849 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1); in inet_init_net()
1850 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0); in inet_init_net()
[all …]
icmp.c
285 if (!((1 << type) & READ_ONCE(net->ipv4.sysctl_icmp_ratemask))) in icmpv4_mask_allow()
322 peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1); in icmpv4_xrlim_allow()
324 READ_ONCE(net->ipv4.sysctl_icmp_ratelimit)); in icmpv4_xrlim_allow()
697 READ_ONCE(net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)) in __icmp_send()
889 switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) { in icmp_unreach()
942 if (!READ_ONCE(net->ipv4.sysctl_icmp_ignore_bogus_error_responses) && in icmp_unreach()
1002 if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all)) in icmp_echo()
1037 if (!READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe)) in icmp_build_probe()
1258 READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_broadcasts)) { in icmp_rcv()
1451 net->ipv4.sysctl_icmp_echo_ignore_all = 0; in icmp_sk_init()
[all …]
devinet.c
96 IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
265 memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt, in inetdev_init()
627 struct sock *sk = net->ipv4.mc_autojoin_sk; in ip_mc_autojoin_config()
1811 u32 res = atomic_read(&net->ipv4.dev_addr_genid) + in inet_base_seq()
2205 devconf = net->ipv4.devconf_all; in inet_netconf_get_devconf()
2208 devconf = net->ipv4.devconf_dflt; in inet_netconf_get_devconf()
2300 net->ipv4.devconf_all, in inet_netconf_dump_devconf()
2311 net->ipv4.devconf_dflt, in inet_netconf_dump_devconf()
2339 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i]; in devinet_copy_dflt_conf()
2355 net->ipv4.devconf_all); in inet_forward_change()
[all …]
route.c
488 if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key))) in __ip_select_ident()
489 get_random_bytes(&net->ipv4.ip_id_key, in __ip_select_ident()
490 sizeof(net->ipv4.ip_id_key)); in __ip_select_ident()
495 &net->ipv4.ip_id_key); in __ip_select_ident()
894 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1); in ip_rt_send_redirect()
998 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, in ip_error()
1036 if (mtu < net->ipv4.ip_rt_min_pmtu) { in __ip_rt_update_pmtu()
1038 mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu); in __ip_rt_update_pmtu()
1042 time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2)) in __ip_rt_update_pmtu()
1052 jiffies + net->ipv4.ip_rt_mtu_expires); in __ip_rt_update_pmtu()
[all …]
xfrm4_policy.c
187 net->ipv4.xfrm4_hdr = hdr; in xfrm4_net_sysctl_init()
201 if (!net->ipv4.xfrm4_hdr) in xfrm4_net_sysctl_exit()
204 table = net->ipv4.xfrm4_hdr->ctl_table_arg; in xfrm4_net_sysctl_exit()
205 unregister_net_sysctl_table(net->ipv4.xfrm4_hdr); in xfrm4_net_sysctl_exit()
ipmr.c
118 list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list, \
120 list_empty(&net->ipv4.mr_tables))
128 ret = list_entry_rcu(net->ipv4.mr_tables.next, in ipmr_mr_table_iter()
134 if (&ret->list == &net->ipv4.mr_tables) in ipmr_mr_table_iter()
163 err = fib_rules_lookup(net->ipv4.mr_rules_ops, in ipmr_fib_lookup()
248 INIT_LIST_HEAD(&net->ipv4.mr_tables); in ipmr_rules_init()
260 net->ipv4.mr_rules_ops = ops; in ipmr_rules_init()
277 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { in ipmr_rules_exit()
281 fib_rules_unregister(net->ipv4.mr_rules_ops); in ipmr_rules_exit()
302 for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
[all …]
syncookies.c
244 if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps)) in cookie_timestamp_decode()
249 if (tcp_opt->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack)) in cookie_timestamp_decode()
258 return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0; in cookie_timestamp_decode()
270 if (READ_ONCE(net->ipv4.sysctl_tcp_ecn)) in cookie_ecn_ok()
336 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) || in cookie_v4_check()
tcp_minisocks.c
176 if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) { in tcp_timewait_state_process()
253 tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state); in tcp_time_wait()
322 inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo); in tcp_time_wait()
356 if (net->ipv4.tcp_death_row.hashinfo->pernet) { in tcp_twsk_purge()
358 inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family); in tcp_twsk_purge()
361 if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1) in tcp_twsk_purge()
812 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) { in tcp_check_req()
igmp.c
474 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) in add_grec()
600 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) in igmpv3_send_report()
744 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) in igmp_send_report()
833 WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv)); in igmp_ifc_event()
929 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) in igmp_heard_report()
1015 in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); in igmp_heard_query()
1054 !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) in igmp_heard_query()
1195 pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); in igmpv3_add_delrec()
1247 READ_ONCE(net->ipv4.sysctl_igmp_qrv); in igmpv3_del_delrec()
1250 READ_ONCE(net->ipv4.sysctl_igmp_qrv); in igmpv3_del_delrec()
[all …]
tcp_diag.c
186 hinfo = sock_net(cb->skb->sk)->ipv4.tcp_death_row.hashinfo; in tcp_diag_dump()
196 hinfo = sock_net(cb->skb->sk)->ipv4.tcp_death_row.hashinfo; in tcp_diag_dump_one()
210 hinfo = net->ipv4.tcp_death_row.hashinfo; in tcp_diag_destroy()
/net/core/
lwt_bpf.c
181 bool ipv4; in bpf_lwt_xmit_reroute() local
184 ipv4 = true; in bpf_lwt_xmit_reroute()
186 ipv4 = false; in bpf_lwt_xmit_reroute()
199 if (ipv4) { in bpf_lwt_xmit_reroute()
535 static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len) in handle_gso_encap() argument
548 if (ipv4) { in handle_gso_encap()
581 if (ipv4) in handle_gso_encap()
594 bool ipv4; in bpf_lwt_push_ip_encap() local
603 ipv4 = true; in bpf_lwt_push_ip_encap()
607 ipv4 = false; in bpf_lwt_push_ip_encap()
[all …]
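
[Aside, not part of the search output: the bool ipv4 locals in lwt_bpf.c are set from the version field of the IP header being handled. An illustrative sketch of that version-nibble test on a raw header buffer; header_is_ipv4() is a made-up helper, not a kernel or libc function.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: the IP version sits in the high nibble of byte 0. */
static bool header_is_ipv4(const uint8_t *hdr)
{
        return (hdr[0] >> 4) == 4;
}

int main(void)
{
        uint8_t v4_hdr[20] = { 0x45 }; /* version 4, IHL of 5 words */
        uint8_t v6_hdr[40] = { 0x60 }; /* version 6 */

        printf("v4_hdr is IPv4: %s\n", header_is_ipv4(v4_hdr) ? "yes" : "no");
        printf("v6_hdr is IPv4: %s\n", header_is_ipv4(v6_hdr) ? "yes" : "no");
        return 0;
}
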
/net/tipc/
udp_media.c
77 struct in_addr ipv4; member
109 return ipv4_is_multicast(addr->ipv4.s_addr); in tipc_udp_is_mcast_addr()
135 snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port)); in tipc_udp_addr2str()
181 .daddr = dst->ipv4.s_addr, in tipc_udp_xmit()
182 .saddr = src->ipv4.s_addr, in tipc_udp_xmit()
195 udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr, in tipc_udp_xmit()
196 dst->ipv4.s_addr, 0, ttl, 0, src->port, in tipc_udp_xmit()
321 pr_info("New replicast peer: %pI4\n", &rcast->addr.ipv4); in tipc_udp_rcast_add()
346 src.ipv4.s_addr = iphdr->saddr; in tipc_udp_rcast_disc()
412 mreqn.imr_multiaddr = remote->ipv4; in enable_mcast()
[all …]
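
[Aside, not part of the search output: tipc_udp_is_mcast_addr() above relies on ipv4_is_multicast(), i.e. membership in 224.0.0.0/4; in userspace the equivalent is the IN_MULTICAST() macro from <netinet/in.h>. A minimal sketch; the sample addresses are arbitrary.]

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
        const char *addrs[] = { "228.0.23.64", "10.0.0.1" };

        for (int i = 0; i < 2; i++) {
                struct in_addr a;

                if (inet_pton(AF_INET, addrs[i], &a) != 1)
                        continue;
                /* IN_MULTICAST() wants host byte order, hence ntohl(). */
                printf("%-12s multicast: %s\n", addrs[i],
                       IN_MULTICAST(ntohl(a.s_addr)) ? "yes" : "no");
        }
        return 0;
}
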
/net/openvswitch/
flow.c
691 memset(&key->ipv4, 0, sizeof(key->ipv4)); in key_extract_l3l4()
700 key->ipv4.addr.src = nh->saddr; in key_extract_l3l4()
701 key->ipv4.addr.dst = nh->daddr; in key_extract_l3l4()
778 memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); in key_extract_l3l4()
779 memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); in key_extract_l3l4()
780 ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha); in key_extract_l3l4()
781 ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha); in key_extract_l3l4()
784 memset(&key->ipv4, 0, sizeof(key->ipv4)); in key_extract_l3l4()
flow_netlink.c
670 bool ttl = false, ipv4 = false, ipv6 = false; in ip_tun_from_nlattr() local
701 SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src, in ip_tun_from_nlattr()
703 ipv4 = true; in ip_tun_from_nlattr()
706 SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst, in ip_tun_from_nlattr()
708 ipv4 = true; in ip_tun_from_nlattr()
790 ipv4 = true; in ip_tun_from_nlattr()
812 if (ipv4 && ipv6) { in ip_tun_from_nlattr()
818 if (!ipv4 && !ipv6) { in ip_tun_from_nlattr()
822 if (ipv4) { in ip_tun_from_nlattr()
824 if (match->key->tun_key.u.ipv4.src || in ip_tun_from_nlattr()
[all …]
/net/psample/
psample.c
235 if (tun_key->u.ipv4.src && in __psample_ip_tun_to_nlattr()
237 tun_key->u.ipv4.src)) in __psample_ip_tun_to_nlattr()
239 if (tun_key->u.ipv4.dst && in __psample_ip_tun_to_nlattr()
241 tun_key->u.ipv4.dst)) in __psample_ip_tun_to_nlattr()
325 if (tun_key->u.ipv4.src) in psample_tunnel_meta_len()
327 if (tun_key->u.ipv4.dst) in psample_tunnel_meta_len()
/net/netfilter/
nf_conntrack_bpf.c
77 case sizeof(bpf_tuple->ipv4): in bpf_nf_ct_tuple_parse()
79 src->ip = bpf_tuple->ipv4.saddr; in bpf_nf_ct_tuple_parse()
80 sport->tcp.port = bpf_tuple->ipv4.sport; in bpf_nf_ct_tuple_parse()
81 dst->ip = bpf_tuple->ipv4.daddr; in bpf_nf_ct_tuple_parse()
82 dport->tcp.port = bpf_tuple->ipv4.dport; in bpf_nf_ct_tuple_parse()
/net/sctp/
offload.c
111 goto ipv4; in sctp_offload_init()
116 ipv4: in sctp_offload_init()
