
Lines matching refs: sock_net
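
Every match below resolves a socket to its network namespace through sock_net(). For reference, here is a minimal sketch of the accessor and its pnet helpers, as they appear (modulo cosmetic detail) in include/net/sock.h and include/net/net_namespace.h; the CONFIG_NET_NS conditionals let the whole indirection compile away to &init_net on kernels built without namespace support:

	static inline struct net *sock_net(const struct sock *sk)
	{
		return read_pnet(&sk->sk_net);
	}

	/* Read side of a possible_net_t, used by sock_net() above. */
	static inline struct net *read_pnet(const possible_net_t *pnet)
	{
	#ifdef CONFIG_NET_NS
		return pnet->net;
	#else
		return &init_net;
	#endif
	}

	/* Store side, used e.g. at line 6713 below, where a freshly
	 * allocated request sock inherits the listener's namespace. */
	static inline void write_pnet(possible_net_t *pnet, struct net *net)
	{
	#ifdef CONFIG_NET_NS
		pnet->net = net;
	#endif
	}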

215 		dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);  in tcp_gro_dev_warn()
428 min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2]))); in tcp_sndbuf_expand()
463 int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1; in __tcp_grow_window()
528 int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win); in tcp_init_buffer_space()
567 struct net *net = sock_net(sk); in tcp_clamp_window()
719 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) && in tcp_rcv_space_adjust()
740 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); in tcp_rcv_space_adjust()
905 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio; in tcp_update_pacing_rate()
907 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio; in tcp_update_pacing_rate()
1037 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)); in tcp_check_sack_reordering()
1042 NET_INC_STATS(sock_net(sk), in tcp_check_sack_reordering()
1082 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT, in tcp_mark_skb_lost()
1242 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV); in tcp_check_dsack()
1249 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); in tcp_check_dsack()
1256 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKIGNOREDDUBIOUS); in tcp_check_dsack()
1260 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs); in tcp_check_dsack()
1463 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED); in tcp_shifted_skb()
1490 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED); in tcp_shifted_skb()
1661 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); in tcp_shift_skb_data()
1853 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_sacktag_write_queue()
2016 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)); in tcp_check_reno_reordering()
2018 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER); in tcp_check_reno_reordering()
2081 return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) & in tcp_is_rack()
2098 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); in tcp_timeout_mark_lost()
2124 struct net *net = sock_net(sk); in tcp_enter_loss()
2533 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_try_undo_recovery()
2554 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); in tcp_try_undo_dsack()
2569 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); in tcp_try_undo_loss()
2571 NET_INC_STATS(sock_net(sk), in tcp_try_undo_loss()
2702 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); in tcp_mtup_probe_failed()
2725 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); in tcp_mtup_probe_success()
2780 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_enter_recovery()
2882 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); in tcp_try_undo_partial()
3042 u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ; in tcp_update_rtt_min()
3454 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering)) in tcp_may_raise_cwnd()
3603 struct net *net = sock_net(sk); in tcp_send_challenge_ack()
3674 NET_INC_STATS(sock_net(sk), in tcp_process_tlp_ack()
3715 const struct net *net = sock_net(sk); in tcp_newly_delivered()
3803 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); in tcp_ack()
3810 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); in tcp_ack()
4406 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_dsack_set()
4414 NET_INC_STATS(sock_net(sk), mib_idx); in tcp_dsack_set()
4441 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH); in tcp_rcv_spurious_retrans()
4450 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_send_dupack()
4453 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_send_dupack()
4509 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, in tcp_sack_compress_send_ack()
4642 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); in tcp_try_coalesce()
4760 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); in tcp_data_queue_ofo()
4771 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); in tcp_data_queue_ofo()
4823 NET_INC_STATS(sock_net(sk), in tcp_data_queue_ofo()
4842 NET_INC_STATS(sock_net(sk), in tcp_data_queue_ofo()
4871 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); in tcp_data_queue_ofo()
4937 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); in tcp_send_rcvq()
4999 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); in tcp_data_queue()
5008 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); in tcp_data_queue()
5044 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_data_queue()
5067 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); in tcp_data_queue()
5096 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); in tcp_collapse_one()
5301 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); in tcp_prune_ofo_queue()
5341 NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); in tcp_prune_queue()
5374 NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); in tcp_prune_queue()
5478 tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)) in __tcp_ack_snd_check()
5500 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns), in __tcp_ack_snd_check()
5504 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns), in __tcp_ack_snd_check()
5532 if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg)) in tcp_check_urg()
5645 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && in tcp_validate_incoming()
5649 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); in tcp_validate_incoming()
5650 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5670 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5733 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); in tcp_validate_incoming()
5734 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); in tcp_validate_incoming()
5861 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5885 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); in tcp_rcv_established()
5943 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); in tcp_rcv_established()
5944 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
6023 tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL); in tcp_rcv_fastopen_synack()
6057 NET_INC_STATS(sock_net(sk), in tcp_rcv_fastopen_synack()
6063 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); in tcp_rcv_fastopen_synack()
6108 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
6134 NET_INC_STATS(sock_net(sk), in tcp_rcv_synsent_state_process()
6519 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
6528 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
6589 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
6652 const struct net *net = sock_net(listen_sk); in tcp_ecn_create_request()
6713 write_pnet(&ireq->ireq_net, sock_net(sk_listener)); in inet_reqsk_alloc()
6728 struct net *net = sock_net(sk); in tcp_syn_flood_action()
6738 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); in tcp_syn_flood_action()
6741 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); in tcp_syn_flood_action()
6792 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 && in tcp_get_syncookie_mss()
6800 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_get_syncookie_mss()
6820 struct net *net = sock_net(sk); in tcp_conn_request()
6841 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_conn_request()
6859 tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, in tcp_conn_request()
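
Two usage patterns dominate the matches above: lockless reads of per-namespace sysctls (READ_ONCE(sock_net(sk)->ipv4.sysctl_...)) and per-namespace SNMP counter updates (NET_INC_STATS()/NET_ADD_STATS()). Below is a hedged sketch combining both; tcp_demo_dsack_check() is a hypothetical helper invented for illustration and is not kernel code, but the sysctl field and MIB indices are the real ones that appear in the listing (lines 1242, 1260, and 4406), and the file they come from is evidently net/ipv4/tcp_input.c, given the function names:

	#include <net/sock.h>	/* sock_net() */
	#include <net/ip.h>	/* NET_INC_STATS(), NET_ADD_STATS() */

	/* Hypothetical helper, for illustration only: shows the two
	 * sock_net() patterns found throughout the listing above. */
	static bool tcp_demo_dsack_check(struct sock *sk, u32 dup_segs)
	{
		struct net *net = sock_net(sk);

		/* Pattern 1: per-namespace sysctl, read locklessly with
		 * READ_ONCE() because it can be rewritten from /proc at
		 * any time (cf. line 4406). */
		if (!READ_ONCE(net->ipv4.sysctl_tcp_dsack))
			return false;

		/* Pattern 2: per-namespace SNMP counters, so statistics
		 * stay scoped to the netns this socket lives in
		 * (cf. lines 1242 and 1260). */
		NET_INC_STATS(net, LINUX_MIB_TCPDSACKRECV);
		NET_ADD_STATS(net, LINUX_MIB_TCPDSACKRECVSEGS, dup_segs);
		return true;
	}

The design point behind threading sock_net(sk) through every call site, rather than consulting globals, is that each tunable and counter can then differ per namespace, so containers see independent TCP sysctls and statistics.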