/net/ipv4/
tcp_input.c
   215  static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
   217          if (tp->ecn_flags & TCP_ECN_OK)
   218                  tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
   221  static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
   224          tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
   227  static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
   229          tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
   234          struct tcp_sock *tp = tcp_sk(sk);    (in __tcp_ecn_check_ce())
   242          if (tp->ecn_flags & TCP_ECN_SEEN)
   249          if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
   [all …]
tcp_output.c
    75          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_event_new_data_sent())
    76          unsigned int prior_packets = tp->packets_out;
    79          tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
    81          tp->packets_out += tcp_skb_pcount(skb);
    99          const struct tcp_sock *tp = tcp_sk(sk);    (in tcp_acceptable_seq())
   101          if (!before(tcp_wnd_end(tp), tp->snd_nxt))
   102                  return tp->snd_nxt;
   104          return tcp_wnd_end(tp);
   123          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_advertise_mss())
   125          int mss = tp->advmss;
   [all …]
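tcp_acceptable_seq() above leans on before(), the kernel's wrap-safe 32-bit sequence comparison. A minimal standalone sketch of that comparison, assuming nothing beyond standard C (seq_before is an illustrative name, not the kernel's macro):

    #include <stdint.h>
    #include <stdio.h>

    /* True if seq1 precedes seq2 in 32-bit modular arithmetic: the
     * subtraction wraps, and the sign bit of the difference decides. */
    static int seq_before(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) < 0;
    }

    int main(void)
    {
            /* Works across the 2^32 wrap: 0xfffffff0 precedes 0x10. */
            printf("%d\n", seq_before(0xfffffff0u, 0x10u)); /* 1 */
            printf("%d\n", seq_before(0x10u, 0xfffffff0u)); /* 0 */
            return 0;
    }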
tcp.c
   383          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_init_sock())
   385          tp->out_of_order_queue = RB_ROOT;
   387          tcp_prequeue_init(tp);
   388          INIT_LIST_HEAD(&tp->tsq_node);
   391          tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
   392          tp->rtt_min[0].rtt = ~0U;
   399          tp->snd_cwnd = TCP_INIT_CWND;
   404          tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
   405          tp->snd_cwnd_clamp = ~0;
   406          tp->mss_cache = TCP_MSS_DEFAULT;
   [all …]
tcp_yeah.c
    44          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_yeah_init())
    56          tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
    72          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_yeah_cong_avoid())
    78          if (tcp_in_slow_start(tp))
    79                  tcp_slow_start(tp, acked);
    84          tp->snd_cwnd_cnt += yeah->pkts_acked;
    85          if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
    86                  if (tp->snd_cwnd < tp->snd_cwnd_clamp)
    87                          tp->snd_cwnd++;
    88                  tp->snd_cwnd_cnt = 0;
   [all …]
tcp_vegas.c
    72          const struct tcp_sock *tp = tcp_sk(sk);    (in vegas_enable())
    79          vegas->beg_snd_nxt = tp->snd_nxt;
   159  static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
   161          return min(tp->snd_ssthresh, tp->snd_cwnd);
   166          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_vegas_cong_avoid())
   180          vegas->beg_snd_nxt = tp->snd_nxt;
   219          target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
   226          diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
   228          if (diff > gamma && tcp_in_slow_start(tp)) {
   240          tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
   [all …]
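Lines 219-240 are the core of Vegas: the sender infers how many packets it has queued in the network from the gap between the latest RTT and the lowest RTT ever seen (baseRTT). A standalone sketch of that estimate with invented sample values; the names mirror the snippet, but this is not kernel code (the kernel finishes line 219 by dividing target_cwnd by rtt with do_div):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t snd_cwnd = 40;      /* packets in flight */
            uint32_t baseRTT  = 100000;  /* lowest RTT seen, usec */
            uint32_t rtt      = 125000;  /* latest RTT sample, usec */

            /* cwnd scaled to what the path could carry at baseRTT ... */
            uint64_t target_cwnd = (uint64_t)snd_cwnd * baseRTT / rtt;

            /* ... and the estimated backlog sitting in queues:
             * diff = cwnd * (rtt - baseRTT) / baseRTT */
            uint32_t diff = snd_cwnd * (rtt - baseRTT) / baseRTT;

            printf("target_cwnd=%llu diff=%u\n",
                   (unsigned long long)target_cwnd, diff);  /* 32, 10 */
            return 0;
    }

Vegas then compares diff against its alpha/beta/gamma thresholds to decide whether to grow, hold, or shrink the window.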
tcp_highspeed.c
   101          struct tcp_sock *tp = tcp_sk(sk);    (in hstcp_init())
   108          tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
   113          struct tcp_sock *tp = tcp_sk(sk);    (in hstcp_cong_avoid())
   119          if (tcp_in_slow_start(tp))
   120                  tcp_slow_start(tp, acked);
   129          if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
   130                  while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
   133          } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
   134                  while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
   139          if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
   [all …]
tcp_recovery.c
    24          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_rack_mark_lost())
    26          u32 reo_wnd, prior_retrans = tp->retrans_out;
    28          if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
    32          tp->rack.advanced = 0;
    43          if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
    44                  reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
    53                  if (!after(scb->end_seq, tp->snd_una) ||
    57                  if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
    59                          if (skb_mstamp_us_delta(&tp->rack.mstamp,
    64                          tcp_skb_mark_lost_uncond_verify(tp, skb);
   [all …]
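Line 44 widens RACK's reordering window to a quarter of the minimum RTT once reordering has been observed; packets sent more than that long before the most recently delivered one are marked lost. A sketch of just that choice; the 1000-usec floor below is an assumed placeholder, not the kernel's constant:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rack_reo_wnd(uint32_t min_rtt_us, uint32_t floor_us)
    {
            uint32_t quarter = min_rtt_us >> 2;  /* min_rtt / 4 */
            return quarter > floor_us ? quarter : floor_us;
    }

    int main(void)
    {
            printf("%u\n", rack_reo_wnd(40000, 1000));  /* 10000 usec */
            printf("%u\n", rack_reo_wnd(2000, 1000));   /* 1000 usec floor */
            return 0;
    }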
tcp_hybla.c
    47          struct tcp_sock *tp = tcp_sk(sk);    (in hybla_init())
    56          tp->snd_cwnd = 2;
    57          tp->snd_cwnd_clamp = 65535;
    63          ca->minrtt_us = tp->srtt_us;
    64          tp->snd_cwnd = ca->rho;
    91          struct tcp_sock *tp = tcp_sk(sk);    (in hybla_cong_avoid())
    97          if (tp->srtt_us < ca->minrtt_us) {
    99                  ca->minrtt_us = tp->srtt_us;
   115          if (tcp_in_slow_start(tp)) {
   139                  increment = ca->rho2_7ls / tp->snd_cwnd;
   [all …]
tcp_veno.c
   119          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_veno_cong_avoid())
   147          target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
   151          veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
   153          if (tcp_in_slow_start(tp)) {
   155                  tcp_slow_start(tp, acked);
   162                  tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
   167          if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
   169                      tp->snd_cwnd < tp->snd_cwnd_clamp) {
   170                          tp->snd_cwnd++;
   174                  tp->snd_cwnd_cnt = 0;
   [all …]
tcp_timer.c
    65          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_out_of_resources())
    70          if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
    80          if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
    82              (!tp->snd_wnd && !tp->packets_out))
   129          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_mtu_probing())
   134          mss = max(mss, 68 - tp->tcp_header_len);
   178          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_write_timeout())
   185                  if (tp->syn_fastopen || tp->syn_data)
   187                  if (tp->syn_data && icsk->icsk_retransmits == 1)
   200          if (tp->syn_data_acked &&
   [all …]
tcp_dctcp.c
    71  static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
    73          ca->next_seq = tp->snd_nxt;
    81          const struct tcp_sock *tp = tcp_sk(sk);    (in dctcp_init())
    83          if ((tp->ecn_flags & TCP_ECN_OK) ||
    88                  ca->prior_snd_una = tp->snd_una;
    89                  ca->prior_rcv_nxt = tp->rcv_nxt;
    96                  dctcp_reset(tp, ca);
   110          struct tcp_sock *tp = tcp_sk(sk);    (in dctcp_ssthresh())
   112          ca->loss_cwnd = tp->snd_cwnd;
   113          return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
   [all …]
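Line 113 is DCTCP's proportional window reduction: alpha is a moving average of the fraction of ECN-marked bytes, kept in fixed point scaled by 2^10, so the extra bit in ">> 11" applies cwnd * alpha/2. A standalone sketch of just that formula:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t dctcp_ssthresh(uint32_t cwnd, uint32_t alpha_scaled)
    {
            /* alpha_scaled is alpha * 1024; >> 11 multiplies by alpha/2 */
            uint32_t cut = (cwnd * alpha_scaled) >> 11U;
            uint32_t ssthresh = cwnd - cut;
            return ssthresh > 2U ? ssthresh : 2U;  /* never below 2 */
    }

    int main(void)
    {
            /* alpha == 1024 (every byte marked) halves the window ... */
            printf("%u\n", dctcp_ssthresh(100, 1024));  /* 50 */
            /* ... light marking (alpha == 128) trims only ~6%. */
            printf("%u\n", dctcp_ssthresh(100, 128));   /* 94 */
            return 0;
    }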
tcp_cdg.c
   141          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_cdg_hystart_update())
   162                                                tp->snd_cwnd);
   163                          tp->snd_ssthresh = tp->snd_cwnd;
   181                                                tp->snd_cwnd);
   182                          tp->snd_ssthresh = tp->snd_cwnd;
   242          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_cdg_backoff())
   253          ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);
   263          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_cdg_cong_avoid())
   267          if (tcp_in_slow_start(tp) && hystart_detect)
   275          ca->rtt_seq = tp->snd_nxt;
   [all …]
tcp_cong.c
   378  u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
   380          u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
   382          acked -= cwnd - tp->snd_cwnd;
   383          tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
   392  void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
   395          if (tp->snd_cwnd_cnt >= w) {
   396                  tp->snd_cwnd_cnt = 0;
   397                  tp->snd_cwnd++;
   400          tp->snd_cwnd_cnt += acked;
   401          if (tp->snd_cwnd_cnt >= w) {
   [all …]
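These two helpers are the reference behavior most of the congestion modules above build on: exponential growth up to ssthresh, then roughly one extra segment per window's worth of ACKs. A simplified userspace re-statement (the kernel's tcp_cong_avoid_ai also credits several increments at once when many ACKs accumulate; this sketch keeps only the basic step):

    #include <stdint.h>
    #include <stdio.h>

    struct cc { uint32_t cwnd, ssthresh, clamp, cwnd_cnt; };

    /* Grow cwnd by acked, capped at ssthresh; return leftover ACKs so the
     * caller can continue in congestion avoidance, as the kernel does. */
    static uint32_t slow_start(struct cc *c, uint32_t acked)
    {
            uint32_t cwnd = c->cwnd + acked;
            if (cwnd > c->ssthresh)
                    cwnd = c->ssthresh;
            acked -= cwnd - c->cwnd;
            c->cwnd = cwnd < c->clamp ? cwnd : c->clamp;
            return acked;
    }

    /* Additive increase: one segment once a full window w has been acked. */
    static void cong_avoid_ai(struct cc *c, uint32_t w, uint32_t acked)
    {
            c->cwnd_cnt += acked;
            if (c->cwnd_cnt >= w) {
                    c->cwnd_cnt -= w;
                    c->cwnd++;
            }
    }

    int main(void)
    {
            struct cc c = { .cwnd = 10, .ssthresh = 16, .clamp = ~0u };
            uint32_t left = slow_start(&c, 10);
            printf("cwnd=%u leftover=%u\n", c.cwnd, left);  /* 16, 4 */
            cong_avoid_ai(&c, c.cwnd, 16);
            printf("cwnd=%u\n", c.cwnd);                    /* 17 */
            return 0;
    }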
tcp_bic.c
   143          struct tcp_sock *tp = tcp_sk(sk);    (in bictcp_cong_avoid())
   149          if (tcp_in_slow_start(tp))
   150                  tcp_slow_start(tp, acked);
   152                  bictcp_update(ca, tp->snd_cwnd);
   153                  tcp_cong_avoid_ai(tp, ca->cnt, 1);
   163          const struct tcp_sock *tp = tcp_sk(sk);    (in bictcp_recalc_ssthresh())
   169          if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
   170                  ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
   173                  ca->last_max_cwnd = tp->snd_cwnd;
   175          ca->loss_cwnd = tp->snd_cwnd;
   [all …]
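Lines 169-173 implement BIC's fast convergence: when a loss happens below the previous maximum, the remembered maximum is pulled down so a competing flow can claim bandwidth sooner. A sketch with BIC's fixed-point beta (819/1024, the module default, assumed here):

    #include <stdint.h>
    #include <stdio.h>

    #define BETA_SCALE 1024u
    static const uint32_t beta = 819;  /* ~0.8 in 1/1024 units */

    static uint32_t recalc_last_max(uint32_t cwnd, uint32_t last_max_cwnd,
                                    int fast_convergence)
    {
            if (cwnd < last_max_cwnd && fast_convergence)
                    /* release bandwidth early: remember ~90% of cwnd */
                    return (cwnd * (BETA_SCALE + beta)) / (2 * BETA_SCALE);
            return cwnd;
    }

    int main(void)
    {
            printf("%u\n", recalc_last_max(100, 120, 1));  /* 89 (~90%) */
            printf("%u\n", recalc_last_max(100, 80, 1));   /* 100, kept */
            return 0;
    }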
tcp_illinois.c
    58          struct tcp_sock *tp = tcp_sk(sk);    (in rtt_reset())
    61          ca->end_seq = tp->snd_nxt;
   222          struct tcp_sock *tp = tcp_sk(sk);    (in update_params())
   225          if (tp->snd_cwnd < win_thresh) {
   260          struct tcp_sock *tp = tcp_sk(sk);    (in tcp_illinois_cong_avoid())
   271          if (tcp_in_slow_start(tp))
   272                  tcp_slow_start(tp, acked);
   278                  tp->snd_cwnd_cnt += ca->acked;
   284                  delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
   285                  if (delta >= tp->snd_cwnd) {
   [all …]
tcp_westwood.c
   163          const struct tcp_sock *tp = tcp_sk(sk);    (in westwood_fast_bw())
   168          w->bk += tp->snd_una - w->snd_una;
   169          w->snd_una = tp->snd_una;
   180          const struct tcp_sock *tp = tcp_sk(sk);    (in westwood_acked_count())
   183          w->cumul_ack = tp->snd_una - w->snd_una;
   189                  w->accounted += tp->mss_cache;
   190                  w->cumul_ack = tp->mss_cache;
   193          if (w->cumul_ack > tp->mss_cache) {
   197                          w->cumul_ack = tp->mss_cache;
   204          w->snd_una = tp->snd_una;
   [all …]
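Westwood's bandwidth estimate starts from nothing more than the advance of snd_una, as lines 168-169 show: bytes newly acknowledged per RTT become the bandwidth sample. A standalone sketch of that byte counting; the values are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct westwood { uint32_t bk, snd_una; };

    static void fast_bw(struct westwood *w, uint32_t snd_una)
    {
            w->bk += snd_una - w->snd_una;  /* bytes acked since last call */
            w->snd_una = snd_una;           /* remember the new left edge */
    }

    int main(void)
    {
            struct westwood w = { .bk = 0, .snd_una = 1000 };
            fast_bw(&w, 3920);              /* two 1460-byte segments acked */
            printf("bk=%u\n", w.bk);        /* 2920 */
            return 0;
    }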
/net/sched/
cls_api.c
   103                           struct nlmsghdr *n, struct tcf_proto *tp,
   109  static inline u32 tcf_auto_prio(struct tcf_proto *tp)
   113          if (tp)
   114                  first = tp->prio - 1;
   134          struct tcf_proto *tp;    (in tc_ctl_tfilter())
   208               (tp = rtnl_dereference(*back)) != NULL;
   209               back = &tp->next) {
   210                  if (tp->prio >= prio) {
   211                          if (tp->prio == prio) {
   213                              (tp->protocol != protocol && protocol))
   [all …]
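Lines 208-213 walk the per-qdisc filter chain, a singly linked list kept sorted by ascending prio; tcf_auto_prio() picks prio - 1 of the current head so an auto-prioritized filter lands in front. A userspace sketch of the ordered-insertion part, using the same pointer-to-pointer walk (types and names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    struct proto { uint32_t prio; struct proto *next; };

    /* Walk until an equal or higher prio; return the link to patch. */
    static struct proto **find_slot(struct proto **head, uint32_t prio)
    {
            struct proto **back, *p;

            for (back = head; (p = *back) != NULL; back = &p->next)
                    if (p->prio >= prio)
                            break;
            return back;
    }

    int main(void)
    {
            struct proto c = { 300, NULL }, b = { 200, &c }, a = { 100, &b };
            struct proto *head = &a;

            struct proto **slot = find_slot(&head, 250);
            struct proto n = { 250, *slot };
            *slot = &n;

            for (struct proto *p = head; p; p = p->next)
                    printf("%u ", p->prio);  /* 100 200 250 300 */
            printf("\n");
            return 0;
    }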
cls_basic.c
    35          struct tcf_proto *tp;    (struct member)
    40  static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
    44          struct basic_head *head = rcu_dereference_bh(tp->root);
    59  static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
    62          struct basic_head *head = rtnl_dereference(tp->root);
    75  static int basic_init(struct tcf_proto *tp)
    83          rcu_assign_pointer(tp->root, head);
    96  static bool basic_destroy(struct tcf_proto *tp, bool force)
    98          struct basic_head *head = rtnl_dereference(tp->root);
   106                  tcf_unbind_filter(tp, &f->res);
   [all …]
cls_cgroup.c
    25          struct tcf_proto *tp;    (struct member)
    29  static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
    32          struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
    46  static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
    51  static int cls_cgroup_init(struct tcf_proto *tp)
    72                               struct tcf_proto *tp, unsigned long base,
    77          struct cls_cgroup_head *head = rtnl_dereference(tp->root);
    98          new->tp = tp;
   105          err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
   109          err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
   [all …]
cls_fw.c
    48          struct tcf_proto *tp;    (struct member)
    59  static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
    62          struct fw_head *head = rcu_dereference_bh(tp->root);
    88              !(TC_H_MAJ(id ^ tp->q->handle)))) {
    98  static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
   100          struct fw_head *head = rtnl_dereference(tp->root);
   114  static int fw_init(struct tcf_proto *tp)
   130  static bool fw_destroy(struct tcf_proto *tp, bool force)
   132          struct fw_head *head = rtnl_dereference(tp->root);
   149                  tcf_unbind_filter(tp, &f->res);
   [all …]
cls_flower.c
    37          struct flow_dissector_key_ports tp;    (struct member)
   122  static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
   125          struct cls_fl_head *head = rcu_dereference_bh(tp->root);
   151  static int fl_init(struct tcf_proto *tp)
   160          rcu_assign_pointer(tp->root, head);
   191  static bool fl_destroy(struct tcf_proto *tp, bool force)
   193          struct cls_fl_head *head = rtnl_dereference(tp->root);
   209  static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
   211          struct cls_fl_head *head = rtnl_dereference(tp->root);
   308          fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
   [all …]
cls_tcindex.c
    44          struct tcf_proto *tp;    (struct member)
    82  static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
    85          struct tcindex_data *p = rcu_dereference_bh(tp->root);
    90                   skb, tp, res, p);
    96                  res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
   108  static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
   110          struct tcindex_data *p = rtnl_dereference(tp->root);
   113          pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
   120  static int tcindex_init(struct tcf_proto *tp)
   124          pr_debug("tcindex_init(tp %p)\n", tp);
   [all …]
cls_u32.c
    66          struct tcf_proto *tp;    (struct member)
   105  static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
   112          struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
   286  static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
   289          struct tc_u_common *tp_c = tp->data;
   292          ht = rtnl_dereference(tp->root);
   320  static int u32_init(struct tcf_proto *tp)
   325          tp_c = tp->q->u32_node;
   334          root_ht->prio = tp->prio;
   342          tp_c->q = tp->q;
   [all …]
cls_bpf.c
    50          struct tcf_proto *tp;    (struct member)
    78  static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
    81          struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
   145  static int cls_bpf_init(struct tcf_proto *tp)
   154          rcu_assign_pointer(tp->root, head);
   159  static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
   177          cls_bpf_delete_prog(prog->tp, prog);
   180  static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
   185          tcf_unbind_filter(tp, &prog->res);
   191  static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
   [all …]
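All the classifiers above share one concurrency pattern: the classify fast path reads tp->root with rcu_dereference_bh(), while writers, serialized by the RTNL lock, read with rtnl_dereference() and publish a new head with rcu_assign_pointer(). A minimal userspace analogue of the publish/read ordering using C11 atomics; it illustrates the memory ordering only, not RCU's deferred reclamation:

    #include <stdatomic.h>
    #include <stdio.h>

    struct head { int generation; };

    static _Atomic(struct head *) root;  /* like tp->root */

    static void publish(struct head *new_head)
    {
            /* like rcu_assign_pointer(): stores initializing *new_head
             * become visible before the pointer itself does */
            atomic_store_explicit(&root, new_head, memory_order_release);
    }

    static struct head *read_root(void)
    {
            /* like rcu_dereference(): the reader sees initialized data
             * behind any pointer it observes */
            return atomic_load_explicit(&root, memory_order_acquire);
    }

    int main(void)
    {
            static struct head h1 = { .generation = 1 };
            publish(&h1);
            printf("generation=%d\n", read_root()->generation);  /* 1 */
            return 0;
    }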
/net/sctp/
transport.c
   314  void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
   316          if (unlikely(!tp->rto_pending))
   318                  pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);
   320          if (tp->rttvar || tp->srtt) {
   321                  struct net *net = sock_net(tp->asoc->base.sk);
   333                  tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
   334                             + (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
   335                  tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
   341                  tp->srtt = rtt;
   342                  tp->rttvar = rtt >> 1;
   [all …]
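Lines 333-342 are RFC 6298's estimator in shift form: with rto_alpha = 3 and rto_beta = 2 the updates are SRTT = 7/8 SRTT + 1/8 RTT and RTTVAR = 3/4 RTTVAR + 1/4 |SRTT - RTT|, and the first sample seeds SRTT = RTT, RTTVAR = RTT/2. A standalone sketch; line 335's continuation is not shown above, so the "+ (rtt >> alpha)" term below completes it under that RFC formula:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void update_rto(uint32_t *srtt, uint32_t *rttvar, uint32_t rtt)
    {
            const unsigned alpha = 3, beta = 2;  /* 1/8 and 1/4 as shifts */

            if (*srtt == 0) {                    /* first measurement */
                    *srtt = rtt;
                    *rttvar = rtt >> 1;
                    return;
            }
            /* RTTVAR = 3/4 RTTVAR + 1/4 |SRTT - RTT| */
            *rttvar = *rttvar - (*rttvar >> beta)
                    + (((uint32_t)llabs((int64_t)*srtt - (int64_t)rtt)) >> beta);
            /* SRTT = 7/8 SRTT + 1/8 RTT */
            *srtt = *srtt - (*srtt >> alpha) + (rtt >> alpha);
    }

    int main(void)
    {
            uint32_t srtt = 0, rttvar = 0;
            update_rto(&srtt, &rttvar, 100);  /* seeds srtt=100, rttvar=50 */
            update_rto(&srtt, &rttvar, 140);
            printf("srtt=%u rttvar=%u\n", srtt, rttvar);  /* 105, 48 */
            return 0;
    }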