
Lines Matching refs: ca (Linux TCP NV congestion control, net/ipv4/tcp_nv.c)

Each entry below gives the source line number in tcp_nv.c, the matching line of code, the enclosing function, and, for declaration lines, whether ca is an argument or a local.

124 static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)  in tcpnv_reset()  argument
128 ca->nv_reset = 0; in tcpnv_reset()
129 ca->nv_no_cong_cnt = 0; in tcpnv_reset()
130 ca->nv_rtt_cnt = 0; in tcpnv_reset()
131 ca->nv_last_rtt = 0; in tcpnv_reset()
132 ca->nv_rtt_max_rate = 0; in tcpnv_reset()
133 ca->nv_rtt_start_seq = tp->snd_una; in tcpnv_reset()
134 ca->nv_eval_call_cnt = 0; in tcpnv_reset()
135 ca->nv_last_snd_una = tp->snd_una; in tcpnv_reset()
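
The matches above cover essentially the whole body of tcpnv_reset(); the only lines not shown are the opening brace and the tcp_sock lookup, which do not reference ca. Read in context, the helper plausibly looks like the sketch below; the struct tcp_sock *tp line is inferred from the fact that nv_rtt_start_seq and nv_last_snd_una are seeded from tp->snd_una.

static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);	/* inferred: needed for tp->snd_una below */

	ca->nv_reset = 0;
	ca->nv_no_cong_cnt = 0;
	ca->nv_rtt_cnt = 0;
	ca->nv_last_rtt = 0;
	ca->nv_rtt_max_rate = 0;
	ca->nv_rtt_start_seq = tp->snd_una;
	ca->nv_eval_call_cnt = 0;
	ca->nv_last_snd_una = tp->snd_una;
}
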
140 struct tcpnv *ca = inet_csk_ca(sk); in tcpnv_init() local
143 tcpnv_reset(ca, sk); in tcpnv_init()
152 ca->nv_base_rtt = base_rtt; in tcpnv_init()
153 ca->nv_lower_bound_rtt = (base_rtt * 205) >> 8; /* 80% */ in tcpnv_init()
155 ca->nv_base_rtt = 0; in tcpnv_init()
156 ca->nv_lower_bound_rtt = 0; in tcpnv_init()
159 ca->nv_allow_cwnd_growth = 1; in tcpnv_init()
160 ca->nv_min_rtt_reset_jiffies = jiffies + 2 * HZ; in tcpnv_init()
161 ca->nv_min_rtt = NV_INIT_RTT; in tcpnv_init()
162 ca->nv_min_rtt_new = NV_INIT_RTT; in tcpnv_init()
163 ca->nv_min_cwnd = NV_MIN_CWND; in tcpnv_init()
164 ca->nv_catchup = 0; in tcpnv_init()
165 ca->cwnd_growth_factor = 0; in tcpnv_init()
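
Lines 152-156 are the two halves of the base-RTT setup in tcpnv_init(): when a base RTT is available it is stored and a lower bound of roughly 80% of it is derived in fixed point (205/256), otherwise both bounds are left at 0, which nv_get_bounded_rtt() below treats as "disabled". A self-contained demo of that fixed-point computation, using a made-up sample RTT:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t base_rtt = 10000;		/* hypothetical base RTT, in usecs */
	uint32_t lower = (base_rtt * 205) >> 8;	/* same math as tcpnv_init(), line 153 */

	/* 205/256 = 0.80078..., so this is "80%" without floating point */
	printf("base_rtt=%u lower_bound=%u (%.2f%%)\n",
	       base_rtt, lower, 100.0 * lower / base_rtt);
	return 0;
}
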
171 inline u32 nv_get_bounded_rtt(struct tcpnv *ca, u32 val) in nv_get_bounded_rtt() argument
173 if (ca->nv_lower_bound_rtt > 0 && val < ca->nv_lower_bound_rtt) in nv_get_bounded_rtt()
174 return ca->nv_lower_bound_rtt; in nv_get_bounded_rtt()
175 else if (ca->nv_base_rtt > 0 && val > ca->nv_base_rtt) in nv_get_bounded_rtt()
176 return ca->nv_base_rtt; in nv_get_bounded_rtt()
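
nv_get_bounded_rtt() appears here almost in full; the only line missing is the final fall-through return of val itself, which has no ca reference. The effect is a clamp of a measured RTT into [nv_lower_bound_rtt, nv_base_rtt], with either bound ignored while it is 0. A self-contained rendering of the same logic, using a minimal stand-in struct:

#include <stdio.h>
#include <stdint.h>

/* stand-in for the two tcpnv fields the helper actually reads */
struct nv_bounds {
	uint32_t nv_lower_bound_rtt;
	uint32_t nv_base_rtt;
};

static uint32_t nv_get_bounded_rtt(const struct nv_bounds *ca, uint32_t val)
{
	if (ca->nv_lower_bound_rtt > 0 && val < ca->nv_lower_bound_rtt)
		return ca->nv_lower_bound_rtt;
	else if (ca->nv_base_rtt > 0 && val > ca->nv_base_rtt)
		return ca->nv_base_rtt;
	else
		return val;	/* inferred fall-through: value already within bounds */
}

int main(void)
{
	struct nv_bounds ca = { .nv_lower_bound_rtt = 8000, .nv_base_rtt = 10000 };

	printf("%u %u %u\n",
	       nv_get_bounded_rtt(&ca, 5000),	/* clamped up to 8000 */
	       nv_get_bounded_rtt(&ca, 9000),	/* left as-is */
	       nv_get_bounded_rtt(&ca, 20000));	/* clamped down to 10000 */
	return 0;
}
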
184 struct tcpnv *ca = inet_csk_ca(sk); in tcpnv_cong_avoid() local
191 if (!ca->nv_allow_cwnd_growth) in tcpnv_cong_avoid()
200 if (ca->cwnd_growth_factor < 0) { in tcpnv_cong_avoid()
201 cnt = tcp_snd_cwnd(tp) << -ca->cwnd_growth_factor; in tcpnv_cong_avoid()
204 cnt = max(4U, tcp_snd_cwnd(tp) >> ca->cwnd_growth_factor); in tcpnv_cong_avoid()
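
Lines 191-204 are the only part of tcpnv_cong_avoid() that touches ca: growth is skipped while nv_allow_cwnd_growth is 0, and otherwise cwnd_growth_factor is turned into the additive-increase period by shifting the current cwnd. A negative factor left-shifts cwnd (a larger count, slower growth); a non-negative factor right-shifts it with a floor of 4 (a smaller count, faster growth). The standalone sketch below mirrors that count calculation, assuming the count is then used in the usual "grow cwnd by about one segment per count ACKed segments" fashion:

#include <stdio.h>
#include <stdint.h>

/* mirrors the count computation on lines 200-204 */
static uint32_t nv_ai_count(uint32_t cwnd, int cwnd_growth_factor)
{
	if (cwnd_growth_factor < 0)
		return cwnd << -cwnd_growth_factor;	/* larger count: slower growth */

	uint32_t cnt = cwnd >> cwnd_growth_factor;	/* smaller count: faster growth */
	return cnt > 4 ? cnt : 4;			/* the max(4U, ...) on line 204 */
}

int main(void)
{
	int f;

	for (f = -2; f <= 2; f++)
		printf("growth_factor=%2d -> count=%u\n", f, nv_ai_count(100, f));
	return 0;
}
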
218 struct tcpnv *ca = inet_csk_ca(sk); in tcpnv_state() local
220 if (new_state == TCP_CA_Open && ca->nv_reset) { in tcpnv_state()
221 tcpnv_reset(ca, sk); in tcpnv_state()
224 ca->nv_reset = 1; in tcpnv_state()
225 ca->nv_allow_cwnd_growth = 0; in tcpnv_state()
228 if (ca->cwnd_growth_factor > 0) in tcpnv_state()
229 ca->cwnd_growth_factor = 0; in tcpnv_state()
232 ca->cwnd_growth_factor > -8) in tcpnv_state()
233 ca->cwnd_growth_factor--; in tcpnv_state()
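
The state callback ties these pieces together: re-entering TCP_CA_Open with nv_reset set triggers a full tcpnv_reset(), while entering a congestion state sets nv_reset and freezes cwnd growth. On top of that, lines 228-233 first zero a positive cwnd_growth_factor and then step it down, apparently no lower than -8; combined with the shift in tcpnv_cong_avoid(), repeated loss events can therefore slow additive increase by up to a factor of 2^8 = 256. A small model of just that adjustment (in the kernel the decrement is additionally gated on a module parameter that is not visible in these matches):

#include <stdio.h>

static int nv_growth_factor_on_loss(int cwnd_growth_factor)
{
	if (cwnd_growth_factor > 0)
		cwnd_growth_factor = 0;		/* drop back to Reno-like growth first */
	if (cwnd_growth_factor > -8)
		cwnd_growth_factor--;		/* then slow growth further, floored at -8 */
	return cwnd_growth_factor;
}

int main(void)
{
	int f = 2, i;

	for (i = 1; i <= 11; i++) {
		f = nv_growth_factor_on_loss(f);
		printf("loss event %2d: growth_factor=%d\n", i, f);
	}
	return 0;
}
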
244 struct tcpnv *ca = inet_csk_ca(sk); in tcpnv_acked() local
261 if (ca->nv_catchup && tcp_snd_cwnd(tp) >= nv_min_cwnd) { in tcpnv_acked()
262 ca->nv_catchup = 0; in tcpnv_acked()
263 ca->nv_allow_cwnd_growth = 0; in tcpnv_acked()
266 bytes_acked = tp->snd_una - ca->nv_last_snd_una; in tcpnv_acked()
267 ca->nv_last_snd_una = tp->snd_una; in tcpnv_acked()
274 if (ca->nv_last_rtt > 0) { in tcpnv_acked()
276 ((u64)ca->nv_last_rtt) in tcpnv_acked()
280 ca->nv_min_rtt = avg_rtt << 1; in tcpnv_acked()
282 ca->nv_last_rtt = avg_rtt; in tcpnv_acked()
296 if (ca->nv_rtt_max_rate < rate) in tcpnv_acked()
297 ca->nv_rtt_max_rate = rate; in tcpnv_acked()
300 if (ca->nv_eval_call_cnt < 255) in tcpnv_acked()
301 ca->nv_eval_call_cnt++; in tcpnv_acked()
304 avg_rtt = nv_get_bounded_rtt(ca, avg_rtt); in tcpnv_acked()
307 if (avg_rtt < ca->nv_min_rtt) in tcpnv_acked()
308 ca->nv_min_rtt = avg_rtt; in tcpnv_acked()
311 if (avg_rtt < ca->nv_min_rtt_new) in tcpnv_acked()
312 ca->nv_min_rtt_new = avg_rtt; in tcpnv_acked()
323 if (time_after_eq(now, ca->nv_min_rtt_reset_jiffies)) { in tcpnv_acked()
326 ca->nv_min_rtt = ca->nv_min_rtt_new; in tcpnv_acked()
327 ca->nv_min_rtt_new = NV_INIT_RTT; in tcpnv_acked()
329 ca->nv_min_rtt_reset_jiffies = in tcpnv_acked()
334 ca->nv_min_cwnd = max(ca->nv_min_cwnd / 2, NV_MIN_CWND); in tcpnv_acked()
338 if (before(ca->nv_rtt_start_seq, tp->snd_una)) { in tcpnv_acked()
339 ca->nv_rtt_start_seq = tp->snd_nxt; in tcpnv_acked()
340 if (ca->nv_rtt_cnt < 0xff) in tcpnv_acked()
342 ca->nv_rtt_cnt++; in tcpnv_acked()
349 if (ca->nv_eval_call_cnt == 1 && in tcpnv_acked()
350 bytes_acked >= (ca->nv_min_cwnd - 1) * tp->mss_cache && in tcpnv_acked()
351 ca->nv_min_cwnd < (NV_TSO_CWND_BOUND + 1)) { in tcpnv_acked()
352 ca->nv_min_cwnd = min(ca->nv_min_cwnd in tcpnv_acked()
355 ca->nv_rtt_start_seq = tp->snd_nxt + in tcpnv_acked()
356 ca->nv_min_cwnd * tp->mss_cache; in tcpnv_acked()
357 ca->nv_eval_call_cnt = 0; in tcpnv_acked()
358 ca->nv_allow_cwnd_growth = 1; in tcpnv_acked()
367 div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt, in tcpnv_acked()
383 if (ca->nv_rtt_cnt < nv_rtt_min_cnt) { in tcpnv_acked()
386 if (ca->nv_eval_call_cnt < in tcpnv_acked()
390 } else if (ca->nv_eval_call_cnt < in tcpnv_acked()
392 if (ca->nv_allow_cwnd_growth && in tcpnv_acked()
393 ca->nv_rtt_cnt > nv_stop_rtt_cnt) in tcpnv_acked()
394 ca->nv_allow_cwnd_growth = 0; in tcpnv_acked()
399 ca->nv_allow_cwnd_growth = 0; in tcpnv_acked()
412 if (ca->cwnd_growth_factor > 0) in tcpnv_acked()
413 ca->cwnd_growth_factor = 0; in tcpnv_acked()
414 ca->nv_no_cong_cnt = 0; in tcpnv_acked()
417 if (ca->nv_eval_call_cnt < nv_inc_eval_min_calls) in tcpnv_acked()
420 ca->nv_allow_cwnd_growth = 1; in tcpnv_acked()
421 ca->nv_no_cong_cnt++; in tcpnv_acked()
422 if (ca->cwnd_growth_factor < 0 && in tcpnv_acked()
424 ca->nv_no_cong_cnt > nv_cwnd_growth_rate_neg) { in tcpnv_acked()
425 ca->cwnd_growth_factor++; in tcpnv_acked()
426 ca->nv_no_cong_cnt = 0; in tcpnv_acked()
427 } else if (ca->cwnd_growth_factor >= 0 && in tcpnv_acked()
429 ca->nv_no_cong_cnt > in tcpnv_acked()
431 ca->cwnd_growth_factor++; in tcpnv_acked()
432 ca->nv_no_cong_cnt = 0; in tcpnv_acked()
440 ca->nv_eval_call_cnt = 0; in tcpnv_acked()
441 ca->nv_rtt_cnt = 0; in tcpnv_acked()
442 ca->nv_rtt_max_rate = 0; in tcpnv_acked()
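
The tcpnv_acked() matches sketch the whole measurement loop: leaving catch-up mode once cwnd is large enough (lines 261-263), accounting bytes ACKed against nv_last_snd_una (266-267), keeping a fixed-point moving average of the RTT in nv_last_rtt (274-282), tracking the per-round maximum rate and capping nv_eval_call_cnt at 255 (296-301), maintaining a windowed minimum RTT that is rolled over when nv_min_rtt_reset_jiffies expires (304-334), counting RTT boundaries via nv_rtt_start_seq (338-342), widening nv_min_cwnd for TSO bursts (349-358), and finally, once nv_rtt_cnt and nv_eval_call_cnt pass their thresholds, deciding whether cwnd may keep growing before clearing the per-round counters (440-442). The key quantity on line 367 is the product of the round's maximum rate and the minimum RTT, i.e. a bandwidth-delay product: the amount of data that must be in flight to sustain the observed rate with no queueing. A self-contained arithmetic illustration with hypothetical numbers (the kernel applies its own rate units and divides by the MSS, which is not visible in these matches):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_rate = 12500000;	/* hypothetical max rate, bytes/sec (~100 Mbit/s) */
	uint64_t min_rtt_us = 20000;	/* hypothetical minimum RTT, 20 ms */
	uint32_t mss = 1448;

	/* rate * min_rtt = bytes that must be in flight to sustain the rate */
	uint64_t bdp_bytes = max_rate * min_rtt_us / 1000000;

	printf("BDP = %llu bytes (~%llu segments of %u bytes)\n",
	       (unsigned long long)bdp_bytes,
	       (unsigned long long)(bdp_bytes / mss), mss);
	return 0;
}

When the decision goes against growth, the matches at lines 383-414 show nv_allow_cwnd_growth being cleared and a positive cwnd_growth_factor zeroed; when evaluations keep coming back clean, lines 417-432 re-enable growth and step cwnd_growth_factor back up, with nv_no_cong_cnt pacing how quickly that happens.
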
457 const struct tcpnv *ca = inet_csk_ca(sk); in tcpnv_get_info() local
461 info->vegas.tcpv_rttcnt = ca->nv_rtt_cnt; in tcpnv_get_info()
462 info->vegas.tcpv_rtt = ca->nv_last_rtt; in tcpnv_get_info()
463 info->vegas.tcpv_minrtt = ca->nv_min_rtt; in tcpnv_get_info()
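
tcpnv_get_info() reuses the vegas slot of the congestion-control info structure to export nv_rtt_cnt, nv_last_rtt and nv_min_rtt. Assuming the tcp_nv module is loaded and the socket has been switched to it via TCP_CONGESTION, these counters can be read from user space through the generic TCP_CC_INFO socket option, roughly as in the sketch below (connection setup and error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/inet_diag.h>	/* union tcp_cc_info / struct tcpvegas_info */

#ifndef TCP_CC_INFO
#define TCP_CC_INFO 26
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	/* switch this socket to NV (requires the tcp_nv module) */
	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "nv", strlen("nv"));

	/* ... connect() and exchange some data so NV has RTT samples ... */

	union tcp_cc_info cc;
	socklen_t len = sizeof(cc);

	if (getsockopt(fd, IPPROTO_TCP, TCP_CC_INFO, &cc, &len) == 0) {
		/* NV fills the vegas layout, per tcpnv_get_info() above */
		printf("rtt_cnt=%u last_rtt=%u min_rtt=%u\n",
		       cc.vegas.tcpv_rttcnt, cc.vegas.tcpv_rtt,
		       cc.vegas.tcpv_minrtt);
	}
	return 0;
}
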