/net/ipv4/ |
D | tcp_cong.c |
    403  u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)  in tcp_slow_start() argument
    405  u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);  in tcp_slow_start()
    407  acked -= cwnd - tp->snd_cwnd;  in tcp_slow_start()
    410  return acked;  in tcp_slow_start()
    417  void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)  in tcp_cong_avoid_ai() argument
    425  tp->snd_cwnd_cnt += acked;  in tcp_cong_avoid_ai()
    443  void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in tcp_reno_cong_avoid() argument
    452  acked = tcp_slow_start(tp, acked);  in tcp_reno_cong_avoid()
    453  if (!acked)  in tcp_reno_cong_avoid()
    457  tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);  in tcp_reno_cong_avoid()
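These tcp_cong.c hits show the shared "acked budget" pattern that the per-algorithm modules below build on: tcp_slow_start() grows snd_cwnd by at most the number of newly acked packets (clamped at snd_ssthresh) and returns whatever part of the budget it did not spend, which the caller then feeds into tcp_cong_avoid_ai() for additive increase. A minimal user-space sketch of that accounting, assuming a simplified tcp_sock with only the three fields visible above (the helper names mirror the kernel fragments but are not the real implementations):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Simplified model of the fields used by the fragments above. */
struct tcp_sock {
    u32 snd_cwnd;      /* congestion window, in packets */
    u32 snd_ssthresh;  /* slow-start threshold */
    u32 snd_cwnd_cnt;  /* packets acked since the last cwnd increment */
};

static u32 min_u32(u32 a, u32 b) { return a < b ? a : b; }

/* Grow cwnd by at most 'acked', capped at ssthresh; return the unused part. */
static u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
    u32 cwnd = min_u32(tp->snd_cwnd + acked, tp->snd_ssthresh);

    acked -= cwnd - tp->snd_cwnd;
    tp->snd_cwnd = cwnd;
    return acked;
}

/* Additive increase: roughly one extra cwnd packet per 'w' packets acked. */
static void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
    tp->snd_cwnd_cnt += acked;
    if (tp->snd_cwnd_cnt >= w) {
        tp->snd_cwnd_cnt -= w;
        tp->snd_cwnd++;
    }
}

/* Reno-style cong_avoid: spend the acked budget on slow start first,
 * then hand any leftover to additive increase. */
static void reno_cong_avoid(struct tcp_sock *tp, u32 acked)
{
    if (tp->snd_cwnd < tp->snd_ssthresh) {
        acked = tcp_slow_start(tp, acked);
        if (!acked)
            return;
    }
    tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}

int main(void)
{
    struct tcp_sock tp = { .snd_cwnd = 10, .snd_ssthresh = 16 };

    for (int i = 0; i < 5; i++) {
        reno_cong_avoid(&tp, 4);  /* pretend each ACK covers 4 packets */
        printf("cwnd=%u cnt=%u\n", (unsigned)tp.snd_cwnd, (unsigned)tp.snd_cwnd_cnt);
    }
    return 0;
}

With these starting values the second ACK exhausts the slow-start headroom and the remainder already flows into the additive-increase counter, which is exactly the hand-off the 452/453/457 lines above implement.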
|
D | tcp_illinois.c |
    52   u16 acked; /* # packets acked by current ACK */  member
    78   ca->acked = 0;  in tcp_illinois_init()
    91   ca->acked = sample->pkts_acked;  in tcp_illinois_acked()
    260  static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in tcp_illinois_cong_avoid() argument
    274  tcp_slow_start(tp, acked);  in tcp_illinois_cong_avoid()
    280  tp->snd_cwnd_cnt += ca->acked;  in tcp_illinois_cong_avoid()
    281  ca->acked = 1;  in tcp_illinois_cong_avoid()
|
D | tcp_cubic.c |
    225  static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)  in bictcp_update() argument
    230  ca->ack_cnt += acked; /* count the number of ACKed packets */  in bictcp_update()
    248  ca->ack_cnt = acked; /* start counting */  in bictcp_update()
    335  static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in bictcp_cong_avoid() argument
    344  acked = tcp_slow_start(tp, acked);  in bictcp_cong_avoid()
    345  if (!acked)  in bictcp_cong_avoid()
    348  bictcp_update(ca, tp->snd_cwnd, acked);  in bictcp_cong_avoid()
    349  tcp_cong_avoid_ai(tp, ca->cnt, acked);  in bictcp_cong_avoid()
|
D | tcp_vegas.c |
    165  static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in tcp_vegas_cong_avoid() argument
    171  tcp_reno_cong_avoid(sk, ack, acked);  in tcp_vegas_cong_avoid()
    196  tcp_reno_cong_avoid(sk, ack, acked);  in tcp_vegas_cong_avoid()
    246  tcp_slow_start(tp, acked);  in tcp_vegas_cong_avoid()
    286  tcp_slow_start(tp, acked);  in tcp_vegas_cong_avoid()
|
D | tcp_veno.c |
    119  static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in tcp_veno_cong_avoid() argument
    125  tcp_reno_cong_avoid(sk, ack, acked);  in tcp_veno_cong_avoid()
    138  tcp_reno_cong_avoid(sk, ack, acked);  in tcp_veno_cong_avoid()
    157  tcp_slow_start(tp, acked);  in tcp_veno_cong_avoid()
|
D | tcp_nv.c |
    181  static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in tcpnv_cong_avoid() argument
    195  acked = tcp_slow_start(tp, acked);  in tcpnv_cong_avoid()
    196  if (!acked)  in tcpnv_cong_avoid()
    202  tcp_cong_avoid_ai(tp, cnt, acked);  in tcpnv_cong_avoid()
    205  tcp_cong_avoid_ai(tp, cnt, acked);  in tcpnv_cong_avoid()
|
D | tcp_scalable.c |
    19   static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in tcp_scalable_cong_avoid() argument
    27   tcp_slow_start(tp, acked);  in tcp_scalable_cong_avoid()
|
D | tcp_highspeed.c |
    112  static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in hstcp_cong_avoid() argument
    121  tcp_slow_start(tp, acked);  in hstcp_cong_avoid()
|
D | tcp_bbr.c |
    478  struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)  in bbr_set_cwnd_to_recover_or_restore() argument
    497  cwnd = tcp_packets_in_flight(tp) + acked;  in bbr_set_cwnd_to_recover_or_restore()
    506  *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);  in bbr_set_cwnd_to_recover_or_restore()
    517  u32 acked, u32 bw, int gain)  in bbr_set_cwnd() argument
    523  if (!acked)  in bbr_set_cwnd()
    526  if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))  in bbr_set_cwnd()
    539  cwnd = min(cwnd + acked, target_cwnd);  in bbr_set_cwnd()
    541  cwnd = cwnd + acked;  in bbr_set_cwnd()
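tcp_bbr.c uses acked differently from the Reno-style modules above: rather than growing snd_cwnd directly, bbr_set_cwnd() adds the acked budget and then bounds the result against a target window derived from the bandwidth estimate and gain, and while recovering from loss the window is pinned to packets in flight plus the newly acked count. A rough stand-alone model of that bounding step, assuming a hypothetical bbr_target_cwnd() in place of BBR's real BDP computation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

static u32 min_u32(u32 a, u32 b) { return a < b ? a : b; }

/* Hypothetical stand-in for BBR's target window: gain x estimated BDP. */
static u32 bbr_target_cwnd(u32 bdp_pkts, double gain)
{
    return (u32)(bdp_pkts * gain);
}

/* Model of the update around lines 517-541 above: outside recovery, grow the
 * window by the acked budget but never past the target; inside recovery, pin
 * it to packets in flight plus what this ACK just freed up. */
static u32 bbr_set_cwnd(u32 cwnd, u32 acked, u32 inflight, bool in_recovery,
                        u32 bdp_pkts, double gain)
{
    if (!acked)
        return cwnd;
    if (in_recovery)
        return inflight + acked;
    return min_u32(cwnd + acked, bbr_target_cwnd(bdp_pkts, gain));
}

int main(void)
{
    u32 cwnd = 20;

    cwnd = bbr_set_cwnd(cwnd, 5, 18, false, 24, 1.0);
    printf("steady state: cwnd=%u\n", (unsigned)cwnd);  /* capped at 24 */

    cwnd = bbr_set_cwnd(cwnd, 3, 10, true, 24, 1.0);
    printf("in recovery:  cwnd=%u\n", (unsigned)cwnd);  /* 10 + 3 = 13 */
    return 0;
}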
|
D | tcp_hybla.c |
    90   static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in hybla_cong_avoid() argument
    107  tcp_reno_cong_avoid(sk, ack, acked);  in hybla_cong_avoid()
|
D | tcp_bic.c |
    140  static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in bictcp_cong_avoid() argument
    149  tcp_slow_start(tp, acked);  in bictcp_cong_avoid()
|
D | tcp_yeah.c |
    72   static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in tcp_yeah_cong_avoid() argument
    81   tcp_slow_start(tp, acked);  in tcp_yeah_cong_avoid()
|
D | tcp_lp.c |
    119  static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in tcp_lp_cong_avoid() argument
    124  tcp_reno_cong_avoid(sk, ack, acked);  in tcp_lp_cong_avoid()
|
D | tcp_htcp.c |
    231  static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in htcp_cong_avoid() argument
    240  tcp_slow_start(tp, acked);  in htcp_cong_avoid()
|
D | tcp_cdg.c |
    262  static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)  in tcp_cdg_cong_avoid() argument
    293  tcp_reno_cong_avoid(sk, ack, acked);  in tcp_cdg_cong_avoid()
|
D | tcp_minisocks.c |
    762  inet_rsk(req)->acked = 1;  in tcp_check_req()
    785  inet_rsk(req)->acked = 1;  in tcp_check_req()
|
D | inet_connection_sock.c |
    679  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);  in syn_ack_recalc()
    685  *resend = !inet_rsk(req)->acked ||  in syn_ack_recalc()
    789  inet_rsk(req)->acked)) {  in reqsk_timer_handler()
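In tcp_minisocks.c and inet_connection_sock.c, acked is not a packet count but a one-bit flag on the request socket recording that the peer has acknowledged our SYN-ACK; syn_ack_recalc() combines it with the retransmission count to decide whether the pending request is resent, kept alive for deferred accept, or expired. A simplified, illustrative model of that decision (field and parameter names follow the fragments above; the real resend threshold and the surrounding request-socket machinery are omitted):

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of a pending connection request. */
struct pending_req {
    bool acked;        /* peer has ACKed our SYN-ACK */
    int  num_timeout;  /* SYN-ACK retransmissions so far */
};

/* Illustrative version of the syn_ack_recalc() decision referenced above:
 * without deferred accept, resend unconditionally and expire on the retry
 * threshold alone; with it, an already-acked request is kept around waiting
 * for data and is only resent or expired once the retry budget runs out. */
static void syn_ack_recalc(const struct pending_req *req, int thresh,
                           int max_retries, bool defer_accept,
                           bool *expire, bool *resend)
{
    if (!defer_accept) {
        *expire = req->num_timeout >= thresh;
        *resend = true;
        return;
    }
    *expire = req->num_timeout >= thresh &&
              (!req->acked || req->num_timeout >= max_retries);
    *resend = !req->acked || req->num_timeout >= max_retries;
}

int main(void)
{
    struct pending_req req = { .acked = true, .num_timeout = 3 };
    bool expire, resend;

    syn_ack_recalc(&req, 3, 5, true, &expire, &resend);
    printf("expire=%d resend=%d\n", expire, resend);  /* kept, not resent */
    return 0;
}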
|
/net/mac80211/ |
D | status.c |
    626  bool acked, bool dropped)  in ieee80211_report_ack_skb() argument
    648  cookie, acked,  in ieee80211_report_ack_skb()
    655  acked, GFP_ATOMIC);  in ieee80211_report_ack_skb()
    664  skb_complete_wifi_ack(skb, acked);  in ieee80211_report_ack_skb()
    673  bool acked = info->flags & IEEE80211_TX_STAT_ACK;  in ieee80211_report_used_skb() local
    676  acked = false;  in ieee80211_report_used_skb()
    700  acked);  in ieee80211_report_used_skb()
    705  ieee80211_report_ack_skb(local, info, acked, dropped);  in ieee80211_report_used_skb()
    710  skb->wifi_acked = acked;  in ieee80211_report_used_skb()
    875  bool acked;  in __ieee80211_tx_status() local
    [all …]
|
D | mesh_ps.c |
    535  bool tx, bool acked)  in ieee80211_mpsp_trigger_process() argument
    541  if (rspi && acked)  in ieee80211_mpsp_trigger_process()
    546  else if (acked &&  in ieee80211_mpsp_trigger_process()
|
/net/tipc/ |
D | link.c |
    184   u16 acked;  member
    248   static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
    347   rcv_l->acked = snd_l->snd_nxt - 1;  in tipc_link_add_bc_peer()
    407   return l->acked;  in tipc_link_acked()
    775   u16 bc_acked = l->bc_rcvlink->acked;  in tipc_link_timeout()
    921   l->acked = 0;  in tipc_link_reset()
    1096  if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))  in link_retransmit_failure()
    1333  static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)  in tipc_link_release_pkts() argument
    1339  if (more(buf_seqno(skb), acked))  in tipc_link_release_pkts()
    1405  static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,  in tipc_link_advance_transmq() argument
    [all …]
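In TIPC, acked is a per-link 16-bit sequence number (the member at line 184) recording the highest packet the peer has confirmed; tipc_link_release_pkts() walks the transmit queue and frees every buffer whose sequence number is not beyond it, using wrap-safe more()/less() comparisons. A small self-contained sketch of that release step, assuming a simplified packet list in place of the kernel's sk_buff queue:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;

/* Wrap-safe 16-bit sequence comparisons, in the spirit of TIPC's helpers. */
static int less_eq(u16 left, u16 right)
{
    return (u16)(right - left) < 0x8000;
}

static int more(u16 left, u16 right)
{
    return !less_eq(left, right);
}

struct pkt {
    u16 seqno;
    struct pkt *next;
};

/* Walk the transmit queue and drop every packet whose sequence number is not
 * beyond 'acked'; stop at the first one the peer has not confirmed yet. */
static int release_pkts(struct pkt **transmq, u16 acked)
{
    int released = 0;

    while (*transmq && !more((*transmq)->seqno, acked)) {
        *transmq = (*transmq)->next;
        released++;
    }
    return released;
}

int main(void)
{
    /* Three queued packets straddling the 16-bit wrap point. */
    struct pkt c = { 1, NULL }, b = { 0, &c }, a = { 65535, &b };
    struct pkt *q = &a;

    int n = release_pkts(&q, 0);  /* peer acknowledged up to seqno 0 */
    printf("released %d, head now seqno %u\n", n, (unsigned)(q ? q->seqno : 0));
    return 0;
}

The wrap handling is the point of the exercise: seqno 65535 counts as "not beyond" an acked value of 0, so the queue drains correctly across the rollover.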
|
D | link.h | 144 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
|
D | bcast.c |
    467  u16 acked = msg_bcast_ack(hdr);  in tipc_bcast_ack_rcv() local
    477  tipc_link_bc_ack_rcv(l, acked, &xmitq);  in tipc_bcast_ack_rcv()
|
/net/dccp/ccids/ |
D | ccid3.c |
    357  struct tfrc_tx_hist_entry *acked;  in ccid3_hc_tx_packet_recv() local
    374  acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb));  in ccid3_hc_tx_packet_recv()
    375  if (acked == NULL)  in ccid3_hc_tx_packet_recv()
    378  tfrc_tx_hist_purge(&acked->next);  in ccid3_hc_tx_packet_recv()
    382  r_sample = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp));  in ccid3_hc_tx_packet_recv()
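In DCCP's CCID-3 sender, acked is a pointer into the transmit history: when feedback arrives, the entry matching the acknowledged sequence number is looked up, older entries are purged, and the entry's send timestamp yields an RTT sample. A compact sketch of that lookup-and-sample pattern, assuming a plain singly linked list in place of the tfrc_tx_hist helpers named above:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* One entry per packet still awaiting feedback, newest first. */
struct tx_hist_entry {
    uint64_t seqno;
    uint64_t stamp_us;  /* send time, microseconds */
    struct tx_hist_entry *next;
};

/* Find the history entry for 'ack_seq', or NULL if it is no longer there. */
static struct tx_hist_entry *tx_hist_find(struct tx_hist_entry *h, uint64_t ack_seq)
{
    while (h && h->seqno != ack_seq)
        h = h->next;
    return h;
}

/* Free every entry older than the acked one (everything after it in the list). */
static void tx_hist_purge(struct tx_hist_entry **head)
{
    while (*head) {
        struct tx_hist_entry *e = *head;
        *head = e->next;
        free(e);
    }
}

int main(void)
{
    /* Two outstanding packets; pretend the feedback acknowledges seqno 41. */
    struct tx_hist_entry *oldest = calloc(1, sizeof(*oldest));
    struct tx_hist_entry *newest = calloc(1, sizeof(*newest));
    uint64_t now_us = 1200;

    oldest->seqno = 40; oldest->stamp_us = 100;
    newest->seqno = 41; newest->stamp_us = 200; newest->next = oldest;

    struct tx_hist_entry *acked = tx_hist_find(newest, 41);
    if (acked) {
        tx_hist_purge(&acked->next);  /* drop entries older than the acked one */
        printf("rtt sample: %llu us\n",
               (unsigned long long)(now_us - acked->stamp_us));
    }
    free(newest);  /* the real code keeps the acked entry in the history */
    return 0;
}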
|
/net/dccp/ |
D | output.c |
    414  if (inet_rsk(req)->acked) /* increase GSS upon retransmission */  in dccp_make_response()
    442  inet_rsk(req)->acked = 1;  in dccp_make_response()
|
D | minisocks.c | 263 inet_rsk(req)->acked = 0; in dccp_reqsk_init()
|