Lines matching refs:sk — cross-reference hits for the socket argument sk. The function names (bpf_cubic_init, bpf_cubic_cong_avoid, bpf_cubic_acked, ...) identify this as a BPF CUBIC congestion-control program, apparently the kernel selftest bpf_cubic.c, a struct_ops port of net/ipv4/tcp_cubic.c. Only the lines that touch sk are shown; elided code is marked /* ... */.
static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
{
        return tcp_sk(sk)->tcp_mstamp;
}
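tcp_mstamp is the tcp_sock's cached microsecond timestamp of the most recent packet, so this clock gives the HyStart bookkeeping below (round_start, last_ack, delay_min) usec resolution rather than jiffies.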
static __always_inline void bictcp_hystart_reset(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        ca->round_start = ca->last_ack = bictcp_clock_us(sk);
        /* ... */
}
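The elided lines reset the rest of the per-round HyStart state; a sketch following net/ipv4/tcp_cubic.c, with field names assumed to carry over to this port:

static __always_inline void bictcp_hystart_reset(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        /* stamp the start of a new round of ACK-train measurement */
        ca->round_start = ca->last_ack = bictcp_clock_us(sk);
        /* the round ends when this sequence number is ACKed */
        ca->end_seq = tp->snd_nxt;
        /* restart min-RTT sampling for the delay-increase heuristic */
        ca->curr_rtt = ~0U;
        ca->sample_cnt = 0;
}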
void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
        struct bictcp *ca = inet_csk_ca(sk);
        /* ... */
        bictcp_hystart_reset(sk);
        /* ... */
        tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
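The two visible statements are guarded by the hystart and initial_ssthresh tunables; a sketch after tcp_cubic.c's bictcp_init(), assuming those globals are defined near the top of the file:

void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
        struct bictcp *ca = inet_csk_ca(sk);

        bictcp_reset(ca);

        if (hystart)
                bictcp_hystart_reset(sk);

        /* with HyStart off, optionally start from a fixed ssthresh */
        if (!hystart && initial_ssthresh)
                tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}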
void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
        /* ... */
        struct bictcp *ca = inet_csk_ca(sk);
        /* ... */
        delta = now - tcp_sk(sk)->lsndtime;
        /* ... */
}
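Only the CA_EVENT_TX_START event matters here: when transmission resumes after an idle period, epoch_start is shifted forward so idle time does not count as time spent on the cubic curve. A sketch after tcp_cubic.c's bictcp_cwnd_event() (lsndtime and tcp_jiffies32 are both in jiffies):

void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
        if (event == CA_EVENT_TX_START) {
                struct bictcp *ca = inet_csk_ca(sk);
                __u32 now = tcp_jiffies32;
                __s32 delta;

                /* how long the connection sat idle */
                delta = now - tcp_sk(sk)->lsndtime;

                /* shift the epoch so cwnd growth stays on the cubic
                 * curve instead of jumping ahead after idle
                 */
                if (ca->epoch_start && delta > 0) {
                        ca->epoch_start += delta;
                        if (after(ca->epoch_start, now))
                                ca->epoch_start = now;
                }
        }
}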
void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        if (!tcp_is_cwnd_limited(sk))
                return;
        /* ... */
        bictcp_hystart_reset(sk);
        /* ... */
}
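A sketch of the whole hook, after tcp_cubic.c's bictcp_cong_avoid(); bictcp_update() computes ca->cnt from the cubic curve, while tcp_slow_start() and tcp_cong_avoid_ai() do the actual cwnd arithmetic:

void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        /* only grow cwnd while the window is what limits sending */
        if (!tcp_is_cwnd_limited(sk))
                return;

        if (tcp_in_slow_start(tp)) {
                /* an ACK beyond end_seq closes the current HyStart round */
                if (hystart && after(ack, ca->end_seq))
                        bictcp_hystart_reset(sk);
                acked = tcp_slow_start(tp, acked);
                if (!acked)
                        return;
        }
        bictcp_update(ca, tp->snd_cwnd, acked);
        tcp_cong_avoid_ai(tp, ca->cnt, acked);
}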
__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        /* ... */
}
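The elided body ends the cubic epoch, records the pre-loss window as last_max_cwnd (shrunk slightly under fast convergence so competing flows can claim bandwidth), and returns the multiplicatively decreased window. A sketch after tcp_cubic.c, where beta = 717 and BICTCP_BETA_SCALE = 1024, giving a decrease factor of about 0.7:

__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        ca->epoch_start = 0;    /* end of the current epoch */

        /* W_max with fast convergence: remember a reduced maximum
         * if this loss happened below the previous maximum
         */
        if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
                ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
                        / (2 * BICTCP_BETA_SCALE);
        else
                ca->last_max_cwnd = tp->snd_cwnd;

        /* multiplicative decrease: cwnd * beta / BICTCP_BETA_SCALE */
        return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}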
void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
{
        /* ... */
        bictcp_reset(inet_csk_ca(sk));
        bictcp_hystart_reset(sk);
        /* ... */
}
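The elided guard is the loss-state check; a sketch after tcp_cubic.c's bictcp_state():

void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
{
        /* an RTO invalidates all cubic history: start from scratch */
        if (new_state == TCP_CA_Loss) {
                bictcp_reset(inet_csk_ca(sk));
                bictcp_hystart_reset(sk);
        }
}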
static __always_inline __u32 hystart_ack_delay(struct sock *sk)
{
        /* ... */
        rate = sk->sk_pacing_rate;
        /* ... */
}
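This helper estimates how much ACK spacing pacing alone can account for, so a paced sender does not trip the ack-train detector. A sketch after tcp_cubic.c; the burst-size constant has changed across kernel versions, so treat it as illustrative:

static __always_inline __u32 hystart_ack_delay(struct sock *sk)
{
        unsigned long rate;

        rate = sk->sk_pacing_rate;
        if (!rate)
                return 0;
        /* time to pace one maximum-size burst at the current rate,
         * capped at 1 ms
         */
        return min((__u64)USEC_PER_MSEC,
                   div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}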
static __always_inline void hystart_update(struct sock *sk, __u32 delay)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        /* ... */
        __u32 now = bictcp_clock_us(sk);
        /* ... */
        threshold = ca->delay_min + hystart_ack_delay(sk);
        /* ... */
        if (sk->sk_pacing_status == SK_PACING_NONE)
                threshold >>= 1;
        /* ... */
}
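hystart_update() implements both HyStart exit heuristics: the ack-train test (this round's ACK train has stretched past roughly delay_min/2, meaning data already fills half the path) and the delay-increase test (the round's minimum RTT has risen clearly above delay_min). Either one sets ssthresh to the current cwnd, ending slow start early. A sketch after tcp_cubic.c; hystart_detect, hystart_ack_delta_us, HYSTART_MIN_SAMPLES, and HYSTART_DELAY_THRESH() are the tunables and helpers assumed to be defined earlier in the file:

static __always_inline void hystart_update(struct sock *sk, __u32 delay)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        __u32 threshold;

        if (hystart_detect & HYSTART_ACK_TRAIN) {
                __u32 now = bictcp_clock_us(sk);

                /* closely spaced ACKs extend the current train */
                if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
                        ca->last_ack = now;

                        threshold = ca->delay_min + hystart_ack_delay(sk);

                        /* without pacing, ACKs cannot have been delayed
                         * by the pacer, so halve the threshold
                         */
                        if (sk->sk_pacing_status == SK_PACING_NONE)
                                threshold >>= 1;

                        if ((__s32)(now - ca->round_start) > threshold) {
                                ca->found = 1;
                                tp->snd_ssthresh = tp->snd_cwnd;
                        }
                }
        }

        if (hystart_detect & HYSTART_DELAY) {
                /* track the minimum RTT over the round's first samples */
                if (ca->curr_rtt > delay)
                        ca->curr_rtt = delay;
                if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
                        ca->sample_cnt++;
                } else if (ca->curr_rtt > ca->delay_min +
                           HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
                        ca->found = 1;
                        tp->snd_ssthresh = tp->snd_cwnd;
                }
        }
}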
void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
                    const struct ack_sample *sample)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        /* ... */
        hystart_update(sk, delay);
}
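This per-ACK hook maintains delay_min, the flow's running path-delay floor, and feeds HyStart while still in slow start. A sketch after tcp_cubic.c's bictcp_acked(); hystart_low_window is the minimum cwnd (traditionally 16) before HyStart engages:

void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
                    const struct ack_sample *sample)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        __u32 delay;

        /* duplicate ACKs carry no RTT sample */
        if (sample->rtt_us < 0)
                return;

        /* discard samples taken right after fast recovery */
        if (ca->epoch_start &&
            (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
                return;

        delay = sample->rtt_us;
        if (delay == 0)
                delay = 1;

        /* first sample, or the link delay decreased */
        if (ca->delay_min == 0 || ca->delay_min > delay)
                ca->delay_min = delay;

        if (!ca->found && tcp_in_slow_start(tp) && hystart &&
            tp->snd_cwnd >= hystart_low_window)
                hystart_update(sk, delay);
}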
extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;

__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
{
        return tcp_reno_undo_cwnd(sk);
}
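The __ksym extern imports tcp_reno_undo_cwnd as a kernel function callable from the BPF program, so undoing a spurious cwnd reduction simply reuses Reno's logic (restore the larger of snd_cwnd and prior_cwnd) rather than reimplementing it.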