Lines matching refs: req (net/ipv4/tcp_minisocks.c)
in tcp_openreq_init_rwin():
  360  void tcp_openreq_init_rwin(struct request_sock *req,
  364  struct inet_request_sock *ireq = inet_rsk(req);
  375  req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
  379  (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
  380  req->rsk_window_clamp = full_space;
  382  rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
  391  &req->rsk_rcv_wnd,
  392  &req->rsk_window_clamp,
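The matches above cover the request-sock receive-window setup: the window clamp falls back to the route's RTAX_WINDOW metric, is bounded by the listener's full buffer space when the receive buffer is user-pinned, and the final window/clamp pair is written back through tcp_select_initial_window(). A minimal user-space sketch of the clamping step follows; compute_rwin_clamp and the rcvbuf_locked flag are illustrative stand-ins (the elided first half of the line-379 condition is presumably the SOCK_RCVBUF_LOCK userlock test), not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for dst_metric(dst, RTAX_WINDOW). */
static unsigned int route_window_metric(void)
{
	return 65535;
}

/* Sketch of the clamp logic around lines 375-380: prefer the listener's
 * window_clamp, fall back to the route metric, then bound it by the
 * full receive buffer space when the user pinned SO_RCVBUF. */
static unsigned int compute_rwin_clamp(unsigned int window_clamp,
				       unsigned int full_space,
				       bool rcvbuf_locked)
{
	unsigned int clamp = window_clamp ? window_clamp : route_window_metric();

	if (rcvbuf_locked && (clamp > full_space || clamp == 0))
		clamp = full_space;
	return clamp;
}

int main(void)
{
	/* A clamp larger than the locked buffer space gets cut down. */
	printf("%u\n", compute_rwin_clamp(100000, 87380, true));  /* 87380 */
	printf("%u\n", compute_rwin_clamp(0, 87380, false));      /* 65535 */
	return 0;
}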
in tcp_ecn_openreq_child():
  401  const struct request_sock *req)
  403  tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
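tcp_ecn_openreq_child() is a one-liner: the child socket inherits whatever ECN capability was negotiated on the request sock during the handshake. A self-contained sketch with simplified struct stand-ins (min_request_sock/min_tcp_sock are illustrative, not kernel types):

#include <stdbool.h>

#define TCP_ECN_OK 1	/* the flag value copied at line 403 */

/* Simplified stand-ins for inet_request_sock / tcp_sock. */
struct min_request_sock { bool ecn_ok; };
struct min_tcp_sock { unsigned int ecn_flags; };

/* Mirror of line 403: ECN is on for the child iff the SYN/SYN-ACK
 * exchange negotiated it on the request sock. */
static void ecn_openreq_child(struct min_tcp_sock *tp,
			      const struct min_request_sock *req)
{
	tp->ecn_flags = req->ecn_ok ? TCP_ECN_OK : 0;
}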
in smc_check_reset_syn_req():
  436  struct request_sock *req,
  443  ireq = inet_rsk(req);
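Only the signature and the inet_rsk() lookup of smc_check_reset_syn_req() match here; from the call site at line 474 below, its job is to reconcile the listener's SMC capability with what the peer actually echoed. A guess at the shape of that check (the syn_smc/smc_ok condition is an assumption, not shown in the matches):

#include <stdbool.h>

/* Simplified stand-ins; field names follow kernel naming conventions. */
struct min_tcp_sock { bool syn_smc; };
struct min_inet_request_sock { bool smc_ok; };

/* Assumed body: if the listener offered SMC but the client's SYN did
 * not carry the SMC option, clear the capability on the child. */
static void check_reset_syn_smc(const struct min_tcp_sock *oldtp,
				const struct min_inet_request_sock *ireq,
				struct min_tcp_sock *newtp)
{
	if (oldtp->syn_smc && !ireq->smc_ok)
		newtp->syn_smc = false;
}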
in tcp_create_openreq_child():
  457  struct request_sock *req,
  460  struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
  461  const struct inet_request_sock *ireq = inet_rsk(req);
  462  struct tcp_request_sock *treq = tcp_rsk(req);
  474  smc_check_reset_syn_req(oldtp, req, newtp);
  500  newtp->total_retrans = req->num_retrans;
  511  newtp->window_clamp = req->rsk_window_clamp;
  512  newtp->rcv_ssthresh = req->rsk_rcv_wnd;
  513  newtp->rcv_wnd = req->rsk_rcv_wnd;
  526  newtp->rx_opt.ts_recent = req->ts_recent;
  533  if (req->num_timeout) {
  541  if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
  546  newtp->rx_opt.mss_clamp = req->mss;
  547  tcp_ecn_openreq_child(newtp, req);
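tcp_create_openreq_child() clones the listener via inet_csk_clone_lock() and then seeds the child from the state accumulated in the request sock during SYN_RECV. A condensed sketch of that field hand-off, using flattened stand-in structs (in the kernel the destination fields live in tcp_sock and tcp_sock.rx_opt, the sources in request_sock/tcp_request_sock):

#include <stdbool.h>
#include <stdint.h>

#define TCP_ECN_OK 1

/* Flattened stand-ins; only fields appearing in the matches are kept. */
struct min_req {
	uint32_t rsk_window_clamp;
	uint32_t rsk_rcv_wnd;
	uint32_t ts_recent;
	uint16_t mss;
	uint8_t  num_retrans;
	bool     ecn_ok;
};

struct min_child {
	uint32_t window_clamp;
	uint32_t rcv_ssthresh;
	uint32_t rcv_wnd;
	uint32_t ts_recent;	/* rx_opt.ts_recent in the kernel */
	uint16_t mss_clamp;	/* rx_opt.mss_clamp in the kernel */
	uint32_t total_retrans;
	uint32_t ecn_flags;
};

/* Hand-off seen at lines 500-547: window sizing, timestamp state, MSS
 * and ECN negotiated during the handshake move onto the new socket. */
static void seed_child_from_req(struct min_child *newtp,
				const struct min_req *req)
{
	newtp->total_retrans = req->num_retrans;		/* 500 */
	newtp->window_clamp  = req->rsk_window_clamp;		/* 511 */
	newtp->rcv_ssthresh  = req->rsk_rcv_wnd;		/* 512 */
	newtp->rcv_wnd       = req->rsk_rcv_wnd;		/* 513 */
	newtp->ts_recent     = req->ts_recent;			/* 526 */
	newtp->mss_clamp     = req->mss;			/* 546 */
	newtp->ecn_flags     = req->ecn_ok ? TCP_ECN_OK : 0;	/* 547 */
}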
in tcp_check_req():
  572  struct request_sock *req,
  587  tmp_opt.ts_recent = req->ts_recent;
  589  tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
  594  tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
  600  if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
  628  &tcp_rsk(req)->last_oow_ack_time) &&
  630  !inet_rtx_syn_ack(sk, req)) {
  633  expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
  636  mod_timer_pending(&req->rsk_timer, expires);
  638  req->rsk_timer.expires = expires;
  702  tcp_rsk(req)->snt_isn + 1))
  713  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
  718  &tcp_rsk(req)->last_oow_ack_time))
  719  req->rsk_ops->send_ack(sk, skb, req);
  727  if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
  728  req->ts_recent = tmp_opt.rcv_tsval;
  730  if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
  760  if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
  761  TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
  762  inet_rsk(req)->acked = 1;
  773  child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
  774  req, &own_req);
  779  tcp_synack_rtt_meas(child, req);
  781  return inet_csk_complete_hashdance(sk, child, req, own_req);
  785  inet_rsk(req)->acked = 1;
  796  req->rsk_ops->send_reset(sk, skb);
  798  reqsk_fastopen_remove(sk, req, true);
  802  bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
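tcp_check_req() is the SYN_RECV fast path visible in the matches: it re-parses options against the request sock, detects a retransmitted SYN (seq == rcv_isn at line 600) and re-arms the SYN-ACK timer with exponential backoff, answers out-of-window segments with a bare ACK, updates ts_recent for PAWS, honors TCP_DEFER_ACCEPT, and finally promotes the request into a child via syn_recv_sock() and inet_csk_complete_hashdance(). Two of those checks are easy to show stand-alone; between() below mirrors the kernel helper, while in_window() is a simplified take on tcp_in_window(), and the timer math follows lines 633-638:

#include <stdbool.h>
#include <stdint.h>

#define HZ		 1000
#define TCP_TIMEOUT_INIT (1 * HZ)	/* initial RTO, 1s in mainline */
#define TCP_RTO_MAX	 (120 * HZ)

/* true iff seq2 <= seq1 <= seq3 under serial-number wraparound
 * (mirrors the kernel's between() helper). */
static bool between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

/* Simplified version of the line-713 test: the segment must lie inside
 * [rcv_nxt, rcv_nxt + rcv_wnd] or it only earns an ACK, not processing.
 * (The kernel's tcp_in_window() has extra edge cases for zero windows.) */
static bool in_window(uint32_t seq, uint32_t end_seq,
		      uint32_t rcv_nxt, uint32_t rcv_wnd)
{
	return between(seq, rcv_nxt, rcv_nxt + rcv_wnd) &&
	       between(end_seq, rcv_nxt, rcv_nxt + rcv_wnd);
}

/* Backoff used when a retransmitted SYN re-arms the SYN-ACK timer
 * (lines 633-638): doubles per timeout, capped at TCP_RTO_MAX. */
static unsigned long synack_expires(unsigned long jiffies_now,
				    unsigned int num_timeout)
{
	unsigned long delta = (unsigned long)TCP_TIMEOUT_INIT << num_timeout;

	return jiffies_now + (delta < TCP_RTO_MAX ? delta : TCP_RTO_MAX);
}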