
Searched refs: end_seq (results 1 – 17 of 17), sorted by relevance

/net/ipv4/
tcp_input.c
340 if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) in tcp_ecn_accept_cwr()
695 if (TCP_SKB_CB(skb)->end_seq - in tcp_rcv_rtt_measure_ts()
1002 u32 end_seq, struct tcp_sacktag_state *state) in tcp_dsack_seen() argument
1006 if (!before(start_seq, end_seq)) in tcp_dsack_seen()
1009 seq_len = end_seq - start_seq; in tcp_dsack_seen()
1015 else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq) in tcp_dsack_seen()
1225 u32 start_seq, u32 end_seq) in tcp_is_sackblock_valid() argument
1228 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) in tcp_is_sackblock_valid()
1245 if (after(end_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1252 if (!after(end_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
[all …]
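
Note: nearly every check in this file (tcp_dsack_seen(), tcp_is_sackblock_valid(), ...) compares sequence numbers through the wrap-safe before()/after() helpers from include/net/tcp.h rather than with plain < or >. A minimal userspace sketch of that arithmetic, with illustrative values (not kernel code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the before()/after() helpers these checks rely on. */
    static bool before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;   /* signed difference handles wraparound */
    }
    #define after(seq2, seq1) before(seq1, seq2)

    int main(void)
    {
        /* A SACK-style block that straddles the 32-bit wrap point. */
        uint32_t start_seq = 0xfffffff0u, end_seq = 0x00000010u;

        /* Same shape as the "!before(start_seq, end_seq)" rejection above. */
        printf("valid block: %d\n", before(start_seq, end_seq)); /* 1, despite the wrap */
        printf("seq_len: %u\n", end_seq - start_seq);            /* 32 */
        return 0;
    }
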
tcp_recovery.c
78 tp->rack.end_seq, scb->end_seq)) in tcp_rack_detect_loss()
118 void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, in tcp_rack_advance() argument
140 end_seq, tp->rack.end_seq)) { in tcp_rack_advance()
142 tp->rack.end_seq = end_seq; in tcp_rack_advance()
tcp_minisocks.c
26 static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) in tcp_in_window() argument
30 if (after(end_seq, s_win) && before(seq, e_win)) in tcp_in_window()
32 return seq == e_win && seq == end_seq; in tcp_in_window()
110 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, in tcp_timewait_state_process()
124 !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) || in tcp_timewait_state_process()
125 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { in tcp_timewait_state_process()
134 TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) in tcp_timewait_state_process()
139 tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_timewait_state_process()
168 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) { in tcp_timewait_state_process()
728 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, in tcp_check_req()
[all …]
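
Note: the tcp_in_window() hit above shows the whole helper, so its logic can be restated directly: accept a segment whose range [seq, end_seq) overlaps the window [s_win, e_win), plus the corner case of a zero-length segment sitting exactly on the right window edge. tcp_timewait_state_process() and tcp_check_req() feed it TCP_SKB_CB(skb)->seq/end_seq. A standalone userspace restatement (types simplified, not the kernel source):

    #include <stdbool.h>
    #include <stdint.h>

    static bool before(uint32_t seq1, uint32_t seq2) { return (int32_t)(seq1 - seq2) < 0; }
    #define after(seq2, seq1) before(seq1, seq2)

    /* Accept if [seq, end_seq) overlaps the window [s_win, e_win), or if the
     * segment is zero length and sits exactly on the right window edge. */
    static bool tcp_in_window(uint32_t seq, uint32_t end_seq, uint32_t s_win, uint32_t e_win)
    {
        if (after(end_seq, s_win) && before(seq, e_win))
            return true;
        return seq == e_win && seq == end_seq;
    }
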
tcp_output.c
71 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); in tcp_event_new_data_sent()
400 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
671 *ptr++ = htonl(sp[this_sack].end_seq); in tcp_options_write()
1383 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in __tcp_transmit_skb()
1433 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); in tcp_queue_skb()
1579 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1580 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1606 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1940 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
2108 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test() local
[all …]
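
Note: the tcp_fragment() lines above show the sequence bookkeeping of a split: the new buff keeps the original right edge while the old skb is trimmed back to where buff begins, so the two halves tile the original range with no gap or overlap. A small sketch of that invariant; struct seg and split_range() are illustrative names, not the kernel's layout:

    #include <stdint.h>

    struct seg { uint32_t seq, end_seq; };   /* illustrative, not the kernel struct */

    /* Split [skb->seq, skb->end_seq) at split_seq into two adjacent ranges,
     * as the tcp_fragment() lines above do for the skb/buff pair. */
    static void split_range(struct seg *skb, struct seg *buff, uint32_t split_seq)
    {
        buff->seq     = split_seq;        /* second half starts at the split point */
        buff->end_seq = skb->end_seq;     /* ...and keeps the original right edge  */
        skb->end_seq  = buff->seq;        /* first half now ends where buff begins */
    }
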
tcp_illinois.c
49 u32 end_seq; /* right edge of current RTT */ member
62 ca->end_seq = tp->snd_nxt; in rtt_reset()
265 if (after(ack, ca->end_seq)) in tcp_illinois_cong_avoid()
tcp_rate.c
93 scb->end_seq, rs->last_end_seq)) { in tcp_rate_skb_delivered()
99 rs->last_end_seq = scb->end_seq; in tcp_rate_skb_delivered()
tcp_cubic.c
102 u32 end_seq; /* end_seq of the round */ member
124 ca->end_seq = tp->snd_nxt; in bictcp_hystart_reset()
392 if (after(tp->snd_una, ca->end_seq)) in hystart_update()
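
Note: tcp_illinois.c and tcp_cubic.c above use end_seq the same way: at the start of an RTT round they snapshot tp->snd_nxt, and the round is over once the cumulative ACK point passes that snapshot. A minimal sketch of that pattern (struct and function names here are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    static bool after(uint32_t seq2, uint32_t seq1) { return (int32_t)(seq1 - seq2) < 0; }

    struct round { uint32_t end_seq; };   /* illustrative */

    static void round_start(struct round *r, uint32_t snd_nxt)
    {
        r->end_seq = snd_nxt;             /* right edge of what is in flight now */
    }

    static bool round_done(const struct round *r, uint32_t ack)
    {
        return after(ack, r->end_seq);    /* the cumulative ACK passed the snapshot */
    }
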
tcp_fastopen.c
173 if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt) in tcp_fastopen_add_skb()
196 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_fastopen_add_skb()
352 bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1; in tcp_try_fastopen()
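
Note: the tcp_try_fastopen() hit relies on the fact that a SYN consumes exactly one sequence number, so a bare SYN spans [seq, seq + 1) and any larger end_seq means the SYN carried Fast Open payload. As a one-line restatement (the helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* A bare SYN spans [seq, seq + 1): the flag itself consumes one sequence
     * number, so anything beyond seq + 1 is Fast Open payload. */
    static bool syn_carries_data(uint32_t seq, uint32_t end_seq)
    {
        return end_seq != seq + 1;
    }
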
tcp.c
663 tcb->seq = tcb->end_seq = tp->write_seq; in tcp_skb_entail()
935 if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { in tcp_remove_empty_skb()
1035 TCP_SKB_CB(skb)->end_seq += copy; in tcp_build_frag()
1423 TCP_SKB_CB(skb)->end_seq += copy; in tcp_sendmsg_locked()
1629 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), in tcp_cleanup_rbuf()
1631 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); in tcp_cleanup_rbuf()
2882 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; in __tcp_close()
3176 tp->duplicate_sack[0].end_seq = 0; in tcp_disconnect()
tcp_ipv4.c
1810 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq || in tcp_add_backlog()
1837 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_add_backlog()
1917 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + in tcp_v4_fill_cb()
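
Note: the truncated tcp_v4_fill_cb() line computes end_seq as the first sequence number after the segment, with SYN and FIN each counting as one unit; the elided term is, on my reading, the segment's payload length (tcp_v6_fill_cb() in net/ipv6 does the same). That invariant is what lets tcp_add_backlog() coalesce two segments whenever the tail's end_seq equals the next segment's seq. A hedged sketch, with payload_len standing in for the elided term:

    #include <stdint.h>

    /* end_seq is the first sequence number after the segment; SYN and FIN each
     * count as one unit. payload_len is an assumption standing in for the term
     * elided in the truncated snippet above. */
    static uint32_t segment_end_seq(uint32_t seq, int syn, int fin, uint32_t payload_len)
    {
        return seq + syn + fin + payload_len;
    }

    /* Two segments can be coalesced, as in tcp_add_backlog() above, exactly
     * when the first one's end_seq equals the second one's seq. */
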
/net/netfilter/
nf_conntrack_seqadj.c
94 if (after(ntohl(sack->end_seq) - seq->offset_before, in nf_ct_sack_block_adjust()
96 new_end_seq = htonl(ntohl(sack->end_seq) - in nf_ct_sack_block_adjust()
99 new_end_seq = htonl(ntohl(sack->end_seq) - in nf_ct_sack_block_adjust()
104 ntohl(sack->end_seq), ntohl(new_end_seq)); in nf_ct_sack_block_adjust()
109 sack->end_seq, new_end_seq, false); in nf_ct_sack_block_adjust()
111 sack->end_seq = new_end_seq; in nf_ct_sack_block_adjust()
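
Note: the nf_ct_sack_block_adjust() fragment adjusts reflected SACK edges after NAT has changed payload sizes: an edge is shifted by offset_before or offset_after depending on whether it lies before or past the point where the size change happened. A rough illustration of that shape, following the snippet rather than the exact conntrack logic (correction_pos and the helper name are assumptions):

    #include <stdbool.h>
    #include <stdint.h>

    static bool after(uint32_t seq2, uint32_t seq1) { return (int32_t)(seq1 - seq2) < 0; }

    /* Shift one SACK edge by the accumulated NAT offset: edges past the point
     * where the payload size changed use the newer offset. Illustration only. */
    static uint32_t adjust_sack_edge(uint32_t edge, uint32_t correction_pos,
                                     uint32_t offset_before, uint32_t offset_after)
    {
        if (after(edge - offset_before, correction_pos))
            return edge - offset_after;
        return edge - offset_before;
    }
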
/net/tls/
tls_device.c
177 if (info && !before(acked_seq, info->end_seq)) in tls_icsk_clean_acked()
181 if (before(acked_seq, info->end_seq)) in tls_icsk_clean_acked()
295 record->end_seq = tp->write_seq + record->len; in tls_push_record()
634 before(seq, info->end_seq - info->len)) { in tls_get_record()
657 last->end_seq)) in tls_get_record()
666 if (before(seq, info->end_seq)) { in tls_get_record()
668 after(info->end_seq, in tls_get_record()
669 context->retransmit_hint->end_seq)) { in tls_get_record()
1159 start_marker_record->end_seq = tcp_sk(sk)->write_seq; in tls_set_device_offload()
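
Note: taken together, the tls_device.c hits imply that each offloaded record occupies [end_seq - len, end_seq) in TCP sequence space: tls_push_record() sets end_seq from write_seq plus the record length, tls_get_record() searches for the record covering a given sequence number, and tls_icsk_clean_acked() frees records once the ACK point reaches end_seq. A sketch of those two predicates (struct record is an illustrative stand-in for the record bookkeeping, not the kernel layout):

    #include <stdbool.h>
    #include <stdint.h>

    static bool before(uint32_t seq1, uint32_t seq2) { return (int32_t)(seq1 - seq2) < 0; }

    struct record { uint32_t end_seq; uint32_t len; };   /* illustrative */

    /* Does this record cover TCP sequence number seq, i.e. seq in [end_seq - len, end_seq)? */
    static bool record_covers(const struct record *r, uint32_t seq)
    {
        return !before(seq, r->end_seq - r->len) && before(seq, r->end_seq);
    }

    /* Has the peer acknowledged the whole record? (tls_icsk_clean_acked() shape) */
    static bool record_fully_acked(const struct record *r, uint32_t acked_seq)
    {
        return !before(acked_seq, r->end_seq);
    }
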
/net/mptcp/
protocol.c
41 u64 end_seq; member
157 to->len, MPTCP_SKB_CB(from)->end_seq); in mptcp_try_coalesce()
158 MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; in mptcp_try_coalesce()
174 if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq) in mptcp_ooo_try_coalesce()
226 u64 seq, end_seq, max_seq; in mptcp_data_queue_ofo() local
230 end_seq = MPTCP_SKB_CB(skb)->end_seq; in mptcp_data_queue_ofo()
235 if (after64(end_seq, max_seq)) { in mptcp_data_queue_ofo()
239 (unsigned long long)end_seq - (unsigned long)max_seq, in mptcp_data_queue_ofo()
264 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { in mptcp_data_queue_ofo()
280 if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) { in mptcp_data_queue_ofo()
[all …]
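
Note: MPTCP data sequence numbers are 64-bit, so the code above uses before64()/after64() rather than the 32-bit before()/after(); it is the same signed-difference trick widened to 64 bits. A userspace sketch:

    #include <stdbool.h>
    #include <stdint.h>

    static bool before64(uint64_t seq1, uint64_t seq2)
    {
        return (int64_t)(seq1 - seq2) < 0;   /* same trick as before(), widened to 64 bits */
    }
    #define after64(seq2, seq1) before64(seq1, seq2)
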
options.c
407 subflow->snd_isn = TCP_SKB_CB(skb)->end_seq; in mptcp_syn_options()
934 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq && in check_fully_established()
1185 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { in mptcp_incoming_options()
subflow.c
981 TCP_SKB_CB(skb)->end_seq, in get_mapping_status()
1112 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq)) in mptcp_subflow_discard_data()
/net/ipv6/
tcp_ipv6.c
1544 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && in tcp_v6_do_rcv()
1580 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + in tcp_v6_fill_cb()
/net/sched/
sch_cake.c
1024 u32 end_a = get_unaligned_be32(&sack_a->end_seq); in cake_tcph_sack_compare()
1036 u32 end_b = get_unaligned_be32(&sack_tmp->end_seq); in cake_tcph_sack_compare()
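
Note: sch_cake reads SACK edges straight out of the TCP option bytes, which are big-endian and not necessarily aligned, hence get_unaligned_be32(). A portable userspace equivalent of that load (the kernel helper itself is used as shown above):

    #include <stdint.h>
    #include <string.h>

    /* Read a 32-bit big-endian value at arbitrary alignment, as the kernel's
     * get_unaligned_be32() does for the SACK edges above. */
    static uint32_t load_be32(const void *p)
    {
        uint8_t b[4];
        memcpy(b, p, 4);
        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
    }
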