Searched refs:ack (Results 1 – 25 of 63) sorted by relevance

/net/rxrpc/
conn_event.c
88 struct rxrpc_ackpacket ack; in rxrpc_conn_retransmit_call() member
100 &pkt.ack, sizeof(pkt.ack)) < 0) in rxrpc_conn_retransmit_call()
102 if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE) in rxrpc_conn_retransmit_call()
154 pkt.ack.bufferSpace = 0; in rxrpc_conn_retransmit_call()
155 pkt.ack.maxSkew = htons(skb ? skb->priority : 0); in rxrpc_conn_retransmit_call()
156 pkt.ack.firstPacket = htonl(chan->last_seq + 1); in rxrpc_conn_retransmit_call()
157 pkt.ack.previousPacket = htonl(chan->last_seq); in rxrpc_conn_retransmit_call()
158 pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0); in rxrpc_conn_retransmit_call()
159 pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE; in rxrpc_conn_retransmit_call()
160 pkt.ack.nAcks = 0; in rxrpc_conn_retransmit_call()
[all …]
call_event.c
88 txb->ack.bufferSpace = 0; in rxrpc_send_ACK()
89 txb->ack.maxSkew = 0; in rxrpc_send_ACK()
90 txb->ack.firstPacket = 0; in rxrpc_send_ACK()
91 txb->ack.previousPacket = 0; in rxrpc_send_ACK()
92 txb->ack.serial = htonl(serial); in rxrpc_send_ACK()
93 txb->ack.reason = ack_reason; in rxrpc_send_ACK()
94 txb->ack.nAcks = 0; in rxrpc_send_ACK()
114 struct rxrpc_ackpacket *ack = NULL; in rxrpc_resend() local
144 ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header); in rxrpc_resend()
149 if (ack->acks[i] & 1) in rxrpc_resend()
[all …]
output.c
101 txb->ack.firstPacket = htonl(window); in rxrpc_fill_out_ack()
102 txb->ack.nAcks = wtop - window; in rxrpc_fill_out_ack()
106 to = min_t(unsigned int, txb->ack.nAcks, RXRPC_SACK_SIZE); in rxrpc_fill_out_ack()
108 if (sack + txb->ack.nAcks <= RXRPC_SACK_SIZE) { in rxrpc_fill_out_ack()
109 memcpy(txb->acks, call->ackr_sack_table + sack, txb->ack.nAcks); in rxrpc_fill_out_ack()
119 } else if (txb->ack.reason == RXRPC_ACK_DELAY) { in rxrpc_fill_out_ack()
120 txb->ack.reason = RXRPC_ACK_IDLE; in rxrpc_fill_out_ack()
138 return txb->ack.nAcks + 3 + sizeof(trailer); in rxrpc_fill_out_ack()
208 if (txb->ack.reason == RXRPC_ACK_PING) in rxrpc_send_ack_packet()
216 iov[0].iov_len = sizeof(txb->wire) + sizeof(txb->ack) + n; in rxrpc_send_ack_packet()
[all …]
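
The output.c hits above copy the soft-ACK table out of a ring buffer, splitting the copy in two when the window wraps past the end of the table (lines 108–109). A minimal standalone sketch of that wrap-aware copy, with an illustrative ring size rather than rxrpc's RXRPC_SACK_SIZE:

#include <stdio.h>
#include <string.h>

#define SACK_TABLE_SIZE 256   /* illustrative ring size */

/* Copy n entries starting at ring index 'start' into 'out', splitting the
 * memcpy when the region wraps past the end of the ring (the pattern seen
 * in rxrpc_fill_out_ack). */
static void copy_from_ring(unsigned char *out, const unsigned char *ring,
                           unsigned int start, unsigned int n)
{
    if (start + n <= SACK_TABLE_SIZE) {
        memcpy(out, ring + start, n);
    } else {
        unsigned int first = SACK_TABLE_SIZE - start;
        memcpy(out, ring + start, first);
        memcpy(out + first, ring, n - first);
    }
}

int main(void)
{
    unsigned char ring[SACK_TABLE_SIZE], out[8];
    for (int i = 0; i < SACK_TABLE_SIZE; i++)
        ring[i] = (unsigned char)i;
    copy_from_ring(out, ring, 253, 6);   /* wraps: 253,254,255,0,1,2 */
    for (int i = 0; i < 6; i++)
        printf("%u ", out[i]);
    printf("\n");
    return 0;
}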
input.c
737 struct rxrpc_ackpacket ack; in rxrpc_input_check_prev_ack() local
741 u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(ack); in rxrpc_input_check_prev_ack()
859 struct rxrpc_ackpacket ack; in rxrpc_input_ack() local
869 if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0) in rxrpc_input_ack()
871 offset += sizeof(ack); in rxrpc_input_ack()
874 acked_serial = ntohl(ack.serial); in rxrpc_input_ack()
875 first_soft_ack = ntohl(ack.firstPacket); in rxrpc_input_ack()
876 prev_pkt = ntohl(ack.previousPacket); in rxrpc_input_ack()
878 nr_acks = ack.nAcks; in rxrpc_input_ack()
881 summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ? in rxrpc_input_ack()
[all …]
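
input.c copies the ACK header out of the skb with skb_copy_bits() and byte-swaps each field before use (lines 869–878). A rough userspace illustration of that parsing step; the struct layout below is reconstructed from the fields visible in the snippets, not taken from the rxrpc headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Assumed wire layout, reconstructed from the fields referenced above;
 * the real definition lives in the rxrpc protocol headers. */
struct ack_hdr {
    uint16_t bufferSpace;
    uint16_t maxSkew;
    uint32_t firstPacket;
    uint32_t previousPacket;
    uint32_t serial;
    uint8_t  reason;
    uint8_t  nAcks;
} __attribute__((packed));

int main(void)
{
    /* Fake wire bytes: firstPacket=5, previousPacket=4, serial=99, nAcks=3. */
    struct ack_hdr wire = {
        .firstPacket    = htonl(5),
        .previousPacket = htonl(4),
        .serial         = htonl(99),
        .reason         = 6,
        .nAcks          = 3,
    };
    unsigned char buf[sizeof(wire)];
    memcpy(buf, &wire, sizeof(wire));     /* stands in for skb_copy_bits() */

    struct ack_hdr ack;
    memcpy(&ack, buf, sizeof(ack));
    printf("first=%u prev=%u serial=%u reason=%u nAcks=%u\n",
           ntohl(ack.firstPacket), ntohl(ack.previousPacket),
           ntohl(ack.serial), ack.reason, ack.nAcks);
    return 0;
}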
/net/netfilter/
nf_conntrack_proto_tcp.c
278 else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET); in get_conntrack_index()
280 else if (tcph->ack) return TCP_ACK_SET; in get_conntrack_index()
518 __u32 seq, ack, sack, end, win, swin; in tcp_in_window() local
527 ack = sack = ntohl(tcph->ack_seq); in tcp_in_window()
536 receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1); in tcp_in_window()
537 ack -= receiver_offset; in tcp_in_window()
548 if (!tcph->ack) in tcp_in_window()
592 if (dir == IP_CT_DIR_REPLY && !tcph->ack) in tcp_in_window()
596 if (!(tcph->ack)) { in tcp_in_window()
600 ack = sack = receiver->td_end; in tcp_in_window()
[all …]
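
nf_conntrack_proto_tcp.c line 278 classifies a segment purely by its flag bits so the state machine can index its transition table. A small userspace sketch of that decision ladder; the struct and enum are illustrative stand-ins for struct tcphdr and the TCP_*_SET values, and the RST/FIN placement is assumed (only the SYN/ACK arms are visible above):

#include <stdio.h>

/* stand-in for the flag bits of struct tcphdr */
struct tcp_flags { unsigned fin:1, syn:1, rst:1, ack:1; };

enum seg_kind { SEG_NONE, SEG_SYN, SEG_SYNACK, SEG_FIN, SEG_ACK, SEG_RST };

/* Decision ladder in the spirit of get_conntrack_index(): RST first,
 * then SYN vs. SYN+ACK, then FIN, then a bare ACK, else "none". */
static enum seg_kind classify(const struct tcp_flags *th)
{
    if (th->rst) return SEG_RST;
    if (th->syn) return th->ack ? SEG_SYNACK : SEG_SYN;
    if (th->fin) return SEG_FIN;
    if (th->ack) return SEG_ACK;
    return SEG_NONE;
}

int main(void)
{
    struct tcp_flags synack = { .syn = 1, .ack = 1 };
    struct tcp_flags bare_ack = { .ack = 1 };
    printf("%d %d\n", classify(&synack), classify(&bare_ack)); /* 2 4 */
    return 0;
}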
nfnetlink.c
455 goto ack; in nfnetlink_rcv_batch()
469 goto ack; in nfnetlink_rcv_batch()
477 goto ack; in nfnetlink_rcv_batch()
483 goto ack; in nfnetlink_rcv_batch()
488 goto ack; in nfnetlink_rcv_batch()
509 goto ack; in nfnetlink_rcv_batch()
517 goto ack; in nfnetlink_rcv_batch()
530 ack: in nfnetlink_rcv_batch()
nf_synproxy_core.c
678 if (!th->syn || th->ack || in ipv4_synproxy_hook()
694 if (!th->syn && th->ack && in ipv4_synproxy_hook()
717 if (!th->syn || !th->ack) in ipv4_synproxy_hook()
1101 if (!th->syn || th->ack || in ipv6_synproxy_hook()
1117 if (!th->syn && th->ack && in ipv6_synproxy_hook()
1140 if (!th->syn || !th->ack) in ipv6_synproxy_hook()
/net/ipv4/
tcp_vegas.c
165 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_vegas_cong_avoid() argument
171 tcp_reno_cong_avoid(sk, ack, acked); in tcp_vegas_cong_avoid()
175 if (after(ack, vegas->beg_snd_nxt)) { in tcp_vegas_cong_avoid()
196 tcp_reno_cong_avoid(sk, ack, acked); in tcp_vegas_cong_avoid()
tcp_veno.c
119 static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_veno_cong_avoid() argument
125 tcp_reno_cong_avoid(sk, ack, acked); in tcp_veno_cong_avoid()
138 tcp_reno_cong_avoid(sk, ack, acked); in tcp_veno_cong_avoid()
tcp_hybla.c
90 static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked) in hybla_cong_avoid() argument
107 tcp_reno_cong_avoid(sk, ack, acked); in hybla_cong_avoid()
tcp_yeah.c
58 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_yeah_cong_avoid() argument
103 if (after(ack, yeah->vegas.beg_snd_nxt)) { in tcp_yeah_cong_avoid()
tcp_input.c
3204 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_cong_avoid() argument
3208 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); in tcp_cong_avoid()
3552 static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked, in tcp_cong_control() argument
3567 tcp_cong_avoid(sk, ack, acked_sacked); in tcp_cong_control()
3576 const u32 ack, const u32 ack_seq, in tcp_may_update_window() argument
3579 return after(ack, tp->snd_una) || in tcp_may_update_window()
3585 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) in tcp_snd_una_update() argument
3587 u32 delta = ack - tp->snd_una; in tcp_snd_una_update()
3591 tp->snd_una = ack; in tcp_snd_una_update()
3609 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, in tcp_ack_update_window() argument
[all …]
tcp_cdg.c
262 static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_cdg_cong_avoid() argument
272 if (after(ack, ca->rtt_seq) && ca->rtt.v64) { in tcp_cdg_cong_avoid()
293 tcp_reno_cong_avoid(sk, ack, acked); in tcp_cdg_cong_avoid()
tcp_lp.c
121 static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_lp_cong_avoid() argument
126 tcp_reno_cong_avoid(sk, ack, acked); in tcp_lp_cong_avoid()
tcp_illinois.c
260 static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_illinois_cong_avoid() argument
265 if (after(ack, ca->end_seq)) in tcp_illinois_cong_avoid()
tcp_scalable.c
18 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_scalable_cong_avoid() argument
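
Every /net/ipv4/ hit above is a congestion-control algorithm implementing the same cong_avoid(sk, ack, acked) hook, and most of them fall back to tcp_reno_cong_avoid() outside their own measurement phase. A userspace caricature of that ops-table pattern; the struct and helpers below are illustrative, not the kernel's tcp_congestion_ops:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's congestion-control state and ops. */
struct cc_state { uint32_t cwnd, ssthresh, beg_snd_nxt; };

struct cc_ops {
    const char *name;
    void (*cong_avoid)(struct cc_state *st, uint32_t ack, uint32_t acked);
};

/* Wrap-safe "a is after b", like the kernel's after() macro. */
static int seq_after(uint32_t a, uint32_t b) { return (int32_t)(a - b) > 0; }

/* Reno-like baseline: slow start below ssthresh, otherwise grow slowly. */
static void reno_cong_avoid(struct cc_state *st, uint32_t ack, uint32_t acked)
{
    (void)ack;
    st->cwnd += (st->cwnd < st->ssthresh) ? acked : 1;
}

/* Vegas-like module: delegate to Reno until the marked ACK comes back, then
 * run its own per-RTT logic -- the shape visible in tcp_vegas_cong_avoid()
 * and tcp_veno_cong_avoid() above. */
static void vegasish_cong_avoid(struct cc_state *st, uint32_t ack, uint32_t acked)
{
    if (!seq_after(ack, st->beg_snd_nxt)) {
        reno_cong_avoid(st, ack, acked);
        return;
    }
    st->beg_snd_nxt = ack;   /* start measuring the next window */
    /* ...delay-based cwnd adjustment would go here... */
}

static const struct cc_ops vegasish = { "vegas-ish", vegasish_cong_avoid };

int main(void)
{
    struct cc_state st = { .cwnd = 10, .ssthresh = 64, .beg_snd_nxt = 1000 };
    vegasish.cong_avoid(&st, 900, 2);    /* before the marker: Reno path */
    vegasish.cong_avoid(&st, 1500, 2);   /* past the marker: per-RTT path */
    printf("cwnd=%u beg_snd_nxt=%u\n", st.cwnd, st.beg_snd_nxt);
    return 0;
}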
/net/sctp/
associola.c
1665 struct sctp_chunk *ack; in sctp_assoc_free_asconf_acks() local
1668 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, in sctp_assoc_free_asconf_acks()
1670 list_del_init(&ack->transmitted_list); in sctp_assoc_free_asconf_acks()
1671 sctp_chunk_free(ack); in sctp_assoc_free_asconf_acks()
1678 struct sctp_chunk *ack; in sctp_assoc_clean_asconf_ack_cache() local
1684 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, in sctp_assoc_clean_asconf_ack_cache()
1686 if (ack->subh.addip_hdr->serial == in sctp_assoc_clean_asconf_ack_cache()
1690 list_del_init(&ack->transmitted_list); in sctp_assoc_clean_asconf_ack_cache()
1691 sctp_chunk_free(ack); in sctp_assoc_clean_asconf_ack_cache()
1700 struct sctp_chunk *ack; in sctp_assoc_lookup_asconf_ack() local
[all …]
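
The associola.c hits walk asconf_ack_list with list_for_each_entry_safe() because each pass may unlink and free the current chunk. A stripped-down sketch of why the next pointer has to be saved before freeing, using a plain singly linked list rather than the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct chunk {
    int serial;
    struct chunk *next;
};

/* Free every chunk with serial <= 'serial'.  The next pointer is saved
 * before freeing the current node -- the same reason the kernel walk
 * above uses list_for_each_entry_safe() with a tmp cursor. */
static struct chunk *free_acked(struct chunk *head, int serial)
{
    struct chunk **pp = &head;
    struct chunk *cur, *tmp;

    for (cur = head; cur; cur = tmp) {
        tmp = cur->next;                 /* remember next before freeing */
        if (cur->serial <= serial) {
            *pp = tmp;                   /* unlink */
            free(cur);
        } else {
            pp = &cur->next;
        }
    }
    return head;
}

int main(void)
{
    struct chunk *head = NULL;           /* build 3 -> 2 -> 1 */
    for (int s = 1; s <= 3; s++) {
        struct chunk *c = malloc(sizeof(*c));
        c->serial = s; c->next = head; head = c;
    }
    head = free_acked(head, 2);          /* drops serials 1 and 2 */
    for (struct chunk *c = head; c; c = c->next)
        printf("kept serial %d\n", c->serial);
    return 0;
}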
/net/ipv4/netfilter/
ipt_SYNPROXY.c
31 if (th->syn && !(th->ack || th->fin || th->rst)) { in synproxy_tg4()
51 } else if (th->ack && !(th->fin || th->rst || th->syn)) { in synproxy_tg4()
nf_reject_ipv4.c
205 if (oth->ack) { in nf_reject_ip_tcphdr_put()
211 tcph->ack = 1; in nf_reject_ip_tcphdr_put()
/net/ipv6/netfilter/
ip6t_SYNPROXY.c
31 if (th->syn && !(th->ack || th->fin || th->rst)) { in synproxy_tg6()
52 } else if (th->ack && !(th->fin || th->rst || th->syn)) { in synproxy_tg6()
/net/tipc/
link.c
401 u16 ack = snd_l->snd_nxt - 1; in tipc_link_remove_bc_peer() local
406 tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL); in tipc_link_remove_bc_peer()
1043 u16 ack = l->rcv_nxt - 1; in tipc_link_xmit() local
1086 msg_set_ack(hdr, ack); in tipc_link_xmit()
1176 u16 ack = l->rcv_nxt - 1; in tipc_link_advance_backlog() local
1201 msg_set_ack(hdr, ack); in tipc_link_advance_backlog()
1484 gacks[n].ack = htons(expect - 1); in __tipc_build_gap_ack_blks()
1500 gacks[n].ack = htons(seqno); in __tipc_build_gap_ack_blks()
1569 u16 ack = l->rcv_nxt - 1; in tipc_link_advance_transmq() local
1614 end = ntohs(last_ga->gacks[si].ack); in tipc_link_advance_transmq()
[all …]
group.c
395 void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack) in tipc_group_update_bc_members() argument
412 if (ack) in tipc_group_update_bc_members()
500 bool ack, deliver, update, leave = false; in tipc_group_filter_msg() local
533 ack = false; in tipc_group_filter_msg()
549 ack = msg_grp_bc_ack_req(hdr); in tipc_group_filter_msg()
570 if (ack) in tipc_group_filter_msg()
group.h
68 void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack);
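
The link.c hits build Gap ACK blocks: each block carries the highest contiguous sequence number received (.ack) plus the size of the hole that follows, so one control message can acknowledge several disjoint ranges. A generic sketch of building such blocks from a sorted out-of-order queue; the struct and field names are illustrative, not TIPC's on-wire format:

#include <stdio.h>
#include <stdint.h>

#define MAX_GAP_BLKS 8                        /* illustrative cap */

/* "Everything up to .ack arrived, then .gap packets are missing." */
struct gap_blk { uint16_t ack; uint16_t gap; };

/* rcv_nxt: next expected in-order seqno (everything below it arrived).
 * deferred[]: sorted seqnos sitting in the out-of-order queue. */
static int build_gap_blks(struct gap_blk *blks, uint16_t rcv_nxt,
                          const uint16_t *deferred, int n_deferred)
{
    int n = 0;
    uint16_t expect = rcv_nxt;

    for (int i = 0; i < n_deferred && n < MAX_GAP_BLKS; i++) {
        if (deferred[i] != expect) {          /* a hole starts here */
            blks[n].ack = expect - 1;         /* last contiguous seqno */
            blks[n].gap = deferred[i] - expect;
            n++;
        }
        expect = deferred[i] + 1;
    }
    if (n < MAX_GAP_BLKS) {                   /* close the final run */
        blks[n].ack = expect - 1;
        blks[n].gap = 0;
        n++;
    }
    return n;
}

int main(void)
{
    /* received up to 11 in order; 14,15 and 18 out of order; 12,13,16,17 lost */
    uint16_t deferred[] = { 14, 15, 18 };
    struct gap_blk blks[MAX_GAP_BLKS];
    int n = build_gap_blks(blks, 12, deferred, 3);
    for (int i = 0; i < n; i++)
        printf("ack=%u gap=%u\n", blks[i].ack, blks[i].gap);
    return 0;
}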
/net/rds/
tcp_send.c
172 static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack) in rds_tcp_is_acked() argument
176 return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0; in rds_tcp_is_acked()
send.c
485 static inline int rds_send_is_acked(struct rds_message *rm, u64 ack, in rds_send_is_acked() argument
489 return is_acked(rm, ack); in rds_send_is_acked()
490 return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack; in rds_send_is_acked()
685 void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack, in rds_send_path_drop_acked() argument
695 if (!rds_send_is_acked(rm, ack, is_acked)) in rds_send_path_drop_acked()
713 void rds_send_drop_acked(struct rds_connection *conn, u64 ack, in rds_send_drop_acked() argument
717 rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked); in rds_send_drop_acked()
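
rds_tcp_is_acked() at tcp_send.c:176 is the classic wrap-safe comparison: subtract the two unsigned sequence numbers and test the sign of the result as a signed value, so acknowledgements keep working across a 2^32 wrap. A tiny standalone demonstration (the helper name is mine, not RDS's):

#include <stdio.h>
#include <stdint.h>

/* True if seq a comes strictly before seq b, modulo 2^32 -- the
 * (__s32)((u32)a - (u32)b) < 0 idiom from rds_tcp_is_acked() and
 * TCP's before()/after() helpers. */
static int seq_before(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;
}

int main(void)
{
    /* a message sequence just before the wrap vs. an ack just after it */
    uint32_t m_seq = 0xfffffff0u;
    uint32_t ack   = 0x00000010u;
    printf("acked: %d\n", seq_before(m_seq, ack));   /* prints 1 */
    /* a naive unsigned compare gets this wrong: m_seq > ack numerically */
    printf("naive: %d\n", m_seq < ack);              /* prints 0 */
    return 0;
}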
