Lines Matching +full:serial +full:- +full:state

1 // SPDX-License-Identifier: GPL-2.0-or-later
24 #include "ar-internal.h"
29 if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) { in rxrpc_proto_abort()
30 set_bit(RXRPC_CALL_EV_ABORT, &call->events); in rxrpc_proto_abort()
36 * Do TCP-style congestion management [RFC 5681].
44 unsigned int cumulative_acks = call->cong_cumul_acks; in rxrpc_congestion_management()
45 unsigned int cwnd = call->cong_cwnd; in rxrpc_congestion_management()
48 summary->flight_size = in rxrpc_congestion_management()
49 (call->tx_top - call->tx_hard_ack) - summary->nr_acks; in rxrpc_congestion_management()
51 if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) { in rxrpc_congestion_management()
52 summary->retrans_timeo = true; in rxrpc_congestion_management()
53 call->cong_ssthresh = max_t(unsigned int, in rxrpc_congestion_management()
54 summary->flight_size / 2, 2); in rxrpc_congestion_management()
56 if (cwnd >= call->cong_ssthresh && in rxrpc_congestion_management()
57 call->cong_mode == RXRPC_CALL_SLOW_START) { in rxrpc_congestion_management()
58 call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; in rxrpc_congestion_management()
59 call->cong_tstamp = skb->tstamp; in rxrpc_congestion_management()
64 cumulative_acks += summary->nr_new_acks; in rxrpc_congestion_management()
65 cumulative_acks += summary->nr_rot_new_acks; in rxrpc_congestion_management()
69 summary->mode = call->cong_mode; in rxrpc_congestion_management()
70 summary->cwnd = call->cong_cwnd; in rxrpc_congestion_management()
71 summary->ssthresh = call->cong_ssthresh; in rxrpc_congestion_management()
72 summary->cumulative_acks = cumulative_acks; in rxrpc_congestion_management()
73 summary->dup_acks = call->cong_dup_acks; in rxrpc_congestion_management()
75 switch (call->cong_mode) { in rxrpc_congestion_management()
77 if (summary->nr_nacks > 0) in rxrpc_congestion_management()
79 if (summary->cumulative_acks > 0) in rxrpc_congestion_management()
81 if (cwnd >= call->cong_ssthresh) { in rxrpc_congestion_management()
82 call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; in rxrpc_congestion_management()
83 call->cong_tstamp = skb->tstamp; in rxrpc_congestion_management()
88 if (summary->nr_nacks > 0) in rxrpc_congestion_management()
94 if (call->peer->rtt_count == 0) in rxrpc_congestion_management()
96 if (ktime_before(skb->tstamp, in rxrpc_congestion_management()
97 ktime_add_us(call->cong_tstamp, in rxrpc_congestion_management()
98 call->peer->srtt_us >> 3))) in rxrpc_congestion_management()
101 call->cong_tstamp = skb->tstamp; in rxrpc_congestion_management()
107 if (summary->nr_nacks == 0) in rxrpc_congestion_management()
110 if (summary->new_low_nack) { in rxrpc_congestion_management()
112 call->cong_dup_acks = 1; in rxrpc_congestion_management()
113 if (call->cong_extra > 1) in rxrpc_congestion_management()
114 call->cong_extra = 1; in rxrpc_congestion_management()
118 call->cong_dup_acks++; in rxrpc_congestion_management()
119 if (call->cong_dup_acks < 3) in rxrpc_congestion_management()
123 call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT; in rxrpc_congestion_management()
124 call->cong_ssthresh = max_t(unsigned int, in rxrpc_congestion_management()
125 summary->flight_size / 2, 2); in rxrpc_congestion_management()
126 cwnd = call->cong_ssthresh + 3; in rxrpc_congestion_management()
127 call->cong_extra = 0; in rxrpc_congestion_management()
128 call->cong_dup_acks = 0; in rxrpc_congestion_management()
133 if (!summary->new_low_nack) { in rxrpc_congestion_management()
134 if (summary->nr_new_acks == 0) in rxrpc_congestion_management()
136 call->cong_dup_acks++; in rxrpc_congestion_management()
137 if (call->cong_dup_acks == 2) { in rxrpc_congestion_management()
139 call->cong_dup_acks = 0; in rxrpc_congestion_management()
144 cwnd = call->cong_ssthresh; in rxrpc_congestion_management()
145 if (summary->nr_nacks == 0) in rxrpc_congestion_management()
157 call->cong_dup_acks = 0; in rxrpc_congestion_management()
158 call->cong_extra = 0; in rxrpc_congestion_management()
159 call->cong_tstamp = skb->tstamp; in rxrpc_congestion_management()
160 if (cwnd < call->cong_ssthresh) in rxrpc_congestion_management()
161 call->cong_mode = RXRPC_CALL_SLOW_START; in rxrpc_congestion_management()
163 call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; in rxrpc_congestion_management()
167 if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1) in rxrpc_congestion_management()
168 cwnd = RXRPC_RXTX_BUFF_SIZE - 1; in rxrpc_congestion_management()
169 call->cong_cwnd = cwnd; in rxrpc_congestion_management()
170 call->cong_cumul_acks = cumulative_acks; in rxrpc_congestion_management()
172 if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) in rxrpc_congestion_management()
178 call->cong_mode = RXRPC_CALL_PACKET_LOSS; in rxrpc_congestion_management()
179 call->cong_dup_acks = 0; in rxrpc_congestion_management()
184 * state. in rxrpc_congestion_management()
186 if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & in rxrpc_congestion_management()
188 summary->nr_acks != call->tx_top - call->tx_hard_ack) { in rxrpc_congestion_management()
189 call->cong_extra++; in rxrpc_congestion_management()
190 wake_up(&call->waitq); in rxrpc_congestion_management()
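The lines above are the core of the TCP-style window management named in the comment at line 36. As a rough orientation only, here is a minimal userspace sketch of the RFC 5681 behaviour those branches implement: exponential growth below the slow-start threshold, additive increase above it, and a halved threshold plus inflated window after three duplicate/new-low NAK indications. The struct, function, and field names are illustrative stand-ins, not the kernel's API.

#include <stdbool.h>

/* Simplified congestion state, loosely mirroring cong_cwnd, cong_ssthresh
 * and cong_dup_acks in the matched lines.  Illustrative only. */
struct cc_state {
        unsigned int cwnd;       /* congestion window, in packets */
        unsigned int ssthresh;   /* slow-start threshold */
        unsigned int dup_acks;   /* consecutive duplicate-ACK count */
};

/* Apply one ACK event: 'new_acks' packets newly acknowledged, 'nacked' true
 * if the ACK reported missing packets, 'flight' = packets still unacked. */
static void cc_on_ack(struct cc_state *cc, unsigned int new_acks,
                      bool nacked, unsigned int flight)
{
        if (nacked) {
                /* After three duplicate indications, cut ssthresh to half the
                 * flight size (min 2) and enter fast retransmit/recovery. */
                if (++cc->dup_acks >= 3) {
                        cc->ssthresh = flight / 2 > 2 ? flight / 2 : 2;
                        cc->cwnd = cc->ssthresh + 3;
                        cc->dup_acks = 0;
                }
                return;
        }

        cc->dup_acks = 0;
        if (cc->cwnd < cc->ssthresh)
                cc->cwnd += new_acks;    /* slow start: grow per ACK */
        else if (new_acks)
                cc->cwnd += 1;           /* congestion avoidance: linear growth */
}

The kernel version additionally caps the window at the transmit ring size (line 168 above) and drops back to packet-loss handling on a retransmission timeout; the sketch omits both.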
206 if (call->acks_lowest_nak == call->tx_hard_ack) { in rxrpc_rotate_tx_window()
207 call->acks_lowest_nak = to; in rxrpc_rotate_tx_window()
208 } else if (before_eq(call->acks_lowest_nak, to)) { in rxrpc_rotate_tx_window()
209 summary->new_low_nack = true; in rxrpc_rotate_tx_window()
210 call->acks_lowest_nak = to; in rxrpc_rotate_tx_window()
213 spin_lock(&call->lock); in rxrpc_rotate_tx_window()
215 while (before(call->tx_hard_ack, to)) { in rxrpc_rotate_tx_window()
216 call->tx_hard_ack++; in rxrpc_rotate_tx_window()
217 ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK; in rxrpc_rotate_tx_window()
218 skb = call->rxtx_buffer[ix]; in rxrpc_rotate_tx_window()
219 annotation = call->rxtx_annotations[ix]; in rxrpc_rotate_tx_window()
221 call->rxtx_buffer[ix] = NULL; in rxrpc_rotate_tx_window()
222 call->rxtx_annotations[ix] = 0; in rxrpc_rotate_tx_window()
223 skb->next = list; in rxrpc_rotate_tx_window()
227 set_bit(RXRPC_CALL_TX_LAST, &call->flags); in rxrpc_rotate_tx_window()
231 summary->nr_rot_new_acks++; in rxrpc_rotate_tx_window()
234 spin_unlock(&call->lock); in rxrpc_rotate_tx_window()
239 wake_up(&call->waitq); in rxrpc_rotate_tx_window()
243 list = skb->next; in rxrpc_rotate_tx_window()
260 unsigned int state; in rxrpc_end_tx_phase() local
262 ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags)); in rxrpc_end_tx_phase()
264 write_lock(&call->state_lock); in rxrpc_end_tx_phase()
266 state = call->state; in rxrpc_end_tx_phase()
267 switch (state) { in rxrpc_end_tx_phase()
271 call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY; in rxrpc_end_tx_phase()
273 call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY; in rxrpc_end_tx_phase()
278 state = call->state; in rxrpc_end_tx_phase()
285 write_unlock(&call->state_lock); in rxrpc_end_tx_phase()
286 if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY) in rxrpc_end_tx_phase()
294 write_unlock(&call->state_lock); in rxrpc_end_tx_phase()
295 kdebug("end_tx %s", rxrpc_call_states[call->state]); in rxrpc_end_tx_phase()
296 rxrpc_proto_abort(abort_why, call, call->tx_top); in rxrpc_end_tx_phase()
307 rxrpc_seq_t top = READ_ONCE(call->tx_top); in rxrpc_receiving_reply()
309 if (call->ackr_reason) { in rxrpc_receiving_reply()
310 spin_lock_bh(&call->lock); in rxrpc_receiving_reply()
311 call->ackr_reason = 0; in rxrpc_receiving_reply()
312 spin_unlock_bh(&call->lock); in rxrpc_receiving_reply()
315 WRITE_ONCE(call->resend_at, timo); in rxrpc_receiving_reply()
316 WRITE_ONCE(call->ack_at, timo); in rxrpc_receiving_reply()
320 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { in rxrpc_receiving_reply()
328 call->tx_phase = false; in rxrpc_receiving_reply()
340 * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
348 unsigned int len = skb->len; in rxrpc_validate_data()
349 u8 flags = sp->hdr.flags; in rxrpc_validate_data()
353 __set_bit(sp->nr_subpackets, sp->rx_req_ack); in rxrpc_validate_data()
354 sp->nr_subpackets++; in rxrpc_validate_data()
359 if (len - offset < RXRPC_JUMBO_SUBPKTLEN) in rxrpc_validate_data()
370 sp->rx_flags |= RXRPC_SKB_INCL_LAST; in rxrpc_validate_data()
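The truncated comment at line 340 states the layout rule this function checks: in a jumbo DATA packet, every subpacket except the last occupies a full fixed-size slot and carries the jumbo flag. A heavily simplified illustration of that rule follows; the constants, flag value, and function name are placeholders chosen for the sketch, not the protocol definitions.

#include <stdbool.h>
#include <stddef.h>

#define SUBPKT_SLOT   1416   /* assumed size of a non-final subpacket slot */
#define FLAG_JUMBO    0x20   /* assumed "another subpacket follows" flag bit */

/* Check a jumbo payload of 'payload_len' bytes split into 'n' subpackets
 * whose per-subpacket flags are given in subpkt_flags[]: all but the last
 * must carry the jumbo flag and fill a whole slot; the last must not. */
static bool jumbo_layout_ok(const unsigned char *subpkt_flags, int n,
                            size_t payload_len)
{
        for (int i = 0; i < n - 1; i++)
                if (!(subpkt_flags[i] & FLAG_JUMBO))
                        return false;    /* non-final subpacket missing the flag */
        if (subpkt_flags[n - 1] & FLAG_JUMBO)
                return false;            /* final subpacket claims a follower */
        /* every non-final subpacket consumes a full slot */
        return payload_len >= (size_t)(n - 1) * SUBPKT_SLOT;
}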
402 call->nr_jumbo_bad++; in rxrpc_input_dup_data()
414 enum rxrpc_call_state state; in rxrpc_input_data() local
416 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = serial; in rxrpc_input_data() local
417 rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack; in rxrpc_input_data()
422 call->rx_hard_ack, call->rx_top, skb->len, seq0); in rxrpc_input_data()
425 sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets); in rxrpc_input_data()
427 state = READ_ONCE(call->state); in rxrpc_input_data()
428 if (state >= RXRPC_CALL_COMPLETE) { in rxrpc_input_data()
433 if (state == RXRPC_CALL_SERVER_RECV_REQUEST) { in rxrpc_input_data()
434 unsigned long timo = READ_ONCE(call->next_req_timo); in rxrpc_input_data()
440 WRITE_ONCE(call->expect_req_by, expect_req_by); in rxrpc_input_data()
446 spin_lock(&call->input_lock); in rxrpc_input_data()
451 if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || in rxrpc_input_data()
452 state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && in rxrpc_input_data()
456 hard_ack = READ_ONCE(call->rx_hard_ack); in rxrpc_input_data()
458 nr_subpackets = sp->nr_subpackets; in rxrpc_input_data()
460 if (call->nr_jumbo_bad > 3) { in rxrpc_input_data()
462 ack_serial = serial; in rxrpc_input_data()
468 rxrpc_serial_t serial = sp->hdr.serial + j; in rxrpc_input_data() local
471 bool terminal = (j == nr_subpackets - 1); in rxrpc_input_data()
472 bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST); in rxrpc_input_data()
476 j, serial, seq, terminal, last); in rxrpc_input_data()
479 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && in rxrpc_input_data()
480 seq != call->rx_top) { in rxrpc_input_data()
485 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && in rxrpc_input_data()
486 after_eq(seq, call->rx_top)) { in rxrpc_input_data()
497 if (test_bit(j, sp->rx_req_ack)) in rxrpc_input_data()
499 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); in rxrpc_input_data()
503 ack_serial = serial; in rxrpc_input_data()
507 if (call->rxtx_buffer[ix]) { in rxrpc_input_data()
512 ack_serial = serial; in rxrpc_input_data()
518 if (after(seq, hard_ack + call->rx_winsize)) { in rxrpc_input_data()
520 ack_serial = serial; in rxrpc_input_data()
523 call->nr_jumbo_bad++; in rxrpc_input_data()
533 ack_serial = serial; in rxrpc_input_data()
536 if (after(seq0, call->ackr_highest_seq)) in rxrpc_input_data()
537 call->ackr_highest_seq = seq0; in rxrpc_input_data()
549 call->rxtx_annotations[ix] = annotation; in rxrpc_input_data()
551 call->rxtx_buffer[ix] = skb; in rxrpc_input_data()
552 if (after(seq, call->rx_top)) { in rxrpc_input_data()
553 smp_store_release(&call->rx_top, seq); in rxrpc_input_data()
554 } else if (before(seq, call->rx_top)) { in rxrpc_input_data()
558 ack_serial = serial; in rxrpc_input_data()
575 set_bit(RXRPC_CALL_RX_LAST, &call->flags); in rxrpc_input_data()
578 ack_serial = serial; in rxrpc_input_data()
580 trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq); in rxrpc_input_data()
582 trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq); in rxrpc_input_data()
585 if (after_eq(seq, call->rx_expect_next)) { in rxrpc_input_data()
586 if (after(seq, call->rx_expect_next)) { in rxrpc_input_data()
587 _net("OOS %u > %u", seq, call->rx_expect_next); in rxrpc_input_data()
589 ack_serial = serial; in rxrpc_input_data()
591 call->rx_expect_next = seq + 1; in rxrpc_input_data()
594 ack_serial = serial; in rxrpc_input_data()
598 if (atomic_add_return(nr_unacked, &call->ackr_nr_unacked) > 2 && !ack) in rxrpc_input_data()
606 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, in rxrpc_input_data()
610 trace_rxrpc_notify_socket(call->debug_id, serial); in rxrpc_input_data()
614 spin_unlock(&call->input_lock); in rxrpc_input_data()
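Several of the discard paths in rxrpc_input_data() above come down to one windowing test: a DATA sequence number must lie strictly above the hard-ACK point and within the advertised receive window (line 518), and anything above the current rx_top extends it (line 553). A compact sketch of that acceptance test, using wrap-safe signed comparison and illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* Accept a DATA packet only if its sequence is beyond what has already been
 * hard-ACK'd and no further out than the receive window allows. */
static bool seq_in_rx_window(uint32_t seq, uint32_t hard_ack,
                             uint32_t rx_winsize)
{
        if ((int32_t)(seq - hard_ack) <= 0)
                return false;            /* duplicate: already hard-ACK'd */
        if ((int32_t)(seq - (hard_ack + rx_winsize)) > 0)
                return false;            /* beyond the advertised window */
        return true;
}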
634 avail = READ_ONCE(call->rtt_avail); in rxrpc_complete_rtt_probe()
637 for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) { in rxrpc_complete_rtt_probe()
641 sent_at = call->rtt_sent_at[i]; in rxrpc_complete_rtt_probe()
642 orig_serial = call->rtt_serial[i]; in rxrpc_complete_rtt_probe()
645 clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); in rxrpc_complete_rtt_probe()
647 set_bit(i, &call->rtt_avail); in rxrpc_complete_rtt_probe()
657 /* If a later serial is being acked, then mark this slot as in rxrpc_complete_rtt_probe()
663 clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); in rxrpc_complete_rtt_probe()
665 set_bit(i, &call->rtt_avail); in rxrpc_complete_rtt_probe()
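rxrpc_complete_rtt_probe() matches an acknowledged serial number against a small set of outstanding RTT probes (rtt_serial[]/rtt_sent_at[]) and frees the slot once a sample is taken. A minimal userspace sketch of that slot-matching idea, with invented names and a plain flag instead of the kernel's atomic bitmap:

#include <stdint.h>

#define RTT_SLOTS 4   /* assumed small fixed number of in-flight probes */

struct rtt_probe {
        uint32_t serial;      /* serial of the packet that requested an ACK */
        int64_t  sent_at_ns;  /* when it was transmitted */
        int      pending;     /* slot holds an outstanding probe */
};

/* On an ACK echoing 'acked_serial' at time 'now_ns', find the matching probe,
 * release its slot and return the measured RTT in ns, or -1 if none match. */
static int64_t rtt_complete(struct rtt_probe *slots, uint32_t acked_serial,
                            int64_t now_ns)
{
        for (int i = 0; i < RTT_SLOTS; i++) {
                if (!slots[i].pending || slots[i].serial != acked_serial)
                        continue;
                slots[i].pending = 0;    /* slot becomes available again */
                return now_ns - slots[i].sent_at_ns;
        }
        return -1;
}

As the comment fragment at line 657 indicates, the kernel also cancels slots whose serials are older than the one acknowledged; the sketch leaves stale slots to expire on their own.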
678 * sent between the response tx_top and the ping-time tx_top to have been lost.
685 spin_lock_bh(&call->lock); in rxrpc_input_check_for_lost_ack()
687 bottom = call->tx_hard_ack + 1; in rxrpc_input_check_for_lost_ack()
688 top = call->acks_lost_top; in rxrpc_input_check_for_lost_ack()
692 u8 annotation = call->rxtx_annotations[ix]; in rxrpc_input_check_for_lost_ack()
699 call->rxtx_annotations[ix] = annotation; in rxrpc_input_check_for_lost_ack()
704 spin_unlock_bh(&call->lock); in rxrpc_input_check_for_lost_ack()
706 if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) in rxrpc_input_check_for_lost_ack()
718 if (acked_serial == call->acks_lost_ping) in rxrpc_input_ping_response()
732 u32 rwind = ntohl(ackinfo->rwind); in rxrpc_input_ackinfo()
735 sp->hdr.serial, in rxrpc_input_ackinfo()
736 ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU), in rxrpc_input_ackinfo()
737 rwind, ntohl(ackinfo->jumbo_max)); in rxrpc_input_ackinfo()
739 if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) in rxrpc_input_ackinfo()
740 rwind = RXRPC_RXTX_BUFF_SIZE - 1; in rxrpc_input_ackinfo()
741 if (call->tx_winsize != rwind) { in rxrpc_input_ackinfo()
742 if (rwind > call->tx_winsize) in rxrpc_input_ackinfo()
744 trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake); in rxrpc_input_ackinfo()
745 call->tx_winsize = rwind; in rxrpc_input_ackinfo()
748 if (call->cong_ssthresh > rwind) in rxrpc_input_ackinfo()
749 call->cong_ssthresh = rwind; in rxrpc_input_ackinfo()
751 mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU)); in rxrpc_input_ackinfo()
753 peer = call->peer; in rxrpc_input_ackinfo()
754 if (mtu < peer->maxdata) { in rxrpc_input_ackinfo()
755 spin_lock_bh(&peer->lock); in rxrpc_input_ackinfo()
756 peer->maxdata = mtu; in rxrpc_input_ackinfo()
757 peer->mtu = mtu + peer->hdrsize; in rxrpc_input_ackinfo()
758 spin_unlock_bh(&peer->lock); in rxrpc_input_ackinfo()
759 _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata); in rxrpc_input_ackinfo()
763 wake_up(&call->waitq); in rxrpc_input_ackinfo()
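The ACK-info handling above applies three simple rules: the peer's advertised receive window is clamped to the local ring size, ssthresh never exceeds that window, and the path MTU estimate only ever shrinks. A small sketch under those assumptions, with an illustrative struct and ring-size constant:

#include <stdint.h>

#define TX_RING_SLOTS 64   /* stand-in for the transmit ring size */

struct tx_state {
        uint32_t tx_winsize;   /* packets we may keep in flight */
        uint32_t ssthresh;     /* congestion slow-start threshold */
        uint32_t max_data;     /* usable payload per packet */
};

static void apply_ack_info(struct tx_state *tx, uint32_t rwind,
                           uint32_t rx_mtu, uint32_t max_mtu)
{
        uint32_t mtu = rx_mtu < max_mtu ? rx_mtu : max_mtu;

        if (rwind > TX_RING_SLOTS - 1)
                rwind = TX_RING_SLOTS - 1;   /* cannot exceed our own ring */
        tx->tx_winsize = rwind;
        if (tx->ssthresh > rwind)
                tx->ssthresh = rwind;
        if (mtu < tx->max_data)
                tx->max_data = mtu;          /* MTU estimate only shrinks */
}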
782 for (; nr_acks > 0; nr_acks--, seq++) { in rxrpc_input_soft_acks()
784 annotation = call->rxtx_annotations[ix]; in rxrpc_input_soft_acks()
789 summary->nr_acks++; in rxrpc_input_soft_acks()
792 summary->nr_new_acks++; in rxrpc_input_soft_acks()
793 call->rxtx_annotations[ix] = in rxrpc_input_soft_acks()
797 if (!summary->nr_nacks && in rxrpc_input_soft_acks()
798 call->acks_lowest_nak != seq) { in rxrpc_input_soft_acks()
799 call->acks_lowest_nak = seq; in rxrpc_input_soft_acks()
800 summary->new_low_nack = true; in rxrpc_input_soft_acks()
802 summary->nr_nacks++; in rxrpc_input_soft_acks()
805 summary->nr_new_nacks++; in rxrpc_input_soft_acks()
808 call->rxtx_annotations[ix] = in rxrpc_input_soft_acks()
818 * Return true if the ACK is valid - ie. it doesn't appear to have regressed
819 * with respect to the ack state conveyed by preceding ACKs.
824 rxrpc_seq_t base = READ_ONCE(call->acks_first_seq); in rxrpc_is_ack_valid()
832 if (after_eq(prev_pkt, call->acks_prev_seq)) in rxrpc_is_ack_valid()
835 /* Some rx implementations put a serial number in previousPacket. */ in rxrpc_is_ack_valid()
836 if (after_eq(prev_pkt, base + call->tx_winsize)) in rxrpc_is_ack_valid()
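The regression test described at lines 818-819 compares the ACK's firstPacket/previousPacket against the last values recorded, using wrap-safe sequence arithmetic. A self-contained sketch of the same decision, with the wrap-safe comparison written out explicitly; the function and parameter names are illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Sequence numbers wrap, so "a after b" is a signed test on the 32-bit
 * difference, the same idea as the kernel's before()/after() helpers. */
static inline bool seq_after(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) > 0;
}

/* Accept an ACK whose firstPacket advances the recorded window, reject one
 * that moves it backwards, and for a stationary window tolerate a
 * previousPacket that looks like a serial rather than a sequence number. */
static bool ack_is_valid(uint32_t first_pkt, uint32_t prev_pkt,
                         uint32_t acked_first, uint32_t acked_prev,
                         uint32_t tx_winsize)
{
        if (seq_after(first_pkt, acked_first))
                return true;             /* window advanced */
        if (seq_after(acked_first, first_pkt))
                return false;            /* firstPacket regressed */
        if (!seq_after(acked_prev, prev_pkt))
                return true;             /* previousPacket did not regress */
        /* some peers put a serial in previousPacket; reject implausible values */
        if (!seq_after(acked_first + tx_winsize, prev_pkt))
                return false;
        return true;
}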
844 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
845 * in the ACK array. Anything before that is hard-ACK'd and may be discarded.
847 * A hard-ACK means that a packet has been processed and may be discarded; a
848 * soft-ACK means that the packet may be discarded and retransmission
849 * requested. A phase is complete when all packets are hard-ACK'd.
873 ack_serial = sp->hdr.serial; in rxrpc_input_ack()
874 acked_serial = ntohl(buf.ack.serial); in rxrpc_input_ack()
877 hard_ack = first_soft_ack - 1; in rxrpc_input_ack()
888 rxrpc_input_ping_response(call, skb->tstamp, acked_serial, in rxrpc_input_ack()
890 rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, in rxrpc_input_ack()
894 rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, in rxrpc_input_ack()
899 rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, in rxrpc_input_ack()
909 } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { in rxrpc_input_ack()
915 /* Discard any out-of-order or duplicate ACKs (outside lock). */ in rxrpc_input_ack()
917 trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, in rxrpc_input_ack()
918 first_soft_ack, call->acks_first_seq, in rxrpc_input_ack()
919 prev_pkt, call->acks_prev_seq); in rxrpc_input_ack()
925 if (skb->len >= ioffset + sizeof(buf.info) && in rxrpc_input_ack()
929 spin_lock(&call->input_lock); in rxrpc_input_ack()
931 /* Discard any out-of-order or duplicate ACKs (inside lock). */ in rxrpc_input_ack()
933 trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, in rxrpc_input_ack()
934 first_soft_ack, call->acks_first_seq, in rxrpc_input_ack()
935 prev_pkt, call->acks_prev_seq); in rxrpc_input_ack()
938 call->acks_latest_ts = skb->tstamp; in rxrpc_input_ack()
940 call->acks_first_seq = first_soft_ack; in rxrpc_input_ack()
941 call->acks_prev_seq = prev_pkt; in rxrpc_input_ack()
953 switch (READ_ONCE(call->state)) { in rxrpc_input_ack()
963 if (before(hard_ack, call->tx_hard_ack) || in rxrpc_input_ack()
964 after(hard_ack, call->tx_top)) { in rxrpc_input_ack()
968 if (nr_acks > call->tx_top - hard_ack) { in rxrpc_input_ack()
973 if (after(hard_ack, call->tx_hard_ack)) { in rxrpc_input_ack()
989 if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & in rxrpc_input_ack()
991 summary.nr_acks == call->tx_top - hard_ack && in rxrpc_input_ack()
999 spin_unlock(&call->input_lock); in rxrpc_input_ack()
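The comment block at lines 844-849 explains the hard-ACK/soft-ACK split that rxrpc_input_ack() acts on: firstPacket is the first sequence that is only soft-ACK'd, so everything strictly before it is hard-ACK'd (hence hard_ack = first_soft_ack - 1 at line 877) and may be dropped from the transmit ring. A minimal sketch of that rotation over a simplified ring, with invented names in place of the kernel's rxtx_buffer:

#include <stdint.h>
#include <stddef.h>

#define RING_SLOTS 64                    /* illustrative tx ring size */

struct tx_ring {
        void    *slot[RING_SLOTS];       /* retained copies of sent packets */
        uint32_t hard_ack;               /* highest sequence hard-ACK'd so far */
};

/* Everything strictly below firstPacket is hard-ACK'd: advance hard_ack one
 * sequence at a time and release each slot it passes. */
static void rotate_hard_ack(struct tx_ring *ring, uint32_t first_soft_ack)
{
        uint32_t hard_ack = first_soft_ack - 1;

        while ((int32_t)(hard_ack - ring->hard_ack) > 0) {
                ring->hard_ack++;
                /* drop the retained packet; a real ring would free it here */
                ring->slot[ring->hard_ack % RING_SLOTS] = NULL;
        }
}

Soft-ACK'd sequences stay in the ring because, per the comment, the peer may still ask for them to be retransmitted.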
1010 _proto("Rx ACKALL %%%u", sp->hdr.serial); in rxrpc_input_ackall()
1012 spin_lock(&call->input_lock); in rxrpc_input_ackall()
1014 if (rxrpc_rotate_tx_window(call, call->tx_top, &summary)) in rxrpc_input_ackall()
1017 spin_unlock(&call->input_lock); in rxrpc_input_ackall()
1031 if (skb->len >= 4 && in rxrpc_input_abort()
1036 trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code); in rxrpc_input_abort()
1038 _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code); in rxrpc_input_abort()
1041 abort_code, -ECONNABORTED); in rxrpc_input_abort()
1055 timo = READ_ONCE(call->next_rx_timo); in rxrpc_input_call_packet()
1060 WRITE_ONCE(call->expect_rx_by, expect_rx_by); in rxrpc_input_call_packet()
1065 switch (sp->hdr.type) { in rxrpc_input_call_packet()
1075 _proto("Rx BUSY %%%u", sp->hdr.serial); in rxrpc_input_call_packet()
1110 switch (READ_ONCE(call->state)) { in rxrpc_input_implicit_end_call()
1117 if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) { in rxrpc_input_implicit_end_call()
1118 set_bit(RXRPC_CALL_EV_ABORT, &call->events); in rxrpc_input_implicit_end_call()
1125 spin_lock(&rx->incoming_lock); in rxrpc_input_implicit_end_call()
1127 spin_unlock(&rx->incoming_lock); in rxrpc_input_implicit_end_call()
1131 * post connection-level events to the connection
1132 * - this includes challenges, responses, some aborts and call terminal packet
1140 skb_queue_tail(&conn->rx_queue, skb); in rxrpc_post_packet_to_conn()
1145 * post endpoint-level events to the local endpoint
1146 * - this includes debug and version messages
1154 skb_queue_tail(&local->event_queue, skb); in rxrpc_post_packet_to_local()
1162 * put a packet up for transport-level abort
1167 skb_queue_tail(&local->reject_queue, skb); in rxrpc_reject_packet()
1184 trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, in rxrpc_extract_header()
1186 return -EBADMSG; in rxrpc_extract_header()
1190 sp->hdr.epoch = ntohl(whdr.epoch); in rxrpc_extract_header()
1191 sp->hdr.cid = ntohl(whdr.cid); in rxrpc_extract_header()
1192 sp->hdr.callNumber = ntohl(whdr.callNumber); in rxrpc_extract_header()
1193 sp->hdr.seq = ntohl(whdr.seq); in rxrpc_extract_header()
1194 sp->hdr.serial = ntohl(whdr.serial); in rxrpc_extract_header()
1195 sp->hdr.flags = whdr.flags; in rxrpc_extract_header()
1196 sp->hdr.type = whdr.type; in rxrpc_extract_header()
1197 sp->hdr.userStatus = whdr.userStatus; in rxrpc_extract_header()
1198 sp->hdr.securityIndex = whdr.securityIndex; in rxrpc_extract_header()
1199 sp->hdr._rsvd = ntohs(whdr._rsvd); in rxrpc_extract_header()
1200 sp->hdr.serviceId = ntohs(whdr.serviceId); in rxrpc_extract_header()
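rxrpc_extract_header() above copies the big-endian wire header out of the datagram and byte-swaps the multi-byte fields into host order. The following userspace sketch shows the same conversion; the struct below is inferred from the field names and ntohl()/ntohs() calls in the matched lines, so treat the exact field order and widths as an assumption rather than a quoted kernel definition.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohl, ntohs */

/* Assumed on-wire header: five 32-bit words, four single bytes, then two
 * 16-bit fields, all big-endian. */
struct wire_header {
        uint32_t epoch, cid, callNumber, seq, serial;
        uint8_t  type, flags, userStatus, securityIndex;
        uint16_t _rsvd, serviceId;
} __attribute__((packed));

struct host_header {
        uint32_t epoch, cid, callNumber, seq, serial;
        uint8_t  type, flags, userStatus, securityIndex;
        uint16_t _rsvd, serviceId;
};

/* Copy the header out of the packet and convert each multi-byte field from
 * network to host byte order, as the matched lines do. */
static int extract_header(struct host_header *h, const void *pkt, size_t len)
{
        struct wire_header w;

        if (len < sizeof(w))
                return -1;               /* runt packet */
        memcpy(&w, pkt, sizeof(w));
        h->epoch         = ntohl(w.epoch);
        h->cid           = ntohl(w.cid);
        h->callNumber    = ntohl(w.callNumber);
        h->seq           = ntohl(w.seq);
        h->serial        = ntohl(w.serial);
        h->type          = w.type;
        h->flags         = w.flags;
        h->userStatus    = w.userStatus;
        h->securityIndex = w.securityIndex;
        h->_rsvd         = ntohs(w._rsvd);
        h->serviceId     = ntohs(w.serviceId);
        return 0;
}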
1206 * - may be called in interrupt context
1231 if (skb->tstamp == 0) in rxrpc_input_packet()
1232 skb->tstamp = ktime_get_real(); in rxrpc_input_packet()
1256 if (skb->tstamp == 0) in rxrpc_input_packet()
1257 skb->tstamp = ktime_get_real(); in rxrpc_input_packet()
1260 switch (sp->hdr.type) { in rxrpc_input_packet()
1273 if (sp->hdr.callNumber == 0) in rxrpc_input_packet()
1280 if (sp->hdr.callNumber == 0 || in rxrpc_input_packet()
1281 sp->hdr.seq == 0) in rxrpc_input_packet()
1286 /* Unshare the packet so that it can be modified for in-place in rxrpc_input_packet()
1289 if (sp->hdr.securityIndex != 0) { in rxrpc_input_packet()
1314 /* Packet types 9-11 should just be ignored. */ in rxrpc_input_packet()
1321 _proto("Rx Bad Packet Type %u", sp->hdr.type); in rxrpc_input_packet()
1325 if (sp->hdr.serviceId == 0) in rxrpc_input_packet()
1333 rx = rcu_dereference(local->service); in rxrpc_input_packet()
1334 if (!rx || (sp->hdr.serviceId != rx->srx.srx_service && in rxrpc_input_packet()
1335 sp->hdr.serviceId != rx->second_service)) { in rxrpc_input_packet()
1336 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && in rxrpc_input_packet()
1337 sp->hdr.seq == 1) in rxrpc_input_packet()
1345 if (sp->hdr.securityIndex != conn->security_ix) in rxrpc_input_packet()
1348 if (sp->hdr.serviceId != conn->service_id) { in rxrpc_input_packet()
1351 if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) in rxrpc_input_packet()
1353 old_id = cmpxchg(&conn->service_id, conn->params.service_id, in rxrpc_input_packet()
1354 sp->hdr.serviceId); in rxrpc_input_packet()
1356 if (old_id != conn->params.service_id && in rxrpc_input_packet()
1357 old_id != sp->hdr.serviceId) in rxrpc_input_packet()
1361 if (sp->hdr.callNumber == 0) { in rxrpc_input_packet()
1362 /* Connection-level packet */ in rxrpc_input_packet()
1363 _debug("CONN %p {%d}", conn, conn->debug_id); in rxrpc_input_packet()
1368 if ((int)sp->hdr.serial - (int)conn->hi_serial > 0) in rxrpc_input_packet()
1369 conn->hi_serial = sp->hdr.serial; in rxrpc_input_packet()
1371 /* Call-bound packets are routed by connection channel. */ in rxrpc_input_packet()
1372 channel = sp->hdr.cid & RXRPC_CHANNELMASK; in rxrpc_input_packet()
1373 chan = &conn->channels[channel]; in rxrpc_input_packet()
1376 if (sp->hdr.callNumber < chan->last_call) in rxrpc_input_packet()
1379 if (sp->hdr.callNumber == chan->last_call) { in rxrpc_input_packet()
1380 if (chan->call || in rxrpc_input_packet()
1381 sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) in rxrpc_input_packet()
1388 chan->last_type == RXRPC_PACKET_TYPE_ACK) in rxrpc_input_packet()
1394 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) in rxrpc_input_packet()
1395 trace_rxrpc_rx_data(chan->call_debug_id, in rxrpc_input_packet()
1396 sp->hdr.seq, in rxrpc_input_packet()
1397 sp->hdr.serial, in rxrpc_input_packet()
1398 sp->hdr.flags, 0); in rxrpc_input_packet()
1403 call = rcu_dereference(chan->call); in rxrpc_input_packet()
1405 if (sp->hdr.callNumber > chan->call_id) { in rxrpc_input_packet()
1414 if (sp->hdr.serviceId != call->service_id) in rxrpc_input_packet()
1415 call->service_id = sp->hdr.serviceId; in rxrpc_input_packet()
1416 if ((int)sp->hdr.serial - (int)call->rx_serial > 0) in rxrpc_input_packet()
1417 call->rx_serial = sp->hdr.serial; in rxrpc_input_packet()
1418 if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags)) in rxrpc_input_packet()
1419 set_bit(RXRPC_CALL_RX_HEARD, &call->flags); in rxrpc_input_packet()
1423 if (!call || refcount_read(&call->ref) == 0) { in rxrpc_input_packet()
1425 sp->hdr.type != RXRPC_PACKET_TYPE_DATA) in rxrpc_input_packet()
1427 if (sp->hdr.seq != 1) in rxrpc_input_packet()
1447 trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, in rxrpc_input_packet()
1449 skb->priority = RXKADINCONSISTENCY; in rxrpc_input_packet()
1453 trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, in rxrpc_input_packet()
1455 skb->priority = RX_INVALID_OPERATION; in rxrpc_input_packet()
1459 trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, in rxrpc_input_packet()
1464 trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, in rxrpc_input_packet()
1467 skb->priority = RX_PROTOCOL_ERROR; in rxrpc_input_packet()
1469 skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; in rxrpc_input_packet()
1471 trace_rxrpc_rx_done(skb->mark, skb->priority); in rxrpc_input_packet()
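Two recurring idioms in rxrpc_input_packet() above are worth calling out: call-bound packets are routed by taking the low bits of the connection ID as a channel index (line 1372), and the highest serial seen on a connection is tracked with a wrap-safe signed comparison (line 1368). A short sketch of both, with an assumed channel-mask value and invented struct names:

#include <stdint.h>

#define CHANNEL_MASK 0x3   /* assumed: low cid bits select one of 4 channels */

struct channel { uint32_t call_id; };

struct connection {
        uint32_t hi_serial;                       /* highest serial seen */
        struct channel channels[CHANNEL_MASK + 1];
};

/* Route an incoming packet to its channel and advance hi_serial, using the
 * same "(int)serial - (int)hi_serial > 0" wrap-safe test as the code above. */
static struct channel *route_packet(struct connection *conn,
                                    uint32_t cid, uint32_t serial)
{
        if ((int32_t)(serial - conn->hi_serial) > 0)
                conn->hi_serial = serial;

        return &conn->channels[cid & CHANNEL_MASK];
}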