Lines matching +full:rx +full:- +full:sched +full:- +full:sp in net/rxrpc/recvmsg.c (the Linux kernel's AF_RXRPC receive path); each hit is shown with its file line number and enclosing function.

17 #include <linux/sched/signal.h>
21 #include "ar-internal.h"
29 struct rxrpc_sock *rx; in rxrpc_notify_socket() local
32 _enter("%d", call->debug_id); in rxrpc_notify_socket()
34 if (!list_empty(&call->recvmsg_link)) in rxrpc_notify_socket()
39 rx = rcu_dereference(call->socket); in rxrpc_notify_socket()
40 sk = &rx->sk; in rxrpc_notify_socket()
41 if (rx && sk->sk_state < RXRPC_CLOSE) { in rxrpc_notify_socket()
42 if (call->notify_rx) { in rxrpc_notify_socket()
43 spin_lock_bh(&call->notify_lock); in rxrpc_notify_socket()
44 call->notify_rx(sk, call, call->user_call_ID); in rxrpc_notify_socket()
45 spin_unlock_bh(&call->notify_lock); in rxrpc_notify_socket()
47 write_lock_bh(&rx->recvmsg_lock); in rxrpc_notify_socket()
48 if (list_empty(&call->recvmsg_link)) { in rxrpc_notify_socket()
50 list_add_tail(&call->recvmsg_link, &rx->recvmsg_q); in rxrpc_notify_socket()
52 write_unlock_bh(&rx->recvmsg_lock); in rxrpc_notify_socket()
55 _debug("call %ps", sk->sk_data_ready); in rxrpc_notify_socket()
56 sk->sk_data_ready(sk); in rxrpc_notify_socket()
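The fragment above dispatches a call event one of two ways: a kernel service gets its notify_rx callback invoked under notify_lock, while a userspace socket has the call queued on recvmsg_q and sk_data_ready kicked. A minimal userspace analogue of that callback-or-queue pattern, using pthreads and illustrative names only (this is not kernel code):

#include <pthread.h>
#include <stddef.h>

struct item {
	struct item *next;
	int id;
};

struct sockish {
	pthread_mutex_t lock;
	pthread_cond_t data_ready;          /* stands in for sk_data_ready */
	struct item *queue_head;
	struct item **queue_tail;           /* must start as &queue_head */
	void (*notify)(struct item *);      /* stands in for call->notify_rx */
};

static void notify_item(struct sockish *s, struct item *it)
{
	pthread_mutex_lock(&s->lock);
	if (s->notify) {
		/* Kernel-service style: hand the event straight to a callback. */
		s->notify(it);
	} else {
		/* Userspace style: queue the item and wake a blocked reader. */
		it->next = NULL;
		*s->queue_tail = it;
		s->queue_tail = &it->next;
		pthread_cond_signal(&s->data_ready);
	}
	pthread_mutex_unlock(&s->lock);
}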
73 switch (call->completion) { in rxrpc_recvmsg_term()
80 tmp = call->abort_code; in rxrpc_recvmsg_term()
84 tmp = call->abort_code; in rxrpc_recvmsg_term()
88 tmp = -call->error; in rxrpc_recvmsg_term()
92 tmp = -call->error; in rxrpc_recvmsg_term()
96 pr_err("Invalid terminal call state %u\n", call->state); in rxrpc_recvmsg_term()
101 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack, in rxrpc_recvmsg_term()
102 call->rx_pkt_offset, call->rx_pkt_len, ret); in rxrpc_recvmsg_term()
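rxrpc_recvmsg_term() turns a completed call into terminal information for the caller: aborts are reported with the abort code, network and local failures with an error number. On the userspace side these arrive as SOL_RXRPC control messages; a rough sketch of parsing them, assuming the uapi definitions in <linux/rxrpc.h> (the helper name and the SOL_RXRPC fallback value are assumptions):

#include <linux/rxrpc.h>
#include <sys/socket.h>
#include <string.h>
#include <stdio.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272           /* assumed value if libc doesn't define it */
#endif

static void report_call_end(struct msghdr *msg)
{
	struct cmsghdr *cmsg;
	int val;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;
		memcpy(&val, CMSG_DATA(cmsg), sizeof(val));
		switch (cmsg->cmsg_type) {
		case RXRPC_ABORT:
			printf("call aborted, code %u\n", (unsigned int)val);
			break;
		case RXRPC_NET_ERROR:
		case RXRPC_LOCAL_ERROR:
			printf("call failed, error %d\n", val);
			break;
		}
	}
}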
108 * to-be-accepted list. This means that the next call to be accepted might not
113 static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx, in rxrpc_recvmsg_new_call() argument
123 write_lock_bh(&rx->recvmsg_lock); in rxrpc_recvmsg_new_call()
124 list_del_init(&call->recvmsg_link); in rxrpc_recvmsg_new_call()
125 write_unlock_bh(&rx->recvmsg_lock); in rxrpc_recvmsg_new_call()
128 write_lock(&rx->call_lock); in rxrpc_recvmsg_new_call()
129 list_add_tail(&call->accept_link, &rx->to_be_accepted); in rxrpc_recvmsg_new_call()
130 write_unlock(&rx->call_lock); in rxrpc_recvmsg_new_call()
142 _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]); in rxrpc_end_rx_phase()
144 trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top); in rxrpc_end_rx_phase()
145 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); in rxrpc_end_rx_phase()
147 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { in rxrpc_end_rx_phase()
153 write_lock_bh(&call->state_lock); in rxrpc_end_rx_phase()
155 switch (call->state) { in rxrpc_end_rx_phase()
158 write_unlock_bh(&call->state_lock); in rxrpc_end_rx_phase()
162 call->tx_phase = true; in rxrpc_end_rx_phase()
163 call->state = RXRPC_CALL_SERVER_ACK_REQUEST; in rxrpc_end_rx_phase()
164 call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; in rxrpc_end_rx_phase()
165 write_unlock_bh(&call->state_lock); in rxrpc_end_rx_phase()
170 write_unlock_bh(&call->state_lock); in rxrpc_end_rx_phase()
176 * Discard a packet we've used up and advance the Rx window by one.
180 struct rxrpc_skb_priv *sp; in rxrpc_rotate_rx_window() local
187 _enter("%d", call->debug_id); in rxrpc_rotate_rx_window()
189 hard_ack = call->rx_hard_ack; in rxrpc_rotate_rx_window()
190 top = smp_load_acquire(&call->rx_top); in rxrpc_rotate_rx_window()
195 skb = call->rxtx_buffer[ix]; in rxrpc_rotate_rx_window()
197 sp = rxrpc_skb(skb); in rxrpc_rotate_rx_window()
198 flags = sp->hdr.flags; in rxrpc_rotate_rx_window()
199 serial = sp->hdr.serial; in rxrpc_rotate_rx_window()
200 if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) in rxrpc_rotate_rx_window()
201 serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1; in rxrpc_rotate_rx_window()
203 call->rxtx_buffer[ix] = NULL; in rxrpc_rotate_rx_window()
204 call->rxtx_annotations[ix] = 0; in rxrpc_rotate_rx_window()
206 smp_store_release(&call->rx_hard_ack, hard_ack); in rxrpc_rotate_rx_window()
216 if (after_eq(hard_ack, call->ackr_consumed + 2) || in rxrpc_rotate_rx_window()
217 after_eq(top, call->ackr_seen + 2) || in rxrpc_rotate_rx_window()
218 (hard_ack == top && after(hard_ack, call->ackr_consumed))) in rxrpc_rotate_rx_window()
222 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) in rxrpc_rotate_rx_window()
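rxrpc_rotate_rx_window() consumes one slot of the call's Rx ring: it reads rx_top with an acquire load, empties the slot just past hard_ack, then publishes the new rx_hard_ack with a release store. A minimal userspace analogue of that acquire/release pairing using C11 atomics (sizes and names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

#define RING_SIZE 64u                       /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct rx_ring {
	void *slot[RING_SIZE];
	_Atomic uint32_t top;               /* last sequence number produced */
	_Atomic uint32_t hard_ack;          /* last sequence number consumed */
};

/* Consumer side: rotate the window by one, returning the consumed entry
 * or NULL if the ring is empty. */
static void *ring_rotate(struct rx_ring *r)
{
	uint32_t hard_ack = atomic_load_explicit(&r->hard_ack, memory_order_relaxed);
	uint32_t top = atomic_load_explicit(&r->top, memory_order_acquire);
	void *entry;

	if (hard_ack == top)
		return NULL;                /* nothing to consume */

	hard_ack++;
	entry = r->slot[hard_ack & RING_MASK];
	r->slot[hard_ack & RING_MASK] = NULL;

	/* Publish the new consumer position only after the slot is emptied. */
	atomic_store_explicit(&r->hard_ack, hard_ack, memory_order_release);
	return entry;
}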
237 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); in rxrpc_verify_packet() local
238 rxrpc_seq_t seq = sp->hdr.seq; in rxrpc_verify_packet()
239 u16 cksum = sp->hdr.cksum; in rxrpc_verify_packet()
248 if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0) in rxrpc_verify_packet()
251 seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1; in rxrpc_verify_packet()
254 return call->conn->security->verify_packet(call, skb, offset, len, in rxrpc_verify_packet()
261 * (1) An skb may contain a jumbo packet - so we have to find the appropriate
278 len = skb->len - offset; in rxrpc_locate_data()
280 offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) * in rxrpc_locate_data()
283 skb->len - offset : RXRPC_JUMBO_SUBPKTLEN; in rxrpc_locate_data()
295 call->conn->security->locate_data(call, skb, _offset, _len); in rxrpc_locate_data()
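rxrpc_locate_data() has to cope with jumbo DATA packets, where one skb carries several sub-packets: the requested sub-packet sits at a multiple of the sub-packet stride from the base of the payload, and only the last sub-packet takes whatever length remains. A simplified sketch of that arithmetic; the constants below are assumed stand-ins for RXRPC_JUMBO_DATALEN/RXRPC_JUMBO_SUBPKTLEN, and the real code additionally lets the connection's security class trim the located span:

#include <stdbool.h>
#include <stddef.h>

#define JUMBO_DATALEN   1412u   /* assumed per-sub-packet payload size */
#define JUMBO_HDRLEN       4u   /* assumed secondary jumbo header size */
#define JUMBO_SUBPKTLEN (JUMBO_DATALEN + JUMBO_HDRLEN)

/* Return the offset and length of sub-packet n (0-based) within the
 * payload region [base_offset, base_offset + total_len). */
static bool locate_subpacket(size_t base_offset, size_t total_len,
			     unsigned int n, bool is_last,
			     size_t *_offset, size_t *_len)
{
	size_t offset = base_offset + (size_t)n * JUMBO_SUBPKTLEN;

	if (offset >= base_offset + total_len)
		return false;                       /* no such sub-packet */

	/* Every sub-packet but the last carries a full payload; the last
	 * one gets whatever remains. */
	*_offset = offset;
	*_len = is_last ? base_offset + total_len - offset : JUMBO_DATALEN;
	return true;
}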
302 * (returns 1). If more packets are required, it returns -EAGAIN.
308 struct rxrpc_skb_priv *sp; in rxrpc_recvmsg_data() local
314 int ix, copy, ret = -EAGAIN, ret2; in rxrpc_recvmsg_data()
316 if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) && in rxrpc_recvmsg_data()
317 call->ackr_reason) in rxrpc_recvmsg_data()
320 rx_pkt_offset = call->rx_pkt_offset; in rxrpc_recvmsg_data()
321 rx_pkt_len = call->rx_pkt_len; in rxrpc_recvmsg_data()
323 if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) { in rxrpc_recvmsg_data()
324 seq = call->rx_hard_ack; in rxrpc_recvmsg_data()
330 hard_ack = call->rx_hard_ack; in rxrpc_recvmsg_data()
332 while (top = smp_load_acquire(&call->rx_top), in rxrpc_recvmsg_data()
336 skb = call->rxtx_buffer[ix]; in rxrpc_recvmsg_data()
344 sp = rxrpc_skb(skb); in rxrpc_recvmsg_data()
348 sp->hdr.serial, seq); in rxrpc_recvmsg_data()
351 sock_recv_timestamp(msg, sock->sk, skb); in rxrpc_recvmsg_data()
355 &call->rxtx_annotations[ix], in rxrpc_recvmsg_data()
368 /* We have to handle short, empty and used-up DATA packets. */ in rxrpc_recvmsg_data()
369 remain = len - *_offset; in rxrpc_recvmsg_data()
383 rx_pkt_len -= copy; in rxrpc_recvmsg_data()
396 last = sp->hdr.flags & RXRPC_LAST_PACKET; in rxrpc_recvmsg_data()
403 ASSERTCMP(seq, ==, READ_ONCE(call->rx_top)); in rxrpc_recvmsg_data()
413 call->rx_pkt_offset = rx_pkt_offset; in rxrpc_recvmsg_data()
414 call->rx_pkt_len = rx_pkt_len; in rxrpc_recvmsg_data()
419 if (ret == -EAGAIN) in rxrpc_recvmsg_data()
420 set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags); in rxrpc_recvmsg_data()
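rxrpc_recvmsg_data() is resumable: if the user buffer fills mid-packet it stashes rx_pkt_offset and rx_pkt_len in the call and returns, and the next recvmsg() carries on from that point; only when more packets are needed does it return -EAGAIN and set RXRPC_CALL_RX_UNDERRUN. The bookkeeping reduces to a cursor over the current packet, roughly as below (illustrative names, a single in-memory packet instead of a ring):

#include <string.h>
#include <stddef.h>

struct pkt_cursor {
	const unsigned char *data;  /* current packet payload */
	size_t pkt_offset;          /* how far into the payload we have copied */
	size_t pkt_len;             /* payload bytes still uncopied */
};

/* Copy up to buf_len bytes into buf. Returns the number of bytes copied;
 * the cursor remembers its position so the next call resumes where this
 * one stopped, just as recvmsg does when the user buffer fills first. */
static size_t cursor_copy(struct pkt_cursor *c, unsigned char *buf, size_t buf_len)
{
	size_t copy = c->pkt_len < buf_len ? c->pkt_len : buf_len;

	memcpy(buf, c->data + c->pkt_offset, copy);
	c->pkt_offset += copy;
	c->pkt_len -= copy;
	return copy;
}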
426 * - we need to be careful about two or more threads calling recvmsg
433 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); in rxrpc_recvmsg() local
444 return -EOPNOTSUPP; in rxrpc_recvmsg()
446 timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT); in rxrpc_recvmsg()
449 lock_sock(&rx->sk); in rxrpc_recvmsg()
452 if (RB_EMPTY_ROOT(&rx->calls) && in rxrpc_recvmsg()
453 list_empty(&rx->recvmsg_q) && in rxrpc_recvmsg()
454 rx->sk.sk_state != RXRPC_SERVER_LISTENING) { in rxrpc_recvmsg()
455 release_sock(&rx->sk); in rxrpc_recvmsg()
456 return -EAGAIN; in rxrpc_recvmsg()
459 if (list_empty(&rx->recvmsg_q)) { in rxrpc_recvmsg()
460 ret = -EWOULDBLOCK; in rxrpc_recvmsg()
466 release_sock(&rx->sk); in rxrpc_recvmsg()
469 prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait, in rxrpc_recvmsg()
471 ret = sock_error(&rx->sk); in rxrpc_recvmsg()
475 if (list_empty(&rx->recvmsg_q)) { in rxrpc_recvmsg()
482 finish_wait(sk_sleep(&rx->sk), &wait); in rxrpc_recvmsg()
489 write_lock_bh(&rx->recvmsg_lock); in rxrpc_recvmsg()
490 l = rx->recvmsg_q.next; in rxrpc_recvmsg()
493 list_del_init(&call->recvmsg_link); in rxrpc_recvmsg()
496 write_unlock_bh(&rx->recvmsg_lock); in rxrpc_recvmsg()
503 if (!mutex_trylock(&call->user_mutex)) { in rxrpc_recvmsg()
504 ret = -EWOULDBLOCK; in rxrpc_recvmsg()
507 ret = -ERESTARTSYS; in rxrpc_recvmsg()
508 if (mutex_lock_interruptible(&call->user_mutex) < 0) in rxrpc_recvmsg()
512 release_sock(&rx->sk); in rxrpc_recvmsg()
514 if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) in rxrpc_recvmsg()
517 if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { in rxrpc_recvmsg()
519 unsigned int id32 = call->user_call_ID; in rxrpc_recvmsg()
524 unsigned long idl = call->user_call_ID; in rxrpc_recvmsg()
533 if (msg->msg_name && call->peer) { in rxrpc_recvmsg()
534 struct sockaddr_rxrpc *srx = msg->msg_name; in rxrpc_recvmsg()
535 size_t len = sizeof(call->peer->srx); in rxrpc_recvmsg()
537 memcpy(msg->msg_name, &call->peer->srx, len); in rxrpc_recvmsg()
538 srx->srx_service = call->service_id; in rxrpc_recvmsg()
539 msg->msg_namelen = len; in rxrpc_recvmsg()
542 switch (READ_ONCE(call->state)) { in rxrpc_recvmsg()
544 ret = rxrpc_recvmsg_new_call(rx, call, msg, flags); in rxrpc_recvmsg()
549 ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len, in rxrpc_recvmsg()
551 if (ret == -EAGAIN) in rxrpc_recvmsg()
554 if (after(call->rx_top, call->rx_hard_ack) && in rxrpc_recvmsg()
555 call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK]) in rxrpc_recvmsg()
566 if (call->state == RXRPC_CALL_COMPLETE) { in rxrpc_recvmsg()
571 rxrpc_release_call(rx, call); in rxrpc_recvmsg()
572 msg->msg_flags |= MSG_EOR; in rxrpc_recvmsg()
577 msg->msg_flags |= MSG_MORE; in rxrpc_recvmsg()
579 msg->msg_flags &= ~MSG_MORE; in rxrpc_recvmsg()
583 mutex_unlock(&call->user_mutex); in rxrpc_recvmsg()
590 write_lock_bh(&rx->recvmsg_lock); in rxrpc_recvmsg()
591 list_add(&call->recvmsg_link, &rx->recvmsg_q); in rxrpc_recvmsg()
592 write_unlock_bh(&rx->recvmsg_lock); in rxrpc_recvmsg()
598 release_sock(&rx->sk); in rxrpc_recvmsg()
606 finish_wait(sk_sleep(&rx->sk), &wait); in rxrpc_recvmsg()
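From userspace, rxrpc_recvmsg() delivers each chunk of data tagged with the call it belongs to (an RXRPC_USER_CALL_ID control message) and marks the final chunk of a completed call with MSG_EOR. A rough sketch of the receive loop for one call, assuming the uapi definitions in <linux/rxrpc.h>; socket setup, the initial sendmsg and terminal-error handling are omitted, and the helper name and SOL_RXRPC fallback are assumptions:

#include <linux/rxrpc.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272           /* assumed value if libc doesn't define it */
#endif

static int read_reply(int fd, unsigned long expect_id)
{
	unsigned char buf[4096];
	char cbuf[256];

	for (;;) {
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		ssize_t n = recvmsg(fd, &msg, 0);
		struct cmsghdr *cmsg;
		unsigned long id = 0;

		if (n < 0)
			return -1;

		/* Find which call this chunk of data belongs to. */
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_RXRPC &&
			    cmsg->cmsg_type == RXRPC_USER_CALL_ID)
				memcpy(&id, CMSG_DATA(cmsg), sizeof(id));
		}

		if (id != expect_id)
			continue;   /* data for some other call on this socket */

		/* ... consume n bytes of reply data here ... */

		if (msg.msg_flags & MSG_EOR)
			return 0;   /* the call has completed and been released */
	}
}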
612 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
617 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
623 * and -EAGAIN if we need more data.
625 * Note that we may return -EAGAIN to drain empty packets at the end of the
638 call->debug_id, rxrpc_call_states[call->state], in rxrpc_kernel_recv_data()
641 ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING); in rxrpc_kernel_recv_data()
643 mutex_lock(&call->user_mutex); in rxrpc_kernel_recv_data()
645 switch (READ_ONCE(call->state)) { in rxrpc_kernel_recv_data()
657 * full buffer or have been given -EAGAIN. in rxrpc_kernel_recv_data()
676 ret = -EINPROGRESS; in rxrpc_kernel_recv_data()
683 switch (call->ackr_reason) { in rxrpc_kernel_recv_data()
687 if (ret != -EAGAIN) in rxrpc_kernel_recv_data()
695 *_service = call->service_id; in rxrpc_kernel_recv_data()
696 mutex_unlock(&call->user_mutex); in rxrpc_kernel_recv_data()
702 ret = -EBADMSG; in rxrpc_kernel_recv_data()
706 ret = -EMSGSIZE; in rxrpc_kernel_recv_data()
709 *_abort = call->abort_code; in rxrpc_kernel_recv_data()
710 ret = call->error; in rxrpc_kernel_recv_data()
711 if (call->completion == RXRPC_CALL_SUCCEEDED) { in rxrpc_kernel_recv_data()
714 ret = -ECONNRESET; in rxrpc_kernel_recv_data()