// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC recvmsg() implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Post a call for attention by the socket or kernel service.  Further
 * notifications are suppressed by putting recvmsg_link on a dummy queue.
 */
void rxrpc_notify_socket(struct rxrpc_call *call)
{
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%d", call->debug_id);

	if (!list_empty(&call->recvmsg_link))
		return;

	rcu_read_lock();

	rx = rcu_dereference(call->socket);
	if (rx && rx->sk.sk_state < RXRPC_CLOSE) {
		sk = &rx->sk;
		if (call->notify_rx) {
			spin_lock_bh(&call->notify_lock);
			call->notify_rx(sk, call, call->user_call_ID);
			spin_unlock_bh(&call->notify_lock);
		} else {
			write_lock_bh(&rx->recvmsg_lock);
			if (list_empty(&call->recvmsg_link)) {
				rxrpc_get_call(call, rxrpc_call_got);
				list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
			}
			write_unlock_bh(&rx->recvmsg_lock);

			if (!sock_flag(sk, SOCK_DEAD)) {
				_debug("call %ps", sk->sk_data_ready);
				sk->sk_data_ready(sk);
			}
		}
	}

	rcu_read_unlock();
	_leave("");
}

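/*
 * Usage sketch: a kernel service that wants in-softirq notification supplies
 * a notify_rx callback when it begins a call.  The callback is invoked under
 * call->notify_lock via spin_lock_bh() above, so it must not sleep; waking a
 * waiter or queueing work is typical.  The names my_call and my_wake_up()
 * below are hypothetical, not part of the rxrpc API.
 *
 *	static void my_wake_up(struct sock *sk, struct rxrpc_call *rxcall,
 *			       unsigned long user_call_ID)
 *	{
 *		struct my_call *call = (struct my_call *)user_call_ID;
 *
 *		wake_up(&call->waitq);
 *	}
 */
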
/*
 * Transition a call to the complete state.
 */
bool __rxrpc_set_call_completion(struct rxrpc_call *call,
				 enum rxrpc_call_completion compl,
				 u32 abort_code,
				 int error)
{
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->abort_code = abort_code;
		call->error = error;
		call->completion = compl;
		call->state = RXRPC_CALL_COMPLETE;
		trace_rxrpc_call_complete(call);
		wake_up(&call->waitq);
		rxrpc_notify_socket(call);
		return true;
	}
	return false;
}

bool rxrpc_set_call_completion(struct rxrpc_call *call,
			       enum rxrpc_call_completion compl,
			       u32 abort_code,
			       int error)
{
	bool ret = false;

	if (call->state < RXRPC_CALL_COMPLETE) {
		write_lock_bh(&call->state_lock);
		ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
		write_unlock_bh(&call->state_lock);
	}
	return ret;
}

/*
 * Record that a call successfully completed.
 */
bool __rxrpc_call_completed(struct rxrpc_call *call)
{
	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}

bool rxrpc_call_completed(struct rxrpc_call *call)
{
	bool ret = false;

	if (call->state < RXRPC_CALL_COMPLETE) {
		write_lock_bh(&call->state_lock);
		ret = __rxrpc_call_completed(call);
		write_unlock_bh(&call->state_lock);
	}
	return ret;
}

/*
 * Record that a call is locally aborted.
 */
bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
			rxrpc_seq_t seq, u32 abort_code, int error)
{
	trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
			  abort_code, error);
	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
					   abort_code, error);
}

bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
		      rxrpc_seq_t seq, u32 abort_code, int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Pass a call terminating message to userspace.
 */
static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
	u32 tmp = 0;
	int ret;

	switch (call->completion) {
	case RXRPC_CALL_SUCCEEDED:
		ret = 0;
		if (rxrpc_is_service_call(call))
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
		break;
	case RXRPC_CALL_REMOTELY_ABORTED:
		tmp = call->abort_code;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
		break;
	case RXRPC_CALL_LOCALLY_ABORTED:
		tmp = call->abort_code;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
		break;
	case RXRPC_CALL_NETWORK_ERROR:
		tmp = -call->error;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
		break;
	case RXRPC_CALL_LOCAL_ERROR:
		tmp = -call->error;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
		break;
	default:
		pr_err("Invalid terminal call state %u\n", call->state);
		BUG();
		break;
	}

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
			    call->rx_pkt_offset, call->rx_pkt_len, ret);
	return ret;
}

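/*
 * Userspace sketch of consuming the terminal indication: after recvmsg()
 * returns with MSG_EOR set, the outcome above arrives as a SOL_RXRPC control
 * message.  Hypothetical extract, assuming the <linux/rxrpc.h> UAPI
 * definitions and a populated struct msghdr named msg:
 *
 *	struct cmsghdr *cmsg;
 *	unsigned int code;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level != SOL_RXRPC)
 *			continue;
 *		switch (cmsg->cmsg_type) {
 *		case RXRPC_ABORT:
 *		case RXRPC_NET_ERROR:
 *		case RXRPC_LOCAL_ERROR:
 *			memcpy(&code, CMSG_DATA(cmsg), sizeof(code));
 *			break;
 *		}
 *	}
 */
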
/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
	_enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);

	trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
	ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);

	if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
				  rxrpc_propose_ack_terminal_ack);
		//rxrpc_send_ack_packet(call, false, NULL);
	}

	write_lock_bh(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		__rxrpc_call_completed(call);
		write_unlock_bh(&call->state_lock);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		call->tx_phase = true;
		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
		call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
		write_unlock_bh(&call->state_lock);
		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
				  rxrpc_propose_ack_processing_op);
		break;
	default:
		write_unlock_bh(&call->state_lock);
		break;
	}
}

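/*
 * Receive-phase transitions performed above, for reference:
 *
 *	CLIENT_RECV_REPLY   -> COMPLETE (RXRPC_CALL_SUCCEEDED)
 *	SERVER_RECV_REQUEST -> SERVER_ACK_REQUEST (the reply/Tx phase opens)
 *
 * Other states are left untouched bar dropping the lock.
 */
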
/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	bool last = false;
	u8 subpacket;
	int ix;

	_enter("%d", call->debug_id);

	hard_ack = call->rx_hard_ack;
	top = smp_load_acquire(&call->rx_top);
	ASSERT(before(hard_ack, top));

	hard_ack++;
	ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
	skb = call->rxtx_buffer[ix];
	rxrpc_see_skb(skb, rxrpc_skb_rotated);
	sp = rxrpc_skb(skb);

	subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
	serial = sp->hdr.serial + subpacket;

	if (subpacket == sp->nr_subpackets - 1 &&
	    sp->rx_flags & RXRPC_SKB_INCL_LAST)
		last = true;

	call->rxtx_buffer[ix] = NULL;
	call->rxtx_annotations[ix] = 0;
	/* Barrier against rxrpc_input_data(). */
	smp_store_release(&call->rx_hard_ack, hard_ack);

	rxrpc_free_skb(skb, rxrpc_skb_freed);

	trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
	if (last) {
		rxrpc_end_rx_phase(call, serial);
	} else {
		/* Check to see if there's an ACK that needs sending. */
		if (atomic_inc_return(&call->ackr_nr_consumed) > 2)
			rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial,
					  true, false,
					  rxrpc_propose_ack_rotate_rx);
		if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
			rxrpc_send_ack_packet(call, false, NULL);
	}
}

/*
 * Decrypt and verify a (sub)packet.  The packet's length may be changed due to
 * padding, but if this is the case, the packet length will be resident in the
 * socket buffer.  Note that we can't modify the master skb info as the skb may
 * be the home to multiple subpackets.
 */
static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
			       u8 annotation,
			       unsigned int offset, unsigned int len)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_seq_t seq = sp->hdr.seq;
	u16 cksum = sp->hdr.cksum;
	u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;

	_enter("");

	/* For all but the head jumbo subpacket, the security checksum is in a
	 * jumbo header immediately prior to the data.
	 */
	if (subpacket > 0) {
		__be16 tmp;
		if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
			BUG();
		cksum = ntohs(tmp);
		seq += subpacket;
	}

	return call->security->verify_packet(call, skb, offset, len,
					     seq, cksum);
}

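/*
 * Jumbo packet layout, as assumed by the offset - 2 read above (a sketch
 * based on struct rxrpc_jumbo_header: each subpacket carries up to
 * RXRPC_JUMBO_DATALEN bytes of data, and a 4-byte jumbo header (JH) sits
 * between subpackets, its last two bytes being the next subpacket's cksum):
 *
 *	+-------------+---------------+----+---------------+----+-----+
 *	| wire header | data          | JH | data          | JH | ... |
 *	+-------------+---------------+----+---------------+----+-----+
 */
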
/*
 * Locate the data within a packet.  This is complicated by:
 *
 * (1) An skb may contain a jumbo packet - so we have to find the appropriate
 *     subpacket.
 *
 * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
 *     contains an extra header which includes the true length of the data,
 *     excluding any encrypted padding.
 */
static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
			     u8 *_annotation,
			     unsigned int *_offset, unsigned int *_len,
			     bool *_last)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len;
	bool last = false;
	int ret;
	u8 annotation = *_annotation;
	u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;

	/* Locate the subpacket */
	offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
	len = skb->len - offset;
	if (subpacket < sp->nr_subpackets - 1)
		len = RXRPC_JUMBO_DATALEN;
	else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
		last = true;

	if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
		ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
		if (ret < 0)
			return ret;
		*_annotation |= RXRPC_RX_ANNO_VERIFIED;
	}

	*_offset = offset;
	*_len = len;
	*_last = last;
	call->security->locate_data(call, skb, _offset, _len);
	return 0;
}

/*
 * Deliver messages to a call.  This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1).  If more packets are required, it returns -EAGAIN.
 */
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
			      struct msghdr *msg, struct iov_iter *iter,
			      size_t len, int flags, size_t *_offset)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top, seq;
	size_t remain;
	bool rx_pkt_last;
	unsigned int rx_pkt_offset, rx_pkt_len;
	int ix, copy, ret = -EAGAIN, ret2;

	if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) &&
	    call->ackr_reason)
		rxrpc_send_ack_packet(call, false, NULL);

	rx_pkt_offset = call->rx_pkt_offset;
	rx_pkt_len = call->rx_pkt_len;
	rx_pkt_last = call->rx_pkt_last;

	if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
		seq = call->rx_hard_ack;
		ret = 1;
		goto done;
	}

	/* Barriers against rxrpc_input_data(). */
	hard_ack = call->rx_hard_ack;
	seq = hard_ack + 1;

	while (top = smp_load_acquire(&call->rx_top),
	       before_eq(seq, top)) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		if (!skb) {
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
					    rx_pkt_offset, rx_pkt_len, 0);
			break;
		}
		smp_rmb();
		rxrpc_see_skb(skb, rxrpc_skb_seen);
		sp = rxrpc_skb(skb);

		if (!(flags & MSG_PEEK)) {
			serial = sp->hdr.serial;
			serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
			trace_rxrpc_receive(call, rxrpc_receive_front,
					    serial, seq);
		}

		if (msg)
			sock_recv_timestamp(msg, sock->sk, skb);

		if (rx_pkt_offset == 0) {
			ret2 = rxrpc_locate_data(call, skb,
						 &call->rxtx_annotations[ix],
						 &rx_pkt_offset, &rx_pkt_len,
						 &rx_pkt_last);
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
					    rx_pkt_offset, rx_pkt_len, ret2);
			if (ret2 < 0) {
				ret = ret2;
				goto out;
			}
		} else {
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
					    rx_pkt_offset, rx_pkt_len, 0);
		}

		/* We have to handle short, empty and used-up DATA packets. */
		remain = len - *_offset;
		copy = rx_pkt_len;
		if (copy > remain)
			copy = remain;
		if (copy > 0) {
			ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
						      copy);
			if (ret2 < 0) {
				ret = ret2;
				goto out;
			}

			/* handle piecemeal consumption of data packets */
			rx_pkt_offset += copy;
			rx_pkt_len -= copy;
			*_offset += copy;
		}

		if (rx_pkt_len > 0) {
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
					    rx_pkt_offset, rx_pkt_len, 0);
			ASSERTCMP(*_offset, ==, len);
			ret = 0;
			break;
		}

		/* The whole packet has been transferred. */
		if (!(flags & MSG_PEEK))
			rxrpc_rotate_rx_window(call);
		rx_pkt_offset = 0;
		rx_pkt_len = 0;

		if (rx_pkt_last) {
			ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
			ret = 1;
			goto out;
		}

		seq++;
	}

out:
	if (!(flags & MSG_PEEK)) {
		call->rx_pkt_offset = rx_pkt_offset;
		call->rx_pkt_len = rx_pkt_len;
		call->rx_pkt_last = rx_pkt_last;
	}
done:
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
			    rx_pkt_offset, rx_pkt_len, ret);
	if (ret == -EAGAIN)
		set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags);
	return ret;
}

/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		  int flags)
{
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct list_head *l;
	size_t copied = 0;
	long timeo;
	int ret;

	DEFINE_WAIT(wait);

	trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
	lock_sock(&rx->sk);

	/* Return immediately if a client socket has no outstanding calls */
	if (RB_EMPTY_ROOT(&rx->calls) &&
	    list_empty(&rx->recvmsg_q) &&
	    rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
		release_sock(&rx->sk);
		return -EAGAIN;
	}

	if (list_empty(&rx->recvmsg_q)) {
		ret = -EWOULDBLOCK;
		if (timeo == 0) {
			call = NULL;
			goto error_no_call;
		}

		release_sock(&rx->sk);

		/* Wait for something to happen */
		prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
					  TASK_INTERRUPTIBLE);
		ret = sock_error(&rx->sk);
		if (ret)
			goto wait_error;

		if (list_empty(&rx->recvmsg_q)) {
			if (signal_pending(current))
				goto wait_interrupted;
			trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
					    0, 0, 0, 0);
			timeo = schedule_timeout(timeo);
		}
		finish_wait(sk_sleep(&rx->sk), &wait);
		goto try_again;
	}

	/* Find the next call and dequeue it if we're not just peeking.  If we
	 * do dequeue it, that comes with a ref that we will need to release.
	 */
	write_lock_bh(&rx->recvmsg_lock);
	l = rx->recvmsg_q.next;
	call = list_entry(l, struct rxrpc_call, recvmsg_link);
	if (!(flags & MSG_PEEK))
		list_del_init(&call->recvmsg_link);
	else
		rxrpc_get_call(call, rxrpc_call_got);
	write_unlock_bh(&rx->recvmsg_lock);

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

	/* We're going to drop the socket lock, so we need to lock the call
	 * against interference by sendmsg.
	 */
	if (!mutex_trylock(&call->user_mutex)) {
		ret = -EWOULDBLOCK;
		if (flags & MSG_DONTWAIT)
			goto error_requeue_call;
		ret = -ERESTARTSYS;
		if (mutex_lock_interruptible(&call->user_mutex) < 0)
			goto error_requeue_call;
	}

	release_sock(&rx->sk);

	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		if (flags & MSG_CMSG_COMPAT) {
			unsigned int id32 = call->user_call_ID;

			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned int), &id32);
		} else {
			unsigned long idl = call->user_call_ID;

			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned long), &idl);
		}
		if (ret < 0)
			goto error_unlock_call;
	}

	if (msg->msg_name && call->peer) {
		struct sockaddr_rxrpc *srx = msg->msg_name;
		size_t len = sizeof(call->peer->srx);

		memcpy(msg->msg_name, &call->peer->srx, len);
		srx->srx_service = call->service_id;
		msg->msg_namelen = len;
	}

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
		ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
					 flags, &copied);
		if (ret == -EAGAIN)
			ret = 0;

		if (after(call->rx_top, call->rx_hard_ack) &&
		    call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
			rxrpc_notify_socket(call);
		break;
	default:
		ret = 0;
		break;
	}

	if (ret < 0)
		goto error_unlock_call;

	if (call->state == RXRPC_CALL_COMPLETE) {
		ret = rxrpc_recvmsg_term(call, msg);
		if (ret < 0)
			goto error_unlock_call;
		if (!(flags & MSG_PEEK))
			rxrpc_release_call(rx, call);
		msg->msg_flags |= MSG_EOR;
		ret = 1;
	}

	if (ret == 0)
		msg->msg_flags |= MSG_MORE;
	else
		msg->msg_flags &= ~MSG_MORE;
	ret = copied;

error_unlock_call:
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
	return ret;

error_requeue_call:
	if (!(flags & MSG_PEEK)) {
		write_lock_bh(&rx->recvmsg_lock);
		list_add(&call->recvmsg_link, &rx->recvmsg_q);
		write_unlock_bh(&rx->recvmsg_lock);
		trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
	} else {
		rxrpc_put_call(call, rxrpc_call_put);
	}
error_no_call:
	release_sock(&rx->sk);
error_trace:
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
	return ret;

wait_interrupted:
	ret = sock_intr_errno(timeo);
wait_error:
	finish_wait(sk_sleep(&rx->sk), &wait);
	call = NULL;
	goto error_trace;
}

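/*
 * Userspace sketch of driving this entry point (hypothetical, with minimal
 * error handling): data is read until MSG_EOR indicates the end of the call,
 * and the user call ID comes back in a SOL_RXRPC/RXRPC_USER_CALL_ID control
 * message alongside the data.  fd names an AF_RXRPC socket.
 *
 *	char buf[4096], control[128];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= control,
 *		.msg_controllen	= sizeof(control),
 *	};
 *	ssize_t n;
 *
 *	do {
 *		n = recvmsg(fd, &msg, 0);
 *	} while (n >= 0 && !(msg.msg_flags & MSG_EOR));
 */
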
/**
 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
 * @sock: The socket that the call exists on
 * @call: The call to send data through
 * @iter: The buffer to receive into
 * @want_more: True if more data is expected to be read
 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
 * @_service: Where to store the actual service ID (may be upgraded)
 *
 * Allow a kernel service to receive data and pick up information about the
 * state of a call.  Returns 0 if got what was asked for and there's more
 * available, 1 if we got what was asked for and we're at the end of the data
 * and -EAGAIN if we need more data.
 *
 * Note that we may return -EAGAIN to drain empty packets at the end of the
 * data, even if we've already copied over the requested data.
 *
 * *_abort should also be initialised to 0.
 */
int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
			   struct iov_iter *iter,
			   bool want_more, u32 *_abort, u16 *_service)
{
	size_t offset = 0;
	int ret;

	_enter("{%d,%s},%zu,%d",
	       call->debug_id, rxrpc_call_states[call->state],
	       iov_iter_count(iter), want_more);

	ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING);

	mutex_lock(&call->user_mutex);

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
		ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
					 iov_iter_count(iter), 0,
					 &offset);
		if (ret < 0)
			goto out;

		/* We can only reach here with a partially full buffer if we
		 * have reached the end of the data.  We must otherwise have a
		 * full buffer or have been given -EAGAIN.
		 */
		if (ret == 1) {
			if (iov_iter_count(iter) > 0)
				goto short_data;
			if (!want_more)
				goto read_phase_complete;
			ret = 0;
			goto out;
		}

		if (!want_more)
			goto excess_data;
		goto out;

	case RXRPC_CALL_COMPLETE:
		goto call_complete;

	default:
		ret = -EINPROGRESS;
		goto out;
	}

read_phase_complete:
	ret = 1;
out:
	switch (call->ackr_reason) {
	case RXRPC_ACK_IDLE:
		break;
	case RXRPC_ACK_DELAY:
		if (ret != -EAGAIN)
			break;
		fallthrough;
	default:
		rxrpc_send_ack_packet(call, false, NULL);
	}

	if (_service)
		*_service = call->service_id;
	mutex_unlock(&call->user_mutex);
	_leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
	return ret;

short_data:
	trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
	ret = -EBADMSG;
	goto out;
excess_data:
	trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
	ret = -EMSGSIZE;
	goto out;
call_complete:
	*_abort = call->abort_code;
	ret = call->error;
	if (call->completion == RXRPC_CALL_SUCCEEDED) {
		ret = 1;
		if (iov_iter_count(iter) > 0)
			ret = -ECONNRESET;
	}
	goto out;
}
EXPORT_SYMBOL(rxrpc_kernel_recv_data);

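/*
 * Kernel-service usage sketch: pull a complete reply into a fixed buffer
 * (want_more is false since nothing should follow).  The names reply,
 * net->socket and rxcall stand in for whatever the service keeps; abort_code
 * must start at 0 as noted above.  Assumes the v5-era iov_iter API.
 *
 *	struct iov_iter iter;
 *	struct kvec kv = { .iov_base = &reply, .iov_len = sizeof(reply) };
 *	u32 abort_code = 0;
 *	int ret;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, sizeof(reply));
 *	ret = rxrpc_kernel_recv_data(net->socket, rxcall, &iter,
 *				     false, &abort_code, NULL);
 *	if (ret == -ECONNABORTED)
 *		pr_warn("call aborted: %u\n", abort_code);
 */
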
/**
 * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet
 * @sock: The socket that the call exists on
 * @call: The call to query
 * @_ts: Where to put the timestamp
 *
 * Retrieve the timestamp from the first DATA packet of the reply if it is
 * in the ring.  Returns true if successful, false if not.
 */
bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
				 ktime_t *_ts)
{
	struct sk_buff *skb;
	rxrpc_seq_t hard_ack, top, seq;
	bool success = false;

	mutex_lock(&call->user_mutex);

	if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
		goto out;

	hard_ack = call->rx_hard_ack;
	if (hard_ack != 0)
		goto out;

	seq = hard_ack + 1;
	top = smp_load_acquire(&call->rx_top);
	if (after(seq, top))
		goto out;

	skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
	if (!skb)
		goto out;

	*_ts = skb_get_ktime(skb);
	success = true;

out:
	mutex_unlock(&call->user_mutex);
	return success;
}
EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);

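/*
 * Usage sketch: a kernel service can anchor a server-supplied lifetime to the
 * arrival time of the reply's first DATA packet.  rxcall and duration are
 * hypothetical; sock is the service's AF_RXRPC socket.
 *
 *	ktime_t ts, expiry;
 *
 *	if (rxrpc_kernel_get_reply_time(sock, rxcall, &ts))
 *		expiry = ktime_add(ts, duration);
 */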