// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

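/*
 * Notification handler that simply discards all notifications.  It is used to
 * detach a kernel service from a preallocated call that is being discarded.
 */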
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
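	/* We're about to add one more call, so the conn and peer pools each
	 * need to hold at least one more entry than the number of calls
	 * already queued.
	 */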
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer;

		peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, with a user ID preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
	__set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 user_call_ID, rxrpc_call_new_prealloc_service);

	write_lock(&rx->call_lock);

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	call->user_call_ID = user_call_ID;
	call->notify_rx = notify_rx;
	if (user_attach_call) {
		rxrpc_get_call(call, rxrpc_call_get_kernel_service);
		user_attach_call(call, user_call_ID);
	}

	rxrpc_get_call(call, rxrpc_call_get_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock(&rxnet->call_lock);

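	/* Publish the call into the backlog ring.  The store-release here
	 * pairs with the load-acquire of call_backlog_head in
	 * rxrpc_alloc_incoming_call().
	 */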
	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EBADSLT);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Allocate the preallocation buffers for incoming service calls. These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock(&rx->incoming_lock);
	spin_unlock(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];

		rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_peer);
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];

		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];

		rxrpc_see_call(call, rxrpc_call_see_discard);
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
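			/* Detach the kernel service's notifier so that no
			 * further events are passed to it for this call.
			 */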
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct sockaddr_rxrpc *peer_srx,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	if (!b)
		return NULL;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

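	/* If the packet didn't come in on an extant connection, set one up
	 * from the preallocated pool, along with a peer record if need be.
	 */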
	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			peer->srx = *peer_srx;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn);
		conn->peer = peer;
		rxrpc_see_connection(conn, rxrpc_conn_see_new_service_conn);
		rxrpc_new_incoming_connection(rx, conn, sec, skb);
	} else {
		rxrpc_get_connection(conn, rxrpc_conn_get_service_conn);
		atomic_inc(&conn->active);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call, rxrpc_call_see_accept);
	call->local = rxrpc_get_local(conn->local, rxrpc_local_get_call);
	call->conn = conn;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
	call->dest_srx = peer->srx;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	call->tx_last_sent = ktime_get_real();
	return call;
}

/*
 * Set up a new incoming call. Called from the I/O thread.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer. Prealloc calls for userspace
 * services only have the ref from the backlog buffer.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return false.
 */
bool rxrpc_new_incoming_call(struct rxrpc_local *local,
			     struct rxrpc_peer *peer,
			     struct rxrpc_connection *conn,
			     struct sockaddr_rxrpc *peer_srx,
			     struct sk_buff *skb)
{
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call = NULL;
	struct rxrpc_sock *rx;

	_enter("");

	/* Don't set up a call for anything other than a DATA packet. */
	if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
		return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);

	read_lock(&local->services_lock);

	/* Weed out packets to services we're not offering. Packets that would
	 * begin a call are explicitly rejected and the rest are just
	 * discarded.
	 */
	rx = local->service;
	if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
		    sp->hdr.serviceId != rx->second_service)
	    ) {
		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
		    sp->hdr.seq == 1)
			goto unsupported_service;
		goto discard;
	}

	if (!conn) {
		sec = rxrpc_get_incoming_security(rx, skb);
		if (!sec)
			goto unsupported_security;
	}

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
				   RX_INVALID_OPERATION, -ESHUTDOWN);
		goto no_call;
	}

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, peer_srx,
					 skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);

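	/* If the connection is as yet unsecured, kick off the security
	 * challenge/response exchange with the client.
	 */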
	spin_lock(&conn->state_lock);
	if (conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn, rxrpc_conn_queue_challenge);
	}
	spin_unlock(&conn->state_lock);

	spin_unlock(&rx->incoming_lock);
	read_unlock(&local->services_lock);

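	/* Link the call onto the peer's error-distribution list so that it
	 * gets notified of network errors reported against that peer.
	 */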
	if (hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}

	_leave(" = %p{%d}", call, call->debug_id);
	rxrpc_input_call_event(call, skb);
	rxrpc_put_call(call, rxrpc_call_put_input);
	return true;

unsupported_service:
	read_unlock(&local->services_lock);
	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
				  RX_INVALID_OPERATION, -EOPNOTSUPP);
unsupported_security:
	read_unlock(&local->services_lock);
	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
				  RX_INVALID_OPERATION, -EKEYREJECTED);
no_call:
	spin_unlock(&rx->incoming_lock);
	read_unlock(&local->services_lock);
	_leave(" = f [%u]", skb->mark);
	return false;
discard:
	read_unlock(&local->services_lock);
	return true;
}

/*
 * Charge up the socket with a preallocated call, attaching a user call ID.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (rx->sk.sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
					  GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id));
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID. A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);