// SPDX-License-Identifier: GPL-2.0-or-later
/* connection-level event handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Retransmit terminal ACK or ABORT of the previous call.
 */
static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
				       struct sk_buff *skb,
				       unsigned int channel)
{
	struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
	struct rxrpc_channel *chan;
	struct msghdr msg;
	struct kvec iov[3];
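	/* The resent packet is either an ABORT or an ACK, so the abort code
	 * and the ACK body can share the space after the wire header.
	 */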
	struct {
		struct rxrpc_wire_header whdr;
		union {
			__be32 abort_code;
			struct rxrpc_ackpacket ack;
		};
	} __attribute__((packed)) pkt;
	struct rxrpc_ackinfo ack_info;
	size_t len;
	int ret, ioc;
	u32 serial, mtu, call_id, padding;

	_enter("%d", conn->debug_id);

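	/* An incoming PING RESPONSE ACK doesn't warrant a reply, so don't
	 * resend anything for it.
	 */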
	if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
				  &pkt.ack, sizeof(pkt.ack)) < 0)
			return;
		if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
			return;
	}

	chan = &conn->channels[channel];

	/* If the last call got moved on whilst we were waiting to run, just
	 * ignore this packet.
	 */
	call_id = READ_ONCE(chan->last_call);
	/* Sync with __rxrpc_disconnect_call() */
	smp_rmb();
	if (skb && call_id != sp->hdr.callNumber)
		return;

	msg.msg_name = &conn->params.peer->srx.transport;
	msg.msg_namelen = conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

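	/* iov[0] covers the wire header plus whichever body follows it;
	 * iov[1] and iov[2] supply the 3 bytes of padding and the ACK
	 * trailer and are only transmitted for an ACK.
	 */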
	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt.whdr);
	iov[1].iov_base = &padding;
	iov[1].iov_len = 3;
	iov[2].iov_base = &ack_info;
	iov[2].iov_len = sizeof(ack_info);

	pkt.whdr.epoch = htonl(conn->proto.epoch);
	pkt.whdr.cid = htonl(conn->proto.cid | channel);
	pkt.whdr.callNumber = htonl(call_id);
	pkt.whdr.seq = 0;
	pkt.whdr.type = chan->last_type;
	pkt.whdr.flags = conn->out_clientflag;
	pkt.whdr.userStatus = 0;
	pkt.whdr.securityIndex = conn->security_ix;
	pkt.whdr._rsvd = 0;
	pkt.whdr.serviceId = htons(conn->service_id);

	len = sizeof(pkt.whdr);
	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		pkt.abort_code = htonl(chan->last_abort);
		iov[0].iov_len += sizeof(pkt.abort_code);
		len += sizeof(pkt.abort_code);
		ioc = 1;
		break;

	case RXRPC_PACKET_TYPE_ACK:
		mtu = conn->params.peer->if_mtu;
		mtu -= conn->params.peer->hdrsize;
		pkt.ack.bufferSpace = 0;
		pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
		pkt.ack.firstPacket = htonl(chan->last_seq + 1);
		pkt.ack.previousPacket = htonl(chan->last_seq);
		pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
		pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
		pkt.ack.nAcks = 0;
		ack_info.rxMTU = htonl(rxrpc_rx_mtu);
		ack_info.maxMTU = htonl(mtu);
		ack_info.rwind = htonl(rxrpc_rx_window_size);
		ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
		pkt.whdr.flags |= RXRPC_SLOW_START_OK;
		padding = 0;
		iov[0].iov_len += sizeof(pkt.ack);
		len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
		ioc = 3;
		break;

	default:
		return;
	}

	/* Resync with __rxrpc_disconnect_call() and check that the last call
	 * didn't get advanced whilst we were filling out the packets.
	 */
	smp_rmb();
	if (READ_ONCE(chan->last_call) != call_id)
		return;

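	/* Allocate the next outgoing serial number on this connection for the
	 * retransmission.
	 */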
	serial = atomic_inc_return(&conn->serial);
	pkt.whdr.serial = htonl(serial);

	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		_proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
		break;
	case RXRPC_PACKET_TYPE_ACK:
		trace_rxrpc_tx_ack(chan->call_debug_id, serial,
				   ntohl(pkt.ack.firstPacket),
				   ntohl(pkt.ack.serial),
				   pkt.ack.reason, 0);
		_proto("Tx ACK %%%u [re]", serial);
		break;
	}

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
				    rxrpc_tx_point_call_final_resend);
	else
		trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_final_resend);

	_leave("");
}

/*
 * pass a connection-level abort onto all calls on that connection
 */
static void rxrpc_abort_calls(struct rxrpc_connection *conn,
			      enum rxrpc_call_completion compl,
			      rxrpc_serial_t serial)
{
	struct rxrpc_call *call;
	int i;

	_enter("{%d},%x", conn->debug_id, conn->abort_code);

	spin_lock(&conn->bundle->channel_lock);

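	/* Sweep all the channels on this connection and complete any call
	 * still attached to each one with the abort.
	 */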
	for (i = 0; i < RXRPC_MAXCALLS; i++) {
		call = rcu_dereference_protected(
			conn->channels[i].call,
			lockdep_is_held(&conn->bundle->channel_lock));
		if (call) {
			if (compl == RXRPC_CALL_LOCALLY_ABORTED)
				trace_rxrpc_abort(call->debug_id,
						  "CON", call->cid,
						  call->call_id, 0,
						  conn->abort_code,
						  conn->error);
			else
				trace_rxrpc_rx_abort(call, serial,
						     conn->abort_code);
			rxrpc_set_call_completion(call, compl,
						  conn->abort_code,
						  conn->error);
		}
	}

	spin_unlock(&conn->bundle->channel_lock);
	_leave("");
}

/*
 * generate a connection-level abort
 */
static int rxrpc_abort_connection(struct rxrpc_connection *conn,
				  int error, u32 abort_code)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	__be32 word;
	size_t len;
	u32 serial;
	int ret;

	_enter("%d,,%u,%u", conn->debug_id, error, abort_code);

	/* generate a connection-level abort */
	spin_lock_bh(&conn->state_lock);
	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		spin_unlock_bh(&conn->state_lock);
		_leave(" = 0 [already dead]");
		return 0;
	}

	conn->error = error;
	conn->abort_code = abort_code;
	conn->state = RXRPC_CONN_LOCALLY_ABORTED;
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	spin_unlock_bh(&conn->state_lock);

	msg.msg_name = &conn->params.peer->srx.transport;
	msg.msg_namelen = conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	whdr.epoch = htonl(conn->proto.epoch);
	whdr.cid = htonl(conn->proto.cid);
	whdr.callNumber = 0;
	whdr.seq = 0;
	whdr.type = RXRPC_PACKET_TYPE_ABORT;
	whdr.flags = conn->out_clientflag;
	whdr.userStatus = 0;
	whdr.securityIndex = conn->security_ix;
	whdr._rsvd = 0;
	whdr.serviceId = htons(conn->service_id);

	word = htonl(conn->abort_code);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &word;
	iov[1].iov_len = sizeof(word);

	len = iov[0].iov_len + iov[1].iov_len;

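	/* Take the next outgoing serial and use it to complete the attached
	 * calls before the ABORT packet itself goes out.
	 */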
	serial = atomic_inc_return(&conn->serial);
	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
	whdr.serial = htonl(serial);
	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	if (ret < 0) {
		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
				    rxrpc_tx_point_conn_abort);
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);

	conn->params.peer->last_tx_at = ktime_get_seconds();

	_leave(" = 0");
	return 0;
}

/*
 * mark a call as being on a now-secured channel
 * - must be called with BH's disabled.
 */
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
	_enter("%p", call);
	if (call) {
		write_lock_bh(&call->state_lock);
		if (call->state == RXRPC_CALL_SERVER_SECURING) {
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			rxrpc_notify_socket(call);
		}
		write_unlock_bh(&call->state_lock);
	}
}

/*
 * connection-level Rx packet processor
 */
static int rxrpc_process_event(struct rxrpc_connection *conn,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code;
	int loop, ret;

	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		_leave(" = -ECONNABORTED [%u]", conn->state);
		return -ECONNABORTED;
	}

	_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
	case RXRPC_PACKET_TYPE_ACK:
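		/* DATA or ACK traffic routed here refers to a call that has
		 * already completed on its channel, so resend that channel's
		 * terminal ACK or ABORT.
		 */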
		rxrpc_conn_retransmit_call(conn, skb,
					   sp->hdr.cid & RXRPC_CHANNELMASK);
		return 0;

	case RXRPC_PACKET_TYPE_BUSY:
		/* Just ignore BUSY packets for now. */
		return 0;

	case RXRPC_PACKET_TYPE_ABORT:
		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
				  &wtmp, sizeof(wtmp)) < 0) {
			trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
					      tracepoint_string("bad_abort"));
			return -EPROTO;
		}
		abort_code = ntohl(wtmp);
		_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);

		conn->error = -ECONNABORTED;
		conn->abort_code = abort_code;
		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
		set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
		return -ECONNABORTED;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		return conn->security->respond_to_challenge(conn, skb,
							    _abort_code);

	case RXRPC_PACKET_TYPE_RESPONSE:
		ret = conn->security->verify_response(conn, skb, _abort_code);
		if (ret < 0)
			return ret;

		ret = conn->security->init_connection_security(
			conn, conn->params.key->payload.data[0]);
		if (ret < 0)
			return ret;

		spin_lock(&conn->bundle->channel_lock);
		spin_lock_bh(&conn->state_lock);

		if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
			conn->state = RXRPC_CONN_SERVICE;
			spin_unlock_bh(&conn->state_lock);
			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
				rxrpc_call_is_secure(
					rcu_dereference_protected(
						conn->channels[loop].call,
						lockdep_is_held(&conn->bundle->channel_lock)));
		} else {
			spin_unlock_bh(&conn->state_lock);
		}

		spin_unlock(&conn->bundle->channel_lock);
		return 0;

	default:
		trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
				      tracepoint_string("bad_conn_pkt"));
		return -EPROTO;
	}
}

/*
 * set up security and issue a challenge
 */
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
{
	u32 abort_code;
	int ret;

	_enter("{%d}", conn->debug_id);

	ASSERT(conn->security_ix != 0);

	if (conn->security->issue_challenge(conn) < 0) {
		abort_code = RX_CALL_DEAD;
		ret = -ENOMEM;
		goto abort;
	}

	_leave("");
	return;

abort:
	_debug("abort %d, %d", ret, abort_code);
	rxrpc_abort_connection(conn, ret, abort_code);
	_leave(" [aborted]");
}

/*
 * Process delayed final ACKs that we haven't subsumed into a subsequent call.
 */
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn, bool force)
{
	unsigned long j = jiffies, next_j;
	unsigned int channel;
	bool set;

again:
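	/* Start with an expiry time far in the future; any channel that still
	 * has a final ACK pending will pull it forward.
	 */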
	next_j = j + LONG_MAX;
	set = false;
	for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
		struct rxrpc_channel *chan = &conn->channels[channel];
		unsigned long ack_at;

		if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
			continue;

		smp_rmb(); /* vs rxrpc_disconnect_client_call */
		ack_at = READ_ONCE(chan->final_ack_at);

		if (time_before(j, ack_at) && !force) {
			if (time_before(ack_at, next_j)) {
				next_j = ack_at;
				set = true;
			}
			continue;
		}

		if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
				       &conn->flags))
			rxrpc_conn_retransmit_call(conn, NULL, channel);
	}

	j = jiffies;
	if (time_before_eq(next_j, j))
		goto again;
	if (set)
		rxrpc_reduce_conn_timer(conn, next_j);
}

/*
 * connection-level event processor
 */
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
{
	struct sk_buff *skb;
	u32 abort_code = RX_PROTOCOL_ERROR;
	int ret;

	if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
		rxrpc_secure_connection(conn);

	/* Process delayed ACKs whose time has come. */
	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, false);

	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_seen);
		ret = rxrpc_process_event(conn, skb, &abort_code);
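		/* Transient failures (-ENOMEM/-EAGAIN) requeue the packet for
		 * another go; protocol and key errors abort the connection;
		 * anything else just discards the packet.
		 */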
		switch (ret) {
		case -EPROTO:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
			goto protocol_error;
		case -ENOMEM:
		case -EAGAIN:
			goto requeue_and_leave;
		case -ECONNABORTED:
		default:
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			break;
		}
	}

	return;

requeue_and_leave:
	skb_queue_head(&conn->rx_queue, skb);
	return;

protocol_error:
	if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
		goto requeue_and_leave;
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	return;
}

void rxrpc_process_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, processor);

	rxrpc_see_connection(conn);

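	/* Only do the work if the local endpoint is still in use; either way,
	 * drop the connection ref that queued this work item.
	 */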
	if (__rxrpc_use_local(conn->params.local)) {
		rxrpc_do_process_connection(conn);
		rxrpc_unuse_local(conn->params.local);
	}

	rxrpc_put_connection(conn);
	_leave("");
	return;
}