/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>

#include "rds.h"

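/*
 * Initialize an incoming message: take an initial reference, record the
 * owning connection and source address, and clear the RDMA cookie.
 */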
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr)
{
	atomic_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = conn;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);

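/* Take a reference on an incoming message while it sits on a recv queue. */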
static void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
	atomic_inc(&inc->i_refcount);
}

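/*
 * Drop a reference; once the last reference is gone the message must no
 * longer be on any receive queue and the transport frees it.
 */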
void rds_inc_put(struct rds_incoming *inc)
{
	rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
	if (atomic_dec_and_test(&inc->i_refcount)) {
		BUG_ON(!list_empty(&inc->i_item));

		inc->i_conn->c_trans->inc_free(inc);
	}
}
EXPORT_SYMBOL_GPL(rds_inc_put);

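/*
 * Account a change in the number of bytes queued on a socket's receive
 * queue and update the congestion map when the socket crosses into or
 * out of the congested state.  Uncongesting requires the queue to drain
 * below half of the receive buffer so the congestion bit doesn't flap.
 */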
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
				  struct rds_cong_map *map,
				  int delta, __be16 port)
{
	int now_congested;

	if (delta == 0)
		return;

	rs->rs_rcv_bytes += delta;

	/* loop transport doesn't send/recv congestion updates */
	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
		return;

	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
	  "now_cong %d delta %d\n",
	  rs, &rs->rs_bound_addr,
	  ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
	  rds_sk_rcvbuf(rs), now_congested, delta);

	/* wasn't -> am congested */
	if (!rs->rs_congested && now_congested) {
		rs->rs_congested = 1;
		rds_cong_set_bit(map, port);
		rds_cong_queue_updates(map);
	}
	/* was -> aren't congested */
	/* Require more free space before reporting uncongested to prevent
	   bouncing cong/uncong state too often */
	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
		rs->rs_congested = 0;
		rds_cong_clear_bit(map, port);
		rds_cong_queue_updates(map);
	}

	/* do nothing if no change in cong state */
}

/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
	struct rds_header *hdr = &inc->i_hdr;
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		struct rds_ext_header_rdma rdma;
		struct rds_ext_header_rdma_dest rdma_dest;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_RDMA:
			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
			break;

		case RDS_EXTHDR_RDMA_DEST:
			/* We ignore the size for now. We could stash it
			 * somewhere and use it for error checking. */
			inc->i_rdma_cookie = rds_rdma_make_cookie(
					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));

			break;
		}
	}
}

/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow, which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting them from the
 * conn.  This lets loopback, which has only one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
		 "flags 0x%x rx_jiffies %lu\n", conn,
		 (unsigned long long)conn->c_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);

	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
	if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		rds_stats_inc(s_recv_ping);
		rds_send_pong(conn, inc->i_hdr.h_sport);
		goto out;
	}

	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		rds_inc_addref(inc);
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);

/*
 * Be very careful here.  This is called as the condition in
 * wait_event_*() and so must cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
	unsigned long flags;

	if (!*inc) {
		read_lock_irqsave(&rs->rs_recv_lock, flags);
		if (!list_empty(&rs->rs_recv_queue)) {
			*inc = list_entry(rs->rs_recv_queue.next,
					  struct rds_incoming,
					  i_item);
			rds_inc_addref(*inc);
		}
		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
	}

	return *inc != NULL;
}

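/*
 * Returns whether inc is still on the socket's receive queue.  If drop
 * is set and it is, it is removed and its queue reference dropped, with
 * the receive buffer accounting adjusted to match.
 */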
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
			    int drop)
{
	struct sock *sk = rds_rs_to_sk(rs);
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!list_empty(&inc->i_item)) {
		ret = 1;
		if (drop) {
			/* XXX make sure this i_conn is reliable */
			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
					      -be32_to_cpu(inc->i_hdr.h_len),
					      inc->i_hdr.h_dport);
			list_del_init(&inc->i_item);
			rds_inc_put(inc);
		}
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
	return ret;
}

/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
	struct rds_notifier *notifier;
	struct rds_rdma_notify cmsg;
	unsigned int count = 0, max_messages = ~0U;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

	memset(&cmsg, 0, sizeof(cmsg));	/* fill holes with zero */

	/* put_cmsg copies to user space and thus may sleep. We can't do this
	 * with rs_lock held, so first grab as many notifications as we can stuff
	 * in the user provided cmsg buffer. We don't try to copy more, to avoid
	 * losing notifications - except when the buffer is so small that it wouldn't
	 * even hold a single notification. Then we give the caller as much of this
	 * single msg as we can squeeze in, and set MSG_CTRUNC.
	 */
	if (msghdr) {
		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
		if (!max_messages)
			max_messages = 1;
	}

	spin_lock_irqsave(&rs->rs_lock, flags);
	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
		notifier = list_entry(rs->rs_notify_queue.next,
				struct rds_notifier, n_list);
		list_move(&notifier->n_list, &copy);
		count++;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (!count)
		return 0;

	while (!list_empty(&copy)) {
		notifier = list_entry(copy.next, struct rds_notifier, n_list);

		if (msghdr) {
			cmsg.user_token = notifier->n_user_token;
			cmsg.status = notifier->n_status;

			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
				       sizeof(cmsg), &cmsg);
			if (err)
				break;
		}

		list_del_init(&notifier->n_list);
		kfree(notifier);
	}

	/* If we bailed out because of an error in put_cmsg,
	 * we may be left with one or more notifications that we
	 * didn't process. Return them to the head of the list. */
	if (!list_empty(&copy)) {
		spin_lock_irqsave(&rs->rs_lock, flags);
		list_splice(&copy, &rs->rs_notify_queue);
		spin_unlock_irqrestore(&rs->rs_lock, flags);
	}

	return err;
}

/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
	uint64_t notify = rs->rs_cong_notify;
	unsigned long flags;
	int err;

	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
			sizeof(notify), &notify);
	if (err)
		return err;

	spin_lock_irqsave(&rs->rs_lock, flags);
	rs->rs_cong_notify &= ~notify;
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	return 0;
}

/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg)
{
	int ret = 0;

	if (inc->i_rdma_cookie) {
		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
				sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
		if (ret)
			return ret;
	}

	return 0;
}

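/*
 * Loop until we have something to return to the caller: queued RDMA or
 * congestion notifications take priority, then the next incoming
 * message; otherwise block (honouring MSG_DONTWAIT and the receive
 * timeout) until one of those appears.
 */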
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	long timeo;
	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct rds_incoming *inc = NULL;

	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too... */
	timeo = sock_rcvtimeo(sk, nonblock);

	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

	if (msg_flags & MSG_OOB)
		goto out;

	while (1) {
		struct iov_iter save;
		/* If there are pending notifications, do those - and nothing else */
		if (!list_empty(&rs->rs_notify_queue)) {
			ret = rds_notify_queue_get(rs, msg);
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rds_notify_cong(rs, msg);
			break;
		}

		if (!rds_next_incoming(rs, &inc)) {
			if (nonblock) {
				ret = -EAGAIN;
				break;
			}

			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					(!list_empty(&rs->rs_notify_queue) ||
					 rs->rs_cong_notify ||
					 rds_next_incoming(rs, &inc)), timeo);
			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
				 timeo);
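			/* A positive return (or MAX_SCHEDULE_TIMEOUT when no
			 * timeout was set) means we were woken by an event,
			 * so recheck the queues.  Zero means the timeout
			 * expired; negative is -ERESTARTSYS from a signal.
			 */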
			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
				continue;

			ret = timeo;
			if (ret == 0)
				ret = -ETIMEDOUT;
			break;
		}

		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
			 &inc->i_conn->c_faddr,
			 ntohs(inc->i_hdr.h_sport));
		save = msg->msg_iter;
		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
		if (ret < 0)
			break;

		/*
		 * if the message we just copied isn't at the head of the
		 * recv queue then someone else raced us to return it, try
		 * to get the next message.
		 */
		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rds_inc_put(inc);
			inc = NULL;
			rds_stats_inc(s_recv_deliver_raced);
			msg->msg_iter = save;
			continue;
		}

		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = be32_to_cpu(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rds_cmsg_recv(inc, msg)) {
			ret = -EFAULT;
			break;
		}

		rds_stats_inc(s_recv_delivered);

		if (sin) {
			sin->sin_family = AF_INET;
			sin->sin_port = inc->i_hdr.h_sport;
			sin->sin_addr.s_addr = inc->i_saddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			msg->msg_namelen = sizeof(*sin);
		}
		break;
	}

	if (inc)
		rds_inc_put(inc);

out:
	return ret;
}

/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
	struct sock *sk = rds_rs_to_sk(rs);
	struct rds_incoming *inc, *tmp;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      -be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		list_del_init(&inc->i_item);
		rds_inc_put(inc);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}

/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip)
{
	struct rds_info_message minfo;

	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo.len = be32_to_cpu(inc->i_hdr.h_len);

	if (flip) {
		minfo.laddr = daddr;
		minfo.faddr = saddr;
		minfo.lport = inc->i_hdr.h_dport;
		minfo.fport = inc->i_hdr.h_sport;
	} else {
		minfo.laddr = saddr;
		minfo.faddr = daddr;
		minfo.lport = inc->i_hdr.h_sport;
		minfo.fport = inc->i_hdr.h_dport;
	}

	minfo.flags = 0;

	rds_info_copy(iter, &minfo, sizeof(minfo));
}