#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | min)
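/*
 * Example (an illustrative sketch, not part of the original header):
 * composing and decomposing a protocol version with the macros above.
 *
 *	unsigned int v = RDS_PROTOCOL(3, 1);	evaluates to 0x0301
 *	RDS_PROTOCOL_MAJOR(v)			evaluates to 3
 *	RDS_PROTOCOL_MINOR(v)			evaluates to 1
 */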

/*
 * XXX randomly chosen, but at least seems to be unused:
 * #               18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
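/*
 * Example (sketch only): ceil() rounds a division up, e.g. when working out
 * how many fixed-size chunks a byte count occupies.
 *
 *	ceil(5000, 4096)	evaluates to 2
 *	ceil(8192, 4096)	evaluates to 2
 */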

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
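/*
 * The map carries one bit per 16-bit destination port: 65536 bits (8192
 * bytes), spread over RDS_CONG_MAP_PAGES pages of RDS_CONG_MAP_PAGE_BITS
 * bits each.  Sketch of the page/bit arithmetic, purely as an illustration
 * (see cong.c for the real accessors):
 *
 *	unsigned long i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 *	unsigned long off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 *	... set/test bit 'off' in the page at map->m_page_addrs[i] ...
 */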


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define	RDS_MPATH_WORKERS	8
#define	RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))
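/*
 * Sketch (illustration only): hashing a socket's bound port onto one of the
 * connection's paths.
 *
 *	int hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
 *	struct rds_conn_path *cp = &conn->c_path[hash];
 */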

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;		/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_outgoing:1,
				cp_pad_to_32:31;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_ping_triggered:1,
				c_pad_to_32:30;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	c_path[RDS_MPATH_WORKERS];
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it returns a pong message without the additional control information,
 * which tells the sender that the peer is running an older revision.
 */
#define RDS_FLAG_PROBE_PORT	1
#define	RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
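/*
 * Illustrative check (a sketch, not original code): an incoming header is a
 * handshake probe when exactly one of its ports is the reserved probe port
 * and the other is zero.
 *
 *	if (RDS_HS_PROBE(be16_to_cpu(hdr->h_sport), be16_to_cpu(hdr->h_dport)))
 *		... exchange supported-path information ...
 */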
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	4

#define __RDS_EXTHDR_MAX	16 /* for now */

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
};

struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
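/*
 * Example round trip (sketch only): the cookie packs the 32-bit R_Key into
 * the low half and the byte offset into the high half of a u64.
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(0x1234, 64);
 *	rds_rdma_cookie_key(c)		evaluates to 0x1234
 *	rds_rdma_cookie_offset(c)	evaluates to 64
 */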

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/**
 * struct rds_transport -  transport-specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns, rds_recv_incoming() may no longer be called on
 *                 the connection.  This will only be called once after
 *                 conn_connect returns non-zero success.  The caller
 *                 serializes this with the send and connecting paths
 *                 (xmit_* and conn_*).  The transport is responsible for
 *                 other serialization, including rds_recv_incoming().
 *                 This is called in process context but should try hard
 *                 not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
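/*
 * Rough sketch of how a transport wires itself up (illustration only; the
 * my_* names are hypothetical, see the in-tree transports such as loop.c
 * for real examples):
 *
 *	static struct rds_transport my_transport = {
 *		.t_name			= "mytrans",
 *		.t_owner		= THIS_MODULE,
 *		.laddr_check		= my_laddr_check,
 *		.conn_alloc		= my_conn_alloc,
 *		.conn_free		= my_conn_free,
 *		.conn_path_connect	= my_conn_path_connect,
 *		.conn_path_shutdown	= my_conn_path_shutdown,
 *		.xmit			= my_xmit,
 *		.recv_path		= my_recv_path,
 *	};
 *	rds_trans_register(&my_transport);
 */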

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport    *rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
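/*
 * Worked example (illustration only): if an application sets SO_SNDBUF to
 * 128KB, the socket core stores 256KB in sk_sndbuf; rds_sk_sndbuf() halves
 * that back to the 128KB of payload the application asked for.
 */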

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
			       struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len);
__printf(2, 3)
void __rds_conn_error(struct rds_connection *conn, const char *, ...);
#define rds_conn_error(conn, fmt...) \
	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
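/*
 * Typical usage pattern (a sketch, not original code): atomically claim the
 * CONNECTING state before kicking off a connection attempt, so only one
 * caller wins the race.
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
 */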

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
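/*
 * Sketch of intended use (illustration only): the sender stamps the header
 * once it is fully populated, the receiver verifies before trusting it.
 *
 *	rds_message_make_checksum(&rm->m_inc.i_hdr);		(send side)
 *	if (!rds_message_verify_checksum(&inc->i_hdr))		(receive side)
 *		rds_stats_inc(s_recv_drop_bad_checksum);
 */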


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user);
#define rds_page_copy_to_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 1)
#define rds_page_copy_from_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 0)
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
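/*
 * Example (sketch): bumping per-CPU counters from anywhere in the RDS core.
 *
 *	rds_stats_inc(s_send_queued);
 *	rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 */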
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif