#ifndef _RDS_IW_H
#define _RDS_IW_H

#include <linux/interrupt.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FASTREG_SIZE		20
#define RDS_FASTREG_POOL_SIZE		2048

#define RDS_IW_MAX_SGE			8
#define RDS_IW_RECV_SGE			2

#define RDS_IW_DEFAULT_RECV_WR		1024
#define RDS_IW_DEFAULT_SEND_WR		256

#define RDS_IW_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

extern struct list_head rds_iw_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
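
/*
 * Worked example (sizes are illustrative, not asserted by this header):
 * with 64KB pages and a 4KB RDS_FRAG_SIZE,
 * RDS_PAGE_LAST_OFF = ((65536 / 4096) - 1) * 4096 = 61440, so the last
 * full fragment starts 4KB before the end of the page. With 4KB pages
 * and 4KB fragments it is 0: each page holds exactly one fragment.
 */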
30  struct rds_page_frag {
31  	struct list_head	f_item;
32  	struct page		*f_page;
33  	unsigned long		f_offset;
34  	dma_addr_t 		f_mapped;
35  };
36  
37  struct rds_iw_incoming {
38  	struct list_head	ii_frags;
39  	struct rds_incoming	ii_inc;
40  };
41  
42  struct rds_iw_connect_private {
43  	/* Add new fields at the end, and don't permute existing fields. */
44  	__be32			dp_saddr;
45  	__be32			dp_daddr;
46  	u8			dp_protocol_major;
47  	u8			dp_protocol_minor;
48  	__be16			dp_protocol_minor_mask; /* bitmask */
49  	__be32			dp_reserved1;
50  	__be64			dp_ack_seq;
51  	__be32			dp_credit;		/* non-zero enables flow ctl */
52  };
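
/*
 * Example: selecting the highest minor protocol version shared with the
 * peer, given the dp_protocol_minor_mask above. A minimal sketch assuming
 * bit n set means "minor n supported"; the helper name is hypothetical,
 * not part of this header:
 *
 *	static u8 rds_iw_common_minor(__be16 peer_minor_mask)
 *	{
 *		u16 common = be16_to_cpu(peer_minor_mask) &
 *			     RDS_IW_SUPPORTED_PROTOCOLS;
 *
 *		return common ? fls(common) - 1 : 0;
 *	}
 */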

struct rds_iw_scatterlist {
	struct scatterlist	*list;
	unsigned int		len;
	int			dma_len;
	unsigned int		dma_npages;
	unsigned int		bytes;
};

struct rds_iw_mapping {
	spinlock_t		m_lock;	/* protect the mapping struct */
	struct list_head	m_list;
	struct rds_iw_mr	*m_mr;
	uint32_t		m_rkey;
	struct rds_iw_scatterlist m_sg;
};

struct rds_iw_send_work {
	struct rds_message	*s_rm;

	/* We should really put these into a union: */
	struct rm_rdma_op	*s_op;
	struct rds_iw_mapping	*s_mapping;
	struct ib_mr		*s_mr;
	struct ib_fast_reg_page_list *s_page_list;
	unsigned char		s_remap_count;

	struct ib_send_wr	s_wr;
	struct ib_sge		s_sge[RDS_IW_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_iw_recv_work {
	struct rds_iw_incoming	*r_iwinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_iw_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

struct rds_iw_device;

struct rds_iw_connection {

	struct list_head	iw_node;
	struct rds_iw_device	*rds_iwdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_mr		*i_mr;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;

	/* tx */
	struct rds_iw_work_ring	i_send_ring;
	struct rds_message	*i_rm;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_iw_send_work *i_sends;

	/* rx */
	struct tasklet_struct	i_recv_tasklet;
	struct mutex		i_recv_mutex;
	struct rds_iw_work_ring	i_recv_ring;
	struct rds_iw_incoming	*i_iwinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_iw_recv_work *i_recvs;
	struct rds_page_frag	i_frag;
	u64			i_ack_recv;	/* last ACK received */

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */
	unsigned int		i_dma_local_lkey:1;
	unsigned int		i_fastreg_posted:1; /* fastreg posted on this connection */
	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
	long			i_unsignaled_bytes;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
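
/*
 * Example: atomically consuming one send credit while leaving the posted
 * half of the word untouched - a minimal sketch of the cmpxchg pattern
 * described in the comment above; the helper name is hypothetical:
 *
 *	static int rds_iw_take_send_credit(struct rds_iw_connection *ic)
 *	{
 *		unsigned int oldval, newval;
 *
 *		do {
 *			oldval = atomic_read(&ic->i_credits);
 *			if (IB_GET_SEND_CREDITS(oldval) == 0)
 *				return -EAGAIN;
 *			newval = oldval - 1;
 *		} while (atomic_cmpxchg(&ic->i_credits, oldval,
 *					newval) != oldval);
 *		return 0;
 *	}
 */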

struct rds_iw_cm_id {
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
};

struct rds_iw_device {
	struct list_head	list;
	struct list_head	cm_id_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct ib_mr		*mr;
	struct rds_iw_mr_pool	*mr_pool;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		dma_local_lkey:1;
	spinlock_t		spinlock;	/* protect the above */
};

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IW_ACK_WR_ID	((u64)0xffffffffffffffffULL)
#define RDS_IW_FAST_REG_WR_ID	((u64)0xefefefefefefefefULL)
#define RDS_IW_LOCAL_INV_WR_ID	((u64)0xdfdfdfdfdfdfdfdfULL)
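
/*
 * Example: a send completion handler can use these sentinel wr_ids to tell
 * ACK and fastreg completions apart from ordinary ring entries. A minimal
 * sketch; the dispatch below is illustrative, not the actual ib_send.c
 * logic:
 *
 *	static void rds_iw_handle_send_wc(struct rds_iw_connection *ic,
 *					  struct ib_wc *wc)
 *	{
 *		if (wc->wr_id == RDS_IW_ACK_WR_ID) {
 *			rds_iw_ack_send_complete(ic);
 *			return;
 *		}
 *		if (wc->wr_id == RDS_IW_FAST_REG_WR_ID) {
 *			ic->i_fastreg_posted = 1;
 *			return;
 *		}
 *		(otherwise: retire the send ring entry named by wc->wr_id)
 *	}
 */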

struct rds_iw_statistics {
	uint64_t	s_iw_connect_raced;
	uint64_t	s_iw_listen_closed_stale;
	uint64_t	s_iw_tx_cq_call;
	uint64_t	s_iw_tx_cq_event;
	uint64_t	s_iw_tx_ring_full;
	uint64_t	s_iw_tx_throttle;
	uint64_t	s_iw_tx_sg_mapping_failure;
	uint64_t	s_iw_tx_stalled;
	uint64_t	s_iw_tx_credit_updates;
	uint64_t	s_iw_rx_cq_call;
	uint64_t	s_iw_rx_cq_event;
	uint64_t	s_iw_rx_ring_empty;
	uint64_t	s_iw_rx_refill_from_cq;
	uint64_t	s_iw_rx_refill_from_thread;
	uint64_t	s_iw_rx_alloc_limit;
	uint64_t	s_iw_rx_credit_updates;
	uint64_t	s_iw_ack_sent;
	uint64_t	s_iw_ack_send_failure;
	uint64_t	s_iw_ack_send_delayed;
	uint64_t	s_iw_ack_send_piggybacked;
	uint64_t	s_iw_ack_received;
	uint64_t	s_iw_rdma_mr_alloc;
	uint64_t	s_iw_rdma_mr_free;
	uint64_t	s_iw_rdma_mr_used;
	uint64_t	s_iw_rdma_mr_pool_flush;
	uint64_t	s_iw_rdma_mr_pool_wait;
	uint64_t	s_iw_rdma_mr_pool_depleted;
};

extern struct workqueue_struct *rds_iw_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_iw_dma_sync_sg_for_cpu

static inline void rds_iw_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_iw_dma_sync_sg_for_device

static inline u32 rds_iw_local_dma_lkey(struct rds_iw_connection *ic)
{
	return ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey;
}

/* ib.c */
extern struct rds_transport rds_iw_transport;
extern struct ib_client rds_iw_client;

extern unsigned int fastreg_pool_size;
extern unsigned int fastreg_message_size;

extern spinlock_t iw_nodev_conns_lock;
extern struct list_head iw_nodev_conns;

/* ib_cm.c */
int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_iw_conn_free(void *arg);
int rds_iw_conn_connect(struct rds_connection *conn);
void rds_iw_conn_shutdown(struct rds_connection *conn);
void rds_iw_state_change(struct sock *sk);
int rds_iw_listen_init(void);
void rds_iw_listen_stop(void);
void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_iw_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_iw_conn_error(conn, fmt...) \
	__rds_iw_conn_error(conn, KERN_WARNING "RDS/IW: " fmt)
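
/*
 * Example usage of the macro above (the format string and the source of
 * the status value are illustrative):
 *
 *	rds_iw_conn_error(conn, "send completion had status %u\n",
 *			  wc->status);
 */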

/* ib_rdma.c */
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_iw_destroy_nodev_conns(void)
{
	__rds_iw_destroy_conns(&iw_nodev_conns, &iw_nodev_conns_lock);
}
static inline void rds_iw_destroy_conns(struct rds_iw_device *rds_iwdev)
{
	__rds_iw_destroy_conns(&rds_iwdev->conn_list, &rds_iwdev->spinlock);
}
struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *);
void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo);
void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *);
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_iw_sync_mr(void *trans_private, int dir);
void rds_iw_free_mr(void *trans_private, int invalidate);
void rds_iw_flush_mrs(void);

/* ib_recv.c */
int rds_iw_recv_init(void);
void rds_iw_recv_exit(void);
int rds_iw_recv(struct rds_connection *conn);
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill);
void rds_iw_inc_free(struct rds_incoming *inc);
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_recv_tasklet_fn(unsigned long data);
void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
void rds_iw_attempt_ack(struct rds_iw_connection *ic);
void rds_iw_ack_send_complete(struct rds_iw_connection *ic);
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic);

/* ib_ring.c */
void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
int rds_iw_ring_low(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_iw_ring_empty_wait;
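
/*
 * Example: the usual allocate/post/unalloc pattern around the work ring.
 * A minimal sketch of how a send path might drive it (error handling and
 * the actual posting are elided; variable names are illustrative):
 *
 *	u32 pos;
 *	u32 got = rds_iw_ring_alloc(&ic->i_send_ring, wanted, &pos);
 *
 *	if (got == 0)
 *		return -ENOMEM;
 *	(build and post "got" work requests starting at ic->i_sends[pos];
 *	 if the post fails, return the entries with
 *	 rds_iw_ring_unalloc(&ic->i_send_ring, got); completions later
 *	 retire them via rds_iw_ring_free())
 */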

/* ib_send.c */
void rds_iw_xmit_complete(struct rds_connection *conn);
int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_send_init_ring(struct rds_iw_connection *ic);
void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
#define rds_iw_stats_inc(member) rds_stats_inc_which(rds_iw_stats, member)
unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);
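
/*
 * Example usage of the counter macro above, bumping one of the
 * rds_iw_statistics members for the local CPU:
 *
 *	rds_iw_stats_inc(s_iw_tx_ring_full);
 */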

/* ib_sysctl.c */
int rds_iw_sysctl_init(void);
void rds_iw_sysctl_exit(void);
extern unsigned long rds_iw_sysctl_max_send_wr;
extern unsigned long rds_iw_sysctl_max_recv_wr;
extern unsigned long rds_iw_sysctl_max_unsig_wrs;
extern unsigned long rds_iw_sysctl_max_unsig_bytes;
extern unsigned long rds_iw_sysctl_max_recv_allocation;
extern unsigned int rds_iw_sysctl_flow_control;

/*
 * Helper functions for getting/setting the header and data SGEs in
 * RDS packets (not RDMA)
 */
static inline struct ib_sge *
rds_iw_header_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[0];
}

static inline struct ib_sge *
rds_iw_data_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[1];
}

#endif