1 /*
2  * Copyright (c) 2006 Oracle.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 #include <linux/kernel.h>
34 #include <linux/in.h>
35 #include <linux/device.h>
36 #include <linux/dmapool.h>
37 #include <linux/ratelimit.h>
38 
39 #include "rds.h"
40 #include "ib.h"
41 
42 static char *rds_ib_wc_status_strings[] = {
43 #define RDS_IB_WC_STATUS_STR(foo) \
44 		[IB_WC_##foo] = __stringify(IB_WC_##foo)
45 	RDS_IB_WC_STATUS_STR(SUCCESS),
46 	RDS_IB_WC_STATUS_STR(LOC_LEN_ERR),
47 	RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR),
48 	RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR),
49 	RDS_IB_WC_STATUS_STR(LOC_PROT_ERR),
50 	RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR),
51 	RDS_IB_WC_STATUS_STR(MW_BIND_ERR),
52 	RDS_IB_WC_STATUS_STR(BAD_RESP_ERR),
53 	RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR),
54 	RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR),
55 	RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR),
56 	RDS_IB_WC_STATUS_STR(REM_OP_ERR),
57 	RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR),
58 	RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR),
59 	RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR),
60 	RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR),
61 	RDS_IB_WC_STATUS_STR(REM_ABORT_ERR),
62 	RDS_IB_WC_STATUS_STR(INV_EECN_ERR),
63 	RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR),
64 	RDS_IB_WC_STATUS_STR(FATAL_ERR),
65 	RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR),
66 	RDS_IB_WC_STATUS_STR(GENERAL_ERR),
67 #undef RDS_IB_WC_STATUS_STR
68 };
69 
70 char *rds_ib_wc_status_str(enum ib_wc_status status)
71 {
72 	return rds_str_array(rds_ib_wc_status_strings,
73 			     ARRAY_SIZE(rds_ib_wc_status_strings), status);
74 }
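/*
 * Illustrative sketch (not part of the original source): rds_str_array() is a
 * generic RDS helper defined elsewhere in the tree; conceptually it is just a
 * bounds-checked table lookup that falls back to "unknown" for any status the
 * array above does not name. A minimal equivalent, under a hypothetical name:
 */
#if 0	/* example only, not compiled */
static char *ex_str_array(char **array, size_t elements, size_t index)
{
	if (index < elements && array[index])
		return array[index];
	return "unknown";
}
#endif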
75 
76 /*
77  * Convert IB-specific error message to RDS error message and call core
78  * completion handler.
79  */
80 static void rds_ib_send_complete(struct rds_message *rm,
81 				 int wc_status,
82 				 void (*complete)(struct rds_message *rm, int status))
83 {
84 	int notify_status;
85 
86 	switch (wc_status) {
87 	case IB_WC_WR_FLUSH_ERR:
88 		return;
89 
90 	case IB_WC_SUCCESS:
91 		notify_status = RDS_RDMA_SUCCESS;
92 		break;
93 
94 	case IB_WC_REM_ACCESS_ERR:
95 		notify_status = RDS_RDMA_REMOTE_ERROR;
96 		break;
97 
98 	default:
99 		notify_status = RDS_RDMA_OTHER_ERROR;
100 		break;
101 	}
102 	complete(rm, notify_status);
103 }
104 
105 static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
106 				   struct rm_rdma_op *op,
107 				   int wc_status)
108 {
109 	if (op->op_mapped) {
110 		ib_dma_unmap_sg(ic->i_cm_id->device,
111 				op->op_sg, op->op_nents,
112 				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
113 		op->op_mapped = 0;
114 	}
115 
116 	/* If the user asked for a completion notification on this
117 	 * message, we can implement three different semantics:
118 	 *  1.	Notify when we received the ACK on the RDS message
119 	 *	that was queued with the RDMA. This provides reliable
120 	 *	notification of RDMA status at the expense of a one-way
121 	 *	packet delay.
122 	 *  2.	Notify when the IB stack gives us the completion event for
123 	 *	the RDMA operation.
124 	 *  3.	Notify when the IB stack gives us the completion event for
125 	 *	the accompanying RDS messages.
126 	 * Here, we implement approach #3. To implement approach #2,
127 	 * we would need to take an event for the rdma WR. To implement #1,
128 	 * don't call rds_rdma_send_complete at all, and fall back to the notify
129 	 * handling in the ACK processing code.
130 	 *
131 	 * Note: There's no need to explicitly sync any RDMA buffers using
132 	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
133 	 * operation itself unmapped the RDMA buffers, which takes care
134 	 * of synching.
135 	 */
136 	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
137 			     wc_status, rds_rdma_send_complete);
138 
139 	if (op->op_write)
140 		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
141 	else
142 		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
143 }
144 
145 static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
146 				     struct rm_atomic_op *op,
147 				     int wc_status)
148 {
149 	/* unmap atomic recvbuf */
150 	if (op->op_mapped) {
151 		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
152 				DMA_FROM_DEVICE);
153 		op->op_mapped = 0;
154 	}
155 
156 	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
157 			     wc_status, rds_atomic_send_complete);
158 
159 	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
160 		rds_ib_stats_inc(s_ib_atomic_cswp);
161 	else
162 		rds_ib_stats_inc(s_ib_atomic_fadd);
163 }
164 
165 static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
166 				   struct rm_data_op *op,
167 				   int wc_status)
168 {
169 	struct rds_message *rm = container_of(op, struct rds_message, data);
170 
171 	if (op->op_nents)
172 		ib_dma_unmap_sg(ic->i_cm_id->device,
173 				op->op_sg, op->op_nents,
174 				DMA_TO_DEVICE);
175 
176 	if (rm->rdma.op_active && rm->data.op_notify)
177 		rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
178 }
179 
180 /*
181  * Unmap the resources associated with a struct send_work.
182  *
183  * Returns the rm because the caller, the event handler, needs it and
184  * currently there is no way to obtain it other than by switching on
185  * wr.opcode.
186  */
187 static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
188 						struct rds_ib_send_work *send,
189 						int wc_status)
190 {
191 	struct rds_message *rm = NULL;
192 
193 	/* In the error case, wc.opcode sometimes contains garbage */
194 	switch (send->s_wr.opcode) {
195 	case IB_WR_SEND:
196 		if (send->s_op) {
197 			rm = container_of(send->s_op, struct rds_message, data);
198 			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
199 		}
200 		break;
201 	case IB_WR_RDMA_WRITE:
202 	case IB_WR_RDMA_READ:
203 		if (send->s_op) {
204 			rm = container_of(send->s_op, struct rds_message, rdma);
205 			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
206 		}
207 		break;
208 	case IB_WR_ATOMIC_FETCH_AND_ADD:
209 	case IB_WR_ATOMIC_CMP_AND_SWP:
210 		if (send->s_op) {
211 			rm = container_of(send->s_op, struct rds_message, atomic);
212 			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
213 		}
214 		break;
215 	default:
216 		printk_ratelimited(KERN_NOTICE
217 			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
218 			       __func__, send->s_wr.opcode);
219 		break;
220 	}
221 
222 	send->s_wr.opcode = 0xdead;	/* poison: rds_ib_send_clear_ring() skips WRs already unmapped here */
223 
224 	return rm;
225 }
226 
227 void rds_ib_send_init_ring(struct rds_ib_connection *ic)
228 {
229 	struct rds_ib_send_work *send;
230 	u32 i;
231 
232 	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
233 		struct ib_sge *sge;
234 
235 		send->s_op = NULL;
236 
237 		send->s_wr.wr_id = i;
238 		send->s_wr.sg_list = send->s_sge;
239 		send->s_wr.ex.imm_data = 0;
240 
241 		sge = &send->s_sge[0];
242 		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
243 		sge->length = sizeof(struct rds_header);
244 		sge->lkey = ic->i_mr->lkey;
245 
246 		send->s_sge[1].lkey = ic->i_mr->lkey;
247 	}
248 }
249 
250 void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
251 {
252 	struct rds_ib_send_work *send;
253 	u32 i;
254 
255 	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
256 		if (send->s_op && send->s_wr.opcode != 0xdead)
257 			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
258 	}
259 }
260 
261 /*
262  * The only fast path caller always has a non-zero nr, so we don't
263  * bother testing nr before performing the atomic sub.
264  */
265 static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
266 {
267 	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
268 	    waitqueue_active(&rds_ib_ring_empty_wait))
269 		wake_up(&rds_ib_ring_empty_wait);
270 	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
271 }
272 
273 /*
274  * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
275  * operations performed in the send path.  As the sender allocs and potentially
276  * unallocs the next free entry in the ring it doesn't alter which is
277  * the next to be freed, which is what this is concerned with.
278  */
279 void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
280 {
281 	struct rds_connection *conn = context;
282 	struct rds_ib_connection *ic = conn->c_transport_data;
283 	struct rds_message *rm = NULL;
284 	struct ib_wc wc;
285 	struct rds_ib_send_work *send;
286 	u32 completed;
287 	u32 oldest;
288 	u32 i = 0;
289 	int ret;
290 	int nr_sig = 0;
291 
292 	rdsdebug("cq %p conn %p\n", cq, conn);
293 	rds_ib_stats_inc(s_ib_tx_cq_call);
294 	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
295 	if (ret)
296 		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
297 
298 	while (ib_poll_cq(cq, 1, &wc) > 0) {
299 		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
300 			 (unsigned long long)wc.wr_id, wc.status,
301 			 rds_ib_wc_status_str(wc.status), wc.byte_len,
302 			 be32_to_cpu(wc.ex.imm_data));
303 		rds_ib_stats_inc(s_ib_tx_cq_event);
304 
305 		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
306 			if (time_after(jiffies, ic->i_ack_queued + HZ/2))
307 				rds_ib_stats_inc(s_ib_tx_stalled);
308 			rds_ib_ack_send_complete(ic);
309 			continue;
310 		}
311 
312 		oldest = rds_ib_ring_oldest(&ic->i_send_ring);
313 
314 		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);
315 
316 		for (i = 0; i < completed; i++) {
317 			send = &ic->i_sends[oldest];
318 			if (send->s_wr.send_flags & IB_SEND_SIGNALED)
319 				nr_sig++;
320 
321 			rm = rds_ib_send_unmap_op(ic, send, wc.status);
322 
323 			if (time_after(jiffies, send->s_queued + HZ/2))
324 				rds_ib_stats_inc(s_ib_tx_stalled);
325 
326 			if (send->s_op) {
327 				if (send->s_op == rm->m_final_op) {
328 					/* If anyone waited for this message to get flushed out, wake
329 					 * them up now */
330 					rds_message_unmapped(rm);
331 				}
332 				rds_message_put(rm);
333 				send->s_op = NULL;
334 			}
335 
336 			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
337 		}
338 
339 		rds_ib_ring_free(&ic->i_send_ring, completed);
340 		rds_ib_sub_signaled(ic, nr_sig);
341 		nr_sig = 0;
342 
343 		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
344 		    test_bit(0, &conn->c_map_queued))
345 			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
346 
347 		/* We expect errors as the qp is drained during shutdown */
348 		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
349 			rds_ib_conn_error(conn, "send completion on %pI4 had status "
350 					  "%u (%s), disconnecting and reconnecting\n",
351 					  &conn->c_faddr, wc.status,
352 					  rds_ib_wc_status_str(wc.status));
353 		}
354 	}
355 }
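/*
 * Illustrative sketch (not part of the original source): the "completed"
 * count used in the handler above is conceptually the wrap-around distance
 * from the oldest outstanding descriptor to the one named by wc.wr_id,
 * inclusive. A stand-alone version (hypothetical name; the real accounting
 * lives in rds_ib_ring_completed()) might look like:
 */
#if 0	/* example only, not compiled */
static u32 ex_ring_completed(u32 ring_size, u32 wr_id, u32 oldest)
{
	if (oldest <= wr_id)
		return wr_id - oldest + 1;

	/* wr_id wrapped past the end of the ring */
	return ring_size - oldest + wr_id + 1;
}
#endif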
356 
357 /*
358  * This is the main function for allocating credits when sending
359  * messages.
360  *
361  * Conceptually, we have two counters:
362  *  -	send credits: this tells us how many WRs we're allowed
363  *	to submit without overrunning the receiver's queue. For
364  *	each SEND WR we post, we decrement this by one.
365  *
366  *  -	posted credits: this tells us how many WRs we recently
367  *	posted to the receive queue. This value is transferred
368  *	to the peer as a "credit update" in a RDS header field.
369  *	Every time we transmit credits to the peer, we subtract
370  *	the amount of transferred credits from this counter.
371  *
372  * It is essential that we avoid situations where both sides have
373  * exhausted their send credits, and are unable to send new credits
374  * to the peer. We achieve this by requiring that we send at least
375  * one credit update to the peer before exhausting our credits.
376  * When new credits arrive, we subtract one credit that is withheld
377  * until we've posted new buffers and are ready to transmit these
378  * credits (see rds_ib_send_add_credits below).
379  *
380  * The RDS send code is essentially single-threaded; rds_send_xmit
381  * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
382  * However, the ACK sending code is independent and can race with
383  * message SENDs.
384  *
385  * In the send path, we need to update the counters for send credits
386  * and the counter of posted buffers atomically - when we use the
387  * last available credit, we cannot allow another thread to race us
388  * and grab the posted credits counter.  Hence, we have to use a
389  * spinlock to protect the credit counter, or use atomics.
390  *
391  * Spinlocks shared between the send and the receive path are bad,
392  * because they create unnecessary delays. An early implementation
393  * using a spinlock showed a 5% degradation in throughput at some
394  * loads.
395  *
396  * This implementation avoids spinlocks completely, putting both
397  * counters into a single atomic, and updating that atomic using
398  * atomic_add (in the receive path, when receiving fresh credits),
399  * and using atomic_cmpxchg when updating the two counters.
400  */
401 int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
402 			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
403 {
404 	unsigned int avail, posted, got = 0, advertise;
405 	long oldval, newval;
406 
407 	*adv_credits = 0;
408 	if (!ic->i_flowctl)
409 		return wanted;
410 
411 try_again:
412 	advertise = 0;
413 	oldval = newval = atomic_read(&ic->i_credits);
414 	posted = IB_GET_POST_CREDITS(oldval);
415 	avail = IB_GET_SEND_CREDITS(oldval);
416 
417 	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
418 			wanted, avail, posted);
419 
420 	/* The last credit must be used to send a credit update. */
421 	if (avail && !posted)
422 		avail--;
423 
424 	if (avail < wanted) {
425 		struct rds_connection *conn = ic->i_cm_id->context;
426 
427 		/* Oops, there aren't that many credits left! */
428 		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
429 		got = avail;
430 	} else {
431 		/* Sometimes you get what you want, lalala. */
432 		got = wanted;
433 	}
434 	newval -= IB_SET_SEND_CREDITS(got);
435 
436 	/*
437 	 * If need_posted is non-zero, then the caller wants
438 	 * the posted credits advertised regardless of whether any send credits are
439 	 * available.
440 	 */
441 	if (posted && (got || need_posted)) {
442 		advertise = min_t(unsigned int, posted, max_posted);
443 		newval -= IB_SET_POST_CREDITS(advertise);
444 	}
445 
446 	/* Finally bill everything */
447 	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
448 		goto try_again;
449 
450 	*adv_credits = advertise;
451 	return got;
452 }
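/*
 * Illustrative sketch (not part of the original source): one plausible shape
 * of the packed credit word manipulated above, assuming send credits sit in
 * the low 16 bits and posted credits in the high 16 bits (mirroring the
 * IB_GET_SEND_CREDITS and IB_GET_POST_CREDITS style helpers used here).
 */
#if 0	/* example only, not compiled */
#define EX_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define EX_GET_POST_CREDITS(v)	((v) >> 16)
#define EX_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define EX_SET_POST_CREDITS(v)	((v) << 16)

/* Consume up to "wanted" send credits with a lock-free cmpxchg loop. */
static unsigned int ex_grab_send_credits(atomic_t *credits, unsigned int wanted)
{
	int oldval, newval;
	unsigned int got;

	do {
		oldval = atomic_read(credits);
		got = min_t(unsigned int, wanted, EX_GET_SEND_CREDITS(oldval));
		newval = oldval - EX_SET_SEND_CREDITS(got);
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

	return got;
}
#endif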
453 
454 void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
455 {
456 	struct rds_ib_connection *ic = conn->c_transport_data;
457 
458 	if (credits == 0)
459 		return;
460 
461 	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
462 			credits,
463 			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
464 			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
465 
466 	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
467 	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
468 		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
469 
470 	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
471 
472 	rds_ib_stats_inc(s_ib_rx_credit_updates);
473 }
474 
475 void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
476 {
477 	struct rds_ib_connection *ic = conn->c_transport_data;
478 
479 	if (posted == 0)
480 		return;
481 
482 	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
483 
484 	/* Decide whether to send an update to the peer now.
485 	 * If we would send a credit update for every single buffer we
486 	 * post, we would end up with an ACK storm (ACK arrives,
487 	 * consumes buffer, we refill the ring, send ACK to remote
488 	 * advertising the newly posted buffer... ad inf)
489 	 *
490 	 * Performance pretty much depends on how often we send
491 	 * credit updates - too frequent updates mean lots of ACKs.
492 	 * Too infrequent updates, and the peer will run out of
493 	 * credits and has to throttle.
494 	 * For the time being, 16 seems to be a good compromise.
495 	 */
496 	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
497 		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
498 }
499 
500 static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
501 					     struct rds_ib_send_work *send,
502 					     bool notify)
503 {
504 	/*
505 	 * We want to delay signaling completions just enough to get
506 	 * the batching benefits but not so much that we create dead time
507 	 * on the wire.
508 	 */
509 	if (ic->i_unsignaled_wrs-- == 0 || notify) {
510 		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
511 		send->s_wr.send_flags |= IB_SEND_SIGNALED;
512 		return 1;
513 	}
514 	return 0;
515 }
516 
517 /*
518  * This can be called multiple times for a given message.  The first time
519  * we see a message we map its scatterlist into the IB device so that
520  * we can provide that mapped address to the IB scatter gather entries
521  * in the IB work requests.  We translate the scatterlist into a series
522  * of work requests that fragment the message.  These work requests complete
523  * in order so we pass ownership of the message to the completion handler
524  * once we send the final fragment.
525  *
526  * The RDS core uses the c_send_lock to only enter this function once
527  * per connection.  This makes sure that the tx ring alloc/unalloc pairs
528  * don't get out of sync and confuse the ring.
529  */
530 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
531 		unsigned int hdr_off, unsigned int sg, unsigned int off)
532 {
533 	struct rds_ib_connection *ic = conn->c_transport_data;
534 	struct ib_device *dev = ic->i_cm_id->device;
535 	struct rds_ib_send_work *send = NULL;
536 	struct rds_ib_send_work *first;
537 	struct rds_ib_send_work *prev;
538 	struct ib_send_wr *failed_wr;
539 	struct scatterlist *scat;
540 	u32 pos;
541 	u32 i;
542 	u32 work_alloc;
543 	u32 credit_alloc = 0;
544 	u32 posted;
545 	u32 adv_credits = 0;
546 	int send_flags = 0;
547 	int bytes_sent = 0;
548 	int ret;
549 	int flow_controlled = 0;
550 	int nr_sig = 0;
551 
552 	BUG_ON(off % RDS_FRAG_SIZE);
553 	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
554 
555 	/* Do not send cong updates to IB loopback */
556 	if (conn->c_loopback
557 	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
558 		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
559 		scat = &rm->data.op_sg[sg];
560 		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
561 		return sizeof(struct rds_header) + ret;
562 	}
563 
564 	/* FIXME we may overallocate here */
565 	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
566 		i = 1;
567 	else
568 		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
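	/* Illustrative arithmetic, assuming the usual 4096-byte RDS_FRAG_SIZE:
	 * a 10000-byte message needs ceil(10000, 4096) = 3 fragments and thus
	 * 3 send-ring slots, while a 0-byte message still takes one
	 * header-only slot (the i = 1 case above). */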
569 
570 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
571 	if (work_alloc == 0) {
572 		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
573 		rds_ib_stats_inc(s_ib_tx_ring_full);
574 		ret = -ENOMEM;
575 		goto out;
576 	}
577 
578 	if (ic->i_flowctl) {
579 		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
580 		adv_credits += posted;
581 		if (credit_alloc < work_alloc) {
582 			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
583 			work_alloc = credit_alloc;
584 			flow_controlled = 1;
585 		}
586 		if (work_alloc == 0) {
587 			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
588 			rds_ib_stats_inc(s_ib_tx_throttle);
589 			ret = -ENOMEM;
590 			goto out;
591 		}
592 	}
593 
594 	/* map the message the first time we see it */
595 	if (!ic->i_data_op) {
596 		if (rm->data.op_nents) {
597 			rm->data.op_count = ib_dma_map_sg(dev,
598 							  rm->data.op_sg,
599 							  rm->data.op_nents,
600 							  DMA_TO_DEVICE);
601 			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
602 			if (rm->data.op_count == 0) {
603 				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
604 				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
605 				ret = -ENOMEM; /* XXX ? */
606 				goto out;
607 			}
608 		} else {
609 			rm->data.op_count = 0;
610 		}
611 
612 		rds_message_addref(rm);
613 		ic->i_data_op = &rm->data;
614 
615 		/* Finalize the header */
616 		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
617 			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
618 		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
619 			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
620 
621 		/* If it has a RDMA op, tell the peer we did it. This is
622 		 * used by the peer to release use-once RDMA MRs. */
623 		if (rm->rdma.op_active) {
624 			struct rds_ext_header_rdma ext_hdr;
625 
626 			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
627 			rds_message_add_extension(&rm->m_inc.i_hdr,
628 					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
629 		}
630 		if (rm->m_rdma_cookie) {
631 			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
632 					rds_rdma_cookie_key(rm->m_rdma_cookie),
633 					rds_rdma_cookie_offset(rm->m_rdma_cookie));
634 		}
635 
636 		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
637 		 * we should not do this unless we have a chance of at least
638 		 * sticking the header into the send ring. Which is why we
639 		 * should call rds_ib_ring_alloc first. */
640 		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
641 		rds_message_make_checksum(&rm->m_inc.i_hdr);
642 
643 		/*
644 		 * Update adv_credits since we reset the ACK_REQUIRED bit.
645 		 */
646 		if (ic->i_flowctl) {
647 			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
648 			adv_credits += posted;
649 			BUG_ON(adv_credits > 255);
650 		}
651 	}
652 
653 	/* Sometimes you want to put a fence between an RDMA
654 	 * READ and the following SEND.
655 	 * We could either do this all the time
656 	 * or when requested by the user. Right now, we let
657 	 * the application choose.
658 	 */
659 	if (rm->rdma.op_active && rm->rdma.op_fence)
660 		send_flags = IB_SEND_FENCE;
661 
662 	/* Each frag gets a header. Msgs may be 0 bytes */
663 	send = &ic->i_sends[pos];
664 	first = send;
665 	prev = NULL;
666 	scat = &ic->i_data_op->op_sg[sg];
667 	i = 0;
668 	do {
669 		unsigned int len = 0;
670 
671 		/* Set up the header */
672 		send->s_wr.send_flags = send_flags;
673 		send->s_wr.opcode = IB_WR_SEND;
674 		send->s_wr.num_sge = 1;
675 		send->s_wr.next = NULL;
676 		send->s_queued = jiffies;
677 		send->s_op = NULL;
678 
679 		send->s_sge[0].addr = ic->i_send_hdrs_dma
680 			+ (pos * sizeof(struct rds_header));
681 		send->s_sge[0].length = sizeof(struct rds_header);
682 
683 		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
684 
685 		/* Set up the data, if present */
686 		if (i < work_alloc
687 		    && scat != &rm->data.op_sg[rm->data.op_count]) {
688 			len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
689 			send->s_wr.num_sge = 2;
690 
691 			send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
692 			send->s_sge[1].length = len;
693 
694 			bytes_sent += len;
695 			off += len;
696 			if (off == ib_sg_dma_len(dev, scat)) {
697 				scat++;
698 				off = 0;
699 			}
700 		}
701 
702 		rds_ib_set_wr_signal_state(ic, send, 0);
703 
704 		/*
705 		 * Always signal the last one if we're stopping due to flow control.
706 		 */
707 		if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
708 			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
709 
710 		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
711 			nr_sig++;
712 
713 		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
714 			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
715 
716 		if (ic->i_flowctl && adv_credits) {
717 			struct rds_header *hdr = &ic->i_send_hdrs[pos];
718 
719 			/* add credit and redo the header checksum */
720 			hdr->h_credit = adv_credits;
721 			rds_message_make_checksum(hdr);
722 			adv_credits = 0;
723 			rds_ib_stats_inc(s_ib_tx_credit_updates);
724 		}
725 
726 		if (prev)
727 			prev->s_wr.next = &send->s_wr;
728 		prev = send;
729 
730 		pos = (pos + 1) % ic->i_send_ring.w_nr;
731 		send = &ic->i_sends[pos];
732 		i++;
733 
734 	} while (i < work_alloc
735 		 && scat != &rm->data.op_sg[rm->data.op_count]);
736 
737 	/* Account the RDS header in the number of bytes we sent, but just once.
738 	 * The caller has no concept of fragmentation. */
739 	if (hdr_off == 0)
740 		bytes_sent += sizeof(struct rds_header);
741 
742 	/* if we finished the message then send completion owns it */
743 	if (scat == &rm->data.op_sg[rm->data.op_count]) {
744 		prev->s_op = ic->i_data_op;
745 		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
746 		ic->i_data_op = NULL;
747 	}
748 
749 	/* Put back wrs & credits we didn't use */
750 	if (i < work_alloc) {
751 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
752 		work_alloc = i;
753 	}
754 	if (ic->i_flowctl && i < credit_alloc)
755 		rds_ib_send_add_credits(conn, credit_alloc - i);
756 
757 	if (nr_sig)
758 		atomic_add(nr_sig, &ic->i_signaled_sends);
759 
760 	/* XXX need to worry about failed_wr and partial sends. */
761 	failed_wr = &first->s_wr;
762 	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
763 	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
764 		 first, &first->s_wr, ret, failed_wr);
765 	BUG_ON(failed_wr != &first->s_wr);
766 	if (ret) {
767 		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
768 		       "returned %d\n", &conn->c_faddr, ret);
769 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
770 		rds_ib_sub_signaled(ic, nr_sig);
771 		if (prev->s_op) {
772 			ic->i_data_op = prev->s_op;
773 			prev->s_op = NULL;
774 		}
775 
776 		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
777 		goto out;
778 	}
779 
780 	ret = bytes_sent;
781 out:
782 	BUG_ON(adv_credits);
783 	return ret;
784 }
785 
786 /*
787  * Issue atomic operation.
788  * A simplified version of the rdma case, we always map 1 SG, and
789  * only 8 bytes, for the return value from the atomic operation.
790  */
791 int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
792 {
793 	struct rds_ib_connection *ic = conn->c_transport_data;
794 	struct rds_ib_send_work *send = NULL;
795 	struct ib_send_wr *failed_wr;
796 	struct rds_ib_device *rds_ibdev;
797 	u32 pos;
798 	u32 work_alloc;
799 	int ret;
800 	int nr_sig = 0;
801 
802 	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
803 
804 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
805 	if (work_alloc != 1) {
806 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
807 		rds_ib_stats_inc(s_ib_tx_ring_full);
808 		ret = -ENOMEM;
809 		goto out;
810 	}
811 
812 	/* address of send request in ring */
813 	send = &ic->i_sends[pos];
814 	send->s_queued = jiffies;
815 
816 	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
817 		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
818 		send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare;
819 		send->s_wr.wr.atomic.swap = op->op_m_cswp.swap;
820 		send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask;
821 		send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask;
822 	} else { /* FADD */
823 		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
824 		send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add;
825 		send->s_wr.wr.atomic.swap = 0;
826 		send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask;
827 		send->s_wr.wr.atomic.swap_mask = 0;
828 	}
829 	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
830 	send->s_wr.num_sge = 1;
831 	send->s_wr.next = NULL;
832 	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
833 	send->s_wr.wr.atomic.rkey = op->op_rkey;
834 	send->s_op = op;
835 	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
836 
837 	/* map 8 byte retval buffer to the device */
838 	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
839 	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
840 	if (ret != 1) {
841 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
842 		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
843 		ret = -ENOMEM; /* XXX ? */
844 		goto out;
845 	}
846 
847 	/* Convert our struct scatterlist to struct ib_sge */
848 	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
849 	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
850 	send->s_sge[0].lkey = ic->i_mr->lkey;
851 
852 	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
853 		 send->s_sge[0].addr, send->s_sge[0].length);
854 
855 	if (nr_sig)
856 		atomic_add(nr_sig, &ic->i_signaled_sends);
857 
858 	failed_wr = &send->s_wr;
859 	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
860 	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
861 		 send, &send->s_wr, ret, failed_wr);
862 	BUG_ON(failed_wr != &send->s_wr);
863 	if (ret) {
864 		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
865 		       "returned %d\n", &conn->c_faddr, ret);
866 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
867 		rds_ib_sub_signaled(ic, nr_sig);
868 		goto out;
869 	}
870 
871 	if (unlikely(failed_wr != &send->s_wr)) {
872 		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
873 		BUG_ON(failed_wr != &send->s_wr);
874 	}
875 
876 out:
877 	return ret;
878 }
879 
880 int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
881 {
882 	struct rds_ib_connection *ic = conn->c_transport_data;
883 	struct rds_ib_send_work *send = NULL;
884 	struct rds_ib_send_work *first;
885 	struct rds_ib_send_work *prev;
886 	struct ib_send_wr *failed_wr;
887 	struct scatterlist *scat;
888 	unsigned long len;
889 	u64 remote_addr = op->op_remote_addr;
890 	u32 max_sge = ic->rds_ibdev->max_sge;
891 	u32 pos;
892 	u32 work_alloc;
893 	u32 i;
894 	u32 j;
895 	int sent;
896 	int ret;
897 	int num_sge;
898 	int nr_sig = 0;
899 
900 	/* map the op the first time we see it */
901 	if (!op->op_mapped) {
902 		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
903 					     op->op_sg, op->op_nents, (op->op_write) ?
904 					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
905 		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
906 		if (op->op_count == 0) {
907 			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
908 			ret = -ENOMEM; /* XXX ? */
909 			goto out;
910 		}
911 
912 		op->op_mapped = 1;
913 	}
914 
915 	/*
916 	 * Instead of knowing how to return a partial rdma read/write we insist that there
917 	 * be enough work requests to send the entire message.
918 	 */
919 	i = ceil(op->op_count, max_sge);
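	/* Illustrative arithmetic: with op_count = 20 mapped SG entries and a
	 * device max_sge of 8, ceil(20, 8) = 3 work requests cover the whole
	 * RDMA; if the ring cannot supply all 3 we back out below instead of
	 * issuing a partial transfer. */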
920 
921 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
922 	if (work_alloc != i) {
923 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
924 		rds_ib_stats_inc(s_ib_tx_ring_full);
925 		ret = -ENOMEM;
926 		goto out;
927 	}
928 
929 	send = &ic->i_sends[pos];
930 	first = send;
931 	prev = NULL;
932 	scat = &op->op_sg[0];
933 	sent = 0;
934 	num_sge = op->op_count;
935 
936 	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
937 		send->s_wr.send_flags = 0;
938 		send->s_queued = jiffies;
939 		send->s_op = NULL;
940 
941 		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
942 
943 		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
944 		send->s_wr.wr.rdma.remote_addr = remote_addr;
945 		send->s_wr.wr.rdma.rkey = op->op_rkey;
946 
947 		if (num_sge > max_sge) {
948 			send->s_wr.num_sge = max_sge;
949 			num_sge -= max_sge;
950 		} else {
951 			send->s_wr.num_sge = num_sge;
952 		}
953 
954 		send->s_wr.next = NULL;
955 
956 		if (prev)
957 			prev->s_wr.next = &send->s_wr;
958 
959 		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
960 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
961 			send->s_sge[j].addr =
962 				 ib_sg_dma_address(ic->i_cm_id->device, scat);
963 			send->s_sge[j].length = len;
964 			send->s_sge[j].lkey = ic->i_mr->lkey;
965 
966 			sent += len;
967 			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
968 
969 			remote_addr += len;
970 			scat++;
971 		}
972 
973 		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
974 			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
975 
976 		prev = send;
977 		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
978 			send = ic->i_sends;
979 	}
980 
981 	/* give a reference to the last op */
982 	if (scat == &op->op_sg[op->op_count]) {
983 		prev->s_op = op;
984 		rds_message_addref(container_of(op, struct rds_message, rdma));
985 	}
986 
987 	if (i < work_alloc) {
988 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
989 		work_alloc = i;
990 	}
991 
992 	if (nr_sig)
993 		atomic_add(nr_sig, &ic->i_signaled_sends);
994 
995 	failed_wr = &first->s_wr;
996 	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
997 	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
998 		 first, &first->s_wr, ret, failed_wr);
999 	BUG_ON(failed_wr != &first->s_wr);
1000 	if (ret) {
1001 		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
1002 		       "returned %d\n", &conn->c_faddr, ret);
1003 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
1004 		rds_ib_sub_signaled(ic, nr_sig);
1005 		goto out;
1006 	}
1007 
1008 	if (unlikely(failed_wr != &first->s_wr)) {
1009 		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
1010 		BUG_ON(failed_wr != &first->s_wr);
1011 	}
1012 
1013 
1014 out:
1015 	return ret;
1016 }
1017 
1018 void rds_ib_xmit_complete(struct rds_connection *conn)
1019 {
1020 	struct rds_ib_connection *ic = conn->c_transport_data;
1021 
1022 	/* We may have a pending ACK or window update we were unable
1023 	 * to send previously (due to flow control). Try again. */
1024 	rds_ib_attempt_ack(ic);
1025 }
1026