1 /*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <rdma/ib_cache.h>
44
45 #include <linux/atomic.h>
46
47 #include <scsi/scsi.h>
48 #include <scsi/scsi_device.h>
49 #include <scsi/scsi_dbg.h>
50 #include <scsi/scsi_tcq.h>
51 #include <scsi/srp.h>
52 #include <scsi/scsi_transport_srp.h>
53
54 #include "ib_srp.h"
55
56 #define DRV_NAME "ib_srp"
57 #define PFX DRV_NAME ": "
58 #define DRV_VERSION "2.0"
59 #define DRV_RELDATE "July 26, 2015"
60
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
64 MODULE_VERSION(DRV_VERSION);
65 MODULE_INFO(release_date, DRV_RELDATE);
66
67 static unsigned int srp_sg_tablesize;
68 static unsigned int cmd_sg_entries;
69 static unsigned int indirect_sg_entries;
70 static bool allow_ext_sg;
71 static bool prefer_fr = true;
72 static bool register_always = true;
73 static int topspin_workarounds = 1;
74
75 module_param(srp_sg_tablesize, uint, 0444);
76 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
77
78 module_param(cmd_sg_entries, uint, 0444);
79 MODULE_PARM_DESC(cmd_sg_entries,
80 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
81
82 module_param(indirect_sg_entries, uint, 0444);
83 MODULE_PARM_DESC(indirect_sg_entries,
84 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85
86 module_param(allow_ext_sg, bool, 0444);
87 MODULE_PARM_DESC(allow_ext_sg,
88 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89
90 module_param(topspin_workarounds, int, 0444);
91 MODULE_PARM_DESC(topspin_workarounds,
92 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93
94 module_param(prefer_fr, bool, 0444);
95 MODULE_PARM_DESC(prefer_fr,
96 "Whether to use fast registration if both FMR and fast registration are supported");
97
98 module_param(register_always, bool, 0444);
99 MODULE_PARM_DESC(register_always,
100 "Use memory registration even for contiguous memory regions");
101
102 static const struct kernel_param_ops srp_tmo_ops;
103
104 static int srp_reconnect_delay = 10;
105 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106 S_IRUGO | S_IWUSR);
107 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108
109 static int srp_fast_io_fail_tmo = 15;
110 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111 S_IRUGO | S_IWUSR);
112 MODULE_PARM_DESC(fast_io_fail_tmo,
113 "Number of seconds between the observation of a transport"
114 " layer error and failing all I/O. \"off\" means that this"
115 " functionality is disabled.");
116
117 static int srp_dev_loss_tmo = 600;
118 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119 S_IRUGO | S_IWUSR);
120 MODULE_PARM_DESC(dev_loss_tmo,
121 "Maximum number of seconds that the SRP transport should"
122 " insulate transport layer errors. After this time has been"
123 " exceeded the SCSI host is removed. Should be"
124 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125 " if fast_io_fail_tmo has not been set. \"off\" means that"
126 " this functionality is disabled.");
127
128 static unsigned ch_count;
129 module_param(ch_count, uint, 0444);
130 MODULE_PARM_DESC(ch_count,
131 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132
133 static void srp_add_one(struct ib_device *device);
134 static void srp_remove_one(struct ib_device *device, void *client_data);
135 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
136 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
137 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
138
139 static struct scsi_transport_template *ib_srp_transport_template;
140 static struct workqueue_struct *srp_remove_wq;
141
142 static struct ib_client srp_client = {
143 .name = "srp",
144 .add = srp_add_one,
145 .remove = srp_remove_one
146 };
147
148 static struct ib_sa_client srp_sa_client;
149
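/*
 * Show/store callbacks for the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters. A negative timeout is reported as
 * "off"; srp_tmo_set() parses the new value with srp_parse_tmo() and
 * only stores it if srp_tmo_valid() accepts the resulting combination
 * of the three timeouts.
 */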
150 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
151 {
152 int tmo = *(int *)kp->arg;
153
154 if (tmo >= 0)
155 return sprintf(buffer, "%d", tmo);
156 else
157 return sprintf(buffer, "off");
158 }
159
160 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
161 {
162 int tmo, res;
163
164 res = srp_parse_tmo(&tmo, val);
165 if (res)
166 goto out;
167
168 if (kp->arg == &srp_reconnect_delay)
169 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
170 srp_dev_loss_tmo);
171 else if (kp->arg == &srp_fast_io_fail_tmo)
172 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
173 else
174 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
175 tmo);
176 if (res)
177 goto out;
178 *(int *)kp->arg = tmo;
179
180 out:
181 return res;
182 }
183
184 static const struct kernel_param_ops srp_tmo_ops = {
185 .get = srp_tmo_get,
186 .set = srp_tmo_set,
187 };
188
189 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
190 {
191 return (struct srp_target_port *) host->hostdata;
192 }
193
194 static const char *srp_target_info(struct Scsi_Host *host)
195 {
196 return host_to_target(host)->target_name;
197 }
198
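/*
 * Report whether a target needs the Topspin/Cisco workarounds: the check
 * matches the OUI at the start of the I/O controller GUID and is only
 * honored while the topspin_workarounds module parameter is nonzero.
 */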
199 static int srp_target_is_topspin(struct srp_target_port *target)
200 {
201 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
202 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
203
204 return topspin_workarounds &&
205 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
206 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
207 }
208
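/*
 * Allocate an information unit (IU): the srp_iu structure itself, a data
 * buffer of @size bytes and a DMA mapping of that buffer for @direction.
 * Returns NULL if any of these steps fails.
 */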
209 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
210 gfp_t gfp_mask,
211 enum dma_data_direction direction)
212 {
213 struct srp_iu *iu;
214
215 iu = kmalloc(sizeof *iu, gfp_mask);
216 if (!iu)
217 goto out;
218
219 iu->buf = kzalloc(size, gfp_mask);
220 if (!iu->buf)
221 goto out_free_iu;
222
223 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
224 direction);
225 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
226 goto out_free_buf;
227
228 iu->size = size;
229 iu->direction = direction;
230
231 return iu;
232
233 out_free_buf:
234 kfree(iu->buf);
235 out_free_iu:
236 kfree(iu);
237 out:
238 return NULL;
239 }
240
241 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
242 {
243 if (!iu)
244 return;
245
246 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
247 iu->direction);
248 kfree(iu->buf);
249 kfree(iu);
250 }
251
252 static void srp_qp_event(struct ib_event *event, void *context)
253 {
254 pr_debug("QP event %s (%d)\n",
255 ib_event_msg(event->event), event->event);
256 }
257
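/*
 * Move a freshly created queue pair to the INIT state: look up the P_Key
 * index that matches the target's P_Key and grant remote read and write
 * access on the local HCA port.
 */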
258 static int srp_init_qp(struct srp_target_port *target,
259 struct ib_qp *qp)
260 {
261 struct ib_qp_attr *attr;
262 int ret;
263
264 attr = kmalloc(sizeof *attr, GFP_KERNEL);
265 if (!attr)
266 return -ENOMEM;
267
268 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
269 target->srp_host->port,
270 be16_to_cpu(target->pkey),
271 &attr->pkey_index);
272 if (ret)
273 goto out;
274
275 attr->qp_state = IB_QPS_INIT;
276 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
277 IB_ACCESS_REMOTE_WRITE);
278 attr->port_num = target->srp_host->port;
279
280 ret = ib_modify_qp(qp, attr,
281 IB_QP_STATE |
282 IB_QP_PKEY_INDEX |
283 IB_QP_ACCESS_FLAGS |
284 IB_QP_PORT);
285
286 out:
287 kfree(attr);
288 return ret;
289 }
290
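/*
 * Allocate a new IB CM ID for a channel, replacing and destroying any
 * previous one, and reset the path record fields that are used for the
 * next connection attempt.
 */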
291 static int srp_new_cm_id(struct srp_rdma_ch *ch)
292 {
293 struct srp_target_port *target = ch->target;
294 struct ib_cm_id *new_cm_id;
295
296 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
297 srp_cm_handler, ch);
298 if (IS_ERR(new_cm_id))
299 return PTR_ERR(new_cm_id);
300
301 if (ch->cm_id)
302 ib_destroy_cm_id(ch->cm_id);
303 ch->cm_id = new_cm_id;
304 ch->path.sgid = target->sgid;
305 ch->path.dgid = target->orig_dgid;
306 ch->path.pkey = target->pkey;
307 ch->path.service_id = target->service_id;
308
309 return 0;
310 }
311
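/*
 * Create an FMR pool sized to the SCSI host queue depth. Used by
 * srp_create_ch_ib() when the device is operated in FMR mode
 * (dev->use_fmr) instead of fast registration.
 */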
312 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
313 {
314 struct srp_device *dev = target->srp_host->srp_dev;
315 struct ib_fmr_pool_param fmr_param;
316
317 memset(&fmr_param, 0, sizeof(fmr_param));
318 fmr_param.pool_size = target->scsi_host->can_queue;
319 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
320 fmr_param.cache = 1;
321 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
322 fmr_param.page_shift = ilog2(dev->mr_page_size);
323 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
324 IB_ACCESS_REMOTE_WRITE |
325 IB_ACCESS_REMOTE_READ);
326
327 return ib_create_fmr_pool(dev->pd, &fmr_param);
328 }
329
330 /**
331 * srp_destroy_fr_pool() - free the resources owned by a pool
332 * @pool: Fast registration pool to be destroyed.
333 */
334 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
335 {
336 int i;
337 struct srp_fr_desc *d;
338
339 if (!pool)
340 return;
341
342 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
343 if (d->mr)
344 ib_dereg_mr(d->mr);
345 }
346 kfree(pool);
347 }
348
349 /**
350 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
351 * @device: IB device to allocate fast registration descriptors for.
352 * @pd: Protection domain associated with the FR descriptors.
353 * @pool_size: Number of descriptors to allocate.
354 * @max_page_list_len: Maximum fast registration work request page list length.
355 */
356 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
357 struct ib_pd *pd, int pool_size,
358 int max_page_list_len)
359 {
360 struct srp_fr_pool *pool;
361 struct srp_fr_desc *d;
362 struct ib_mr *mr;
363 int i, ret = -EINVAL;
364
365 if (pool_size <= 0)
366 goto err;
367 ret = -ENOMEM;
368 pool = kzalloc(sizeof(struct srp_fr_pool) +
369 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
370 if (!pool)
371 goto err;
372 pool->size = pool_size;
373 pool->max_page_list_len = max_page_list_len;
374 spin_lock_init(&pool->lock);
375 INIT_LIST_HEAD(&pool->free_list);
376
377 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
378 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
379 max_page_list_len);
380 if (IS_ERR(mr)) {
381 ret = PTR_ERR(mr);
382 goto destroy_pool;
383 }
384 d->mr = mr;
385 list_add_tail(&d->entry, &pool->free_list);
386 }
387
388 out:
389 return pool;
390
391 destroy_pool:
392 srp_destroy_fr_pool(pool);
393
394 err:
395 pool = ERR_PTR(ret);
396 goto out;
397 }
398
399 /**
400 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
401 * @pool: Pool to obtain descriptor from.
402 */
403 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
404 {
405 struct srp_fr_desc *d = NULL;
406 unsigned long flags;
407
408 spin_lock_irqsave(&pool->lock, flags);
409 if (!list_empty(&pool->free_list)) {
410 d = list_first_entry(&pool->free_list, typeof(*d), entry);
411 list_del(&d->entry);
412 }
413 spin_unlock_irqrestore(&pool->lock, flags);
414
415 return d;
416 }
417
418 /**
419 * srp_fr_pool_put() - put an FR descriptor back in the free list
420 * @pool: Pool the descriptor was allocated from.
421 * @desc: Pointer to an array of fast registration descriptor pointers.
422 * @n: Number of descriptors to put back.
423 *
424 * Note: The caller must already have queued an invalidation request for
425 * desc->mr->rkey before calling this function.
426 */
427 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
428 int n)
429 {
430 unsigned long flags;
431 int i;
432
433 spin_lock_irqsave(&pool->lock, flags);
434 for (i = 0; i < n; i++)
435 list_add(&desc[i]->entry, &pool->free_list);
436 spin_unlock_irqrestore(&pool->lock, flags);
437 }
438
439 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
440 {
441 struct srp_device *dev = target->srp_host->srp_dev;
442
443 return srp_create_fr_pool(dev->dev, dev->pd,
444 target->scsi_host->can_queue,
445 dev->max_pages_per_mr);
446 }
447
448 /**
449 * srp_destroy_qp() - destroy an RDMA queue pair
450 * @ch: SRP RDMA channel.
451 *
452 * Change a queue pair into the error state and wait until all receive
453 * completions have been processed before destroying it. This prevents
454 * the receive completion handler from accessing the queue pair while
455 * it is being destroyed.
456 */
457 static void srp_destroy_qp(struct srp_rdma_ch *ch)
458 {
459 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
460 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
461 struct ib_recv_wr *bad_wr;
462 int ret;
463
464 /* Destroying a QP and reusing ch->done is only safe if not connected */
465 WARN_ON_ONCE(ch->connected);
466
467 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
468 WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
469 if (ret)
470 goto out;
471
472 init_completion(&ch->done);
473 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
474 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
475 if (ret == 0)
476 wait_for_completion(&ch->done);
477
478 out:
479 ib_destroy_qp(ch->qp);
480 }
481
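/*
 * Allocate the IB resources of a channel: receive and send completion
 * queues, an RC queue pair and, depending on the device mode, a fast
 * registration pool or an FMR pool. The old objects are only torn down
 * and replaced after all new allocations have succeeded, so a failing
 * call leaves the channel unchanged.
 */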
482 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
483 {
484 struct srp_target_port *target = ch->target;
485 struct srp_device *dev = target->srp_host->srp_dev;
486 struct ib_qp_init_attr *init_attr;
487 struct ib_cq *recv_cq, *send_cq;
488 struct ib_qp *qp;
489 struct ib_fmr_pool *fmr_pool = NULL;
490 struct srp_fr_pool *fr_pool = NULL;
491 const int m = dev->use_fast_reg ? 3 : 1;
492 struct ib_cq_init_attr cq_attr = {};
493 int ret;
494
495 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
496 if (!init_attr)
497 return -ENOMEM;
498
499 /* + 1 for SRP_LAST_WR_ID */
500 cq_attr.cqe = target->queue_size + 1;
501 cq_attr.comp_vector = ch->comp_vector;
502 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
503 &cq_attr);
504 if (IS_ERR(recv_cq)) {
505 ret = PTR_ERR(recv_cq);
506 goto err;
507 }
508
509 cq_attr.cqe = m * target->queue_size;
510 cq_attr.comp_vector = ch->comp_vector;
511 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
512 &cq_attr);
513 if (IS_ERR(send_cq)) {
514 ret = PTR_ERR(send_cq);
515 goto err_recv_cq;
516 }
517
518 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
519
520 init_attr->event_handler = srp_qp_event;
521 init_attr->cap.max_send_wr = m * target->queue_size;
522 init_attr->cap.max_recv_wr = target->queue_size + 1;
523 init_attr->cap.max_recv_sge = 1;
524 init_attr->cap.max_send_sge = 1;
525 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
526 init_attr->qp_type = IB_QPT_RC;
527 init_attr->send_cq = send_cq;
528 init_attr->recv_cq = recv_cq;
529
530 qp = ib_create_qp(dev->pd, init_attr);
531 if (IS_ERR(qp)) {
532 ret = PTR_ERR(qp);
533 goto err_send_cq;
534 }
535
536 ret = srp_init_qp(target, qp);
537 if (ret)
538 goto err_qp;
539
540 if (dev->use_fast_reg) {
541 fr_pool = srp_alloc_fr_pool(target);
542 if (IS_ERR(fr_pool)) {
543 ret = PTR_ERR(fr_pool);
544 shost_printk(KERN_WARNING, target->scsi_host, PFX
545 "FR pool allocation failed (%d)\n", ret);
546 goto err_qp;
547 }
548 } else if (dev->use_fmr) {
549 fmr_pool = srp_alloc_fmr_pool(target);
550 if (IS_ERR(fmr_pool)) {
551 ret = PTR_ERR(fmr_pool);
552 shost_printk(KERN_WARNING, target->scsi_host, PFX
553 "FMR pool allocation failed (%d)\n", ret);
554 goto err_qp;
555 }
556 }
557
558 if (ch->qp)
559 srp_destroy_qp(ch);
560 if (ch->recv_cq)
561 ib_destroy_cq(ch->recv_cq);
562 if (ch->send_cq)
563 ib_destroy_cq(ch->send_cq);
564
565 ch->qp = qp;
566 ch->recv_cq = recv_cq;
567 ch->send_cq = send_cq;
568
569 if (dev->use_fast_reg) {
570 if (ch->fr_pool)
571 srp_destroy_fr_pool(ch->fr_pool);
572 ch->fr_pool = fr_pool;
573 } else if (dev->use_fmr) {
574 if (ch->fmr_pool)
575 ib_destroy_fmr_pool(ch->fmr_pool);
576 ch->fmr_pool = fmr_pool;
577 }
578
579 kfree(init_attr);
580 return 0;
581
582 err_qp:
583 ib_destroy_qp(qp);
584
585 err_send_cq:
586 ib_destroy_cq(send_cq);
587
588 err_recv_cq:
589 ib_destroy_cq(recv_cq);
590
591 err:
592 kfree(init_attr);
593 return ret;
594 }
595
596 /*
597 * Note: this function may be called without srp_alloc_iu_bufs() having been
598 * invoked. Hence the ch->[rt]x_ring checks.
599 */
600 static void srp_free_ch_ib(struct srp_target_port *target,
601 struct srp_rdma_ch *ch)
602 {
603 struct srp_device *dev = target->srp_host->srp_dev;
604 int i;
605
606 if (!ch->target)
607 return;
608
609 if (ch->cm_id) {
610 ib_destroy_cm_id(ch->cm_id);
611 ch->cm_id = NULL;
612 }
613
614 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
615 if (!ch->qp)
616 return;
617
618 if (dev->use_fast_reg) {
619 if (ch->fr_pool)
620 srp_destroy_fr_pool(ch->fr_pool);
621 } else if (dev->use_fmr) {
622 if (ch->fmr_pool)
623 ib_destroy_fmr_pool(ch->fmr_pool);
624 }
625 srp_destroy_qp(ch);
626 ib_destroy_cq(ch->send_cq);
627 ib_destroy_cq(ch->recv_cq);
628
629 /*
630 * Prevent the SCSI error handler from using this channel after it
631 * has been freed, since the error handler may continue to attempt
632 * recovery actions even after scsi_remove_host() has
633 * returned.
634 */
635 ch->target = NULL;
636
637 ch->qp = NULL;
638 ch->send_cq = ch->recv_cq = NULL;
639
640 if (ch->rx_ring) {
641 for (i = 0; i < target->queue_size; ++i)
642 srp_free_iu(target->srp_host, ch->rx_ring[i]);
643 kfree(ch->rx_ring);
644 ch->rx_ring = NULL;
645 }
646 if (ch->tx_ring) {
647 for (i = 0; i < target->queue_size; ++i)
648 srp_free_iu(target->srp_host, ch->tx_ring[i]);
649 kfree(ch->tx_ring);
650 ch->tx_ring = NULL;
651 }
652 }
653
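/*
 * Completion callback of the path record query started by
 * srp_lookup_path(): store the path record or the error status in the
 * channel and wake up the waiter.
 */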
654 static void srp_path_rec_completion(int status,
655 struct ib_sa_path_rec *pathrec,
656 void *ch_ptr)
657 {
658 struct srp_rdma_ch *ch = ch_ptr;
659 struct srp_target_port *target = ch->target;
660
661 ch->status = status;
662 if (status)
663 shost_printk(KERN_ERR, target->scsi_host,
664 PFX "Got failed path rec status %d\n", status);
665 else
666 ch->path = *pathrec;
667 complete(&ch->done);
668 }
669
670 static int srp_lookup_path(struct srp_rdma_ch *ch)
671 {
672 struct srp_target_port *target = ch->target;
673 int ret = -ENODEV;
674
675 ch->path.numb_path = 1;
676
677 init_completion(&ch->done);
678
679 /*
680 * Prevent the SCSI host from being removed by srp_remove_target()
681 * before srp_path_rec_completion() is called.
682 */
683 if (!scsi_host_get(target->scsi_host))
684 goto out;
685
686 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
687 target->srp_host->srp_dev->dev,
688 target->srp_host->port,
689 &ch->path,
690 IB_SA_PATH_REC_SERVICE_ID |
691 IB_SA_PATH_REC_DGID |
692 IB_SA_PATH_REC_SGID |
693 IB_SA_PATH_REC_NUMB_PATH |
694 IB_SA_PATH_REC_PKEY,
695 SRP_PATH_REC_TIMEOUT_MS,
696 GFP_KERNEL,
697 srp_path_rec_completion,
698 ch, &ch->path_query);
699 ret = ch->path_query_id;
700 if (ret < 0)
701 goto put;
702
703 ret = wait_for_completion_interruptible(&ch->done);
704 if (ret < 0)
705 goto put;
706
707 ret = ch->status;
708 if (ret < 0)
709 shost_printk(KERN_WARNING, target->scsi_host,
710 PFX "Path record query failed\n");
711
712 put:
713 scsi_host_put(target->scsi_host);
714
715 out:
716 return ret;
717 }
718
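/*
 * Build and send an IB CM REQ whose private data carries the
 * SRP_LOGIN_REQ. The layout of the initiator and target port identifiers
 * depends on the I/O class reported by the target; see the comment about
 * obsolete SRP drafts further down in this function.
 */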
719 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
720 {
721 struct srp_target_port *target = ch->target;
722 struct {
723 struct ib_cm_req_param param;
724 struct srp_login_req priv;
725 } *req = NULL;
726 int status;
727
728 req = kzalloc(sizeof *req, GFP_KERNEL);
729 if (!req)
730 return -ENOMEM;
731
732 req->param.primary_path = &ch->path;
733 req->param.alternate_path = NULL;
734 req->param.service_id = target->service_id;
735 req->param.qp_num = ch->qp->qp_num;
736 req->param.qp_type = ch->qp->qp_type;
737 req->param.private_data = &req->priv;
738 req->param.private_data_len = sizeof req->priv;
739 req->param.flow_control = 1;
740
741 get_random_bytes(&req->param.starting_psn, 4);
742 req->param.starting_psn &= 0xffffff;
743
744 /*
745 * Pick some arbitrary defaults here; we could make these
746 * module parameters if anyone cared about setting them.
747 */
748 req->param.responder_resources = 4;
749 req->param.remote_cm_response_timeout = 20;
750 req->param.local_cm_response_timeout = 20;
751 req->param.retry_count = target->tl_retry_count;
752 req->param.rnr_retry_count = 7;
753 req->param.max_cm_retries = 15;
754
755 req->priv.opcode = SRP_LOGIN_REQ;
756 req->priv.tag = 0;
757 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
758 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
759 SRP_BUF_FORMAT_INDIRECT);
760 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
761 SRP_MULTICHAN_SINGLE);
762 /*
763 * In the published SRP specification (draft rev. 16a), the
764 * port identifier format is 8 bytes of ID extension followed
765 * by 8 bytes of GUID. Older drafts put the two halves in the
766 * opposite order, so that the GUID comes first.
767 *
768 * Targets conforming to these obsolete drafts can be
769 * recognized by the I/O Class they report.
770 */
771 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
772 memcpy(req->priv.initiator_port_id,
773 &target->sgid.global.interface_id, 8);
774 memcpy(req->priv.initiator_port_id + 8,
775 &target->initiator_ext, 8);
776 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
777 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
778 } else {
779 memcpy(req->priv.initiator_port_id,
780 &target->initiator_ext, 8);
781 memcpy(req->priv.initiator_port_id + 8,
782 &target->sgid.global.interface_id, 8);
783 memcpy(req->priv.target_port_id, &target->id_ext, 8);
784 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
785 }
786
787 /*
788 * Topspin/Cisco SRP targets will reject our login unless we
789 * zero out the first 8 bytes of our initiator port ID and set
790 * the second 8 bytes to the local node GUID.
791 */
792 if (srp_target_is_topspin(target)) {
793 shost_printk(KERN_DEBUG, target->scsi_host,
794 PFX "Topspin/Cisco initiator port ID workaround "
795 "activated for target GUID %016llx\n",
796 be64_to_cpu(target->ioc_guid));
797 memset(req->priv.initiator_port_id, 0, 8);
798 memcpy(req->priv.initiator_port_id + 8,
799 &target->srp_host->srp_dev->dev->node_guid, 8);
800 }
801
802 status = ib_send_cm_req(ch->cm_id, &req->param);
803
804 kfree(req);
805
806 return status;
807 }
808
809 static bool srp_queue_remove_work(struct srp_target_port *target)
810 {
811 bool changed = false;
812
813 spin_lock_irq(&target->lock);
814 if (target->state != SRP_TARGET_REMOVED) {
815 target->state = SRP_TARGET_REMOVED;
816 changed = true;
817 }
818 spin_unlock_irq(&target->lock);
819
820 if (changed)
821 queue_work(srp_remove_wq, &target->remove_work);
822
823 return changed;
824 }
825
826 static void srp_disconnect_target(struct srp_target_port *target)
827 {
828 struct srp_rdma_ch *ch;
829 int i;
830
831 /* XXX should send SRP_I_LOGOUT request */
832
833 for (i = 0; i < target->ch_count; i++) {
834 ch = &target->ch[i];
835 ch->connected = false;
836 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
837 shost_printk(KERN_DEBUG, target->scsi_host,
838 PFX "Sending CM DREQ failed\n");
839 }
840 }
841 }
842
843 static void srp_free_req_data(struct srp_target_port *target,
844 struct srp_rdma_ch *ch)
845 {
846 struct srp_device *dev = target->srp_host->srp_dev;
847 struct ib_device *ibdev = dev->dev;
848 struct srp_request *req;
849 int i;
850
851 if (!ch->req_ring)
852 return;
853
854 for (i = 0; i < target->req_ring_size; ++i) {
855 req = &ch->req_ring[i];
856 if (dev->use_fast_reg) {
857 kfree(req->fr_list);
858 } else {
859 kfree(req->fmr_list);
860 kfree(req->map_page);
861 }
862 if (req->indirect_dma_addr) {
863 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
864 target->indirect_size,
865 DMA_TO_DEVICE);
866 }
867 kfree(req->indirect_desc);
868 }
869
870 kfree(ch->req_ring);
871 ch->req_ring = NULL;
872 }
873
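/*
 * Allocate the request ring of a channel together with the per-request
 * FR or FMR descriptor lists and the DMA-mapped indirect descriptor
 * buffers. The ring is zero-initialized, so srp_free_req_data() can cope
 * with a partially set up ring if this function fails halfway through.
 */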
874 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
875 {
876 struct srp_target_port *target = ch->target;
877 struct srp_device *srp_dev = target->srp_host->srp_dev;
878 struct ib_device *ibdev = srp_dev->dev;
879 struct srp_request *req;
880 void *mr_list;
881 dma_addr_t dma_addr;
882 int i, ret = -ENOMEM;
883
884 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
885 GFP_KERNEL);
886 if (!ch->req_ring)
887 goto out;
888
889 for (i = 0; i < target->req_ring_size; ++i) {
890 req = &ch->req_ring[i];
891 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
892 GFP_KERNEL);
893 if (!mr_list)
894 goto out;
895 if (srp_dev->use_fast_reg) {
896 req->fr_list = mr_list;
897 } else {
898 req->fmr_list = mr_list;
899 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
900 sizeof(void *), GFP_KERNEL);
901 if (!req->map_page)
902 goto out;
903 }
904 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
905 if (!req->indirect_desc)
906 goto out;
907
908 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
909 target->indirect_size,
910 DMA_TO_DEVICE);
911 if (ib_dma_mapping_error(ibdev, dma_addr))
912 goto out;
913
914 req->indirect_dma_addr = dma_addr;
915 }
916 ret = 0;
917
918 out:
919 return ret;
920 }
921
922 /**
923 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
924 * @shost: SCSI host whose attributes to remove from sysfs.
925 *
926 * Note: Any attributes defined in the host template that did not exist
927 * before invocation of this function will be ignored.
928 */
929 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
930 {
931 struct device_attribute **attr;
932
933 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
934 device_remove_file(&shost->shost_dev, *attr);
935 }
936
937 static void srp_remove_target(struct srp_target_port *target)
938 {
939 struct srp_rdma_ch *ch;
940 int i;
941
942 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
943
944 srp_del_scsi_host_attr(target->scsi_host);
945 srp_rport_get(target->rport);
946 srp_remove_host(target->scsi_host);
947 scsi_remove_host(target->scsi_host);
948 srp_stop_rport_timers(target->rport);
949 srp_disconnect_target(target);
950 for (i = 0; i < target->ch_count; i++) {
951 ch = &target->ch[i];
952 srp_free_ch_ib(target, ch);
953 }
954 cancel_work_sync(&target->tl_err_work);
955 srp_rport_put(target->rport);
956 for (i = 0; i < target->ch_count; i++) {
957 ch = &target->ch[i];
958 srp_free_req_data(target, ch);
959 }
960 kfree(target->ch);
961 target->ch = NULL;
962
963 spin_lock(&target->srp_host->target_lock);
964 list_del(&target->list);
965 spin_unlock(&target->srp_host->target_lock);
966
967 scsi_host_put(target->scsi_host);
968 }
969
970 static void srp_remove_work(struct work_struct *work)
971 {
972 struct srp_target_port *target =
973 container_of(work, struct srp_target_port, remove_work);
974
975 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
976
977 srp_remove_target(target);
978 }
979
980 static void srp_rport_delete(struct srp_rport *rport)
981 {
982 struct srp_target_port *target = rport->lld_data;
983
984 srp_queue_remove_work(target);
985 }
986
987 /**
988 * srp_connected_ch() - number of connected channels
989 * @target: SRP target port.
990 */
991 static int srp_connected_ch(struct srp_target_port *target)
992 {
993 int i, c = 0;
994
995 for (i = 0; i < target->ch_count; i++)
996 c += target->ch[i].connected;
997
998 return c;
999 }
1000
1001 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
1002 {
1003 struct srp_target_port *target = ch->target;
1004 int ret;
1005
1006 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1007
1008 ret = srp_lookup_path(ch);
1009 if (ret)
1010 goto out;
1011
1012 while (1) {
1013 init_completion(&ch->done);
1014 ret = srp_send_req(ch, multich);
1015 if (ret)
1016 goto out;
1017 ret = wait_for_completion_interruptible(&ch->done);
1018 if (ret < 0)
1019 goto out;
1020
1021 /*
1022 * The CM event handling code will set status to
1023 * SRP_PORT_REDIRECT if we get a port redirect REJ
1024 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1025 * redirect REJ back.
1026 */
1027 ret = ch->status;
1028 switch (ret) {
1029 case 0:
1030 ch->connected = true;
1031 goto out;
1032
1033 case SRP_PORT_REDIRECT:
1034 ret = srp_lookup_path(ch);
1035 if (ret)
1036 goto out;
1037 break;
1038
1039 case SRP_DLID_REDIRECT:
1040 break;
1041
1042 case SRP_STALE_CONN:
1043 shost_printk(KERN_ERR, target->scsi_host, PFX
1044 "giving up on stale connection\n");
1045 ret = -ECONNRESET;
1046 goto out;
1047
1048 default:
1049 goto out;
1050 }
1051 }
1052
1053 out:
1054 return ret <= 0 ? ret : -ENODEV;
1055 }
1056
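/*
 * Post a local invalidate work request for @rkey on the channel's queue
 * pair; used by srp_unmap_data() to invalidate fast registration MRs.
 */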
1057 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
1058 {
1059 struct ib_send_wr *bad_wr;
1060 struct ib_send_wr wr = {
1061 .opcode = IB_WR_LOCAL_INV,
1062 .wr_id = LOCAL_INV_WR_ID_MASK,
1063 .next = NULL,
1064 .num_sge = 0,
1065 .send_flags = 0,
1066 .ex.invalidate_rkey = rkey,
1067 };
1068
1069 return ib_post_send(ch->qp, &wr, &bad_wr);
1070 }
1071
1072 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1073 struct srp_rdma_ch *ch,
1074 struct srp_request *req)
1075 {
1076 struct srp_target_port *target = ch->target;
1077 struct srp_device *dev = target->srp_host->srp_dev;
1078 struct ib_device *ibdev = dev->dev;
1079 int i, res;
1080
1081 if (!scsi_sglist(scmnd) ||
1082 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1083 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1084 return;
1085
1086 if (dev->use_fast_reg) {
1087 struct srp_fr_desc **pfr;
1088
1089 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1090 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
1091 if (res < 0) {
1092 shost_printk(KERN_ERR, target->scsi_host, PFX
1093 "Queueing INV WR for rkey %#x failed (%d)\n",
1094 (*pfr)->mr->rkey, res);
1095 queue_work(system_long_wq,
1096 &target->tl_err_work);
1097 }
1098 }
1099 if (req->nmdesc)
1100 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1101 req->nmdesc);
1102 } else if (dev->use_fmr) {
1103 struct ib_pool_fmr **pfmr;
1104
1105 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1106 ib_fmr_pool_unmap(*pfmr);
1107 }
1108
1109 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1110 scmnd->sc_data_direction);
1111 }
1112
1113 /**
1114 * srp_claim_req() - Take ownership of the scmnd associated with a request.
1115 * @ch: SRP RDMA channel.
1116 * @req: SRP request.
1117 * @sdev: If not NULL, only take ownership for this SCSI device.
1118 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1119 * ownership of @req->scmnd if it equals @scmnd.
1120 *
1121 * Return value:
1122 * Either NULL or a pointer to the SCSI command the caller became owner of.
1123 */
1124 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1125 struct srp_request *req,
1126 struct scsi_device *sdev,
1127 struct scsi_cmnd *scmnd)
1128 {
1129 unsigned long flags;
1130
1131 spin_lock_irqsave(&ch->lock, flags);
1132 if (req->scmnd &&
1133 (!sdev || req->scmnd->device == sdev) &&
1134 (!scmnd || req->scmnd == scmnd)) {
1135 scmnd = req->scmnd;
1136 req->scmnd = NULL;
1137 } else {
1138 scmnd = NULL;
1139 }
1140 spin_unlock_irqrestore(&ch->lock, flags);
1141
1142 return scmnd;
1143 }
1144
1145 /**
1146 * srp_free_req() - Unmap data and adjust ch->req_lim.
1147 * @ch: SRP RDMA channel.
1148 * @req: Request to be freed.
1149 * @scmnd: SCSI command associated with @req.
1150 * @req_lim_delta: Amount to be added to @ch->req_lim.
1151 */
1152 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1153 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1154 {
1155 unsigned long flags;
1156
1157 srp_unmap_data(scmnd, ch, req);
1158
1159 spin_lock_irqsave(&ch->lock, flags);
1160 ch->req_lim += req_lim_delta;
1161 spin_unlock_irqrestore(&ch->lock, flags);
1162 }
1163
1164 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1165 struct scsi_device *sdev, int result)
1166 {
1167 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1168
1169 if (scmnd) {
1170 srp_free_req(ch, req, scmnd, 0);
1171 scmnd->result = result;
1172 scmnd->scsi_done(scmnd);
1173 }
1174 }
1175
1176 static void srp_terminate_io(struct srp_rport *rport)
1177 {
1178 struct srp_target_port *target = rport->lld_data;
1179 struct srp_rdma_ch *ch;
1180 struct Scsi_Host *shost = target->scsi_host;
1181 struct scsi_device *sdev;
1182 int i, j;
1183
1184 /*
1185 * Invoking srp_terminate_io() while srp_queuecommand() is running
1186 * is not safe. Hence the warning statement below.
1187 */
1188 shost_for_each_device(sdev, shost)
1189 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1190
1191 for (i = 0; i < target->ch_count; i++) {
1192 ch = &target->ch[i];
1193
1194 for (j = 0; j < target->req_ring_size; ++j) {
1195 struct srp_request *req = &ch->req_ring[j];
1196
1197 srp_finish_req(ch, req, NULL,
1198 DID_TRANSPORT_FAILFAST << 16);
1199 }
1200 }
1201 }
1202
1203 /*
1204 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1205 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1206 * srp_reset_device() or srp_reset_host() calls will occur while this function
1207 * is in progress. One way to realize that is not to call this function
1208 * directly but to call srp_reconnect_rport() instead since that last function
1209 * serializes calls of this function via rport->mutex and also blocks
1210 * srp_queuecommand() calls before invoking this function.
1211 */
1212 static int srp_rport_reconnect(struct srp_rport *rport)
1213 {
1214 struct srp_target_port *target = rport->lld_data;
1215 struct srp_rdma_ch *ch;
1216 int i, j, ret = 0;
1217 bool multich = false;
1218
1219 srp_disconnect_target(target);
1220
1221 if (target->state == SRP_TARGET_SCANNING)
1222 return -ENODEV;
1223
1224 /*
1225 * Now get a new local CM ID so that we avoid confusing the target in
1226 * case things are really fouled up. Doing so also ensures that all CM
1227 * callbacks will have finished before a new QP is allocated.
1228 */
1229 for (i = 0; i < target->ch_count; i++) {
1230 ch = &target->ch[i];
1231 ret += srp_new_cm_id(ch);
1232 }
1233 for (i = 0; i < target->ch_count; i++) {
1234 ch = &target->ch[i];
1235 for (j = 0; j < target->req_ring_size; ++j) {
1236 struct srp_request *req = &ch->req_ring[j];
1237
1238 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1239 }
1240 }
1241 for (i = 0; i < target->ch_count; i++) {
1242 ch = &target->ch[i];
1243 /*
1244 * Whether or not creating a new CM ID succeeded, create a new
1245 * QP. This guarantees that all completion callback function
1246 * invocations have finished before request resetting starts.
1247 */
1248 ret += srp_create_ch_ib(ch);
1249
1250 INIT_LIST_HEAD(&ch->free_tx);
1251 for (j = 0; j < target->queue_size; ++j)
1252 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1253 }
1254
1255 target->qp_in_error = false;
1256
1257 for (i = 0; i < target->ch_count; i++) {
1258 ch = &target->ch[i];
1259 if (ret)
1260 break;
1261 ret = srp_connect_ch(ch, multich);
1262 multich = true;
1263 }
1264
1265 if (ret == 0)
1266 shost_printk(KERN_INFO, target->scsi_host,
1267 PFX "reconnect succeeded\n");
1268
1269 return ret;
1270 }
1271
1272 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1273 unsigned int dma_len, u32 rkey)
1274 {
1275 struct srp_direct_buf *desc = state->desc;
1276
1277 WARN_ON_ONCE(!dma_len);
1278
1279 desc->va = cpu_to_be64(dma_addr);
1280 desc->key = cpu_to_be32(rkey);
1281 desc->len = cpu_to_be32(dma_len);
1282
1283 state->total_len += dma_len;
1284 state->desc++;
1285 state->ndesc++;
1286 }
1287
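/*
 * Map the pages accumulated in @state through the FMR pool and append
 * the resulting memory descriptor. A single page is described directly
 * through the global MR when one is available, which avoids an FMR
 * mapping.
 */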
1288 static int srp_map_finish_fmr(struct srp_map_state *state,
1289 struct srp_rdma_ch *ch)
1290 {
1291 struct srp_target_port *target = ch->target;
1292 struct srp_device *dev = target->srp_host->srp_dev;
1293 struct ib_pool_fmr *fmr;
1294 u64 io_addr = 0;
1295
1296 if (state->fmr.next >= state->fmr.end)
1297 return -ENOMEM;
1298
1299 WARN_ON_ONCE(!dev->use_fmr);
1300
1301 if (state->npages == 0)
1302 return 0;
1303
1304 if (state->npages == 1 && target->global_mr) {
1305 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1306 target->global_mr->rkey);
1307 goto reset_state;
1308 }
1309
1310 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1311 state->npages, io_addr);
1312 if (IS_ERR(fmr))
1313 return PTR_ERR(fmr);
1314
1315 *state->fmr.next++ = fmr;
1316 state->nmdesc++;
1317
1318 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1319 state->dma_len, fmr->fmr->rkey);
1320
1321 reset_state:
1322 state->npages = 0;
1323 state->dma_len = 0;
1324
1325 return 0;
1326 }
1327
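/*
 * Register up to @sg_nents scatterlist entries with a fast registration
 * (IB_WR_REG_MR) work request and append the resulting descriptor.
 * Returns the number of entries that were mapped or a negative error
 * code; a single entry is described through the global MR when one is
 * available.
 */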
1328 static int srp_map_finish_fr(struct srp_map_state *state,
1329 struct srp_rdma_ch *ch, int sg_nents)
1330 {
1331 struct srp_target_port *target = ch->target;
1332 struct srp_device *dev = target->srp_host->srp_dev;
1333 struct ib_send_wr *bad_wr;
1334 struct ib_reg_wr wr;
1335 struct srp_fr_desc *desc;
1336 u32 rkey;
1337 int n, err;
1338
1339 if (state->fr.next >= state->fr.end)
1340 return -ENOMEM;
1341
1342 WARN_ON_ONCE(!dev->use_fast_reg);
1343
1344 if (sg_nents == 0)
1345 return 0;
1346
1347 if (sg_nents == 1 && target->global_mr) {
1348 srp_map_desc(state, sg_dma_address(state->sg),
1349 sg_dma_len(state->sg),
1350 target->global_mr->rkey);
1351 return 1;
1352 }
1353
1354 desc = srp_fr_pool_get(ch->fr_pool);
1355 if (!desc)
1356 return -ENOMEM;
1357
1358 rkey = ib_inc_rkey(desc->mr->rkey);
1359 ib_update_fast_reg_key(desc->mr, rkey);
1360
1361 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
1362 if (unlikely(n < 0))
1363 return n;
1364
1365 wr.wr.next = NULL;
1366 wr.wr.opcode = IB_WR_REG_MR;
1367 wr.wr.wr_id = FAST_REG_WR_ID_MASK;
1368 wr.wr.num_sge = 0;
1369 wr.wr.send_flags = 0;
1370 wr.mr = desc->mr;
1371 wr.key = desc->mr->rkey;
1372 wr.access = (IB_ACCESS_LOCAL_WRITE |
1373 IB_ACCESS_REMOTE_READ |
1374 IB_ACCESS_REMOTE_WRITE);
1375
1376 *state->fr.next++ = desc;
1377 state->nmdesc++;
1378
1379 srp_map_desc(state, desc->mr->iova,
1380 desc->mr->length, desc->mr->rkey);
1381
1382 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
1383 if (unlikely(err))
1384 return err;
1385
1386 return n;
1387 }
1388
1389 static int srp_map_sg_entry(struct srp_map_state *state,
1390 struct srp_rdma_ch *ch,
1391 struct scatterlist *sg, int sg_index)
1392 {
1393 struct srp_target_port *target = ch->target;
1394 struct srp_device *dev = target->srp_host->srp_dev;
1395 struct ib_device *ibdev = dev->dev;
1396 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1397 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1398 unsigned int len = 0;
1399 int ret;
1400
1401 WARN_ON_ONCE(!dma_len);
1402
1403 while (dma_len) {
1404 unsigned offset = dma_addr & ~dev->mr_page_mask;
1405 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1406 ret = srp_map_finish_fmr(state, ch);
1407 if (ret)
1408 return ret;
1409 }
1410
1411 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1412
1413 if (!state->npages)
1414 state->base_dma_addr = dma_addr;
1415 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1416 state->dma_len += len;
1417 dma_addr += len;
1418 dma_len -= len;
1419 }
1420
1421 /*
1422 * If the last entry of the MR wasn't a full page, then we need to
1423 * close it out and start a new one -- we can only merge at page
1424 * boundaries.
1425 */
1426 ret = 0;
1427 if (len != dev->mr_page_size)
1428 ret = srp_map_finish_fmr(state, ch);
1429 return ret;
1430 }
1431
1432 static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1433 struct srp_request *req, struct scatterlist *scat,
1434 int count)
1435 {
1436 struct scatterlist *sg;
1437 int i, ret;
1438
1439 state->desc = req->indirect_desc;
1440 state->pages = req->map_page;
1441 state->fmr.next = req->fmr_list;
1442 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1443
1444 for_each_sg(scat, sg, count, i) {
1445 ret = srp_map_sg_entry(state, ch, sg, i);
1446 if (ret)
1447 return ret;
1448 }
1449
1450 ret = srp_map_finish_fmr(state, ch);
1451 if (ret)
1452 return ret;
1453
1454 req->nmdesc = state->nmdesc;
1455
1456 return 0;
1457 }
1458
1459 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1460 struct srp_request *req, struct scatterlist *scat,
1461 int count)
1462 {
1463 state->desc = req->indirect_desc;
1464 state->fr.next = req->fr_list;
1465 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1466 state->sg = scat;
1467
1468 while (count) {
1469 int i, n;
1470
1471 n = srp_map_finish_fr(state, ch, count);
1472 if (unlikely(n < 0))
1473 return n;
1474
1475 count -= n;
1476 for (i = 0; i < n; i++)
1477 state->sg = sg_next(state->sg);
1478 }
1479
1480 req->nmdesc = state->nmdesc;
1481
1482 return 0;
1483 }
1484
1485 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1486 struct srp_request *req, struct scatterlist *scat,
1487 int count)
1488 {
1489 struct srp_target_port *target = ch->target;
1490 struct srp_device *dev = target->srp_host->srp_dev;
1491 struct scatterlist *sg;
1492 int i;
1493
1494 state->desc = req->indirect_desc;
1495 for_each_sg(scat, sg, count, i) {
1496 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1497 ib_sg_dma_len(dev->dev, sg),
1498 target->global_mr->rkey);
1499 }
1500
1501 req->nmdesc = state->nmdesc;
1502
1503 return 0;
1504 }
1505
1506 /*
1507 * Register the indirect data buffer descriptor with the HCA.
1508 *
1509 * Note: since the indirect data buffer descriptor has been allocated with
1510 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1511 * memory buffer.
1512 */
1513 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1514 void **next_mr, void **end_mr, u32 idb_len,
1515 __be32 *idb_rkey)
1516 {
1517 struct srp_target_port *target = ch->target;
1518 struct srp_device *dev = target->srp_host->srp_dev;
1519 struct srp_map_state state;
1520 struct srp_direct_buf idb_desc;
1521 u64 idb_pages[1];
1522 struct scatterlist idb_sg[1];
1523 int ret;
1524
1525 memset(&state, 0, sizeof(state));
1526 memset(&idb_desc, 0, sizeof(idb_desc));
1527 state.gen.next = next_mr;
1528 state.gen.end = end_mr;
1529 state.desc = &idb_desc;
1530 state.base_dma_addr = req->indirect_dma_addr;
1531 state.dma_len = idb_len;
1532
1533 if (dev->use_fast_reg) {
1534 state.sg = idb_sg;
1535 sg_init_one(idb_sg, req->indirect_desc, idb_len);
1536 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1537 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1538 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1539 #endif
1540 ret = srp_map_finish_fr(&state, ch, 1);
1541 if (ret < 0)
1542 return ret;
1543 } else if (dev->use_fmr) {
1544 state.pages = idb_pages;
1545 state.pages[0] = (req->indirect_dma_addr &
1546 dev->mr_page_mask);
1547 state.npages = 1;
1548 ret = srp_map_finish_fmr(&state, ch);
1549 if (ret < 0)
1550 return ret;
1551 } else {
1552 return -EINVAL;
1553 }
1554
1555 *idb_rkey = idb_desc.key;
1556
1557 return 0;
1558 }
1559
1560 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1561 struct srp_request *req)
1562 {
1563 struct srp_target_port *target = ch->target;
1564 struct scatterlist *scat;
1565 struct srp_cmd *cmd = req->cmd->buf;
1566 int len, nents, count, ret;
1567 struct srp_device *dev;
1568 struct ib_device *ibdev;
1569 struct srp_map_state state;
1570 struct srp_indirect_buf *indirect_hdr;
1571 u32 idb_len, table_len;
1572 __be32 idb_rkey;
1573 u8 fmt;
1574
1575 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1576 return sizeof (struct srp_cmd);
1577
1578 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1579 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1580 shost_printk(KERN_WARNING, target->scsi_host,
1581 PFX "Unhandled data direction %d\n",
1582 scmnd->sc_data_direction);
1583 return -EINVAL;
1584 }
1585
1586 nents = scsi_sg_count(scmnd);
1587 scat = scsi_sglist(scmnd);
1588
1589 dev = target->srp_host->srp_dev;
1590 ibdev = dev->dev;
1591
1592 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1593 if (unlikely(count == 0))
1594 return -EIO;
1595
1596 fmt = SRP_DATA_DESC_DIRECT;
1597 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1598
1599 if (count == 1 && target->global_mr) {
1600 /*
1601 * The midlayer only generated a single gather/scatter
1602 * entry, or DMA mapping coalesced everything to a
1603 * single entry. So a direct descriptor along with
1604 * the DMA MR suffices.
1605 */
1606 struct srp_direct_buf *buf = (void *) cmd->add_data;
1607
1608 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1609 buf->key = cpu_to_be32(target->global_mr->rkey);
1610 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1611
1612 req->nmdesc = 0;
1613 goto map_complete;
1614 }
1615
1616 /*
1617 * We have more than one scatter/gather entry, so build our indirect
1618 * descriptor table, trying to merge as many entries as we can.
1619 */
1620 indirect_hdr = (void *) cmd->add_data;
1621
1622 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1623 target->indirect_size, DMA_TO_DEVICE);
1624
1625 memset(&state, 0, sizeof(state));
1626 if (dev->use_fast_reg)
1627 srp_map_sg_fr(&state, ch, req, scat, count);
1628 else if (dev->use_fmr)
1629 srp_map_sg_fmr(&state, ch, req, scat, count);
1630 else
1631 srp_map_sg_dma(&state, ch, req, scat, count);
1632
1633 /* We've mapped the request, now pull as much of the indirect
1634 * descriptor table as we can into the command buffer. If this
1635 * target is not using an external indirect table, we are
1636 * guaranteed to fit into the command, as the SCSI layer won't
1637 * give us more S/G entries than we allow.
1638 */
1639 if (state.ndesc == 1) {
1640 /*
1641 * Memory registration collapsed the sg-list into one entry,
1642 * so use a direct descriptor.
1643 */
1644 struct srp_direct_buf *buf = (void *) cmd->add_data;
1645
1646 *buf = req->indirect_desc[0];
1647 goto map_complete;
1648 }
1649
1650 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1651 !target->allow_ext_sg)) {
1652 shost_printk(KERN_ERR, target->scsi_host,
1653 "Could not fit S/G list into SRP_CMD\n");
1654 return -EIO;
1655 }
1656
1657 count = min(state.ndesc, target->cmd_sg_cnt);
1658 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1659 idb_len = sizeof(struct srp_indirect_buf) + table_len;
1660
1661 fmt = SRP_DATA_DESC_INDIRECT;
1662 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1663 len += count * sizeof (struct srp_direct_buf);
1664
1665 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1666 count * sizeof (struct srp_direct_buf));
1667
1668 if (!target->global_mr) {
1669 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1670 idb_len, &idb_rkey);
1671 if (ret < 0)
1672 return ret;
1673 req->nmdesc++;
1674 } else {
1675 idb_rkey = cpu_to_be32(target->global_mr->rkey);
1676 }
1677
1678 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1679 indirect_hdr->table_desc.key = idb_rkey;
1680 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1681 indirect_hdr->len = cpu_to_be32(state.total_len);
1682
1683 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1684 cmd->data_out_desc_cnt = count;
1685 else
1686 cmd->data_in_desc_cnt = count;
1687
1688 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1689 DMA_TO_DEVICE);
1690
1691 map_complete:
1692 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1693 cmd->buf_fmt = fmt << 4;
1694 else
1695 cmd->buf_fmt = fmt;
1696
1697 return len;
1698 }
1699
1700 /*
1701 * Return an IU and possible credit to the free pool
1702 */
1703 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1704 enum srp_iu_type iu_type)
1705 {
1706 unsigned long flags;
1707
1708 spin_lock_irqsave(&ch->lock, flags);
1709 list_add(&iu->list, &ch->free_tx);
1710 if (iu_type != SRP_IU_RSP)
1711 ++ch->req_lim;
1712 spin_unlock_irqrestore(&ch->lock, flags);
1713 }
1714
1715 /*
1716 * Must be called with ch->lock held to protect req_lim and free_tx.
1717 * If IU is not sent, it must be returned using srp_put_tx_iu().
1718 *
1719 * Note:
1720 * An upper limit for the number of allocated information units for each
1721 * request type is:
1722 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1723 * more than Scsi_Host.can_queue requests.
1724 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1725 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1726 * one unanswered SRP request to an initiator.
1727 */
1728 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1729 enum srp_iu_type iu_type)
1730 {
1731 struct srp_target_port *target = ch->target;
1732 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1733 struct srp_iu *iu;
1734
1735 srp_send_completion(ch->send_cq, ch);
1736
1737 if (list_empty(&ch->free_tx))
1738 return NULL;
1739
1740 /* Initiator responses to target requests do not consume credits */
1741 if (iu_type != SRP_IU_RSP) {
1742 if (ch->req_lim <= rsv) {
1743 ++target->zero_req_lim;
1744 return NULL;
1745 }
1746
1747 --ch->req_lim;
1748 }
1749
1750 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1751 list_del(&iu->list);
1752 return iu;
1753 }
1754
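/*
 * Post a signaled send work request for the first @len bytes of an IU.
 * The IU pointer is stored in the work request ID so that it can be
 * recovered when the send completion is processed.
 */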
1755 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1756 {
1757 struct srp_target_port *target = ch->target;
1758 struct ib_sge list;
1759 struct ib_send_wr wr, *bad_wr;
1760
1761 list.addr = iu->dma;
1762 list.length = len;
1763 list.lkey = target->lkey;
1764
1765 wr.next = NULL;
1766 wr.wr_id = (uintptr_t) iu;
1767 wr.sg_list = &list;
1768 wr.num_sge = 1;
1769 wr.opcode = IB_WR_SEND;
1770 wr.send_flags = IB_SEND_SIGNALED;
1771
1772 return ib_post_send(ch->qp, &wr, &bad_wr);
1773 }
1774
1775 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1776 {
1777 struct srp_target_port *target = ch->target;
1778 struct ib_recv_wr wr, *bad_wr;
1779 struct ib_sge list;
1780
1781 list.addr = iu->dma;
1782 list.length = iu->size;
1783 list.lkey = target->lkey;
1784
1785 wr.next = NULL;
1786 wr.wr_id = (uintptr_t) iu;
1787 wr.sg_list = &list;
1788 wr.num_sge = 1;
1789
1790 return ib_post_recv(ch->qp, &wr, &bad_wr);
1791 }
1792
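/*
 * Process an SRP_RSP IU. Task management responses complete
 * ch->tsk_mgmt_done; command responses are matched to their SCSI command
 * via the tag, after which status, sense data and residual counts are
 * copied and the command is finished through scsi_done().
 */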
1793 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1794 {
1795 struct srp_target_port *target = ch->target;
1796 struct srp_request *req;
1797 struct scsi_cmnd *scmnd;
1798 unsigned long flags;
1799
1800 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1801 spin_lock_irqsave(&ch->lock, flags);
1802 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1803 if (rsp->tag == ch->tsk_mgmt_tag) {
1804 ch->tsk_mgmt_status = -1;
1805 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1806 ch->tsk_mgmt_status = rsp->data[3];
1807 complete(&ch->tsk_mgmt_done);
1808 } else {
1809 shost_printk(KERN_ERR, target->scsi_host,
1810 "Received tsk mgmt response too late for tag %#llx\n",
1811 rsp->tag);
1812 }
1813 spin_unlock_irqrestore(&ch->lock, flags);
1814 } else {
1815 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1816 if (scmnd && scmnd->host_scribble) {
1817 req = (void *)scmnd->host_scribble;
1818 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1819 } else {
1820 scmnd = NULL;
1821 }
1822 if (!scmnd) {
1823 shost_printk(KERN_ERR, target->scsi_host,
1824 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1825 rsp->tag, ch - target->ch, ch->qp->qp_num);
1826
1827 spin_lock_irqsave(&ch->lock, flags);
1828 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1829 spin_unlock_irqrestore(&ch->lock, flags);
1830
1831 return;
1832 }
1833 scmnd->result = rsp->status;
1834
1835 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1836 memcpy(scmnd->sense_buffer, rsp->data +
1837 be32_to_cpu(rsp->resp_data_len),
1838 min_t(int, be32_to_cpu(rsp->sense_data_len),
1839 SCSI_SENSE_BUFFERSIZE));
1840 }
1841
1842 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1843 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1844 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1845 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1846 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1847 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1848 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1849 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1850
1851 srp_free_req(ch, req, scmnd,
1852 be32_to_cpu(rsp->req_lim_delta));
1853
1854 scmnd->host_scribble = NULL;
1855 scmnd->scsi_done(scmnd);
1856 }
1857 }
1858
1859 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1860 void *rsp, int len)
1861 {
1862 struct srp_target_port *target = ch->target;
1863 struct ib_device *dev = target->srp_host->srp_dev->dev;
1864 unsigned long flags;
1865 struct srp_iu *iu;
1866 int err;
1867
1868 spin_lock_irqsave(&ch->lock, flags);
1869 ch->req_lim += req_delta;
1870 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1871 spin_unlock_irqrestore(&ch->lock, flags);
1872
1873 if (!iu) {
1874 shost_printk(KERN_ERR, target->scsi_host, PFX
1875 "no IU available to send response\n");
1876 return 1;
1877 }
1878
1879 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1880 memcpy(iu->buf, rsp, len);
1881 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1882
1883 err = srp_post_send(ch, iu, len);
1884 if (err) {
1885 shost_printk(KERN_ERR, target->scsi_host, PFX
1886 "unable to post response: %d\n", err);
1887 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1888 }
1889
1890 return err;
1891 }
1892
1893 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1894 struct srp_cred_req *req)
1895 {
1896 struct srp_cred_rsp rsp = {
1897 .opcode = SRP_CRED_RSP,
1898 .tag = req->tag,
1899 };
1900 s32 delta = be32_to_cpu(req->req_lim_delta);
1901
1902 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1903 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1904 "problems processing SRP_CRED_REQ\n");
1905 }
1906
1907 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1908 struct srp_aer_req *req)
1909 {
1910 struct srp_target_port *target = ch->target;
1911 struct srp_aer_rsp rsp = {
1912 .opcode = SRP_AER_RSP,
1913 .tag = req->tag,
1914 };
1915 s32 delta = be32_to_cpu(req->req_lim_delta);
1916
1917 shost_printk(KERN_ERR, target->scsi_host, PFX
1918 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1919
1920 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1921 shost_printk(KERN_ERR, target->scsi_host, PFX
1922 "problems processing SRP_AER_REQ\n");
1923 }
1924
1925 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1926 {
1927 struct srp_target_port *target = ch->target;
1928 struct ib_device *dev = target->srp_host->srp_dev->dev;
1929 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1930 int res;
1931 u8 opcode;
1932
1933 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1934 DMA_FROM_DEVICE);
1935
1936 opcode = *(u8 *) iu->buf;
1937
1938 if (0) {
1939 shost_printk(KERN_ERR, target->scsi_host,
1940 PFX "recv completion, opcode 0x%02x\n", opcode);
1941 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1942 iu->buf, wc->byte_len, true);
1943 }
1944
1945 switch (opcode) {
1946 case SRP_RSP:
1947 srp_process_rsp(ch, iu->buf);
1948 break;
1949
1950 case SRP_CRED_REQ:
1951 srp_process_cred_req(ch, iu->buf);
1952 break;
1953
1954 case SRP_AER_REQ:
1955 srp_process_aer_req(ch, iu->buf);
1956 break;
1957
1958 case SRP_T_LOGOUT:
1959 /* XXX Handle target logout */
1960 shost_printk(KERN_WARNING, target->scsi_host,
1961 PFX "Got target logout request\n");
1962 break;
1963
1964 default:
1965 shost_printk(KERN_WARNING, target->scsi_host,
1966 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1967 break;
1968 }
1969
1970 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1971 DMA_FROM_DEVICE);
1972
1973 res = srp_post_recv(ch, iu);
1974 if (res != 0)
1975 shost_printk(KERN_ERR, target->scsi_host,
1976 PFX "Recv failed with error code %d\n", res);
1977 }
1978
1979 /**
1980 * srp_tl_err_work() - handle a transport layer error
1981 * @work: Work structure embedded in an SRP target port.
1982 *
1983 * Note: This function may get invoked before the rport has been created,
1984 * hence the target->rport test.
1985 */
1986 static void srp_tl_err_work(struct work_struct *work)
1987 {
1988 struct srp_target_port *target;
1989
1990 target = container_of(work, struct srp_target_port, tl_err_work);
1991 if (target->rport)
1992 srp_start_tl_fail_timers(target->rport);
1993 }
1994
1995 static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1996 bool send_err, struct srp_rdma_ch *ch)
1997 {
1998 struct srp_target_port *target = ch->target;
1999
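/*
 * SRP_LAST_WR_ID marks the final work request that this driver posts while
 * draining a queue pair; completing ch->done wakes up the code that is
 * waiting for the drain to finish.
 */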
2000 if (wr_id == SRP_LAST_WR_ID) {
2001 complete(&ch->done);
2002 return;
2003 }
2004
2005 if (ch->connected && !target->qp_in_error) {
2006 if (wr_id & LOCAL_INV_WR_ID_MASK) {
2007 shost_printk(KERN_ERR, target->scsi_host, PFX
2008 "LOCAL_INV failed with status %s (%d)\n",
2009 ib_wc_status_msg(wc_status), wc_status);
2010 } else if (wr_id & FAST_REG_WR_ID_MASK) {
2011 shost_printk(KERN_ERR, target->scsi_host, PFX
2012 "FAST_REG_MR failed status %s (%d)\n",
2013 ib_wc_status_msg(wc_status), wc_status);
2014 } else {
2015 shost_printk(KERN_ERR, target->scsi_host,
2016 PFX "failed %s status %s (%d) for iu %p\n",
2017 send_err ? "send" : "receive",
2018 ib_wc_status_msg(wc_status), wc_status,
2019 (void *)(uintptr_t)wr_id);
2020 }
2021 queue_work(system_long_wq, &target->tl_err_work);
2022 }
2023 target->qp_in_error = true;
2024 }
2025
2026 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
2027 {
2028 struct srp_rdma_ch *ch = ch_ptr;
2029 struct ib_wc wc;
2030
2031 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2032 while (ib_poll_cq(cq, 1, &wc) > 0) {
2033 if (likely(wc.status == IB_WC_SUCCESS)) {
2034 srp_handle_recv(ch, &wc);
2035 } else {
2036 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
2037 }
2038 }
2039 }
2040
2041 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
2042 {
2043 struct srp_rdma_ch *ch = ch_ptr;
2044 struct ib_wc wc;
2045 struct srp_iu *iu;
2046
2047 while (ib_poll_cq(cq, 1, &wc) > 0) {
2048 if (likely(wc.status == IB_WC_SUCCESS)) {
2049 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
2050 list_add(&iu->list, &ch->free_tx);
2051 } else {
2052 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
2053 }
2054 }
2055 }
2056
2057 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2058 {
2059 struct srp_target_port *target = host_to_target(shost);
2060 struct srp_rport *rport = target->rport;
2061 struct srp_rdma_ch *ch;
2062 struct srp_request *req;
2063 struct srp_iu *iu;
2064 struct srp_cmd *cmd;
2065 struct ib_device *dev;
2066 unsigned long flags;
2067 u32 tag;
2068 u16 idx;
2069 int len, ret;
2070 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2071
2072 /*
2073 * The SCSI EH thread is the only context from which srp_queuecommand()
2074 * can get invoked for blocked devices (SDEV_BLOCK /
2075 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2076 * locking the rport mutex if invoked from inside the SCSI EH.
2077 */
2078 if (in_scsi_eh)
2079 mutex_lock(&rport->mutex);
2080
2081 scmnd->result = srp_chkready(target->rport);
2082 if (unlikely(scmnd->result))
2083 goto err;
2084
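/*
 * The block layer tag encodes both the hardware queue index and the
 * per-queue tag; use the former to select the RDMA channel and the latter
 * to select a slot in that channel's request ring.
 */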
2085 WARN_ON_ONCE(scmnd->request->tag < 0);
2086 tag = blk_mq_unique_tag(scmnd->request);
2087 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2088 idx = blk_mq_unique_tag_to_tag(tag);
2089 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2090 dev_name(&shost->shost_gendev), tag, idx,
2091 target->req_ring_size);
2092
2093 spin_lock_irqsave(&ch->lock, flags);
2094 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2095 spin_unlock_irqrestore(&ch->lock, flags);
2096
2097 if (!iu)
2098 goto err;
2099
2100 req = &ch->req_ring[idx];
2101 dev = target->srp_host->srp_dev->dev;
2102 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2103 DMA_TO_DEVICE);
2104
2105 scmnd->host_scribble = (void *) req;
2106
2107 cmd = iu->buf;
2108 memset(cmd, 0, sizeof *cmd);
2109
2110 cmd->opcode = SRP_CMD;
2111 int_to_scsilun(scmnd->device->lun, &cmd->lun);
2112 cmd->tag = tag;
2113 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2114
2115 req->scmnd = scmnd;
2116 req->cmd = iu;
2117
2118 len = srp_map_data(scmnd, ch, req);
2119 if (len < 0) {
2120 shost_printk(KERN_ERR, target->scsi_host,
2121 PFX "Failed to map data (%d)\n", len);
2122 /*
2123 * If we ran out of memory descriptors (-ENOMEM) because an
2124 * application is queuing many requests with more than
2125 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2126 * to reduce queue depth temporarily.
2127 */
2128 scmnd->result = len == -ENOMEM ?
2129 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2130 goto err_iu;
2131 }
2132
2133 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2134 DMA_TO_DEVICE);
2135
2136 if (srp_post_send(ch, iu, len)) {
2137 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2138 scmnd->result = DID_ERROR << 16;
2139 goto err_unmap;
2140 }
2141
2142 ret = 0;
2143
2144 unlock_rport:
2145 if (in_scsi_eh)
2146 mutex_unlock(&rport->mutex);
2147
2148 return ret;
2149
2150 err_unmap:
2151 srp_unmap_data(scmnd, ch, req);
2152
2153 err_iu:
2154 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2155
2156 /*
2157 * Prevent the loops that iterate over the request ring from
2158 * encountering a dangling SCSI command pointer.
2159 */
2160 req->scmnd = NULL;
2161
2162 err:
2163 if (scmnd->result) {
2164 scmnd->scsi_done(scmnd);
2165 ret = 0;
2166 } else {
2167 ret = SCSI_MLQUEUE_HOST_BUSY;
2168 }
2169
2170 goto unlock_rport;
2171 }
2172
2173 /*
2174 * Note: the resources allocated in this function are freed in
2175 * srp_free_ch_ib().
2176 */
2177 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2178 {
2179 struct srp_target_port *target = ch->target;
2180 int i;
2181
2182 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2183 GFP_KERNEL);
2184 if (!ch->rx_ring)
2185 goto err_no_ring;
2186 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2187 GFP_KERNEL);
2188 if (!ch->tx_ring)
2189 goto err_no_ring;
2190
2191 for (i = 0; i < target->queue_size; ++i) {
2192 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2193 ch->max_ti_iu_len,
2194 GFP_KERNEL, DMA_FROM_DEVICE);
2195 if (!ch->rx_ring[i])
2196 goto err;
2197 }
2198
2199 for (i = 0; i < target->queue_size; ++i) {
2200 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2201 target->max_iu_len,
2202 GFP_KERNEL, DMA_TO_DEVICE);
2203 if (!ch->tx_ring[i])
2204 goto err;
2205
2206 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2207 }
2208
2209 return 0;
2210
2211 err:
2212 for (i = 0; i < target->queue_size; ++i) {
2213 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2214 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2215 }
2216
2217
2218 err_no_ring:
2219 kfree(ch->tx_ring);
2220 ch->tx_ring = NULL;
2221 kfree(ch->rx_ring);
2222 ch->rx_ring = NULL;
2223
2224 return -ENOMEM;
2225 }
2226
2227 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2228 {
2229 uint64_t T_tr_ns, max_compl_time_ms;
2230 uint32_t rq_tmo_jiffies;
2231
2232 /*
2233 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2234 * table 91), both the QP timeout and the retry count have to be set
2235 * for RC QP's during the RTR to RTS transition.
2236 */
2237 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2238 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2239
2240 /*
2241 * Set target->rq_tmo_jiffies to one second more than the largest time
2242 * it can take before an error completion is generated. See also
2243 * C9-140..142 in the IBTA spec for more information about how to
2244 * convert the QP Local ACK Timeout value to nanoseconds.
2245 */
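/*
 * Worked example with illustrative values: for timeout = 19 and
 * retry_cnt = 7, T_tr_ns = 4096 * 2^19 ns ~= 2.15 s, so max_compl_time_ms
 * ~= 7 * 4 * 2.15 s ~= 60.1 s and rq_tmo_jiffies corresponds to roughly
 * 61 seconds.
 */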
2246 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2247 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2248 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2249 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2250
2251 return rq_tmo_jiffies;
2252 }
2253
2254 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2255 const struct srp_login_rsp *lrsp,
2256 struct srp_rdma_ch *ch)
2257 {
2258 struct srp_target_port *target = ch->target;
2259 struct ib_qp_attr *qp_attr = NULL;
2260 int attr_mask = 0;
2261 int ret;
2262 int i;
2263
2264 if (lrsp->opcode == SRP_LOGIN_RSP) {
2265 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2266 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2267
2268 /*
2269 * Reserve credits for task management so we don't
2270 * bounce requests back to the SCSI mid-layer.
2271 */
2272 target->scsi_host->can_queue
2273 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2274 target->scsi_host->can_queue);
2275 target->scsi_host->cmd_per_lun
2276 = min_t(int, target->scsi_host->can_queue,
2277 target->scsi_host->cmd_per_lun);
2278 } else {
2279 shost_printk(KERN_WARNING, target->scsi_host,
2280 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2281 ret = -ECONNRESET;
2282 goto error;
2283 }
2284
2285 if (!ch->rx_ring) {
2286 ret = srp_alloc_iu_bufs(ch);
2287 if (ret)
2288 goto error;
2289 }
2290
2291 ret = -ENOMEM;
2292 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2293 if (!qp_attr)
2294 goto error;
2295
2296 qp_attr->qp_state = IB_QPS_RTR;
2297 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2298 if (ret)
2299 goto error_free;
2300
2301 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2302 if (ret)
2303 goto error_free;
2304
2305 for (i = 0; i < target->queue_size; i++) {
2306 struct srp_iu *iu = ch->rx_ring[i];
2307
2308 ret = srp_post_recv(ch, iu);
2309 if (ret)
2310 goto error_free;
2311 }
2312
2313 qp_attr->qp_state = IB_QPS_RTS;
2314 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2315 if (ret)
2316 goto error_free;
2317
2318 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2319
2320 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2321 if (ret)
2322 goto error_free;
2323
2324 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2325
2326 error_free:
2327 kfree(qp_attr);
2328
2329 error:
2330 ch->status = ret;
2331 }
2332
2333 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2334 struct ib_cm_event *event,
2335 struct srp_rdma_ch *ch)
2336 {
2337 struct srp_target_port *target = ch->target;
2338 struct Scsi_Host *shost = target->scsi_host;
2339 struct ib_class_port_info *cpi;
2340 int opcode;
2341
2342 switch (event->param.rej_rcvd.reason) {
2343 case IB_CM_REJ_PORT_CM_REDIRECT:
2344 cpi = event->param.rej_rcvd.ari;
2345 ch->path.dlid = cpi->redirect_lid;
2346 ch->path.pkey = cpi->redirect_pkey;
2347 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2348 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2349
2350 ch->status = ch->path.dlid ?
2351 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2352 break;
2353
2354 case IB_CM_REJ_PORT_REDIRECT:
2355 if (srp_target_is_topspin(target)) {
2356 /*
2357 * Topspin/Cisco SRP gateways incorrectly send
2358 * reject reason code 25 when they mean 24
2359 * (port redirect).
2360 */
2361 memcpy(ch->path.dgid.raw,
2362 event->param.rej_rcvd.ari, 16);
2363
2364 shost_printk(KERN_DEBUG, shost,
2365 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2366 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2367 be64_to_cpu(ch->path.dgid.global.interface_id));
2368
2369 ch->status = SRP_PORT_REDIRECT;
2370 } else {
2371 shost_printk(KERN_WARNING, shost,
2372 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2373 ch->status = -ECONNRESET;
2374 }
2375 break;
2376
2377 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2378 shost_printk(KERN_WARNING, shost,
2379 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2380 ch->status = -ECONNRESET;
2381 break;
2382
2383 case IB_CM_REJ_CONSUMER_DEFINED:
2384 opcode = *(u8 *) event->private_data;
2385 if (opcode == SRP_LOGIN_REJ) {
2386 struct srp_login_rej *rej = event->private_data;
2387 u32 reason = be32_to_cpu(rej->reason);
2388
2389 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2390 shost_printk(KERN_WARNING, shost,
2391 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2392 else
2393 shost_printk(KERN_WARNING, shost, PFX
2394 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2395 target->sgid.raw,
2396 target->orig_dgid.raw, reason);
2397 } else
2398 shost_printk(KERN_WARNING, shost,
2399 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2400 " opcode 0x%02x\n", opcode);
2401 ch->status = -ECONNRESET;
2402 break;
2403
2404 case IB_CM_REJ_STALE_CONN:
2405 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2406 ch->status = SRP_STALE_CONN;
2407 break;
2408
2409 default:
2410 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2411 event->param.rej_rcvd.reason);
2412 ch->status = -ECONNRESET;
2413 }
2414 }
2415
2416 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2417 {
2418 struct srp_rdma_ch *ch = cm_id->context;
2419 struct srp_target_port *target = ch->target;
2420 int comp = 0;
2421
2422 switch (event->event) {
2423 case IB_CM_REQ_ERROR:
2424 shost_printk(KERN_DEBUG, target->scsi_host,
2425 PFX "Sending CM REQ failed\n");
2426 comp = 1;
2427 ch->status = -ECONNRESET;
2428 break;
2429
2430 case IB_CM_REP_RECEIVED:
2431 comp = 1;
2432 srp_cm_rep_handler(cm_id, event->private_data, ch);
2433 break;
2434
2435 case IB_CM_REJ_RECEIVED:
2436 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2437 comp = 1;
2438
2439 srp_cm_rej_handler(cm_id, event, ch);
2440 break;
2441
2442 case IB_CM_DREQ_RECEIVED:
2443 shost_printk(KERN_WARNING, target->scsi_host,
2444 PFX "DREQ received - connection closed\n");
2445 ch->connected = false;
2446 if (ib_send_cm_drep(cm_id, NULL, 0))
2447 shost_printk(KERN_ERR, target->scsi_host,
2448 PFX "Sending CM DREP failed\n");
2449 queue_work(system_long_wq, &target->tl_err_work);
2450 break;
2451
2452 case IB_CM_TIMEWAIT_EXIT:
2453 shost_printk(KERN_ERR, target->scsi_host,
2454 PFX "connection closed\n");
2455 comp = 1;
2456
2457 ch->status = 0;
2458 break;
2459
2460 case IB_CM_MRA_RECEIVED:
2461 case IB_CM_DREQ_ERROR:
2462 case IB_CM_DREP_RECEIVED:
2463 break;
2464
2465 default:
2466 shost_printk(KERN_WARNING, target->scsi_host,
2467 PFX "Unhandled CM event %d\n", event->event);
2468 break;
2469 }
2470
2471 if (comp)
2472 complete(&ch->done);
2473
2474 return 0;
2475 }
2476
2477 /**
2478 * srp_change_queue_depth - setting device queue depth
2479 * @sdev: scsi device struct
2480 * @qdepth: requested queue depth
2481 *
2482 * Returns queue depth.
2483 */
2484 static int
2485 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2486 {
2487 if (!sdev->tagged_supported)
2488 qdepth = 1;
2489 return scsi_change_queue_depth(sdev, qdepth);
2490 }
2491
2492 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2493 u8 func, u8 *status)
2494 {
2495 struct srp_target_port *target = ch->target;
2496 struct srp_rport *rport = target->rport;
2497 struct ib_device *dev = target->srp_host->srp_dev->dev;
2498 struct srp_iu *iu;
2499 struct srp_tsk_mgmt *tsk_mgmt;
2500 int res;
2501
2502 if (!ch->connected || target->qp_in_error)
2503 return -1;
2504
2505 /*
2506 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2507 * invoked while a task management function is being sent.
2508 */
2509 mutex_lock(&rport->mutex);
2510 spin_lock_irq(&ch->lock);
2511 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2512 spin_unlock_irq(&ch->lock);
2513
2514 if (!iu) {
2515 mutex_unlock(&rport->mutex);
2516
2517 return -1;
2518 }
2519
2520 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2521 DMA_TO_DEVICE);
2522 tsk_mgmt = iu->buf;
2523 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2524
2525 tsk_mgmt->opcode = SRP_TSK_MGMT;
2526 int_to_scsilun(lun, &tsk_mgmt->lun);
2527 tsk_mgmt->tsk_mgmt_func = func;
2528 tsk_mgmt->task_tag = req_tag;
2529
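/*
 * Task management tags have the SRP_TAG_TSK_MGMT bit set so that
 * srp_process_rsp() can tell them apart from SCSI command tags.
 */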
2530 spin_lock_irq(&ch->lock);
2531 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2532 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2533 spin_unlock_irq(&ch->lock);
2534
2535 init_completion(&ch->tsk_mgmt_done);
2536
2537 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2538 DMA_TO_DEVICE);
2539 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2540 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2541 mutex_unlock(&rport->mutex);
2542
2543 return -1;
2544 }
2545 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2546 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2547 if (res > 0 && status)
2548 *status = ch->tsk_mgmt_status;
2549 mutex_unlock(&rport->mutex);
2550
2551 WARN_ON_ONCE(res < 0);
2552
2553 return res > 0 ? 0 : -1;
2554 }
2555
2556 static int srp_abort(struct scsi_cmnd *scmnd)
2557 {
2558 struct srp_target_port *target = host_to_target(scmnd->device->host);
2559 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2560 u32 tag;
2561 u16 ch_idx;
2562 struct srp_rdma_ch *ch;
2563 int ret;
2564
2565 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2566
2567 if (!req)
2568 return SUCCESS;
2569 tag = blk_mq_unique_tag(scmnd->request);
2570 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2571 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2572 return SUCCESS;
2573 ch = &target->ch[ch_idx];
2574 if (!srp_claim_req(ch, req, NULL, scmnd))
2575 return SUCCESS;
2576 shost_printk(KERN_ERR, target->scsi_host,
2577 "Sending SRP abort for tag %#x\n", tag);
2578 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2579 SRP_TSK_ABORT_TASK, NULL) == 0)
2580 ret = SUCCESS;
2581 else if (target->rport->state == SRP_RPORT_LOST)
2582 ret = FAST_IO_FAIL;
2583 else
2584 ret = FAILED;
2585 if (ret == SUCCESS) {
2586 srp_free_req(ch, req, scmnd, 0);
2587 scmnd->result = DID_ABORT << 16;
2588 scmnd->scsi_done(scmnd);
2589 }
2590
2591 return ret;
2592 }
2593
2594 static int srp_reset_device(struct scsi_cmnd *scmnd)
2595 {
2596 struct srp_target_port *target = host_to_target(scmnd->device->host);
2597 struct srp_rdma_ch *ch;
2598 u8 status;
2599
2600 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2601
2602 ch = &target->ch[0];
2603 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2604 SRP_TSK_LUN_RESET, &status))
2605 return FAILED;
2606 if (status)
2607 return FAILED;
2608
2609 return SUCCESS;
2610 }
2611
2612 static int srp_reset_host(struct scsi_cmnd *scmnd)
2613 {
2614 struct srp_target_port *target = host_to_target(scmnd->device->host);
2615
2616 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2617
2618 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2619 }
2620
2621 static int srp_slave_configure(struct scsi_device *sdev)
2622 {
2623 struct Scsi_Host *shost = sdev->host;
2624 struct srp_target_port *target = host_to_target(shost);
2625 struct request_queue *q = sdev->request_queue;
2626 unsigned long timeout;
2627
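/*
 * For disk devices, make the block layer request timeout at least as long
 * as the time it can take before an RC error completion is generated
 * (rq_tmo_jiffies), with a lower bound of 30 seconds.
 */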
2628 if (sdev->type == TYPE_DISK) {
2629 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2630 blk_queue_rq_timeout(q, timeout);
2631 }
2632
2633 return 0;
2634 }
2635
2636 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2637 char *buf)
2638 {
2639 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2640
2641 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2642 }
2643
2644 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2645 char *buf)
2646 {
2647 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2648
2649 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2650 }
2651
2652 static ssize_t show_service_id(struct device *dev,
2653 struct device_attribute *attr, char *buf)
2654 {
2655 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2656
2657 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2658 }
2659
2660 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2661 char *buf)
2662 {
2663 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2664
2665 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2666 }
2667
2668 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2669 char *buf)
2670 {
2671 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2672
2673 return sprintf(buf, "%pI6\n", target->sgid.raw);
2674 }
2675
2676 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2677 char *buf)
2678 {
2679 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2680 struct srp_rdma_ch *ch = &target->ch[0];
2681
2682 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2683 }
2684
2685 static ssize_t show_orig_dgid(struct device *dev,
2686 struct device_attribute *attr, char *buf)
2687 {
2688 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2689
2690 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2691 }
2692
2693 static ssize_t show_req_lim(struct device *dev,
2694 struct device_attribute *attr, char *buf)
2695 {
2696 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2697 struct srp_rdma_ch *ch;
2698 int i, req_lim = INT_MAX;
2699
2700 for (i = 0; i < target->ch_count; i++) {
2701 ch = &target->ch[i];
2702 req_lim = min(req_lim, ch->req_lim);
2703 }
2704 return sprintf(buf, "%d\n", req_lim);
2705 }
2706
2707 static ssize_t show_zero_req_lim(struct device *dev,
2708 struct device_attribute *attr, char *buf)
2709 {
2710 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2711
2712 return sprintf(buf, "%d\n", target->zero_req_lim);
2713 }
2714
2715 static ssize_t show_local_ib_port(struct device *dev,
2716 struct device_attribute *attr, char *buf)
2717 {
2718 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2719
2720 return sprintf(buf, "%d\n", target->srp_host->port);
2721 }
2722
2723 static ssize_t show_local_ib_device(struct device *dev,
2724 struct device_attribute *attr, char *buf)
2725 {
2726 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2727
2728 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2729 }
2730
2731 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2732 char *buf)
2733 {
2734 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2735
2736 return sprintf(buf, "%d\n", target->ch_count);
2737 }
2738
2739 static ssize_t show_comp_vector(struct device *dev,
2740 struct device_attribute *attr, char *buf)
2741 {
2742 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2743
2744 return sprintf(buf, "%d\n", target->comp_vector);
2745 }
2746
2747 static ssize_t show_tl_retry_count(struct device *dev,
2748 struct device_attribute *attr, char *buf)
2749 {
2750 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2751
2752 return sprintf(buf, "%d\n", target->tl_retry_count);
2753 }
2754
2755 static ssize_t show_cmd_sg_entries(struct device *dev,
2756 struct device_attribute *attr, char *buf)
2757 {
2758 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2759
2760 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2761 }
2762
2763 static ssize_t show_allow_ext_sg(struct device *dev,
2764 struct device_attribute *attr, char *buf)
2765 {
2766 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2767
2768 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2769 }
2770
2771 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2772 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2773 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2774 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2775 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2776 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2777 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2778 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2779 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2780 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2781 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2782 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2783 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2784 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2785 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2786 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2787
2788 static struct device_attribute *srp_host_attrs[] = {
2789 &dev_attr_id_ext,
2790 &dev_attr_ioc_guid,
2791 &dev_attr_service_id,
2792 &dev_attr_pkey,
2793 &dev_attr_sgid,
2794 &dev_attr_dgid,
2795 &dev_attr_orig_dgid,
2796 &dev_attr_req_lim,
2797 &dev_attr_zero_req_lim,
2798 &dev_attr_local_ib_port,
2799 &dev_attr_local_ib_device,
2800 &dev_attr_ch_count,
2801 &dev_attr_comp_vector,
2802 &dev_attr_tl_retry_count,
2803 &dev_attr_cmd_sg_entries,
2804 &dev_attr_allow_ext_sg,
2805 NULL
2806 };
2807
2808 static struct scsi_host_template srp_template = {
2809 .module = THIS_MODULE,
2810 .name = "InfiniBand SRP initiator",
2811 .proc_name = DRV_NAME,
2812 .slave_configure = srp_slave_configure,
2813 .info = srp_target_info,
2814 .queuecommand = srp_queuecommand,
2815 .change_queue_depth = srp_change_queue_depth,
2816 .eh_abort_handler = srp_abort,
2817 .eh_device_reset_handler = srp_reset_device,
2818 .eh_host_reset_handler = srp_reset_host,
2819 .skip_settle_delay = true,
2820 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2821 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2822 .this_id = -1,
2823 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2824 .use_clustering = ENABLE_CLUSTERING,
2825 .shost_attrs = srp_host_attrs,
2826 .track_queue_depth = 1,
2827 };
2828
2829 static int srp_sdev_count(struct Scsi_Host *host)
2830 {
2831 struct scsi_device *sdev;
2832 int c = 0;
2833
2834 shost_for_each_device(sdev, host)
2835 c++;
2836
2837 return c;
2838 }
2839
2840 /*
2841 * Return values:
2842 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2843 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2844 * removal has been scheduled.
2845 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2846 */
2847 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2848 {
2849 struct srp_rport_identifiers ids;
2850 struct srp_rport *rport;
2851
2852 target->state = SRP_TARGET_SCANNING;
2853 sprintf(target->target_name, "SRP.T10:%016llX",
2854 be64_to_cpu(target->id_ext));
2855
2856 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2857 return -ENODEV;
2858
2859 memcpy(ids.port_id, &target->id_ext, 8);
2860 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2861 ids.roles = SRP_RPORT_ROLE_TARGET;
2862 rport = srp_rport_add(target->scsi_host, &ids);
2863 if (IS_ERR(rport)) {
2864 scsi_remove_host(target->scsi_host);
2865 return PTR_ERR(rport);
2866 }
2867
2868 rport->lld_data = target;
2869 target->rport = rport;
2870
2871 spin_lock(&host->target_lock);
2872 list_add_tail(&target->list, &host->target_list);
2873 spin_unlock(&host->target_lock);
2874
2875 scsi_scan_target(&target->scsi_host->shost_gendev,
2876 0, target->scsi_id, SCAN_WILD_CARD, 0);
2877
2878 if (srp_connected_ch(target) < target->ch_count ||
2879 target->qp_in_error) {
2880 shost_printk(KERN_INFO, target->scsi_host,
2881 PFX "SCSI scan failed - removing SCSI host\n");
2882 srp_queue_remove_work(target);
2883 goto out;
2884 }
2885
2886 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2887 dev_name(&target->scsi_host->shost_gendev),
2888 srp_sdev_count(target->scsi_host));
2889
2890 spin_lock_irq(&target->lock);
2891 if (target->state == SRP_TARGET_SCANNING)
2892 target->state = SRP_TARGET_LIVE;
2893 spin_unlock_irq(&target->lock);
2894
2895 out:
2896 return 0;
2897 }
2898
2899 static void srp_release_dev(struct device *dev)
2900 {
2901 struct srp_host *host =
2902 container_of(dev, struct srp_host, dev);
2903
2904 complete(&host->released);
2905 }
2906
2907 static struct class srp_class = {
2908 .name = "infiniband_srp",
2909 .dev_release = srp_release_dev
2910 };
2911
2912 /**
2913 * srp_conn_unique() - check whether the connection to a target is unique
2914 * @host: SRP host.
2915 * @target: SRP target port.
2916 */
2917 static bool srp_conn_unique(struct srp_host *host,
2918 struct srp_target_port *target)
2919 {
2920 struct srp_target_port *t;
2921 bool ret = false;
2922
2923 if (target->state == SRP_TARGET_REMOVED)
2924 goto out;
2925
2926 ret = true;
2927
2928 spin_lock(&host->target_lock);
2929 list_for_each_entry(t, &host->target_list, list) {
2930 if (t != target &&
2931 target->id_ext == t->id_ext &&
2932 target->ioc_guid == t->ioc_guid &&
2933 target->initiator_ext == t->initiator_ext) {
2934 ret = false;
2935 break;
2936 }
2937 }
2938 spin_unlock(&host->target_lock);
2939
2940 out:
2941 return ret;
2942 }
2943
2944 /*
2945 * Target ports are added by writing
2946 *
2947 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2948 * pkey=<P_Key>,service_id=<service ID>
2949 *
2950 * to the add_target sysfs attribute.
2951 */
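/*
 * Example (HCA name, port number and identifier values are illustrative
 * only): writing the single line
 *
 *   id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4
 *
 * to /sys/class/infiniband_srp/srp-mlx4_0-1/add_target logs in to that
 * target port.
 */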
2952 enum {
2953 SRP_OPT_ERR = 0,
2954 SRP_OPT_ID_EXT = 1 << 0,
2955 SRP_OPT_IOC_GUID = 1 << 1,
2956 SRP_OPT_DGID = 1 << 2,
2957 SRP_OPT_PKEY = 1 << 3,
2958 SRP_OPT_SERVICE_ID = 1 << 4,
2959 SRP_OPT_MAX_SECT = 1 << 5,
2960 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2961 SRP_OPT_IO_CLASS = 1 << 7,
2962 SRP_OPT_INITIATOR_EXT = 1 << 8,
2963 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2964 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2965 SRP_OPT_SG_TABLESIZE = 1 << 11,
2966 SRP_OPT_COMP_VECTOR = 1 << 12,
2967 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2968 SRP_OPT_QUEUE_SIZE = 1 << 14,
2969 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2970 SRP_OPT_IOC_GUID |
2971 SRP_OPT_DGID |
2972 SRP_OPT_PKEY |
2973 SRP_OPT_SERVICE_ID),
2974 };
2975
2976 static const match_table_t srp_opt_tokens = {
2977 { SRP_OPT_ID_EXT, "id_ext=%s" },
2978 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2979 { SRP_OPT_DGID, "dgid=%s" },
2980 { SRP_OPT_PKEY, "pkey=%x" },
2981 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2982 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2983 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2984 { SRP_OPT_IO_CLASS, "io_class=%x" },
2985 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2986 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2987 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2988 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2989 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2990 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2991 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
2992 { SRP_OPT_ERR, NULL }
2993 };
2994
2995 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2996 {
2997 char *options, *sep_opt;
2998 char *p;
2999 char dgid[3];
3000 substring_t args[MAX_OPT_ARGS];
3001 int opt_mask = 0;
3002 int token;
3003 int ret = -EINVAL;
3004 int i;
3005
3006 options = kstrdup(buf, GFP_KERNEL);
3007 if (!options)
3008 return -ENOMEM;
3009
3010 sep_opt = options;
3011 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3012 if (!*p)
3013 continue;
3014
3015 token = match_token(p, srp_opt_tokens, args);
3016 opt_mask |= token;
3017
3018 switch (token) {
3019 case SRP_OPT_ID_EXT:
3020 p = match_strdup(args);
3021 if (!p) {
3022 ret = -ENOMEM;
3023 goto out;
3024 }
3025 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3026 kfree(p);
3027 break;
3028
3029 case SRP_OPT_IOC_GUID:
3030 p = match_strdup(args);
3031 if (!p) {
3032 ret = -ENOMEM;
3033 goto out;
3034 }
3035 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3036 kfree(p);
3037 break;
3038
3039 case SRP_OPT_DGID:
3040 p = match_strdup(args);
3041 if (!p) {
3042 ret = -ENOMEM;
3043 goto out;
3044 }
3045 if (strlen(p) != 32) {
3046 pr_warn("bad dest GID parameter '%s'\n", p);
3047 kfree(p);
3048 goto out;
3049 }
3050
3051 for (i = 0; i < 16; ++i) {
3052 strlcpy(dgid, p + i * 2, sizeof(dgid));
3053 if (sscanf(dgid, "%hhx",
3054 &target->orig_dgid.raw[i]) < 1) {
3055 ret = -EINVAL;
3056 kfree(p);
3057 goto out;
3058 }
3059 }
3060 kfree(p);
3061 break;
3062
3063 case SRP_OPT_PKEY:
3064 if (match_hex(args, &token)) {
3065 pr_warn("bad P_Key parameter '%s'\n", p);
3066 goto out;
3067 }
3068 target->pkey = cpu_to_be16(token);
3069 break;
3070
3071 case SRP_OPT_SERVICE_ID:
3072 p = match_strdup(args);
3073 if (!p) {
3074 ret = -ENOMEM;
3075 goto out;
3076 }
3077 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3078 kfree(p);
3079 break;
3080
3081 case SRP_OPT_MAX_SECT:
3082 if (match_int(args, &token)) {
3083 pr_warn("bad max sect parameter '%s'\n", p);
3084 goto out;
3085 }
3086 target->scsi_host->max_sectors = token;
3087 break;
3088
3089 case SRP_OPT_QUEUE_SIZE:
3090 if (match_int(args, &token) || token < 1) {
3091 pr_warn("bad queue_size parameter '%s'\n", p);
3092 goto out;
3093 }
3094 target->scsi_host->can_queue = token;
3095 target->queue_size = token + SRP_RSP_SQ_SIZE +
3096 SRP_TSK_MGMT_SQ_SIZE;
3097 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3098 target->scsi_host->cmd_per_lun = token;
3099 break;
3100
3101 case SRP_OPT_MAX_CMD_PER_LUN:
3102 if (match_int(args, &token) || token < 1) {
3103 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3104 p);
3105 goto out;
3106 }
3107 target->scsi_host->cmd_per_lun = token;
3108 break;
3109
3110 case SRP_OPT_IO_CLASS:
3111 if (match_hex(args, &token)) {
3112 pr_warn("bad IO class parameter '%s'\n", p);
3113 goto out;
3114 }
3115 if (token != SRP_REV10_IB_IO_CLASS &&
3116 token != SRP_REV16A_IB_IO_CLASS) {
3117 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3118 token, SRP_REV10_IB_IO_CLASS,
3119 SRP_REV16A_IB_IO_CLASS);
3120 goto out;
3121 }
3122 target->io_class = token;
3123 break;
3124
3125 case SRP_OPT_INITIATOR_EXT:
3126 p = match_strdup(args);
3127 if (!p) {
3128 ret = -ENOMEM;
3129 goto out;
3130 }
3131 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3132 kfree(p);
3133 break;
3134
3135 case SRP_OPT_CMD_SG_ENTRIES:
3136 if (match_int(args, &token) || token < 1 || token > 255) {
3137 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3138 p);
3139 goto out;
3140 }
3141 target->cmd_sg_cnt = token;
3142 break;
3143
3144 case SRP_OPT_ALLOW_EXT_SG:
3145 if (match_int(args, &token)) {
3146 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3147 goto out;
3148 }
3149 target->allow_ext_sg = !!token;
3150 break;
3151
3152 case SRP_OPT_SG_TABLESIZE:
3153 if (match_int(args, &token) || token < 1 ||
3154 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3155 pr_warn("bad max sg_tablesize parameter '%s'\n",
3156 p);
3157 goto out;
3158 }
3159 target->sg_tablesize = token;
3160 break;
3161
3162 case SRP_OPT_COMP_VECTOR:
3163 if (match_int(args, &token) || token < 0) {
3164 pr_warn("bad comp_vector parameter '%s'\n", p);
3165 goto out;
3166 }
3167 target->comp_vector = token;
3168 break;
3169
3170 case SRP_OPT_TL_RETRY_COUNT:
3171 if (match_int(args, &token) || token < 2 || token > 7) {
3172 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3173 p);
3174 goto out;
3175 }
3176 target->tl_retry_count = token;
3177 break;
3178
3179 default:
3180 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3181 p);
3182 goto out;
3183 }
3184 }
3185
3186 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3187 ret = 0;
3188 else
3189 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3190 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3191 !(srp_opt_tokens[i].token & opt_mask))
3192 pr_warn("target creation request is missing parameter '%s'\n",
3193 srp_opt_tokens[i].pattern);
3194
3195 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3196 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3197 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3198 target->scsi_host->cmd_per_lun,
3199 target->scsi_host->can_queue);
3200
3201 out:
3202 kfree(options);
3203 return ret;
3204 }
3205
3206 static ssize_t srp_create_target(struct device *dev,
3207 struct device_attribute *attr,
3208 const char *buf, size_t count)
3209 {
3210 struct srp_host *host =
3211 container_of(dev, struct srp_host, dev);
3212 struct Scsi_Host *target_host;
3213 struct srp_target_port *target;
3214 struct srp_rdma_ch *ch;
3215 struct srp_device *srp_dev = host->srp_dev;
3216 struct ib_device *ibdev = srp_dev->dev;
3217 int ret, node_idx, node, cpu, i;
3218 bool multich = false;
3219
3220 target_host = scsi_host_alloc(&srp_template,
3221 sizeof (struct srp_target_port));
3222 if (!target_host)
3223 return -ENOMEM;
3224
3225 target_host->transportt = ib_srp_transport_template;
3226 target_host->max_channel = 0;
3227 target_host->max_id = 1;
3228 target_host->max_lun = -1LL;
3229 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3230
3231 target = host_to_target(target_host);
3232
3233 target->io_class = SRP_REV16A_IB_IO_CLASS;
3234 target->scsi_host = target_host;
3235 target->srp_host = host;
3236 target->lkey = host->srp_dev->pd->local_dma_lkey;
3237 target->global_mr = host->srp_dev->global_mr;
3238 target->cmd_sg_cnt = cmd_sg_entries;
3239 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3240 target->allow_ext_sg = allow_ext_sg;
3241 target->tl_retry_count = 7;
3242 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3243
3244 /*
3245 * Prevent the SCSI host from being removed by srp_remove_target()
3246 * before this function returns.
3247 */
3248 scsi_host_get(target->scsi_host);
3249
3250 mutex_lock(&host->add_target_mutex);
3251
3252 ret = srp_parse_options(buf, target);
3253 if (ret)
3254 goto out;
3255
3256 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3257
3258 if (!srp_conn_unique(target->srp_host, target)) {
3259 shost_printk(KERN_INFO, target->scsi_host,
3260 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3261 be64_to_cpu(target->id_ext),
3262 be64_to_cpu(target->ioc_guid),
3263 be64_to_cpu(target->initiator_ext));
3264 ret = -EEXIST;
3265 goto out;
3266 }
3267
3268 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3269 target->cmd_sg_cnt < target->sg_tablesize) {
3270 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3271 target->sg_tablesize = target->cmd_sg_cnt;
3272 }
3273
3274 target_host->sg_tablesize = target->sg_tablesize;
3275 target->indirect_size = target->sg_tablesize *
3276 sizeof (struct srp_direct_buf);
3277 target->max_iu_len = sizeof (struct srp_cmd) +
3278 sizeof (struct srp_indirect_buf) +
3279 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3280
3281 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3282 INIT_WORK(&target->remove_work, srp_remove_work);
3283 spin_lock_init(&target->lock);
3284 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
3285 if (ret)
3286 goto out;
3287
3288 ret = -ENOMEM;
3289 target->ch_count = max_t(unsigned, num_online_nodes(),
3290 min(ch_count ? :
3291 min(4 * num_online_nodes(),
3292 ibdev->num_comp_vectors),
3293 num_online_cpus()));
3294 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3295 GFP_KERNEL);
3296 if (!target->ch)
3297 goto out;
3298
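/*
 * Spread the RDMA channels evenly over the online NUMA nodes and give each
 * channel a completion vector from the subrange assigned to its node.
 */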
3299 node_idx = 0;
3300 for_each_online_node(node) {
3301 const int ch_start = (node_idx * target->ch_count /
3302 num_online_nodes());
3303 const int ch_end = ((node_idx + 1) * target->ch_count /
3304 num_online_nodes());
3305 const int cv_start = node_idx * ibdev->num_comp_vectors /
3306 num_online_nodes();
3307 const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
3308 num_online_nodes();
3309 int cpu_idx = 0;
3310
3311 for_each_online_cpu(cpu) {
3312 if (cpu_to_node(cpu) != node)
3313 continue;
3314 if (ch_start + cpu_idx >= ch_end)
3315 continue;
3316 ch = &target->ch[ch_start + cpu_idx];
3317 ch->target = target;
3318 ch->comp_vector = cv_start == cv_end ? cv_start :
3319 cv_start + cpu_idx % (cv_end - cv_start);
3320 spin_lock_init(&ch->lock);
3321 INIT_LIST_HEAD(&ch->free_tx);
3322 ret = srp_new_cm_id(ch);
3323 if (ret)
3324 goto err_disconnect;
3325
3326 ret = srp_create_ch_ib(ch);
3327 if (ret)
3328 goto err_disconnect;
3329
3330 ret = srp_alloc_req_data(ch);
3331 if (ret)
3332 goto err_disconnect;
3333
3334 ret = srp_connect_ch(ch, multich);
3335 if (ret) {
3336 shost_printk(KERN_ERR, target->scsi_host,
3337 PFX "Connection %d/%d failed\n",
3338 ch_start + cpu_idx,
3339 target->ch_count);
3340 if (node_idx == 0 && cpu_idx == 0) {
3341 goto err_disconnect;
3342 } else {
3343 srp_free_ch_ib(target, ch);
3344 srp_free_req_data(target, ch);
3345 target->ch_count = ch - target->ch;
3346 goto connected;
3347 }
3348 }
3349
3350 multich = true;
3351 cpu_idx++;
3352 }
3353 node_idx++;
3354 }
3355
3356 connected:
3357 target->scsi_host->nr_hw_queues = target->ch_count;
3358
3359 ret = srp_add_target(host, target);
3360 if (ret)
3361 goto err_disconnect;
3362
3363 if (target->state != SRP_TARGET_REMOVED) {
3364 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3365 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3366 be64_to_cpu(target->id_ext),
3367 be64_to_cpu(target->ioc_guid),
3368 be16_to_cpu(target->pkey),
3369 be64_to_cpu(target->service_id),
3370 target->sgid.raw, target->orig_dgid.raw);
3371 }
3372
3373 ret = count;
3374
3375 out:
3376 mutex_unlock(&host->add_target_mutex);
3377
3378 scsi_host_put(target->scsi_host);
3379 if (ret < 0)
3380 scsi_host_put(target->scsi_host);
3381
3382 return ret;
3383
3384 err_disconnect:
3385 srp_disconnect_target(target);
3386
3387 for (i = 0; i < target->ch_count; i++) {
3388 ch = &target->ch[i];
3389 srp_free_ch_ib(target, ch);
3390 srp_free_req_data(target, ch);
3391 }
3392
3393 kfree(target->ch);
3394 goto out;
3395 }
3396
3397 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3398
3399 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3400 char *buf)
3401 {
3402 struct srp_host *host = container_of(dev, struct srp_host, dev);
3403
3404 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3405 }
3406
3407 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3408
3409 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3410 char *buf)
3411 {
3412 struct srp_host *host = container_of(dev, struct srp_host, dev);
3413
3414 return sprintf(buf, "%d\n", host->port);
3415 }
3416
3417 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3418
3419 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3420 {
3421 struct srp_host *host;
3422
3423 host = kzalloc(sizeof *host, GFP_KERNEL);
3424 if (!host)
3425 return NULL;
3426
3427 INIT_LIST_HEAD(&host->target_list);
3428 spin_lock_init(&host->target_lock);
3429 init_completion(&host->released);
3430 mutex_init(&host->add_target_mutex);
3431 host->srp_dev = device;
3432 host->port = port;
3433
3434 host->dev.class = &srp_class;
3435 host->dev.parent = device->dev->dma_device;
3436 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3437
3438 if (device_register(&host->dev))
3439 goto free_host;
3440 if (device_create_file(&host->dev, &dev_attr_add_target))
3441 goto err_class;
3442 if (device_create_file(&host->dev, &dev_attr_ibdev))
3443 goto err_class;
3444 if (device_create_file(&host->dev, &dev_attr_port))
3445 goto err_class;
3446
3447 return host;
3448
3449 err_class:
3450 device_unregister(&host->dev);
3451
3452 free_host:
3453 kfree(host);
3454
3455 return NULL;
3456 }
3457
3458 static void srp_add_one(struct ib_device *device)
3459 {
3460 struct srp_device *srp_dev;
3461 struct ib_device_attr *dev_attr;
3462 struct srp_host *host;
3463 int mr_page_shift, p;
3464 u64 max_pages_per_mr;
3465
3466 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3467 if (!dev_attr)
3468 return;
3469
3470 if (ib_query_device(device, dev_attr)) {
3471 pr_warn("Query device failed for %s\n", device->name);
3472 goto free_attr;
3473 }
3474
3475 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3476 if (!srp_dev)
3477 goto free_attr;
3478
3479 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3480 device->map_phys_fmr && device->unmap_fmr);
3481 srp_dev->has_fr = (dev_attr->device_cap_flags &
3482 IB_DEVICE_MEM_MGT_EXTENSIONS);
3483 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3484 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3485
3486 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3487 (!srp_dev->has_fmr || prefer_fr));
3488 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3489
3490 /*
3491 * Use the smallest page size supported by the HCA, down to a
3492 * minimum of 4096 bytes. We're unlikely to build large sglists
3493 * out of smaller entries.
3494 */
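/*
 * Illustrative example: page_size_cap = 0xfffff000 means that the smallest
 * supported page size is 4 KiB; ffs() then returns 13, so mr_page_shift
 * becomes 12 and mr_page_size becomes 4096.
 */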
3495 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3496 srp_dev->mr_page_size = 1 << mr_page_shift;
3497 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3498 max_pages_per_mr = dev_attr->max_mr_size;
3499 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3500 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3501 max_pages_per_mr);
3502 if (srp_dev->use_fast_reg) {
3503 srp_dev->max_pages_per_mr =
3504 min_t(u32, srp_dev->max_pages_per_mr,
3505 dev_attr->max_fast_reg_page_list_len);
3506 }
3507 srp_dev->mr_max_size = srp_dev->mr_page_size *
3508 srp_dev->max_pages_per_mr;
3509 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3510 device->name, mr_page_shift, dev_attr->max_mr_size,
3511 dev_attr->max_fast_reg_page_list_len,
3512 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3513
3514 INIT_LIST_HEAD(&srp_dev->dev_list);
3515
3516 srp_dev->dev = device;
3517 srp_dev->pd = ib_alloc_pd(device);
3518 if (IS_ERR(srp_dev->pd))
3519 goto free_dev;
3520
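/*
 * Only allocate a global DMA MR with remote access rights if registering
 * all buffers has not been requested or if the HCA supports neither FMR
 * nor fast registration.
 */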
3521 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3522 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3523 IB_ACCESS_LOCAL_WRITE |
3524 IB_ACCESS_REMOTE_READ |
3525 IB_ACCESS_REMOTE_WRITE);
3526 if (IS_ERR(srp_dev->global_mr))
3527 goto err_pd;
3528 } else {
3529 srp_dev->global_mr = NULL;
3530 }
3531
3532 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3533 host = srp_add_port(srp_dev, p);
3534 if (host)
3535 list_add_tail(&host->list, &srp_dev->dev_list);
3536 }
3537
3538 ib_set_client_data(device, &srp_client, srp_dev);
3539
3540 goto free_attr;
3541
3542 err_pd:
3543 ib_dealloc_pd(srp_dev->pd);
3544
3545 free_dev:
3546 kfree(srp_dev);
3547
3548 free_attr:
3549 kfree(dev_attr);
3550 }
3551
3552 static void srp_remove_one(struct ib_device *device, void *client_data)
3553 {
3554 struct srp_device *srp_dev;
3555 struct srp_host *host, *tmp_host;
3556 struct srp_target_port *target;
3557
3558 srp_dev = client_data;
3559 if (!srp_dev)
3560 return;
3561
3562 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3563 device_unregister(&host->dev);
3564 /*
3565 * Wait for the sysfs entry to go away, so that no new
3566 * target ports can be created.
3567 */
3568 wait_for_completion(&host->released);
3569
3570 /*
3571 * Remove all target ports.
3572 */
3573 spin_lock(&host->target_lock);
3574 list_for_each_entry(target, &host->target_list, list)
3575 srp_queue_remove_work(target);
3576 spin_unlock(&host->target_lock);
3577
3578 /*
3579 * Wait for tl_err and target port removal tasks.
3580 */
3581 flush_workqueue(system_long_wq);
3582 flush_workqueue(srp_remove_wq);
3583
3584 kfree(host);
3585 }
3586
3587 if (srp_dev->global_mr)
3588 ib_dereg_mr(srp_dev->global_mr);
3589 ib_dealloc_pd(srp_dev->pd);
3590
3591 kfree(srp_dev);
3592 }
3593
3594 static struct srp_function_template ib_srp_transport_functions = {
3595 .has_rport_state = true,
3596 .reset_timer_if_blocked = true,
3597 .reconnect_delay = &srp_reconnect_delay,
3598 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3599 .dev_loss_tmo = &srp_dev_loss_tmo,
3600 .reconnect = srp_rport_reconnect,
3601 .rport_delete = srp_rport_delete,
3602 .terminate_rport_io = srp_terminate_io,
3603 };
3604
3605 static int __init srp_init_module(void)
3606 {
3607 int ret;
3608
3609 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3610
3611 if (srp_sg_tablesize) {
3612 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3613 if (!cmd_sg_entries)
3614 cmd_sg_entries = srp_sg_tablesize;
3615 }
3616
3617 if (!cmd_sg_entries)
3618 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3619
3620 if (cmd_sg_entries > 255) {
3621 pr_warn("Clamping cmd_sg_entries to 255\n");
3622 cmd_sg_entries = 255;
3623 }
3624
3625 if (!indirect_sg_entries)
3626 indirect_sg_entries = cmd_sg_entries;
3627 else if (indirect_sg_entries < cmd_sg_entries) {
3628 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3629 cmd_sg_entries);
3630 indirect_sg_entries = cmd_sg_entries;
3631 }
3632
3633 srp_remove_wq = create_workqueue("srp_remove");
3634 if (!srp_remove_wq) {
3635 ret = -ENOMEM;
3636 goto out;
3637 }
3638
3639 ret = -ENOMEM;
3640 ib_srp_transport_template =
3641 srp_attach_transport(&ib_srp_transport_functions);
3642 if (!ib_srp_transport_template)
3643 goto destroy_wq;
3644
3645 ret = class_register(&srp_class);
3646 if (ret) {
3647 pr_err("couldn't register class infiniband_srp\n");
3648 goto release_tr;
3649 }
3650
3651 ib_sa_register_client(&srp_sa_client);
3652
3653 ret = ib_register_client(&srp_client);
3654 if (ret) {
3655 pr_err("couldn't register IB client\n");
3656 goto unreg_sa;
3657 }
3658
3659 out:
3660 return ret;
3661
3662 unreg_sa:
3663 ib_sa_unregister_client(&srp_sa_client);
3664 class_unregister(&srp_class);
3665
3666 release_tr:
3667 srp_release_transport(ib_srp_transport_template);
3668
3669 destroy_wq:
3670 destroy_workqueue(srp_remove_wq);
3671 goto out;
3672 }
3673
3674 static void __exit srp_cleanup_module(void)
3675 {
3676 ib_unregister_client(&srp_client);
3677 ib_sa_unregister_client(&srp_sa_client);
3678 class_unregister(&srp_class);
3679 srp_release_transport(ib_srp_transport_template);
3680 destroy_workqueue(srp_remove_wq);
3681 }
3682
3683 module_init(srp_init_module);
3684 module_exit(srp_cleanup_module);
3685