1 /*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <linux/lockdep.h>
44 #include <linux/inet.h>
45 #include <rdma/ib_cache.h>
46
47 #include <linux/atomic.h>
48
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_dbg.h>
52 #include <scsi/scsi_tcq.h>
53 #include <scsi/srp.h>
54 #include <scsi/scsi_transport_srp.h>
55
56 #include "ib_srp.h"
57
58 #define DRV_NAME "ib_srp"
59 #define PFX DRV_NAME ": "
60
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
64
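/*
 * When the kernel is built without CONFIG_DYNAMIC_DEBUG, provide no-op
 * fallbacks so that DYNAMIC_DEBUG_BRANCH() evaluates to a constant false and
 * the optional mapping consistency check in srp_map_data() is compiled out.
 */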
65 #if !defined(CONFIG_DYNAMIC_DEBUG)
66 #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
67 #define DYNAMIC_DEBUG_BRANCH(descriptor) false
68 #endif
69
70 static unsigned int srp_sg_tablesize;
71 static unsigned int cmd_sg_entries;
72 static unsigned int indirect_sg_entries;
73 static bool allow_ext_sg;
74 static bool register_always = true;
75 static bool never_register;
76 static int topspin_workarounds = 1;
77
78 module_param(srp_sg_tablesize, uint, 0444);
79 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
80
81 module_param(cmd_sg_entries, uint, 0444);
82 MODULE_PARM_DESC(cmd_sg_entries,
83 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
84
85 module_param(indirect_sg_entries, uint, 0444);
86 MODULE_PARM_DESC(indirect_sg_entries,
87 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
88
89 module_param(allow_ext_sg, bool, 0444);
90 MODULE_PARM_DESC(allow_ext_sg,
91 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
92
93 module_param(topspin_workarounds, int, 0444);
94 MODULE_PARM_DESC(topspin_workarounds,
95 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
96
97 module_param(register_always, bool, 0444);
98 MODULE_PARM_DESC(register_always,
99 "Use memory registration even for contiguous memory regions");
100
101 module_param(never_register, bool, 0444);
102 MODULE_PARM_DESC(never_register, "Never register memory");
103
104 static const struct kernel_param_ops srp_tmo_ops;
105
106 static int srp_reconnect_delay = 10;
107 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
108 S_IRUGO | S_IWUSR);
109 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
110
111 static int srp_fast_io_fail_tmo = 15;
112 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
113 S_IRUGO | S_IWUSR);
114 MODULE_PARM_DESC(fast_io_fail_tmo,
115 "Number of seconds between the observation of a transport"
116 " layer error and failing all I/O. \"off\" means that this"
117 " functionality is disabled.");
118
119 static int srp_dev_loss_tmo = 600;
120 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
121 S_IRUGO | S_IWUSR);
122 MODULE_PARM_DESC(dev_loss_tmo,
123 "Maximum number of seconds that the SRP transport should"
124 " insulate transport layer errors. After this time has been"
125 " exceeded the SCSI host is removed. Should be"
126 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
127 " if fast_io_fail_tmo has not been set. \"off\" means that"
128 " this functionality is disabled.");
129
130 static bool srp_use_imm_data = true;
131 module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
132 MODULE_PARM_DESC(use_imm_data,
133 "Whether or not to request permission to use immediate data during SRP login.");
134
135 static unsigned int srp_max_imm_data = 8 * 1024;
136 module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
137 MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
138
139 static unsigned ch_count;
140 module_param(ch_count, uint, 0444);
141 MODULE_PARM_DESC(ch_count,
142 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
143
144 static int srp_add_one(struct ib_device *device);
145 static void srp_remove_one(struct ib_device *device, void *client_data);
146 static void srp_rename_dev(struct ib_device *device, void *client_data);
147 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
148 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
149 const char *opname);
150 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
151 const struct ib_cm_event *event);
152 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
153 struct rdma_cm_event *event);
154
155 static struct scsi_transport_template *ib_srp_transport_template;
156 static struct workqueue_struct *srp_remove_wq;
157
158 static struct ib_client srp_client = {
159 .name = "srp",
160 .add = srp_add_one,
161 .remove = srp_remove_one,
162 .rename = srp_rename_dev
163 };
164
165 static struct ib_sa_client srp_sa_client;
166
167 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
168 {
169 int tmo = *(int *)kp->arg;
170
171 if (tmo >= 0)
172 return sprintf(buffer, "%d\n", tmo);
173 else
174 return sprintf(buffer, "off\n");
175 }
176
177 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
178 {
179 int tmo, res;
180
181 res = srp_parse_tmo(&tmo, val);
182 if (res)
183 goto out;
184
185 if (kp->arg == &srp_reconnect_delay)
186 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
187 srp_dev_loss_tmo);
188 else if (kp->arg == &srp_fast_io_fail_tmo)
189 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
190 else
191 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
192 tmo);
193 if (res)
194 goto out;
195 *(int *)kp->arg = tmo;
196
197 out:
198 return res;
199 }
200
201 static const struct kernel_param_ops srp_tmo_ops = {
202 .get = srp_tmo_get,
203 .set = srp_tmo_set,
204 };
205
206 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
207 {
208 return (struct srp_target_port *) host->hostdata;
209 }
210
211 static const char *srp_target_info(struct Scsi_Host *host)
212 {
213 return host_to_target(host)->target_name;
214 }
215
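/*
 * A target is treated as a Topspin/Cisco device when the OUI in the first
 * three bytes of its (big-endian) I/O controller GUID matches one of the
 * vendor OUIs below and the topspin_workarounds module parameter is set.
 */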
216 static int srp_target_is_topspin(struct srp_target_port *target)
217 {
218 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
219 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
220
221 return topspin_workarounds &&
222 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
223 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
224 }
225
226 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
227 gfp_t gfp_mask,
228 enum dma_data_direction direction)
229 {
230 struct srp_iu *iu;
231
232 iu = kmalloc(sizeof *iu, gfp_mask);
233 if (!iu)
234 goto out;
235
236 iu->buf = kzalloc(size, gfp_mask);
237 if (!iu->buf)
238 goto out_free_iu;
239
240 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
241 direction);
242 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
243 goto out_free_buf;
244
245 iu->size = size;
246 iu->direction = direction;
247
248 return iu;
249
250 out_free_buf:
251 kfree(iu->buf);
252 out_free_iu:
253 kfree(iu);
254 out:
255 return NULL;
256 }
257
258 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
259 {
260 if (!iu)
261 return;
262
263 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
264 iu->direction);
265 kfree(iu->buf);
266 kfree(iu);
267 }
268
269 static void srp_qp_event(struct ib_event *event, void *context)
270 {
271 pr_debug("QP event %s (%d)\n",
272 ib_event_msg(event->event), event->event);
273 }
274
275 static int srp_init_ib_qp(struct srp_target_port *target,
276 struct ib_qp *qp)
277 {
278 struct ib_qp_attr *attr;
279 int ret;
280
281 attr = kmalloc(sizeof *attr, GFP_KERNEL);
282 if (!attr)
283 return -ENOMEM;
284
285 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
286 target->srp_host->port,
287 be16_to_cpu(target->ib_cm.pkey),
288 &attr->pkey_index);
289 if (ret)
290 goto out;
291
292 attr->qp_state = IB_QPS_INIT;
293 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
294 IB_ACCESS_REMOTE_WRITE);
295 attr->port_num = target->srp_host->port;
296
297 ret = ib_modify_qp(qp, attr,
298 IB_QP_STATE |
299 IB_QP_PKEY_INDEX |
300 IB_QP_ACCESS_FLAGS |
301 IB_QP_PORT);
302
303 out:
304 kfree(attr);
305 return ret;
306 }
307
308 static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
309 {
310 struct srp_target_port *target = ch->target;
311 struct ib_cm_id *new_cm_id;
312
313 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
314 srp_ib_cm_handler, ch);
315 if (IS_ERR(new_cm_id))
316 return PTR_ERR(new_cm_id);
317
318 if (ch->ib_cm.cm_id)
319 ib_destroy_cm_id(ch->ib_cm.cm_id);
320 ch->ib_cm.cm_id = new_cm_id;
321 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
322 target->srp_host->port))
323 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
324 else
325 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
326 ch->ib_cm.path.sgid = target->sgid;
327 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
328 ch->ib_cm.path.pkey = target->ib_cm.pkey;
329 ch->ib_cm.path.service_id = target->ib_cm.service_id;
330
331 return 0;
332 }
333
334 static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
335 {
336 struct srp_target_port *target = ch->target;
337 struct rdma_cm_id *new_cm_id;
338 int ret;
339
340 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
341 RDMA_PS_TCP, IB_QPT_RC);
342 if (IS_ERR(new_cm_id)) {
343 ret = PTR_ERR(new_cm_id);
344 new_cm_id = NULL;
345 goto out;
346 }
347
348 init_completion(&ch->done);
349 ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
350 &target->rdma_cm.src.sa : NULL,
351 &target->rdma_cm.dst.sa,
352 SRP_PATH_REC_TIMEOUT_MS);
353 if (ret) {
354 pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
355 &target->rdma_cm.src, &target->rdma_cm.dst, ret);
356 goto out;
357 }
358 ret = wait_for_completion_interruptible(&ch->done);
359 if (ret < 0)
360 goto out;
361
362 ret = ch->status;
363 if (ret) {
364 pr_err("Resolving address %pISpsc failed (%d)\n",
365 &target->rdma_cm.dst, ret);
366 goto out;
367 }
368
369 swap(ch->rdma_cm.cm_id, new_cm_id);
370
371 out:
372 if (new_cm_id)
373 rdma_destroy_id(new_cm_id);
374
375 return ret;
376 }
377
378 static int srp_new_cm_id(struct srp_rdma_ch *ch)
379 {
380 struct srp_target_port *target = ch->target;
381
382 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
383 srp_new_ib_cm_id(ch);
384 }
385
386 /**
387 * srp_destroy_fr_pool() - free the resources owned by a pool
388 * @pool: Fast registration pool to be destroyed.
389 */
390 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
391 {
392 int i;
393 struct srp_fr_desc *d;
394
395 if (!pool)
396 return;
397
398 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
399 if (d->mr)
400 ib_dereg_mr(d->mr);
401 }
402 kfree(pool);
403 }
404
405 /**
406 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
407 * @device: IB device to allocate fast registration descriptors for.
408 * @pd: Protection domain associated with the FR descriptors.
409 * @pool_size: Number of descriptors to allocate.
410 * @max_page_list_len: Maximum fast registration work request page list length.
411 */
412 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
413 struct ib_pd *pd, int pool_size,
414 int max_page_list_len)
415 {
416 struct srp_fr_pool *pool;
417 struct srp_fr_desc *d;
418 struct ib_mr *mr;
419 int i, ret = -EINVAL;
420 enum ib_mr_type mr_type;
421
422 if (pool_size <= 0)
423 goto err;
424 ret = -ENOMEM;
425 pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
426 if (!pool)
427 goto err;
428 pool->size = pool_size;
429 pool->max_page_list_len = max_page_list_len;
430 spin_lock_init(&pool->lock);
431 INIT_LIST_HEAD(&pool->free_list);
432
433 if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
434 mr_type = IB_MR_TYPE_SG_GAPS;
435 else
436 mr_type = IB_MR_TYPE_MEM_REG;
437
438 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
439 mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
440 if (IS_ERR(mr)) {
441 ret = PTR_ERR(mr);
442 if (ret == -ENOMEM)
443 pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
444 dev_name(&device->dev));
445 goto destroy_pool;
446 }
447 d->mr = mr;
448 list_add_tail(&d->entry, &pool->free_list);
449 }
450
451 out:
452 return pool;
453
454 destroy_pool:
455 srp_destroy_fr_pool(pool);
456
457 err:
458 pool = ERR_PTR(ret);
459 goto out;
460 }
461
462 /**
463 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
464 * @pool: Pool to obtain descriptor from.
465 */
466 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
467 {
468 struct srp_fr_desc *d = NULL;
469 unsigned long flags;
470
471 spin_lock_irqsave(&pool->lock, flags);
472 if (!list_empty(&pool->free_list)) {
473 d = list_first_entry(&pool->free_list, typeof(*d), entry);
474 list_del(&d->entry);
475 }
476 spin_unlock_irqrestore(&pool->lock, flags);
477
478 return d;
479 }
480
481 /**
482 * srp_fr_pool_put() - put an FR descriptor back in the free list
483 * @pool: Pool the descriptor was allocated from.
484 * @desc: Pointer to an array of fast registration descriptor pointers.
485 * @n: Number of descriptors to put back.
486 *
487 * Note: The caller must already have queued an invalidation request for
488 * desc->mr->rkey before calling this function.
489 */
490 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
491 int n)
492 {
493 unsigned long flags;
494 int i;
495
496 spin_lock_irqsave(&pool->lock, flags);
497 for (i = 0; i < n; i++)
498 list_add(&desc[i]->entry, &pool->free_list);
499 spin_unlock_irqrestore(&pool->lock, flags);
500 }
501
502 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
503 {
504 struct srp_device *dev = target->srp_host->srp_dev;
505
506 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
507 dev->max_pages_per_mr);
508 }
509
510 /**
511 * srp_destroy_qp() - destroy an RDMA queue pair
512 * @ch: SRP RDMA channel.
513 *
514 * Drain the qp before destroying it. This prevents the receive
515 * completion handler from accessing the queue pair while it is
516 * being destroyed.
517 */
518 static void srp_destroy_qp(struct srp_rdma_ch *ch)
519 {
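/*
 * The send CQ is allocated with IB_POLL_DIRECT and is normally polled from
 * __srp_get_tx_iu() with ch->lock held, so process any pending send
 * completions here under the same lock before the QP is drained and destroyed.
 */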
520 spin_lock_irq(&ch->lock);
521 ib_process_cq_direct(ch->send_cq, -1);
522 spin_unlock_irq(&ch->lock);
523
524 ib_drain_qp(ch->qp);
525 ib_destroy_qp(ch->qp);
526 }
527
528 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
529 {
530 struct srp_target_port *target = ch->target;
531 struct srp_device *dev = target->srp_host->srp_dev;
532 const struct ib_device_attr *attr = &dev->dev->attrs;
533 struct ib_qp_init_attr *init_attr;
534 struct ib_cq *recv_cq, *send_cq;
535 struct ib_qp *qp;
536 struct srp_fr_pool *fr_pool = NULL;
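/*
 * Size the send queue for one send WR per command plus, when fast
 * registration is used, one IB_WR_REG_MR and one IB_WR_LOCAL_INV per memory
 * region (up to mr_per_cmd regions per command), hence the multiplier below.
 */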
537 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
538 int ret;
539
540 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
541 if (!init_attr)
542 return -ENOMEM;
543
544 /* queue_size + 1 for ib_drain_rq() */
545 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
546 ch->comp_vector, IB_POLL_SOFTIRQ);
547 if (IS_ERR(recv_cq)) {
548 ret = PTR_ERR(recv_cq);
549 goto err;
550 }
551
552 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
553 ch->comp_vector, IB_POLL_DIRECT);
554 if (IS_ERR(send_cq)) {
555 ret = PTR_ERR(send_cq);
556 goto err_recv_cq;
557 }
558
559 init_attr->event_handler = srp_qp_event;
560 init_attr->cap.max_send_wr = m * target->queue_size;
561 init_attr->cap.max_recv_wr = target->queue_size + 1;
562 init_attr->cap.max_recv_sge = 1;
563 init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
564 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
565 init_attr->qp_type = IB_QPT_RC;
566 init_attr->send_cq = send_cq;
567 init_attr->recv_cq = recv_cq;
568
569 ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
570
571 if (target->using_rdma_cm) {
572 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
573 qp = ch->rdma_cm.cm_id->qp;
574 } else {
575 qp = ib_create_qp(dev->pd, init_attr);
576 if (!IS_ERR(qp)) {
577 ret = srp_init_ib_qp(target, qp);
578 if (ret)
579 ib_destroy_qp(qp);
580 } else {
581 ret = PTR_ERR(qp);
582 }
583 }
584 if (ret) {
585 pr_err("QP creation failed for dev %s: %d\n",
586 dev_name(&dev->dev->dev), ret);
587 goto err_send_cq;
588 }
589
590 if (dev->use_fast_reg) {
591 fr_pool = srp_alloc_fr_pool(target);
592 if (IS_ERR(fr_pool)) {
593 ret = PTR_ERR(fr_pool);
594 shost_printk(KERN_WARNING, target->scsi_host, PFX
595 "FR pool allocation failed (%d)\n", ret);
596 goto err_qp;
597 }
598 }
599
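/*
 * Release any old resources only now that the new QP, CQs and FR pool have
 * been created successfully; on the reconnect path this keeps the existing
 * channel resources intact if allocating the new ones fails.
 */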
600 if (ch->qp)
601 srp_destroy_qp(ch);
602 if (ch->recv_cq)
603 ib_free_cq(ch->recv_cq);
604 if (ch->send_cq)
605 ib_free_cq(ch->send_cq);
606
607 ch->qp = qp;
608 ch->recv_cq = recv_cq;
609 ch->send_cq = send_cq;
610
611 if (dev->use_fast_reg) {
612 if (ch->fr_pool)
613 srp_destroy_fr_pool(ch->fr_pool);
614 ch->fr_pool = fr_pool;
615 }
616
617 kfree(init_attr);
618 return 0;
619
620 err_qp:
621 if (target->using_rdma_cm)
622 rdma_destroy_qp(ch->rdma_cm.cm_id);
623 else
624 ib_destroy_qp(qp);
625
626 err_send_cq:
627 ib_free_cq(send_cq);
628
629 err_recv_cq:
630 ib_free_cq(recv_cq);
631
632 err:
633 kfree(init_attr);
634 return ret;
635 }
636
637 /*
638 * Note: this function may be called without srp_alloc_iu_bufs() having been
639 * invoked. Hence the ch->[rt]x_ring checks.
640 */
641 static void srp_free_ch_ib(struct srp_target_port *target,
642 struct srp_rdma_ch *ch)
643 {
644 struct srp_device *dev = target->srp_host->srp_dev;
645 int i;
646
647 if (!ch->target)
648 return;
649
650 if (target->using_rdma_cm) {
651 if (ch->rdma_cm.cm_id) {
652 rdma_destroy_id(ch->rdma_cm.cm_id);
653 ch->rdma_cm.cm_id = NULL;
654 }
655 } else {
656 if (ch->ib_cm.cm_id) {
657 ib_destroy_cm_id(ch->ib_cm.cm_id);
658 ch->ib_cm.cm_id = NULL;
659 }
660 }
661
662 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
663 if (!ch->qp)
664 return;
665
666 if (dev->use_fast_reg) {
667 if (ch->fr_pool)
668 srp_destroy_fr_pool(ch->fr_pool);
669 }
670
671 srp_destroy_qp(ch);
672 ib_free_cq(ch->send_cq);
673 ib_free_cq(ch->recv_cq);
674
675 /*
676 * Prevent the SCSI error handler from using this channel after it
677 * has been freed. The SCSI error handler can continue trying to
678 * perform recovery actions even after scsi_remove_host() has
679 * returned.
680 */
681 ch->target = NULL;
682
683 ch->qp = NULL;
684 ch->send_cq = ch->recv_cq = NULL;
685
686 if (ch->rx_ring) {
687 for (i = 0; i < target->queue_size; ++i)
688 srp_free_iu(target->srp_host, ch->rx_ring[i]);
689 kfree(ch->rx_ring);
690 ch->rx_ring = NULL;
691 }
692 if (ch->tx_ring) {
693 for (i = 0; i < target->queue_size; ++i)
694 srp_free_iu(target->srp_host, ch->tx_ring[i]);
695 kfree(ch->tx_ring);
696 ch->tx_ring = NULL;
697 }
698 }
699
700 static void srp_path_rec_completion(int status,
701 struct sa_path_rec *pathrec,
702 void *ch_ptr)
703 {
704 struct srp_rdma_ch *ch = ch_ptr;
705 struct srp_target_port *target = ch->target;
706
707 ch->status = status;
708 if (status)
709 shost_printk(KERN_ERR, target->scsi_host,
710 PFX "Got failed path rec status %d\n", status);
711 else
712 ch->ib_cm.path = *pathrec;
713 complete(&ch->done);
714 }
715
716 static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
717 {
718 struct srp_target_port *target = ch->target;
719 int ret;
720
721 ch->ib_cm.path.numb_path = 1;
722
723 init_completion(&ch->done);
724
725 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
726 target->srp_host->srp_dev->dev,
727 target->srp_host->port,
728 &ch->ib_cm.path,
729 IB_SA_PATH_REC_SERVICE_ID |
730 IB_SA_PATH_REC_DGID |
731 IB_SA_PATH_REC_SGID |
732 IB_SA_PATH_REC_NUMB_PATH |
733 IB_SA_PATH_REC_PKEY,
734 SRP_PATH_REC_TIMEOUT_MS,
735 GFP_KERNEL,
736 srp_path_rec_completion,
737 ch, &ch->ib_cm.path_query);
738 if (ch->ib_cm.path_query_id < 0)
739 return ch->ib_cm.path_query_id;
740
741 ret = wait_for_completion_interruptible(&ch->done);
742 if (ret < 0)
743 return ret;
744
745 if (ch->status < 0)
746 shost_printk(KERN_WARNING, target->scsi_host,
747 PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
748 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
749 be16_to_cpu(target->ib_cm.pkey),
750 be64_to_cpu(target->ib_cm.service_id));
751
752 return ch->status;
753 }
754
755 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
756 {
757 struct srp_target_port *target = ch->target;
758 int ret;
759
760 init_completion(&ch->done);
761
762 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
763 if (ret)
764 return ret;
765
766 wait_for_completion_interruptible(&ch->done);
767
768 if (ch->status != 0)
769 shost_printk(KERN_WARNING, target->scsi_host,
770 PFX "Path resolution failed\n");
771
772 return ch->status;
773 }
774
775 static int srp_lookup_path(struct srp_rdma_ch *ch)
776 {
777 struct srp_target_port *target = ch->target;
778
779 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
780 srp_ib_lookup_path(ch);
781 }
782
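/*
 * The CM response timeouts used by srp_send_req() are derived from the
 * port's subnet timeout (subnet_timeout + 2); a subnet timeout that is too
 * small can therefore cause the SRP login to time out before the target has
 * responded, which is what the warning below is about.
 */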
783 static u8 srp_get_subnet_timeout(struct srp_host *host)
784 {
785 struct ib_port_attr attr;
786 int ret;
787 u8 subnet_timeout = 18;
788
789 ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
790 if (ret == 0)
791 subnet_timeout = attr.subnet_timeout;
792
793 if (unlikely(subnet_timeout < 15))
794 pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
795 dev_name(&host->srp_dev->dev->dev), subnet_timeout);
796
797 return subnet_timeout;
798 }
799
800 static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
801 bool multich)
802 {
803 struct srp_target_port *target = ch->target;
804 struct {
805 struct rdma_conn_param rdma_param;
806 struct srp_login_req_rdma rdma_req;
807 struct ib_cm_req_param ib_param;
808 struct srp_login_req ib_req;
809 } *req = NULL;
810 char *ipi, *tpi;
811 int status;
812
813 req = kzalloc(sizeof *req, GFP_KERNEL);
814 if (!req)
815 return -ENOMEM;
816
817 req->ib_param.flow_control = 1;
818 req->ib_param.retry_count = target->tl_retry_count;
819
820 /*
821 * Pick some arbitrary defaults here; we could make these
822 * module parameters if anyone cared about setting them.
823 */
824 req->ib_param.responder_resources = 4;
825 req->ib_param.rnr_retry_count = 7;
826 req->ib_param.max_cm_retries = 15;
827
828 req->ib_req.opcode = SRP_LOGIN_REQ;
829 req->ib_req.tag = 0;
830 req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
831 req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
832 SRP_BUF_FORMAT_INDIRECT);
833 req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
834 SRP_MULTICHAN_SINGLE);
835 if (srp_use_imm_data) {
836 req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
837 req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
838 }
839
840 if (target->using_rdma_cm) {
841 req->rdma_param.flow_control = req->ib_param.flow_control;
842 req->rdma_param.responder_resources =
843 req->ib_param.responder_resources;
844 req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
845 req->rdma_param.retry_count = req->ib_param.retry_count;
846 req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
847 req->rdma_param.private_data = &req->rdma_req;
848 req->rdma_param.private_data_len = sizeof(req->rdma_req);
849
850 req->rdma_req.opcode = req->ib_req.opcode;
851 req->rdma_req.tag = req->ib_req.tag;
852 req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
853 req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
854 req->rdma_req.req_flags = req->ib_req.req_flags;
855 req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
856
857 ipi = req->rdma_req.initiator_port_id;
858 tpi = req->rdma_req.target_port_id;
859 } else {
860 u8 subnet_timeout;
861
862 subnet_timeout = srp_get_subnet_timeout(target->srp_host);
863
864 req->ib_param.primary_path = &ch->ib_cm.path;
865 req->ib_param.alternate_path = NULL;
866 req->ib_param.service_id = target->ib_cm.service_id;
867 get_random_bytes(&req->ib_param.starting_psn, 4);
868 req->ib_param.starting_psn &= 0xffffff;
869 req->ib_param.qp_num = ch->qp->qp_num;
870 req->ib_param.qp_type = ch->qp->qp_type;
871 req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
872 req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
873 req->ib_param.private_data = &req->ib_req;
874 req->ib_param.private_data_len = sizeof(req->ib_req);
875
876 ipi = req->ib_req.initiator_port_id;
877 tpi = req->ib_req.target_port_id;
878 }
879
880 /*
881 * In the published SRP specification (draft rev. 16a), the
882 * port identifier format is 8 bytes of ID extension followed
883 * by 8 bytes of GUID. Older drafts put the two halves in the
884 * opposite order, so that the GUID comes first.
885 *
886 * Targets conforming to these obsolete drafts can be
887 * recognized by the I/O Class they report.
888 */
889 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
890 memcpy(ipi, &target->sgid.global.interface_id, 8);
891 memcpy(ipi + 8, &target->initiator_ext, 8);
892 memcpy(tpi, &target->ioc_guid, 8);
893 memcpy(tpi + 8, &target->id_ext, 8);
894 } else {
895 memcpy(ipi, &target->initiator_ext, 8);
896 memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
897 memcpy(tpi, &target->id_ext, 8);
898 memcpy(tpi + 8, &target->ioc_guid, 8);
899 }
900
901 /*
902 * Topspin/Cisco SRP targets will reject our login unless we
903 * zero out the first 8 bytes of our initiator port ID and set
904 * the second 8 bytes to the local node GUID.
905 */
906 if (srp_target_is_topspin(target)) {
907 shost_printk(KERN_DEBUG, target->scsi_host,
908 PFX "Topspin/Cisco initiator port ID workaround "
909 "activated for target GUID %016llx\n",
910 be64_to_cpu(target->ioc_guid));
911 memset(ipi, 0, 8);
912 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
913 }
914
915 if (target->using_rdma_cm)
916 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
917 else
918 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
919
920 kfree(req);
921
922 return status;
923 }
924
925 static bool srp_queue_remove_work(struct srp_target_port *target)
926 {
927 bool changed = false;
928
929 spin_lock_irq(&target->lock);
930 if (target->state != SRP_TARGET_REMOVED) {
931 target->state = SRP_TARGET_REMOVED;
932 changed = true;
933 }
934 spin_unlock_irq(&target->lock);
935
936 if (changed)
937 queue_work(srp_remove_wq, &target->remove_work);
938
939 return changed;
940 }
941
942 static void srp_disconnect_target(struct srp_target_port *target)
943 {
944 struct srp_rdma_ch *ch;
945 int i, ret;
946
947 /* XXX should send SRP_I_LOGOUT request */
948
949 for (i = 0; i < target->ch_count; i++) {
950 ch = &target->ch[i];
951 ch->connected = false;
952 ret = 0;
953 if (target->using_rdma_cm) {
954 if (ch->rdma_cm.cm_id)
955 rdma_disconnect(ch->rdma_cm.cm_id);
956 } else {
957 if (ch->ib_cm.cm_id)
958 ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
959 NULL, 0);
960 }
961 if (ret < 0) {
962 shost_printk(KERN_DEBUG, target->scsi_host,
963 PFX "Sending CM DREQ failed\n");
964 }
965 }
966 }
967
968 static void srp_free_req_data(struct srp_target_port *target,
969 struct srp_rdma_ch *ch)
970 {
971 struct srp_device *dev = target->srp_host->srp_dev;
972 struct ib_device *ibdev = dev->dev;
973 struct srp_request *req;
974 int i;
975
976 if (!ch->req_ring)
977 return;
978
979 for (i = 0; i < target->req_ring_size; ++i) {
980 req = &ch->req_ring[i];
981 if (dev->use_fast_reg)
982 kfree(req->fr_list);
983 if (req->indirect_dma_addr) {
984 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
985 target->indirect_size,
986 DMA_TO_DEVICE);
987 }
988 kfree(req->indirect_desc);
989 }
990
991 kfree(ch->req_ring);
992 ch->req_ring = NULL;
993 }
994
995 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
996 {
997 struct srp_target_port *target = ch->target;
998 struct srp_device *srp_dev = target->srp_host->srp_dev;
999 struct ib_device *ibdev = srp_dev->dev;
1000 struct srp_request *req;
1001 dma_addr_t dma_addr;
1002 int i, ret = -ENOMEM;
1003
1004 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
1005 GFP_KERNEL);
1006 if (!ch->req_ring)
1007 goto out;
1008
1009 for (i = 0; i < target->req_ring_size; ++i) {
1010 req = &ch->req_ring[i];
1011 if (srp_dev->use_fast_reg) {
1012 req->fr_list = kmalloc_array(target->mr_per_cmd,
1013 sizeof(void *), GFP_KERNEL);
1014 if (!req->fr_list)
1015 goto out;
1016 }
1017 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
1018 if (!req->indirect_desc)
1019 goto out;
1020
1021 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1022 target->indirect_size,
1023 DMA_TO_DEVICE);
1024 if (ib_dma_mapping_error(ibdev, dma_addr))
1025 goto out;
1026
1027 req->indirect_dma_addr = dma_addr;
1028 }
1029 ret = 0;
1030
1031 out:
1032 return ret;
1033 }
1034
1035 /**
1036 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1037 * @shost: SCSI host whose attributes to remove from sysfs.
1038 *
1039 * Note: Any attributes defined in the host template that did not exist
1040 * before this function was invoked will be ignored.
1041 */
1042 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1043 {
1044 struct device_attribute **attr;
1045
1046 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
1047 device_remove_file(&shost->shost_dev, *attr);
1048 }
1049
1050 static void srp_remove_target(struct srp_target_port *target)
1051 {
1052 struct srp_rdma_ch *ch;
1053 int i;
1054
1055 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1056
1057 srp_del_scsi_host_attr(target->scsi_host);
1058 srp_rport_get(target->rport);
1059 srp_remove_host(target->scsi_host);
1060 scsi_remove_host(target->scsi_host);
1061 srp_stop_rport_timers(target->rport);
1062 srp_disconnect_target(target);
1063 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1064 for (i = 0; i < target->ch_count; i++) {
1065 ch = &target->ch[i];
1066 srp_free_ch_ib(target, ch);
1067 }
1068 cancel_work_sync(&target->tl_err_work);
1069 srp_rport_put(target->rport);
1070 for (i = 0; i < target->ch_count; i++) {
1071 ch = &target->ch[i];
1072 srp_free_req_data(target, ch);
1073 }
1074 kfree(target->ch);
1075 target->ch = NULL;
1076
1077 spin_lock(&target->srp_host->target_lock);
1078 list_del(&target->list);
1079 spin_unlock(&target->srp_host->target_lock);
1080
1081 scsi_host_put(target->scsi_host);
1082 }
1083
1084 static void srp_remove_work(struct work_struct *work)
1085 {
1086 struct srp_target_port *target =
1087 container_of(work, struct srp_target_port, remove_work);
1088
1089 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1090
1091 srp_remove_target(target);
1092 }
1093
1094 static void srp_rport_delete(struct srp_rport *rport)
1095 {
1096 struct srp_target_port *target = rport->lld_data;
1097
1098 srp_queue_remove_work(target);
1099 }
1100
1101 /**
1102 * srp_connected_ch() - number of connected channels
1103 * @target: SRP target port.
1104 */
1105 static int srp_connected_ch(struct srp_target_port *target)
1106 {
1107 int i, c = 0;
1108
1109 for (i = 0; i < target->ch_count; i++)
1110 c += target->ch[i].connected;
1111
1112 return c;
1113 }
1114
1115 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1116 bool multich)
1117 {
1118 struct srp_target_port *target = ch->target;
1119 int ret;
1120
1121 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1122
1123 ret = srp_lookup_path(ch);
1124 if (ret)
1125 goto out;
1126
1127 while (1) {
1128 init_completion(&ch->done);
1129 ret = srp_send_req(ch, max_iu_len, multich);
1130 if (ret)
1131 goto out;
1132 ret = wait_for_completion_interruptible(&ch->done);
1133 if (ret < 0)
1134 goto out;
1135
1136 /*
1137 * The CM event handling code will set status to
1138 * SRP_PORT_REDIRECT if we get a port redirect REJ
1139 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1140 * redirect REJ back.
1141 */
1142 ret = ch->status;
1143 switch (ret) {
1144 case 0:
1145 ch->connected = true;
1146 goto out;
1147
1148 case SRP_PORT_REDIRECT:
1149 ret = srp_lookup_path(ch);
1150 if (ret)
1151 goto out;
1152 break;
1153
1154 case SRP_DLID_REDIRECT:
1155 break;
1156
1157 case SRP_STALE_CONN:
1158 shost_printk(KERN_ERR, target->scsi_host, PFX
1159 "giving up on stale connection\n");
1160 ret = -ECONNRESET;
1161 goto out;
1162
1163 default:
1164 goto out;
1165 }
1166 }
1167
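/*
 * Positive ch->status values (e.g. unhandled redirect or reject codes set by
 * the CM event handler) are mapped to -ENODEV so that callers always see
 * either zero or a negative errno.
 */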
1168 out:
1169 return ret <= 0 ? ret : -ENODEV;
1170 }
1171
1172 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1173 {
1174 srp_handle_qp_err(cq, wc, "INV RKEY");
1175 }
1176
1177 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1178 u32 rkey)
1179 {
1180 struct ib_send_wr wr = {
1181 .opcode = IB_WR_LOCAL_INV,
1182 .next = NULL,
1183 .num_sge = 0,
1184 .send_flags = 0,
1185 .ex.invalidate_rkey = rkey,
1186 };
1187
1188 wr.wr_cqe = &req->reg_cqe;
1189 req->reg_cqe.done = srp_inv_rkey_err_done;
1190 return ib_post_send(ch->qp, &wr, NULL);
1191 }
1192
1193 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1194 struct srp_rdma_ch *ch,
1195 struct srp_request *req)
1196 {
1197 struct srp_target_port *target = ch->target;
1198 struct srp_device *dev = target->srp_host->srp_dev;
1199 struct ib_device *ibdev = dev->dev;
1200 int i, res;
1201
1202 if (!scsi_sglist(scmnd) ||
1203 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1204 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1205 return;
1206
1207 if (dev->use_fast_reg) {
1208 struct srp_fr_desc **pfr;
1209
1210 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1211 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1212 if (res < 0) {
1213 shost_printk(KERN_ERR, target->scsi_host, PFX
1214 "Queueing INV WR for rkey %#x failed (%d)\n",
1215 (*pfr)->mr->rkey, res);
1216 queue_work(system_long_wq,
1217 &target->tl_err_work);
1218 }
1219 }
1220 if (req->nmdesc)
1221 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1222 req->nmdesc);
1223 }
1224
1225 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1226 scmnd->sc_data_direction);
1227 }
1228
1229 /**
1230 * srp_claim_req - Take ownership of the scmnd associated with a request.
1231 * @ch: SRP RDMA channel.
1232 * @req: SRP request.
1233 * @sdev: If not NULL, only take ownership for this SCSI device.
1234 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1235 * ownership of @req->scmnd if it equals @scmnd.
1236 *
1237 * Return value:
1238 * Either NULL or a pointer to the SCSI command the caller became owner of.
1239 */
1240 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1241 struct srp_request *req,
1242 struct scsi_device *sdev,
1243 struct scsi_cmnd *scmnd)
1244 {
1245 unsigned long flags;
1246
1247 spin_lock_irqsave(&ch->lock, flags);
1248 if (req->scmnd &&
1249 (!sdev || req->scmnd->device == sdev) &&
1250 (!scmnd || req->scmnd == scmnd)) {
1251 scmnd = req->scmnd;
1252 req->scmnd = NULL;
1253 } else {
1254 scmnd = NULL;
1255 }
1256 spin_unlock_irqrestore(&ch->lock, flags);
1257
1258 return scmnd;
1259 }
1260
1261 /**
1262 * srp_free_req() - Unmap data and adjust ch->req_lim.
1263 * @ch: SRP RDMA channel.
1264 * @req: Request to be freed.
1265 * @scmnd: SCSI command associated with @req.
1266 * @req_lim_delta: Amount to be added to @target->req_lim.
1267 */
1268 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1269 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1270 {
1271 unsigned long flags;
1272
1273 srp_unmap_data(scmnd, ch, req);
1274
1275 spin_lock_irqsave(&ch->lock, flags);
1276 ch->req_lim += req_lim_delta;
1277 spin_unlock_irqrestore(&ch->lock, flags);
1278 }
1279
1280 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1281 struct scsi_device *sdev, int result)
1282 {
1283 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1284
1285 if (scmnd) {
1286 srp_free_req(ch, req, scmnd, 0);
1287 scmnd->result = result;
1288 scmnd->scsi_done(scmnd);
1289 }
1290 }
1291
1292 static void srp_terminate_io(struct srp_rport *rport)
1293 {
1294 struct srp_target_port *target = rport->lld_data;
1295 struct srp_rdma_ch *ch;
1296 int i, j;
1297
1298 for (i = 0; i < target->ch_count; i++) {
1299 ch = &target->ch[i];
1300
1301 for (j = 0; j < target->req_ring_size; ++j) {
1302 struct srp_request *req = &ch->req_ring[j];
1303
1304 srp_finish_req(ch, req, NULL,
1305 DID_TRANSPORT_FAILFAST << 16);
1306 }
1307 }
1308 }
1309
1310 /* Calculate maximum initiator to target information unit length. */
1311 static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
1312 uint32_t max_it_iu_size)
1313 {
1314 uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1315 sizeof(struct srp_indirect_buf) +
1316 cmd_sg_cnt * sizeof(struct srp_direct_buf);
1317
1318 if (use_imm_data)
1319 max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1320 srp_max_imm_data);
1321
1322 if (max_it_iu_size)
1323 max_iu_len = min(max_iu_len, max_it_iu_size);
1324
1325 pr_debug("max_iu_len = %d\n", max_iu_len);
1326
1327 return max_iu_len;
1328 }
1329
1330 /*
1331 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1332 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1333 * srp_reset_device() or srp_reset_host() calls will occur while this function
1334 * is in progress. One way to realize that is not to call this function
1335 * directly but to call srp_reconnect_rport() instead since that last function
1336 * serializes calls of this function via rport->mutex and also blocks
1337 * srp_queuecommand() calls before invoking this function.
1338 */
1339 static int srp_rport_reconnect(struct srp_rport *rport)
1340 {
1341 struct srp_target_port *target = rport->lld_data;
1342 struct srp_rdma_ch *ch;
1343 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1344 srp_use_imm_data,
1345 target->max_it_iu_size);
1346 int i, j, ret = 0;
1347 bool multich = false;
1348
1349 srp_disconnect_target(target);
1350
1351 if (target->state == SRP_TARGET_SCANNING)
1352 return -ENODEV;
1353
1354 /*
1355 * Now get a new local CM ID so that we avoid confusing the target in
1356 * case things are really fouled up. Doing so also ensures that all CM
1357 * callbacks will have finished before a new QP is allocated.
1358 */
1359 for (i = 0; i < target->ch_count; i++) {
1360 ch = &target->ch[i];
1361 ret += srp_new_cm_id(ch);
1362 }
1363 for (i = 0; i < target->ch_count; i++) {
1364 ch = &target->ch[i];
1365 for (j = 0; j < target->req_ring_size; ++j) {
1366 struct srp_request *req = &ch->req_ring[j];
1367
1368 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1369 }
1370 }
1371 for (i = 0; i < target->ch_count; i++) {
1372 ch = &target->ch[i];
1373 /*
1374 * Whether or not creating a new CM ID succeeded, create a new
1375 * QP. This guarantees that all completion callback function
1376 * invocations have finished before request resetting starts.
1377 */
1378 ret += srp_create_ch_ib(ch);
1379
1380 INIT_LIST_HEAD(&ch->free_tx);
1381 for (j = 0; j < target->queue_size; ++j)
1382 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1383 }
1384
1385 target->qp_in_error = false;
1386
1387 for (i = 0; i < target->ch_count; i++) {
1388 ch = &target->ch[i];
1389 if (ret)
1390 break;
1391 ret = srp_connect_ch(ch, max_iu_len, multich);
1392 multich = true;
1393 }
1394
1395 if (ret == 0)
1396 shost_printk(KERN_INFO, target->scsi_host,
1397 PFX "reconnect succeeded\n");
1398
1399 return ret;
1400 }
1401
1402 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1403 unsigned int dma_len, u32 rkey)
1404 {
1405 struct srp_direct_buf *desc = state->desc;
1406
1407 WARN_ON_ONCE(!dma_len);
1408
1409 desc->va = cpu_to_be64(dma_addr);
1410 desc->key = cpu_to_be32(rkey);
1411 desc->len = cpu_to_be32(dma_len);
1412
1413 state->total_len += dma_len;
1414 state->desc++;
1415 state->ndesc++;
1416 }
1417
1418 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1419 {
1420 srp_handle_qp_err(cq, wc, "FAST REG");
1421 }
1422
1423 /*
1424 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1425 * where to start in the first element. If sg_offset_p != NULL then
1426 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1427 * byte that has not yet been mapped.
1428 */
1429 static int srp_map_finish_fr(struct srp_map_state *state,
1430 struct srp_request *req,
1431 struct srp_rdma_ch *ch, int sg_nents,
1432 unsigned int *sg_offset_p)
1433 {
1434 struct srp_target_port *target = ch->target;
1435 struct srp_device *dev = target->srp_host->srp_dev;
1436 struct ib_reg_wr wr;
1437 struct srp_fr_desc *desc;
1438 u32 rkey;
1439 int n, err;
1440
1441 if (state->fr.next >= state->fr.end) {
1442 shost_printk(KERN_ERR, ch->target->scsi_host,
1443 PFX "Out of MRs (mr_per_cmd = %d)\n",
1444 ch->target->mr_per_cmd);
1445 return -ENOMEM;
1446 }
1447
1448 WARN_ON_ONCE(!dev->use_fast_reg);
1449
1450 if (sg_nents == 1 && target->global_rkey) {
1451 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1452
1453 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1454 sg_dma_len(state->sg) - sg_offset,
1455 target->global_rkey);
1456 if (sg_offset_p)
1457 *sg_offset_p = 0;
1458 return 1;
1459 }
1460
1461 desc = srp_fr_pool_get(ch->fr_pool);
1462 if (!desc)
1463 return -ENOMEM;
1464
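/*
 * Bump the key portion of the rkey before each registration so that any
 * remote access still using a previously granted rkey for this MR fails.
 */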
1465 rkey = ib_inc_rkey(desc->mr->rkey);
1466 ib_update_fast_reg_key(desc->mr, rkey);
1467
1468 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1469 dev->mr_page_size);
1470 if (unlikely(n < 0)) {
1471 srp_fr_pool_put(ch->fr_pool, &desc, 1);
1472 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1473 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1474 sg_offset_p ? *sg_offset_p : -1, n);
1475 return n;
1476 }
1477
1478 WARN_ON_ONCE(desc->mr->length == 0);
1479
1480 req->reg_cqe.done = srp_reg_mr_err_done;
1481
1482 wr.wr.next = NULL;
1483 wr.wr.opcode = IB_WR_REG_MR;
1484 wr.wr.wr_cqe = &req->reg_cqe;
1485 wr.wr.num_sge = 0;
1486 wr.wr.send_flags = 0;
1487 wr.mr = desc->mr;
1488 wr.key = desc->mr->rkey;
1489 wr.access = (IB_ACCESS_LOCAL_WRITE |
1490 IB_ACCESS_REMOTE_READ |
1491 IB_ACCESS_REMOTE_WRITE);
1492
1493 *state->fr.next++ = desc;
1494 state->nmdesc++;
1495
1496 srp_map_desc(state, desc->mr->iova,
1497 desc->mr->length, desc->mr->rkey);
1498
1499 err = ib_post_send(ch->qp, &wr.wr, NULL);
1500 if (unlikely(err)) {
1501 WARN_ON_ONCE(err == -ENOMEM);
1502 return err;
1503 }
1504
1505 return n;
1506 }
1507
1508 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1509 struct srp_request *req, struct scatterlist *scat,
1510 int count)
1511 {
1512 unsigned int sg_offset = 0;
1513
1514 state->fr.next = req->fr_list;
1515 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1516 state->sg = scat;
1517
1518 if (count == 0)
1519 return 0;
1520
1521 while (count) {
1522 int i, n;
1523
1524 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1525 if (unlikely(n < 0))
1526 return n;
1527
1528 count -= n;
1529 for (i = 0; i < n; i++)
1530 state->sg = sg_next(state->sg);
1531 }
1532
1533 return 0;
1534 }
1535
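/*
 * Non-registering path: describe each DMA-mapped S/G element directly with
 * the device's global rkey. Only used when fast registration is not in use
 * (see the dispatch in srp_map_data()), so target->global_rkey must be valid.
 */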
1536 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1537 struct srp_request *req, struct scatterlist *scat,
1538 int count)
1539 {
1540 struct srp_target_port *target = ch->target;
1541 struct scatterlist *sg;
1542 int i;
1543
1544 for_each_sg(scat, sg, count, i) {
1545 srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1546 target->global_rkey);
1547 }
1548
1549 return 0;
1550 }
1551
1552 /*
1553 * Register the indirect data buffer descriptor with the HCA.
1554 *
1555 * Note: since the indirect data buffer descriptor has been allocated with
1556 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1557 * memory buffer.
1558 */
1559 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1560 void **next_mr, void **end_mr, u32 idb_len,
1561 __be32 *idb_rkey)
1562 {
1563 struct srp_target_port *target = ch->target;
1564 struct srp_device *dev = target->srp_host->srp_dev;
1565 struct srp_map_state state;
1566 struct srp_direct_buf idb_desc;
1567 struct scatterlist idb_sg[1];
1568 int ret;
1569
1570 memset(&state, 0, sizeof(state));
1571 memset(&idb_desc, 0, sizeof(idb_desc));
1572 state.gen.next = next_mr;
1573 state.gen.end = end_mr;
1574 state.desc = &idb_desc;
1575 state.base_dma_addr = req->indirect_dma_addr;
1576 state.dma_len = idb_len;
1577
1578 if (dev->use_fast_reg) {
1579 state.sg = idb_sg;
1580 sg_init_one(idb_sg, req->indirect_desc, idb_len);
1581 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1582 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1583 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1584 #endif
1585 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1586 if (ret < 0)
1587 return ret;
1588 WARN_ON_ONCE(ret < 1);
1589 } else {
1590 return -EINVAL;
1591 }
1592
1593 *idb_rkey = idb_desc.key;
1594
1595 return 0;
1596 }
1597
1598 static void srp_check_mapping(struct srp_map_state *state,
1599 struct srp_rdma_ch *ch, struct srp_request *req,
1600 struct scatterlist *scat, int count)
1601 {
1602 struct srp_device *dev = ch->target->srp_host->srp_dev;
1603 struct srp_fr_desc **pfr;
1604 u64 desc_len = 0, mr_len = 0;
1605 int i;
1606
1607 for (i = 0; i < state->ndesc; i++)
1608 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1609 if (dev->use_fast_reg)
1610 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1611 mr_len += (*pfr)->mr->length;
1612 if (desc_len != scsi_bufflen(req->scmnd) ||
1613 mr_len > scsi_bufflen(req->scmnd))
1614 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1615 scsi_bufflen(req->scmnd), desc_len, mr_len,
1616 state->ndesc, state->nmdesc);
1617 }
1618
1619 /**
1620 * srp_map_data() - map SCSI data buffer onto an SRP request
1621 * @scmnd: SCSI command to map
1622 * @ch: SRP RDMA channel
1623 * @req: SRP request
1624 *
1625 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1626 * mapping failed. The size of any immediate data is not included in the
1627 * return value.
1628 */
1629 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1630 struct srp_request *req)
1631 {
1632 struct srp_target_port *target = ch->target;
1633 struct scatterlist *scat, *sg;
1634 struct srp_cmd *cmd = req->cmd->buf;
1635 int i, len, nents, count, ret;
1636 struct srp_device *dev;
1637 struct ib_device *ibdev;
1638 struct srp_map_state state;
1639 struct srp_indirect_buf *indirect_hdr;
1640 u64 data_len;
1641 u32 idb_len, table_len;
1642 __be32 idb_rkey;
1643 u8 fmt;
1644
1645 req->cmd->num_sge = 1;
1646
1647 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1648 return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1649
1650 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1651 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1652 shost_printk(KERN_WARNING, target->scsi_host,
1653 PFX "Unhandled data direction %d\n",
1654 scmnd->sc_data_direction);
1655 return -EINVAL;
1656 }
1657
1658 nents = scsi_sg_count(scmnd);
1659 scat = scsi_sglist(scmnd);
1660 data_len = scsi_bufflen(scmnd);
1661
1662 dev = target->srp_host->srp_dev;
1663 ibdev = dev->dev;
1664
1665 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1666 if (unlikely(count == 0))
1667 return -EIO;
1668
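/*
 * Use the SRP immediate data format for writes when the target granted it at
 * login time, the payload fits in the IU behind the SRP_CMD header and enough
 * send SGEs are available; the data is then sent inline with the command.
 */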
1669 if (ch->use_imm_data &&
1670 count <= ch->max_imm_sge &&
1671 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1672 scmnd->sc_data_direction == DMA_TO_DEVICE) {
1673 struct srp_imm_buf *buf;
1674 struct ib_sge *sge = &req->cmd->sge[1];
1675
1676 fmt = SRP_DATA_DESC_IMM;
1677 len = SRP_IMM_DATA_OFFSET;
1678 req->nmdesc = 0;
1679 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1680 buf->len = cpu_to_be32(data_len);
1681 WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1682 for_each_sg(scat, sg, count, i) {
1683 sge[i].addr = sg_dma_address(sg);
1684 sge[i].length = sg_dma_len(sg);
1685 sge[i].lkey = target->lkey;
1686 }
1687 req->cmd->num_sge += count;
1688 goto map_complete;
1689 }
1690
1691 fmt = SRP_DATA_DESC_DIRECT;
1692 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1693 sizeof(struct srp_direct_buf);
1694
1695 if (count == 1 && target->global_rkey) {
1696 /*
1697 * The midlayer only generated a single gather/scatter
1698 * entry, or DMA mapping coalesced everything to a
1699 * single entry. So a direct descriptor along with
1700 * the DMA MR suffices.
1701 */
1702 struct srp_direct_buf *buf;
1703
1704 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1705 buf->va = cpu_to_be64(sg_dma_address(scat));
1706 buf->key = cpu_to_be32(target->global_rkey);
1707 buf->len = cpu_to_be32(sg_dma_len(scat));
1708
1709 req->nmdesc = 0;
1710 goto map_complete;
1711 }
1712
1713 /*
1714 * We have more than one scatter/gather entry, so build our indirect
1715 * descriptor table, trying to merge as many entries as we can.
1716 */
1717 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
1718
1719 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1720 target->indirect_size, DMA_TO_DEVICE);
1721
1722 memset(&state, 0, sizeof(state));
1723 state.desc = req->indirect_desc;
1724 if (dev->use_fast_reg)
1725 ret = srp_map_sg_fr(&state, ch, req, scat, count);
1726 else
1727 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1728 req->nmdesc = state.nmdesc;
1729 if (ret < 0)
1730 goto unmap;
1731
1732 {
1733 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1734 "Memory mapping consistency check");
1735 if (DYNAMIC_DEBUG_BRANCH(ddm))
1736 srp_check_mapping(&state, ch, req, scat, count);
1737 }
1738
1739 /* We've mapped the request, now pull as much of the indirect
1740 * descriptor table as we can into the command buffer. If this
1741 * target is not using an external indirect table, we are
1742 * guaranteed to fit into the command, as the SCSI layer won't
1743 * give us more S/G entries than we allow.
1744 */
1745 if (state.ndesc == 1) {
1746 /*
1747 * Memory registration collapsed the sg-list into one entry,
1748 * so use a direct descriptor.
1749 */
1750 struct srp_direct_buf *buf;
1751
1752 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1753 *buf = req->indirect_desc[0];
1754 goto map_complete;
1755 }
1756
1757 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1758 !target->allow_ext_sg)) {
1759 shost_printk(KERN_ERR, target->scsi_host,
1760 "Could not fit S/G list into SRP_CMD\n");
1761 ret = -EIO;
1762 goto unmap;
1763 }
1764
1765 count = min(state.ndesc, target->cmd_sg_cnt);
1766 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1767 idb_len = sizeof(struct srp_indirect_buf) + table_len;
1768
1769 fmt = SRP_DATA_DESC_INDIRECT;
1770 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1771 sizeof(struct srp_indirect_buf);
1772 len += count * sizeof (struct srp_direct_buf);
1773
1774 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1775 count * sizeof (struct srp_direct_buf));
1776
1777 if (!target->global_rkey) {
1778 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1779 idb_len, &idb_rkey);
1780 if (ret < 0)
1781 goto unmap;
1782 req->nmdesc++;
1783 } else {
1784 idb_rkey = cpu_to_be32(target->global_rkey);
1785 }
1786
1787 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1788 indirect_hdr->table_desc.key = idb_rkey;
1789 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1790 indirect_hdr->len = cpu_to_be32(state.total_len);
1791
1792 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1793 cmd->data_out_desc_cnt = count;
1794 else
1795 cmd->data_in_desc_cnt = count;
1796
1797 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1798 DMA_TO_DEVICE);
1799
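	/*
	 * Note: per the SRP buffer format encoding assumed here, the data-out
	 * descriptor format is carried in the upper nibble of buf_fmt and the
	 * data-in format in the lower nibble, hence the shift by four bits on
	 * the write path below.
	 */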
1800 map_complete:
1801 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1802 cmd->buf_fmt = fmt << 4;
1803 else
1804 cmd->buf_fmt = fmt;
1805
1806 return len;
1807
1808 unmap:
1809 srp_unmap_data(scmnd, ch, req);
1810 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1811 ret = -E2BIG;
1812 return ret;
1813 }
1814
1815 /*
1816  * Return an IU and possibly a credit to the free pool
1817 */
1818 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1819 enum srp_iu_type iu_type)
1820 {
1821 unsigned long flags;
1822
1823 spin_lock_irqsave(&ch->lock, flags);
1824 list_add(&iu->list, &ch->free_tx);
1825 if (iu_type != SRP_IU_RSP)
1826 ++ch->req_lim;
1827 spin_unlock_irqrestore(&ch->lock, flags);
1828 }
1829
1830 /*
1831 * Must be called with ch->lock held to protect req_lim and free_tx.
1832 * If IU is not sent, it must be returned using srp_put_tx_iu().
1833 *
1834 * Note:
1835 * An upper limit for the number of allocated information units for each
1836 * request type is:
1837 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1838 * more than Scsi_Host.can_queue requests.
1839 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1840 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1841 * one unanswered SRP request to an initiator.
1842 */
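/*
 * For example, with the credit reservation implemented below, an SRP_IU_CMD
 * allocation only succeeds while ch->req_lim > SRP_TSK_MGMT_SQ_SIZE, an
 * SRP_IU_TSK_MGMT allocation may dip into that reserve, and an SRP_IU_RSP
 * allocation ignores req_lim entirely.
 */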
1843 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1844 enum srp_iu_type iu_type)
1845 {
1846 struct srp_target_port *target = ch->target;
1847 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1848 struct srp_iu *iu;
1849
1850 lockdep_assert_held(&ch->lock);
1851
1852 ib_process_cq_direct(ch->send_cq, -1);
1853
1854 if (list_empty(&ch->free_tx))
1855 return NULL;
1856
1857 /* Initiator responses to target requests do not consume credits */
1858 if (iu_type != SRP_IU_RSP) {
1859 if (ch->req_lim <= rsv) {
1860 ++target->zero_req_lim;
1861 return NULL;
1862 }
1863
1864 --ch->req_lim;
1865 }
1866
1867 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1868 list_del(&iu->list);
1869 return iu;
1870 }
1871
1872 /*
1873 * Note: if this function is called from inside ib_drain_sq() then it will
1874 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1875 * with status IB_WC_SUCCESS then that's a bug.
1876 */
1877 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1878 {
1879 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1880 struct srp_rdma_ch *ch = cq->cq_context;
1881
1882 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1883 srp_handle_qp_err(cq, wc, "SEND");
1884 return;
1885 }
1886
1887 lockdep_assert_held(&ch->lock);
1888
1889 list_add(&iu->list, &ch->free_tx);
1890 }
1891
1892 /**
1893 * srp_post_send() - send an SRP information unit
1894 * @ch: RDMA channel over which to send the information unit.
1895 * @iu: Information unit to send.
1896 * @len: Length of the information unit excluding immediate data.
1897 */
1898 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1899 {
1900 struct srp_target_port *target = ch->target;
1901 struct ib_send_wr wr;
1902
1903 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1904 return -EINVAL;
1905
1906 iu->sge[0].addr = iu->dma;
1907 iu->sge[0].length = len;
1908 iu->sge[0].lkey = target->lkey;
1909
1910 iu->cqe.done = srp_send_done;
1911
1912 wr.next = NULL;
1913 wr.wr_cqe = &iu->cqe;
1914 wr.sg_list = &iu->sge[0];
1915 wr.num_sge = iu->num_sge;
1916 wr.opcode = IB_WR_SEND;
1917 wr.send_flags = IB_SEND_SIGNALED;
1918
1919 return ib_post_send(ch->qp, &wr, NULL);
1920 }
1921
1922 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1923 {
1924 struct srp_target_port *target = ch->target;
1925 struct ib_recv_wr wr;
1926 struct ib_sge list;
1927
1928 list.addr = iu->dma;
1929 list.length = iu->size;
1930 list.lkey = target->lkey;
1931
1932 iu->cqe.done = srp_recv_done;
1933
1934 wr.next = NULL;
1935 wr.wr_cqe = &iu->cqe;
1936 wr.sg_list = &list;
1937 wr.num_sge = 1;
1938
1939 return ib_post_recv(ch->qp, &wr, NULL);
1940 }
1941
1942 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1943 {
1944 struct srp_target_port *target = ch->target;
1945 struct srp_request *req;
1946 struct scsi_cmnd *scmnd;
1947 unsigned long flags;
1948
1949 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1950 spin_lock_irqsave(&ch->lock, flags);
1951 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1952 if (rsp->tag == ch->tsk_mgmt_tag) {
1953 ch->tsk_mgmt_status = -1;
1954 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1955 ch->tsk_mgmt_status = rsp->data[3];
1956 complete(&ch->tsk_mgmt_done);
1957 } else {
1958 shost_printk(KERN_ERR, target->scsi_host,
1959 "Received tsk mgmt response too late for tag %#llx\n",
1960 rsp->tag);
1961 }
1962 spin_unlock_irqrestore(&ch->lock, flags);
1963 } else {
1964 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1965 if (scmnd && scmnd->host_scribble) {
1966 req = (void *)scmnd->host_scribble;
1967 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1968 } else {
1969 scmnd = NULL;
1970 }
1971 if (!scmnd) {
1972 shost_printk(KERN_ERR, target->scsi_host,
1973 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1974 rsp->tag, ch - target->ch, ch->qp->qp_num);
1975
1976 spin_lock_irqsave(&ch->lock, flags);
1977 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1978 spin_unlock_irqrestore(&ch->lock, flags);
1979
1980 return;
1981 }
1982 scmnd->result = rsp->status;
1983
1984 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1985 memcpy(scmnd->sense_buffer, rsp->data +
1986 be32_to_cpu(rsp->resp_data_len),
1987 min_t(int, be32_to_cpu(rsp->sense_data_len),
1988 SCSI_SENSE_BUFFERSIZE));
1989 }
1990
1991 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1992 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1993 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1994 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1995 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1996 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1997 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1998 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1999
2000 srp_free_req(ch, req, scmnd,
2001 be32_to_cpu(rsp->req_lim_delta));
2002
2003 scmnd->host_scribble = NULL;
2004 scmnd->scsi_done(scmnd);
2005 }
2006 }
2007
2008 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
2009 void *rsp, int len)
2010 {
2011 struct srp_target_port *target = ch->target;
2012 struct ib_device *dev = target->srp_host->srp_dev->dev;
2013 unsigned long flags;
2014 struct srp_iu *iu;
2015 int err;
2016
2017 spin_lock_irqsave(&ch->lock, flags);
2018 ch->req_lim += req_delta;
2019 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2020 spin_unlock_irqrestore(&ch->lock, flags);
2021
2022 if (!iu) {
2023 shost_printk(KERN_ERR, target->scsi_host, PFX
2024 "no IU available to send response\n");
2025 return 1;
2026 }
2027
2028 iu->num_sge = 1;
2029 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2030 memcpy(iu->buf, rsp, len);
2031 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2032
2033 err = srp_post_send(ch, iu, len);
2034 if (err) {
2035 shost_printk(KERN_ERR, target->scsi_host, PFX
2036 "unable to post response: %d\n", err);
2037 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2038 }
2039
2040 return err;
2041 }
2042
2043 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2044 struct srp_cred_req *req)
2045 {
2046 struct srp_cred_rsp rsp = {
2047 .opcode = SRP_CRED_RSP,
2048 .tag = req->tag,
2049 };
2050 s32 delta = be32_to_cpu(req->req_lim_delta);
2051
2052 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2053 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2054 "problems processing SRP_CRED_REQ\n");
2055 }
2056
2057 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2058 struct srp_aer_req *req)
2059 {
2060 struct srp_target_port *target = ch->target;
2061 struct srp_aer_rsp rsp = {
2062 .opcode = SRP_AER_RSP,
2063 .tag = req->tag,
2064 };
2065 s32 delta = be32_to_cpu(req->req_lim_delta);
2066
2067 shost_printk(KERN_ERR, target->scsi_host, PFX
2068 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2069
2070 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2071 shost_printk(KERN_ERR, target->scsi_host, PFX
2072 "problems processing SRP_AER_REQ\n");
2073 }
2074
2075 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2076 {
2077 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2078 struct srp_rdma_ch *ch = cq->cq_context;
2079 struct srp_target_port *target = ch->target;
2080 struct ib_device *dev = target->srp_host->srp_dev->dev;
2081 int res;
2082 u8 opcode;
2083
2084 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2085 srp_handle_qp_err(cq, wc, "RECV");
2086 return;
2087 }
2088
2089 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2090 DMA_FROM_DEVICE);
2091
2092 opcode = *(u8 *) iu->buf;
2093
2094 if (0) {
2095 shost_printk(KERN_ERR, target->scsi_host,
2096 PFX "recv completion, opcode 0x%02x\n", opcode);
2097 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2098 iu->buf, wc->byte_len, true);
2099 }
2100
2101 switch (opcode) {
2102 case SRP_RSP:
2103 srp_process_rsp(ch, iu->buf);
2104 break;
2105
2106 case SRP_CRED_REQ:
2107 srp_process_cred_req(ch, iu->buf);
2108 break;
2109
2110 case SRP_AER_REQ:
2111 srp_process_aer_req(ch, iu->buf);
2112 break;
2113
2114 case SRP_T_LOGOUT:
2115 /* XXX Handle target logout */
2116 shost_printk(KERN_WARNING, target->scsi_host,
2117 PFX "Got target logout request\n");
2118 break;
2119
2120 default:
2121 shost_printk(KERN_WARNING, target->scsi_host,
2122 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2123 break;
2124 }
2125
2126 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2127 DMA_FROM_DEVICE);
2128
2129 res = srp_post_recv(ch, iu);
2130 if (res != 0)
2131 shost_printk(KERN_ERR, target->scsi_host,
2132 PFX "Recv failed with error code %d\n", res);
2133 }
2134
2135 /**
2136 * srp_tl_err_work() - handle a transport layer error
2137 * @work: Work structure embedded in an SRP target port.
2138 *
2139 * Note: This function may get invoked before the rport has been created,
2140 * hence the target->rport test.
2141 */
2142 static void srp_tl_err_work(struct work_struct *work)
2143 {
2144 struct srp_target_port *target;
2145
2146 target = container_of(work, struct srp_target_port, tl_err_work);
2147 if (target->rport)
2148 srp_start_tl_fail_timers(target->rport);
2149 }
2150
2151 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2152 const char *opname)
2153 {
2154 struct srp_rdma_ch *ch = cq->cq_context;
2155 struct srp_target_port *target = ch->target;
2156
2157 if (ch->connected && !target->qp_in_error) {
2158 shost_printk(KERN_ERR, target->scsi_host,
2159 PFX "failed %s status %s (%d) for CQE %p\n",
2160 opname, ib_wc_status_msg(wc->status), wc->status,
2161 wc->wr_cqe);
2162 queue_work(system_long_wq, &target->tl_err_work);
2163 }
2164 target->qp_in_error = true;
2165 }
2166
2167 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2168 {
2169 struct srp_target_port *target = host_to_target(shost);
2170 struct srp_rdma_ch *ch;
2171 struct srp_request *req;
2172 struct srp_iu *iu;
2173 struct srp_cmd *cmd;
2174 struct ib_device *dev;
2175 unsigned long flags;
2176 u32 tag;
2177 u16 idx;
2178 int len, ret;
2179
2180 scmnd->result = srp_chkready(target->rport);
2181 if (unlikely(scmnd->result))
2182 goto err;
2183
2184 WARN_ON_ONCE(scmnd->request->tag < 0);
2185 tag = blk_mq_unique_tag(scmnd->request);
2186 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2187 idx = blk_mq_unique_tag_to_tag(tag);
2188 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2189 dev_name(&shost->shost_gendev), tag, idx,
2190 target->req_ring_size);
2191
2192 spin_lock_irqsave(&ch->lock, flags);
2193 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2194 spin_unlock_irqrestore(&ch->lock, flags);
2195
2196 if (!iu)
2197 goto err;
2198
2199 req = &ch->req_ring[idx];
2200 dev = target->srp_host->srp_dev->dev;
2201 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2202 DMA_TO_DEVICE);
2203
2204 scmnd->host_scribble = (void *) req;
2205
2206 cmd = iu->buf;
2207 memset(cmd, 0, sizeof *cmd);
2208
2209 cmd->opcode = SRP_CMD;
2210 int_to_scsilun(scmnd->device->lun, &cmd->lun);
2211 cmd->tag = tag;
2212 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2213 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2214 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2215 4);
2216 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2217 goto err_iu;
2218 }
2219
2220 req->scmnd = scmnd;
2221 req->cmd = iu;
2222
2223 len = srp_map_data(scmnd, ch, req);
2224 if (len < 0) {
2225 shost_printk(KERN_ERR, target->scsi_host,
2226 PFX "Failed to map data (%d)\n", len);
2227 /*
2228 * If we ran out of memory descriptors (-ENOMEM) because an
2229 * application is queuing many requests with more than
2230 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2231 * to reduce queue depth temporarily.
2232 */
2233 scmnd->result = len == -ENOMEM ?
2234 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2235 goto err_iu;
2236 }
2237
2238 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2239 DMA_TO_DEVICE);
2240
2241 if (srp_post_send(ch, iu, len)) {
2242 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2243 scmnd->result = DID_ERROR << 16;
2244 goto err_unmap;
2245 }
2246
2247 return 0;
2248
2249 err_unmap:
2250 srp_unmap_data(scmnd, ch, req);
2251
2252 err_iu:
2253 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2254
2255 /*
2256 	 * Prevent the loops that iterate over the request ring from
2257 	 * encountering a dangling SCSI command pointer.
2258 */
2259 req->scmnd = NULL;
2260
2261 err:
2262 if (scmnd->result) {
2263 scmnd->scsi_done(scmnd);
2264 ret = 0;
2265 } else {
2266 ret = SCSI_MLQUEUE_HOST_BUSY;
2267 }
2268
2269 return ret;
2270 }
2271
2272 /*
2273 * Note: the resources allocated in this function are freed in
2274 * srp_free_ch_ib().
2275 */
2276 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2277 {
2278 struct srp_target_port *target = ch->target;
2279 int i;
2280
2281 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2282 GFP_KERNEL);
2283 if (!ch->rx_ring)
2284 goto err_no_ring;
2285 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2286 GFP_KERNEL);
2287 if (!ch->tx_ring)
2288 goto err_no_ring;
2289
2290 for (i = 0; i < target->queue_size; ++i) {
2291 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2292 ch->max_ti_iu_len,
2293 GFP_KERNEL, DMA_FROM_DEVICE);
2294 if (!ch->rx_ring[i])
2295 goto err;
2296 }
2297
2298 for (i = 0; i < target->queue_size; ++i) {
2299 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2300 ch->max_it_iu_len,
2301 GFP_KERNEL, DMA_TO_DEVICE);
2302 if (!ch->tx_ring[i])
2303 goto err;
2304
2305 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2306 }
2307
2308 return 0;
2309
2310 err:
2311 for (i = 0; i < target->queue_size; ++i) {
2312 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2313 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2314 }
2315
2316
2317 err_no_ring:
2318 kfree(ch->tx_ring);
2319 ch->tx_ring = NULL;
2320 kfree(ch->rx_ring);
2321 ch->rx_ring = NULL;
2322
2323 return -ENOMEM;
2324 }
2325
2326 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2327 {
2328 uint64_t T_tr_ns, max_compl_time_ms;
2329 uint32_t rq_tmo_jiffies;
2330
2331 /*
2332 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2333 * table 91), both the QP timeout and the retry count have to be set
2334 * for RC QP's during the RTR to RTS transition.
2335 */
2336 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2337 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2338
2339 /*
2340 * Set target->rq_tmo_jiffies to one second more than the largest time
2341 * it can take before an error completion is generated. See also
2342 * C9-140..142 in the IBTA spec for more information about how to
2343 * convert the QP Local ACK Timeout value to nanoseconds.
2344 */
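	/*
	 * Worked example (illustrative values only): with qp_attr->timeout = 19
	 * and qp_attr->retry_cnt = 7, T_tr_ns = 4096 * 2^19 ns ~= 2.15 s, so
	 * max_compl_time_ms ~= 7 * 4 * 2150 ms ~= 60200 ms and rq_tmo_jiffies
	 * corresponds to roughly 61 seconds.
	 */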
2345 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2346 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2347 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2348 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2349
2350 return rq_tmo_jiffies;
2351 }
2352
2353 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2354 const struct srp_login_rsp *lrsp,
2355 struct srp_rdma_ch *ch)
2356 {
2357 struct srp_target_port *target = ch->target;
2358 struct ib_qp_attr *qp_attr = NULL;
2359 int attr_mask = 0;
2360 int ret = 0;
2361 int i;
2362
2363 if (lrsp->opcode == SRP_LOGIN_RSP) {
2364 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2365 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2366 ch->use_imm_data = srp_use_imm_data &&
2367 (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2368 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2369 ch->use_imm_data,
2370 target->max_it_iu_size);
2371 WARN_ON_ONCE(ch->max_it_iu_len >
2372 be32_to_cpu(lrsp->max_it_iu_len));
2373
2374 if (ch->use_imm_data)
2375 shost_printk(KERN_DEBUG, target->scsi_host,
2376 PFX "using immediate data\n");
2377
2378 /*
2379 * Reserve credits for task management so we don't
2380 * bounce requests back to the SCSI mid-layer.
2381 */
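		/*
		 * E.g. if the login response grants req_lim = 64 request
		 * credits, can_queue is capped at 64 - SRP_TSK_MGMT_SQ_SIZE so
		 * that a task management IU can always still be sent.
		 */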
2382 target->scsi_host->can_queue
2383 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2384 target->scsi_host->can_queue);
2385 target->scsi_host->cmd_per_lun
2386 = min_t(int, target->scsi_host->can_queue,
2387 target->scsi_host->cmd_per_lun);
2388 } else {
2389 shost_printk(KERN_WARNING, target->scsi_host,
2390 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2391 ret = -ECONNRESET;
2392 goto error;
2393 }
2394
2395 if (!ch->rx_ring) {
2396 ret = srp_alloc_iu_bufs(ch);
2397 if (ret)
2398 goto error;
2399 }
2400
2401 for (i = 0; i < target->queue_size; i++) {
2402 struct srp_iu *iu = ch->rx_ring[i];
2403
2404 ret = srp_post_recv(ch, iu);
2405 if (ret)
2406 goto error;
2407 }
2408
2409 if (!target->using_rdma_cm) {
2410 ret = -ENOMEM;
2411 qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2412 if (!qp_attr)
2413 goto error;
2414
2415 qp_attr->qp_state = IB_QPS_RTR;
2416 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2417 if (ret)
2418 goto error_free;
2419
2420 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2421 if (ret)
2422 goto error_free;
2423
2424 qp_attr->qp_state = IB_QPS_RTS;
2425 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2426 if (ret)
2427 goto error_free;
2428
2429 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2430
2431 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2432 if (ret)
2433 goto error_free;
2434
2435 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2436 }
2437
2438 error_free:
2439 kfree(qp_attr);
2440
2441 error:
2442 ch->status = ret;
2443 }
2444
2445 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2446 const struct ib_cm_event *event,
2447 struct srp_rdma_ch *ch)
2448 {
2449 struct srp_target_port *target = ch->target;
2450 struct Scsi_Host *shost = target->scsi_host;
2451 struct ib_class_port_info *cpi;
2452 int opcode;
2453 u16 dlid;
2454
2455 switch (event->param.rej_rcvd.reason) {
2456 case IB_CM_REJ_PORT_CM_REDIRECT:
2457 cpi = event->param.rej_rcvd.ari;
2458 dlid = be16_to_cpu(cpi->redirect_lid);
2459 sa_path_set_dlid(&ch->ib_cm.path, dlid);
2460 ch->ib_cm.path.pkey = cpi->redirect_pkey;
2461 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2462 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2463
2464 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2465 break;
2466
2467 case IB_CM_REJ_PORT_REDIRECT:
2468 if (srp_target_is_topspin(target)) {
2469 union ib_gid *dgid = &ch->ib_cm.path.dgid;
2470
2471 /*
2472 * Topspin/Cisco SRP gateways incorrectly send
2473 * reject reason code 25 when they mean 24
2474 * (port redirect).
2475 */
2476 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2477
2478 shost_printk(KERN_DEBUG, shost,
2479 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2480 be64_to_cpu(dgid->global.subnet_prefix),
2481 be64_to_cpu(dgid->global.interface_id));
2482
2483 ch->status = SRP_PORT_REDIRECT;
2484 } else {
2485 shost_printk(KERN_WARNING, shost,
2486 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2487 ch->status = -ECONNRESET;
2488 }
2489 break;
2490
2491 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2492 shost_printk(KERN_WARNING, shost,
2493 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2494 ch->status = -ECONNRESET;
2495 break;
2496
2497 case IB_CM_REJ_CONSUMER_DEFINED:
2498 opcode = *(u8 *) event->private_data;
2499 if (opcode == SRP_LOGIN_REJ) {
2500 struct srp_login_rej *rej = event->private_data;
2501 u32 reason = be32_to_cpu(rej->reason);
2502
2503 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2504 shost_printk(KERN_WARNING, shost,
2505 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2506 else
2507 shost_printk(KERN_WARNING, shost, PFX
2508 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2509 target->sgid.raw,
2510 target->ib_cm.orig_dgid.raw,
2511 reason);
2512 } else
2513 shost_printk(KERN_WARNING, shost,
2514 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2515 " opcode 0x%02x\n", opcode);
2516 ch->status = -ECONNRESET;
2517 break;
2518
2519 case IB_CM_REJ_STALE_CONN:
2520 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2521 ch->status = SRP_STALE_CONN;
2522 break;
2523
2524 default:
2525 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2526 event->param.rej_rcvd.reason);
2527 ch->status = -ECONNRESET;
2528 }
2529 }
2530
2531 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2532 const struct ib_cm_event *event)
2533 {
2534 struct srp_rdma_ch *ch = cm_id->context;
2535 struct srp_target_port *target = ch->target;
2536 int comp = 0;
2537
2538 switch (event->event) {
2539 case IB_CM_REQ_ERROR:
2540 shost_printk(KERN_DEBUG, target->scsi_host,
2541 PFX "Sending CM REQ failed\n");
2542 comp = 1;
2543 ch->status = -ECONNRESET;
2544 break;
2545
2546 case IB_CM_REP_RECEIVED:
2547 comp = 1;
2548 srp_cm_rep_handler(cm_id, event->private_data, ch);
2549 break;
2550
2551 case IB_CM_REJ_RECEIVED:
2552 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2553 comp = 1;
2554
2555 srp_ib_cm_rej_handler(cm_id, event, ch);
2556 break;
2557
2558 case IB_CM_DREQ_RECEIVED:
2559 shost_printk(KERN_WARNING, target->scsi_host,
2560 PFX "DREQ received - connection closed\n");
2561 ch->connected = false;
2562 if (ib_send_cm_drep(cm_id, NULL, 0))
2563 shost_printk(KERN_ERR, target->scsi_host,
2564 PFX "Sending CM DREP failed\n");
2565 queue_work(system_long_wq, &target->tl_err_work);
2566 break;
2567
2568 case IB_CM_TIMEWAIT_EXIT:
2569 shost_printk(KERN_ERR, target->scsi_host,
2570 PFX "connection closed\n");
2571 comp = 1;
2572
2573 ch->status = 0;
2574 break;
2575
2576 case IB_CM_MRA_RECEIVED:
2577 case IB_CM_DREQ_ERROR:
2578 case IB_CM_DREP_RECEIVED:
2579 break;
2580
2581 default:
2582 shost_printk(KERN_WARNING, target->scsi_host,
2583 PFX "Unhandled CM event %d\n", event->event);
2584 break;
2585 }
2586
2587 if (comp)
2588 complete(&ch->done);
2589
2590 return 0;
2591 }
2592
2593 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2594 struct rdma_cm_event *event)
2595 {
2596 struct srp_target_port *target = ch->target;
2597 struct Scsi_Host *shost = target->scsi_host;
2598 int opcode;
2599
2600 switch (event->status) {
2601 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2602 shost_printk(KERN_WARNING, shost,
2603 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2604 ch->status = -ECONNRESET;
2605 break;
2606
2607 case IB_CM_REJ_CONSUMER_DEFINED:
2608 opcode = *(u8 *) event->param.conn.private_data;
2609 if (opcode == SRP_LOGIN_REJ) {
2610 struct srp_login_rej *rej =
2611 (struct srp_login_rej *)
2612 event->param.conn.private_data;
2613 u32 reason = be32_to_cpu(rej->reason);
2614
2615 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2616 shost_printk(KERN_WARNING, shost,
2617 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2618 else
2619 shost_printk(KERN_WARNING, shost,
2620 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2621 } else {
2622 shost_printk(KERN_WARNING, shost,
2623 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2624 opcode);
2625 }
2626 ch->status = -ECONNRESET;
2627 break;
2628
2629 case IB_CM_REJ_STALE_CONN:
2630 shost_printk(KERN_WARNING, shost,
2631 " REJ reason: stale connection\n");
2632 ch->status = SRP_STALE_CONN;
2633 break;
2634
2635 default:
2636 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2637 event->status);
2638 ch->status = -ECONNRESET;
2639 break;
2640 }
2641 }
2642
2643 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2644 struct rdma_cm_event *event)
2645 {
2646 struct srp_rdma_ch *ch = cm_id->context;
2647 struct srp_target_port *target = ch->target;
2648 int comp = 0;
2649
2650 switch (event->event) {
2651 case RDMA_CM_EVENT_ADDR_RESOLVED:
2652 ch->status = 0;
2653 comp = 1;
2654 break;
2655
2656 case RDMA_CM_EVENT_ADDR_ERROR:
2657 ch->status = -ENXIO;
2658 comp = 1;
2659 break;
2660
2661 case RDMA_CM_EVENT_ROUTE_RESOLVED:
2662 ch->status = 0;
2663 comp = 1;
2664 break;
2665
2666 case RDMA_CM_EVENT_ROUTE_ERROR:
2667 case RDMA_CM_EVENT_UNREACHABLE:
2668 ch->status = -EHOSTUNREACH;
2669 comp = 1;
2670 break;
2671
2672 case RDMA_CM_EVENT_CONNECT_ERROR:
2673 shost_printk(KERN_DEBUG, target->scsi_host,
2674 PFX "Sending CM REQ failed\n");
2675 comp = 1;
2676 ch->status = -ECONNRESET;
2677 break;
2678
2679 case RDMA_CM_EVENT_ESTABLISHED:
2680 comp = 1;
2681 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2682 break;
2683
2684 case RDMA_CM_EVENT_REJECTED:
2685 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2686 comp = 1;
2687
2688 srp_rdma_cm_rej_handler(ch, event);
2689 break;
2690
2691 case RDMA_CM_EVENT_DISCONNECTED:
2692 if (ch->connected) {
2693 shost_printk(KERN_WARNING, target->scsi_host,
2694 PFX "received DREQ\n");
2695 rdma_disconnect(ch->rdma_cm.cm_id);
2696 comp = 1;
2697 ch->status = 0;
2698 queue_work(system_long_wq, &target->tl_err_work);
2699 }
2700 break;
2701
2702 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2703 shost_printk(KERN_ERR, target->scsi_host,
2704 PFX "connection closed\n");
2705
2706 comp = 1;
2707 ch->status = 0;
2708 break;
2709
2710 default:
2711 shost_printk(KERN_WARNING, target->scsi_host,
2712 PFX "Unhandled CM event %d\n", event->event);
2713 break;
2714 }
2715
2716 if (comp)
2717 complete(&ch->done);
2718
2719 return 0;
2720 }
2721
2722 /**
2723  * srp_change_queue_depth - set the device queue depth
2724 * @sdev: scsi device struct
2725 * @qdepth: requested queue depth
2726 *
2727 * Returns queue depth.
2728 */
2729 static int
2730 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2731 {
2732 if (!sdev->tagged_supported)
2733 qdepth = 1;
2734 return scsi_change_queue_depth(sdev, qdepth);
2735 }
2736
2737 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2738 u8 func, u8 *status)
2739 {
2740 struct srp_target_port *target = ch->target;
2741 struct srp_rport *rport = target->rport;
2742 struct ib_device *dev = target->srp_host->srp_dev->dev;
2743 struct srp_iu *iu;
2744 struct srp_tsk_mgmt *tsk_mgmt;
2745 int res;
2746
2747 if (!ch->connected || target->qp_in_error)
2748 return -1;
2749
2750 /*
2751  * Lock the rport mutex to prevent srp_create_ch_ib() from being
2752  * invoked while a task management function is being sent.
2753 */
2754 mutex_lock(&rport->mutex);
2755 spin_lock_irq(&ch->lock);
2756 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2757 spin_unlock_irq(&ch->lock);
2758
2759 if (!iu) {
2760 mutex_unlock(&rport->mutex);
2761
2762 return -1;
2763 }
2764
2765 iu->num_sge = 1;
2766
2767 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2768 DMA_TO_DEVICE);
2769 tsk_mgmt = iu->buf;
2770 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2771
2772 tsk_mgmt->opcode = SRP_TSK_MGMT;
2773 int_to_scsilun(lun, &tsk_mgmt->lun);
2774 tsk_mgmt->tsk_mgmt_func = func;
2775 tsk_mgmt->task_tag = req_tag;
2776
2777 spin_lock_irq(&ch->lock);
2778 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2779 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2780 spin_unlock_irq(&ch->lock);
2781
2782 init_completion(&ch->tsk_mgmt_done);
2783
2784 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2785 DMA_TO_DEVICE);
2786 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2787 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2788 mutex_unlock(&rport->mutex);
2789
2790 return -1;
2791 }
2792 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2793 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2794 if (res > 0 && status)
2795 *status = ch->tsk_mgmt_status;
2796 mutex_unlock(&rport->mutex);
2797
2798 WARN_ON_ONCE(res < 0);
2799
2800 return res > 0 ? 0 : -1;
2801 }
2802
2803 static int srp_abort(struct scsi_cmnd *scmnd)
2804 {
2805 struct srp_target_port *target = host_to_target(scmnd->device->host);
2806 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2807 u32 tag;
2808 u16 ch_idx;
2809 struct srp_rdma_ch *ch;
2810 int ret;
2811
2812 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2813
2814 if (!req)
2815 return SUCCESS;
2816 tag = blk_mq_unique_tag(scmnd->request);
2817 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2818 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2819 return SUCCESS;
2820 ch = &target->ch[ch_idx];
2821 if (!srp_claim_req(ch, req, NULL, scmnd))
2822 return SUCCESS;
2823 shost_printk(KERN_ERR, target->scsi_host,
2824 "Sending SRP abort for tag %#x\n", tag);
2825 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2826 SRP_TSK_ABORT_TASK, NULL) == 0)
2827 ret = SUCCESS;
2828 else if (target->rport->state == SRP_RPORT_LOST)
2829 ret = FAST_IO_FAIL;
2830 else
2831 ret = FAILED;
2832 if (ret == SUCCESS) {
2833 srp_free_req(ch, req, scmnd, 0);
2834 scmnd->result = DID_ABORT << 16;
2835 scmnd->scsi_done(scmnd);
2836 }
2837
2838 return ret;
2839 }
2840
2841 static int srp_reset_device(struct scsi_cmnd *scmnd)
2842 {
2843 struct srp_target_port *target = host_to_target(scmnd->device->host);
2844 struct srp_rdma_ch *ch;
2845 u8 status;
2846
2847 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2848
2849 ch = &target->ch[0];
2850 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2851 SRP_TSK_LUN_RESET, &status))
2852 return FAILED;
2853 if (status)
2854 return FAILED;
2855
2856 return SUCCESS;
2857 }
2858
2859 static int srp_reset_host(struct scsi_cmnd *scmnd)
2860 {
2861 struct srp_target_port *target = host_to_target(scmnd->device->host);
2862
2863 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2864
2865 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2866 }
2867
2868 static int srp_target_alloc(struct scsi_target *starget)
2869 {
2870 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2871 struct srp_target_port *target = host_to_target(shost);
2872
2873 if (target->target_can_queue)
2874 starget->can_queue = target->target_can_queue;
2875 return 0;
2876 }
2877
2878 static int srp_slave_configure(struct scsi_device *sdev)
2879 {
2880 struct Scsi_Host *shost = sdev->host;
2881 struct srp_target_port *target = host_to_target(shost);
2882 struct request_queue *q = sdev->request_queue;
2883 unsigned long timeout;
2884
2885 if (sdev->type == TYPE_DISK) {
2886 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2887 blk_queue_rq_timeout(q, timeout);
2888 }
2889
2890 return 0;
2891 }
2892
2893 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2894 char *buf)
2895 {
2896 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2897
2898 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2899 }
2900
2901 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2902 char *buf)
2903 {
2904 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2905
2906 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2907 }
2908
2909 static ssize_t show_service_id(struct device *dev,
2910 struct device_attribute *attr, char *buf)
2911 {
2912 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2913
2914 if (target->using_rdma_cm)
2915 return -ENOENT;
2916 return sprintf(buf, "0x%016llx\n",
2917 be64_to_cpu(target->ib_cm.service_id));
2918 }
2919
2920 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2921 char *buf)
2922 {
2923 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2924
2925 if (target->using_rdma_cm)
2926 return -ENOENT;
2927 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2928 }
2929
2930 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2931 char *buf)
2932 {
2933 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2934
2935 return sprintf(buf, "%pI6\n", target->sgid.raw);
2936 }
2937
2938 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2939 char *buf)
2940 {
2941 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2942 struct srp_rdma_ch *ch = &target->ch[0];
2943
2944 if (target->using_rdma_cm)
2945 return -ENOENT;
2946 return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2947 }
2948
2949 static ssize_t show_orig_dgid(struct device *dev,
2950 struct device_attribute *attr, char *buf)
2951 {
2952 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2953
2954 if (target->using_rdma_cm)
2955 return -ENOENT;
2956 return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2957 }
2958
2959 static ssize_t show_req_lim(struct device *dev,
2960 struct device_attribute *attr, char *buf)
2961 {
2962 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2963 struct srp_rdma_ch *ch;
2964 int i, req_lim = INT_MAX;
2965
2966 for (i = 0; i < target->ch_count; i++) {
2967 ch = &target->ch[i];
2968 req_lim = min(req_lim, ch->req_lim);
2969 }
2970 return sprintf(buf, "%d\n", req_lim);
2971 }
2972
2973 static ssize_t show_zero_req_lim(struct device *dev,
2974 struct device_attribute *attr, char *buf)
2975 {
2976 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2977
2978 return sprintf(buf, "%d\n", target->zero_req_lim);
2979 }
2980
2981 static ssize_t show_local_ib_port(struct device *dev,
2982 struct device_attribute *attr, char *buf)
2983 {
2984 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2985
2986 return sprintf(buf, "%d\n", target->srp_host->port);
2987 }
2988
2989 static ssize_t show_local_ib_device(struct device *dev,
2990 struct device_attribute *attr, char *buf)
2991 {
2992 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2993
2994 return sprintf(buf, "%s\n",
2995 dev_name(&target->srp_host->srp_dev->dev->dev));
2996 }
2997
2998 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2999 char *buf)
3000 {
3001 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3002
3003 return sprintf(buf, "%d\n", target->ch_count);
3004 }
3005
3006 static ssize_t show_comp_vector(struct device *dev,
3007 struct device_attribute *attr, char *buf)
3008 {
3009 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3010
3011 return sprintf(buf, "%d\n", target->comp_vector);
3012 }
3013
3014 static ssize_t show_tl_retry_count(struct device *dev,
3015 struct device_attribute *attr, char *buf)
3016 {
3017 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3018
3019 return sprintf(buf, "%d\n", target->tl_retry_count);
3020 }
3021
3022 static ssize_t show_cmd_sg_entries(struct device *dev,
3023 struct device_attribute *attr, char *buf)
3024 {
3025 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3026
3027 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
3028 }
3029
3030 static ssize_t show_allow_ext_sg(struct device *dev,
3031 struct device_attribute *attr, char *buf)
3032 {
3033 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3034
3035 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3036 }
3037
3038 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
3039 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
3040 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
3041 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
3042 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
3043 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
3044 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
3045 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
3046 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
3047 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
3048 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
3049 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
3050 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
3051 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
3052 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
3053 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
3054
3055 static struct device_attribute *srp_host_attrs[] = {
3056 &dev_attr_id_ext,
3057 &dev_attr_ioc_guid,
3058 &dev_attr_service_id,
3059 &dev_attr_pkey,
3060 &dev_attr_sgid,
3061 &dev_attr_dgid,
3062 &dev_attr_orig_dgid,
3063 &dev_attr_req_lim,
3064 &dev_attr_zero_req_lim,
3065 &dev_attr_local_ib_port,
3066 &dev_attr_local_ib_device,
3067 &dev_attr_ch_count,
3068 &dev_attr_comp_vector,
3069 &dev_attr_tl_retry_count,
3070 &dev_attr_cmd_sg_entries,
3071 &dev_attr_allow_ext_sg,
3072 NULL
3073 };
3074
3075 static struct scsi_host_template srp_template = {
3076 .module = THIS_MODULE,
3077 .name = "InfiniBand SRP initiator",
3078 .proc_name = DRV_NAME,
3079 .target_alloc = srp_target_alloc,
3080 .slave_configure = srp_slave_configure,
3081 .info = srp_target_info,
3082 .queuecommand = srp_queuecommand,
3083 .change_queue_depth = srp_change_queue_depth,
3084 .eh_timed_out = srp_timed_out,
3085 .eh_abort_handler = srp_abort,
3086 .eh_device_reset_handler = srp_reset_device,
3087 .eh_host_reset_handler = srp_reset_host,
3088 .skip_settle_delay = true,
3089 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
3090 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
3091 .this_id = -1,
3092 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
3093 .shost_attrs = srp_host_attrs,
3094 .track_queue_depth = 1,
3095 };
3096
3097 static int srp_sdev_count(struct Scsi_Host *host)
3098 {
3099 struct scsi_device *sdev;
3100 int c = 0;
3101
3102 shost_for_each_device(sdev, host)
3103 c++;
3104
3105 return c;
3106 }
3107
3108 /*
3109 * Return values:
3110 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3111 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3112 * removal has been scheduled.
3113 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3114 */
3115 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3116 {
3117 struct srp_rport_identifiers ids;
3118 struct srp_rport *rport;
3119
3120 target->state = SRP_TARGET_SCANNING;
3121 sprintf(target->target_name, "SRP.T10:%016llX",
3122 be64_to_cpu(target->id_ext));
3123
3124 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3125 return -ENODEV;
3126
3127 memcpy(ids.port_id, &target->id_ext, 8);
3128 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3129 ids.roles = SRP_RPORT_ROLE_TARGET;
3130 rport = srp_rport_add(target->scsi_host, &ids);
3131 if (IS_ERR(rport)) {
3132 scsi_remove_host(target->scsi_host);
3133 return PTR_ERR(rport);
3134 }
3135
3136 rport->lld_data = target;
3137 target->rport = rport;
3138
3139 spin_lock(&host->target_lock);
3140 list_add_tail(&target->list, &host->target_list);
3141 spin_unlock(&host->target_lock);
3142
3143 scsi_scan_target(&target->scsi_host->shost_gendev,
3144 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3145
3146 if (srp_connected_ch(target) < target->ch_count ||
3147 target->qp_in_error) {
3148 shost_printk(KERN_INFO, target->scsi_host,
3149 PFX "SCSI scan failed - removing SCSI host\n");
3150 srp_queue_remove_work(target);
3151 goto out;
3152 }
3153
3154 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3155 dev_name(&target->scsi_host->shost_gendev),
3156 srp_sdev_count(target->scsi_host));
3157
3158 spin_lock_irq(&target->lock);
3159 if (target->state == SRP_TARGET_SCANNING)
3160 target->state = SRP_TARGET_LIVE;
3161 spin_unlock_irq(&target->lock);
3162
3163 out:
3164 return 0;
3165 }
3166
3167 static void srp_release_dev(struct device *dev)
3168 {
3169 struct srp_host *host =
3170 container_of(dev, struct srp_host, dev);
3171
3172 complete(&host->released);
3173 }
3174
3175 static struct class srp_class = {
3176 .name = "infiniband_srp",
3177 .dev_release = srp_release_dev
3178 };
3179
3180 /**
3181 * srp_conn_unique() - check whether the connection to a target is unique
3182 * @host: SRP host.
3183 * @target: SRP target port.
3184 */
3185 static bool srp_conn_unique(struct srp_host *host,
3186 struct srp_target_port *target)
3187 {
3188 struct srp_target_port *t;
3189 bool ret = false;
3190
3191 if (target->state == SRP_TARGET_REMOVED)
3192 goto out;
3193
3194 ret = true;
3195
3196 spin_lock(&host->target_lock);
3197 list_for_each_entry(t, &host->target_list, list) {
3198 if (t != target &&
3199 target->id_ext == t->id_ext &&
3200 target->ioc_guid == t->ioc_guid &&
3201 target->initiator_ext == t->initiator_ext) {
3202 ret = false;
3203 break;
3204 }
3205 }
3206 spin_unlock(&host->target_lock);
3207
3208 out:
3209 return ret;
3210 }
3211
3212 /*
3213 * Target ports are added by writing
3214 *
3215 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3216 * pkey=<P_Key>,service_id=<service ID>
3217 * or
3218 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3219 * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3220 *
3221 * to the add_target sysfs attribute.
3222 */
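/*
 * For example (illustrative identifier values; the sysfs directory name
 * depends on the local HCA name and port number), a string such as
 *
 *   id_ext=0x200100e08b0a0b0c,ioc_guid=0x0002c90300a0b0c0,
 *   dgid=fe800000000000000002c90300a0b0c1,pkey=ffff,
 *   service_id=0x0002c90300a0b0c0
 *
 * written on a single line to a file such as
 * /sys/class/infiniband_srp/srp-mlx5_0-1/add_target creates an IB CM based
 * connection, while the dest=<addr>:<port> form creates an RDMA CM based one.
 */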
3223 enum {
3224 SRP_OPT_ERR = 0,
3225 SRP_OPT_ID_EXT = 1 << 0,
3226 SRP_OPT_IOC_GUID = 1 << 1,
3227 SRP_OPT_DGID = 1 << 2,
3228 SRP_OPT_PKEY = 1 << 3,
3229 SRP_OPT_SERVICE_ID = 1 << 4,
3230 SRP_OPT_MAX_SECT = 1 << 5,
3231 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
3232 SRP_OPT_IO_CLASS = 1 << 7,
3233 SRP_OPT_INITIATOR_EXT = 1 << 8,
3234 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
3235 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3236 SRP_OPT_SG_TABLESIZE = 1 << 11,
3237 SRP_OPT_COMP_VECTOR = 1 << 12,
3238 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
3239 SRP_OPT_QUEUE_SIZE = 1 << 14,
3240 SRP_OPT_IP_SRC = 1 << 15,
3241 SRP_OPT_IP_DEST = 1 << 16,
3242 SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
3243 SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
3244 SRP_OPT_CH_COUNT = 1 << 19,
3245 };
3246
3247 static unsigned int srp_opt_mandatory[] = {
3248 SRP_OPT_ID_EXT |
3249 SRP_OPT_IOC_GUID |
3250 SRP_OPT_DGID |
3251 SRP_OPT_PKEY |
3252 SRP_OPT_SERVICE_ID,
3253 SRP_OPT_ID_EXT |
3254 SRP_OPT_IOC_GUID |
3255 SRP_OPT_IP_DEST,
3256 };
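/*
 * The first mandatory set above corresponds to the IB CM style target string
 * (dgid/pkey/service_id), the second one to the RDMA CM style target string
 * (dest=<IP address>:<port>) shown before the option list.
 */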
3257
3258 static const match_table_t srp_opt_tokens = {
3259 { SRP_OPT_ID_EXT, "id_ext=%s" },
3260 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3261 { SRP_OPT_DGID, "dgid=%s" },
3262 { SRP_OPT_PKEY, "pkey=%x" },
3263 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3264 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3265 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
3266 { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
3267 { SRP_OPT_IO_CLASS, "io_class=%x" },
3268 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
3269 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
3270 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3271 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
3272 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
3273 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
3274 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
3275 { SRP_OPT_IP_SRC, "src=%s" },
3276 { SRP_OPT_IP_DEST, "dest=%s" },
3277 { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
3278 { SRP_OPT_CH_COUNT, "ch_count=%u", },
3279 { SRP_OPT_ERR, NULL }
3280 };
3281
3282 /**
3283 * srp_parse_in - parse an IP address and port number combination
3284 * @net: [in] Network namespace.
3285 * @sa: [out] Address family, IP address and port number.
3286 * @addr_port_str: [in] IP address and port number.
3287 * @has_port: [out] Whether or not @addr_port_str includes a port number.
3288 *
3289 * Parse the following address formats:
3290 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3291 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3292 */
3293 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3294 const char *addr_port_str, bool *has_port)
3295 {
3296 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3297 char *port_str;
3298 int ret;
3299
3300 if (!addr)
3301 return -ENOMEM;
3302 port_str = strrchr(addr, ':');
3303 if (port_str && strchr(port_str, ']'))
3304 port_str = NULL;
3305 if (port_str)
3306 *port_str++ = '\0';
3307 if (has_port)
3308 *has_port = port_str != NULL;
3309 ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3310 if (ret && addr[0]) {
3311 addr_end = addr + strlen(addr) - 1;
3312 if (addr[0] == '[' && *addr_end == ']') {
3313 *addr_end = '\0';
3314 ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3315 port_str, sa);
3316 }
3317 }
3318 kfree(addr);
3319 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3320 return ret;
3321 }
3322
3323 static int srp_parse_options(struct net *net, const char *buf,
3324 struct srp_target_port *target)
3325 {
3326 char *options, *sep_opt;
3327 char *p;
3328 substring_t args[MAX_OPT_ARGS];
3329 unsigned long long ull;
3330 bool has_port;
3331 int opt_mask = 0;
3332 int token;
3333 int ret = -EINVAL;
3334 int i;
3335
3336 options = kstrdup(buf, GFP_KERNEL);
3337 if (!options)
3338 return -ENOMEM;
3339
3340 sep_opt = options;
3341 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3342 if (!*p)
3343 continue;
3344
3345 token = match_token(p, srp_opt_tokens, args);
3346 opt_mask |= token;
3347
3348 switch (token) {
3349 case SRP_OPT_ID_EXT:
3350 p = match_strdup(args);
3351 if (!p) {
3352 ret = -ENOMEM;
3353 goto out;
3354 }
3355 ret = kstrtoull(p, 16, &ull);
3356 if (ret) {
3357 pr_warn("invalid id_ext parameter '%s'\n", p);
3358 kfree(p);
3359 goto out;
3360 }
3361 target->id_ext = cpu_to_be64(ull);
3362 kfree(p);
3363 break;
3364
3365 case SRP_OPT_IOC_GUID:
3366 p = match_strdup(args);
3367 if (!p) {
3368 ret = -ENOMEM;
3369 goto out;
3370 }
3371 ret = kstrtoull(p, 16, &ull);
3372 if (ret) {
3373 pr_warn("invalid ioc_guid parameter '%s'\n", p);
3374 kfree(p);
3375 goto out;
3376 }
3377 target->ioc_guid = cpu_to_be64(ull);
3378 kfree(p);
3379 break;
3380
3381 case SRP_OPT_DGID:
3382 p = match_strdup(args);
3383 if (!p) {
3384 ret = -ENOMEM;
3385 goto out;
3386 }
3387 if (strlen(p) != 32) {
3388 pr_warn("bad dest GID parameter '%s'\n", p);
3389 kfree(p);
3390 goto out;
3391 }
3392
3393 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3394 kfree(p);
3395 if (ret < 0)
3396 goto out;
3397 break;
3398
3399 case SRP_OPT_PKEY:
3400 if (match_hex(args, &token)) {
3401 pr_warn("bad P_Key parameter '%s'\n", p);
3402 goto out;
3403 }
3404 target->ib_cm.pkey = cpu_to_be16(token);
3405 break;
3406
3407 case SRP_OPT_SERVICE_ID:
3408 p = match_strdup(args);
3409 if (!p) {
3410 ret = -ENOMEM;
3411 goto out;
3412 }
3413 ret = kstrtoull(p, 16, &ull);
3414 if (ret) {
3415 pr_warn("bad service_id parameter '%s'\n", p);
3416 kfree(p);
3417 goto out;
3418 }
3419 target->ib_cm.service_id = cpu_to_be64(ull);
3420 kfree(p);
3421 break;
3422
3423 case SRP_OPT_IP_SRC:
3424 p = match_strdup(args);
3425 if (!p) {
3426 ret = -ENOMEM;
3427 goto out;
3428 }
3429 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3430 NULL);
3431 if (ret < 0) {
3432 pr_warn("bad source parameter '%s'\n", p);
3433 kfree(p);
3434 goto out;
3435 }
3436 target->rdma_cm.src_specified = true;
3437 kfree(p);
3438 break;
3439
3440 case SRP_OPT_IP_DEST:
3441 p = match_strdup(args);
3442 if (!p) {
3443 ret = -ENOMEM;
3444 goto out;
3445 }
3446 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3447 &has_port);
3448 if (!has_port)
3449 ret = -EINVAL;
3450 if (ret < 0) {
3451 pr_warn("bad dest parameter '%s'\n", p);
3452 kfree(p);
3453 goto out;
3454 }
3455 target->using_rdma_cm = true;
3456 kfree(p);
3457 break;
3458
3459 case SRP_OPT_MAX_SECT:
3460 if (match_int(args, &token)) {
3461 pr_warn("bad max sect parameter '%s'\n", p);
3462 goto out;
3463 }
3464 target->scsi_host->max_sectors = token;
3465 break;
3466
3467 case SRP_OPT_QUEUE_SIZE:
3468 if (match_int(args, &token) || token < 1) {
3469 pr_warn("bad queue_size parameter '%s'\n", p);
3470 goto out;
3471 }
3472 target->scsi_host->can_queue = token;
3473 target->queue_size = token + SRP_RSP_SQ_SIZE +
3474 SRP_TSK_MGMT_SQ_SIZE;
3475 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3476 target->scsi_host->cmd_per_lun = token;
3477 break;
3478
3479 case SRP_OPT_MAX_CMD_PER_LUN:
3480 if (match_int(args, &token) || token < 1) {
3481 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3482 p);
3483 goto out;
3484 }
3485 target->scsi_host->cmd_per_lun = token;
3486 break;
3487
3488 case SRP_OPT_TARGET_CAN_QUEUE:
3489 if (match_int(args, &token) || token < 1) {
3490 pr_warn("bad max target_can_queue parameter '%s'\n",
3491 p);
3492 goto out;
3493 }
3494 target->target_can_queue = token;
3495 break;
3496
3497 case SRP_OPT_IO_CLASS:
3498 if (match_hex(args, &token)) {
3499 pr_warn("bad IO class parameter '%s'\n", p);
3500 goto out;
3501 }
3502 if (token != SRP_REV10_IB_IO_CLASS &&
3503 token != SRP_REV16A_IB_IO_CLASS) {
3504 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3505 token, SRP_REV10_IB_IO_CLASS,
3506 SRP_REV16A_IB_IO_CLASS);
3507 goto out;
3508 }
3509 target->io_class = token;
3510 break;
3511
3512 case SRP_OPT_INITIATOR_EXT:
3513 p = match_strdup(args);
3514 if (!p) {
3515 ret = -ENOMEM;
3516 goto out;
3517 }
3518 ret = kstrtoull(p, 16, &ull);
3519 if (ret) {
3520 pr_warn("bad initiator_ext value '%s'\n", p);
3521 kfree(p);
3522 goto out;
3523 }
3524 target->initiator_ext = cpu_to_be64(ull);
3525 kfree(p);
3526 break;
3527
3528 case SRP_OPT_CMD_SG_ENTRIES:
3529 if (match_int(args, &token) || token < 1 || token > 255) {
3530 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3531 p);
3532 goto out;
3533 }
3534 target->cmd_sg_cnt = token;
3535 break;
3536
3537 case SRP_OPT_ALLOW_EXT_SG:
3538 if (match_int(args, &token)) {
3539 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3540 goto out;
3541 }
3542 target->allow_ext_sg = !!token;
3543 break;
3544
3545 case SRP_OPT_SG_TABLESIZE:
3546 if (match_int(args, &token) || token < 1 ||
3547 token > SG_MAX_SEGMENTS) {
3548 pr_warn("bad max sg_tablesize parameter '%s'\n",
3549 p);
3550 goto out;
3551 }
3552 target->sg_tablesize = token;
3553 break;
3554
3555 case SRP_OPT_COMP_VECTOR:
3556 if (match_int(args, &token) || token < 0) {
3557 pr_warn("bad comp_vector parameter '%s'\n", p);
3558 goto out;
3559 }
3560 target->comp_vector = token;
3561 break;
3562
3563 case SRP_OPT_TL_RETRY_COUNT:
3564 if (match_int(args, &token) || token < 2 || token > 7) {
3565 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3566 p);
3567 goto out;
3568 }
3569 target->tl_retry_count = token;
3570 break;
3571
3572 case SRP_OPT_MAX_IT_IU_SIZE:
3573 if (match_int(args, &token) || token < 0) {
3574 pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3575 goto out;
3576 }
3577 target->max_it_iu_size = token;
3578 break;
3579
3580 case SRP_OPT_CH_COUNT:
3581 if (match_int(args, &token) || token < 1) {
3582 pr_warn("bad channel count %s\n", p);
3583 goto out;
3584 }
3585 target->ch_count = token;
3586 break;
3587
3588 default:
3589 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3590 p);
3591 goto out;
3592 }
3593 }
3594
3595 for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3596 if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3597 ret = 0;
3598 break;
3599 }
3600 }
3601 if (ret)
3602 pr_warn("target creation request is missing one or more parameters\n");
3603
3604 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3605 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3606 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3607 target->scsi_host->cmd_per_lun,
3608 target->scsi_host->can_queue);
3609
3610 out:
3611 kfree(options);
3612 return ret;
3613 }
3614
3615 static ssize_t srp_create_target(struct device *dev,
3616 struct device_attribute *attr,
3617 const char *buf, size_t count)
3618 {
3619 struct srp_host *host =
3620 container_of(dev, struct srp_host, dev);
3621 struct Scsi_Host *target_host;
3622 struct srp_target_port *target;
3623 struct srp_rdma_ch *ch;
3624 struct srp_device *srp_dev = host->srp_dev;
3625 struct ib_device *ibdev = srp_dev->dev;
3626 int ret, i, ch_idx;
3627 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3628 bool multich = false;
3629 uint32_t max_iu_len;
3630
3631 target_host = scsi_host_alloc(&srp_template,
3632 sizeof (struct srp_target_port));
3633 if (!target_host)
3634 return -ENOMEM;
3635
3636 target_host->transportt = ib_srp_transport_template;
3637 target_host->max_channel = 0;
3638 target_host->max_id = 1;
3639 target_host->max_lun = -1LL;
3640 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3641 target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3642
3643 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
3644 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3645
3646 target = host_to_target(target_host);
3647
3648 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3649 target->io_class = SRP_REV16A_IB_IO_CLASS;
3650 target->scsi_host = target_host;
3651 target->srp_host = host;
3652 target->lkey = host->srp_dev->pd->local_dma_lkey;
3653 target->global_rkey = host->srp_dev->global_rkey;
3654 target->cmd_sg_cnt = cmd_sg_entries;
3655 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3656 target->allow_ext_sg = allow_ext_sg;
3657 target->tl_retry_count = 7;
3658 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3659
3660 /*
3661 * Avoid that the SCSI host can be removed by srp_remove_target()
3662 * before this function returns.
3663 */
3664 scsi_host_get(target->scsi_host);
3665
3666 ret = mutex_lock_interruptible(&host->add_target_mutex);
3667 if (ret < 0)
3668 goto put;
3669
3670 ret = srp_parse_options(target->net, buf, target);
3671 if (ret)
3672 goto out;
3673
3674 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3675
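	/*
	 * Refuse to create a second login to a target port that this
	 * initiator is already connected to.
	 */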
	if (!srp_conn_unique(target->srp_host, target)) {
		if (target->using_rdma_cm) {
			shost_printk(KERN_INFO, target->scsi_host,
				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     &target->rdma_cm.dst);
		} else {
			shost_printk(KERN_INFO, target->scsi_host,
				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     be64_to_cpu(target->initiator_ext));
		}
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

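	/*
	 * Estimate how many memory registrations a single SCSI command may
	 * need so that the MR pool can be sized accordingly below.
	 */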
	if (srp_dev->use_fast_reg) {
		bool gaps_reg = (ibdev->attrs.device_cap_flags &
				 IB_DEVICE_SG_GAPS_REG);

		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
			(ilog2(srp_dev->mr_page_size) - 9);
		if (!gaps_reg) {
			/*
			 * FR can only map one HCA page per entry. If the start
			 * address is not aligned on a HCA page boundary two
			 * entries will be used for the head and the tail
			 * although these two entries combined contain at most
			 * one HCA page of data. Hence the "+ 1" in the
			 * calculation below.
			 *
			 * The indirect data buffer descriptor is contiguous
			 * so the memory for that buffer will only be
			 * registered if register_always is true. Hence add
			 * one to mr_per_cmd if register_always has been set.
			 */
			mr_per_cmd = register_always +
				(target->scsi_host->max_sectors + 1 +
				 max_sectors_per_mr - 1) / max_sectors_per_mr;
		} else {
			mr_per_cmd = register_always +
				(target->sg_tablesize +
				 srp_dev->max_pages_per_mr - 1) /
				srp_dev->max_pages_per_mr;
		}
		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
			 max_sectors_per_mr, mr_per_cmd);
	}

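	/*
	 * Size the MR pool so that every command the SCSI host can queue can
	 * use up to mr_per_cmd registrations.
	 */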
	target_host->sg_tablesize = target->sg_tablesize;
	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
	target->mr_per_cmd = mr_per_cmd;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
				       srp_use_imm_data,
				       target->max_it_iu_size);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

	ret = -ENOMEM;
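	/*
	 * Unless a channel count was specified explicitly, scale the number
	 * of RDMA channels with the NUMA node count, the number of
	 * completion vectors and the number of online CPUs.
	 */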
	if (target->ch_count == 0) {
		target->ch_count =
			min(ch_count ?:
			    max(4 * num_online_nodes(),
				ibdev->num_comp_vectors),
			    num_online_cpus());
	}

	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

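	/*
	 * Bring up the RDMA channels one by one: create a CM ID, the IB
	 * resources and the request ring, then connect to the target port.
	 */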
	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
		ch = &target->ch[ch_idx];
		ch->target = target;
		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->free_tx);
		ret = srp_new_cm_id(ch);
		if (ret)
			goto err_disconnect;

		ret = srp_create_ch_ib(ch);
		if (ret)
			goto err_disconnect;

		ret = srp_alloc_req_data(ch);
		if (ret)
			goto err_disconnect;

		ret = srp_connect_ch(ch, max_iu_len, multich);
		if (ret) {
			char dst[64];

			if (target->using_rdma_cm)
				snprintf(dst, sizeof(dst), "%pIS",
					 &target->rdma_cm.dst);
			else
				snprintf(dst, sizeof(dst), "%pI6",
					 target->ib_cm.orig_dgid.raw);
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Connection %d/%d to %s failed\n",
				     ch_idx,
				     target->ch_count, dst);
			if (ch_idx == 0) {
				goto free_ch;
			} else {
				srp_free_ch_ib(target, ch);
				srp_free_req_data(target, ch);
				target->ch_count = ch - target->ch;
				goto connected;
			}
		}
		multich = true;
	}

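	/*
	 * At least one channel has been established. Expose one blk-mq
	 * hardware queue per channel and add the new target port to the
	 * SRP host.
	 */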
connected:
	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		if (target->using_rdma_cm) {
			shost_printk(KERN_DEBUG, target->scsi_host, PFX
				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     target->sgid.raw, &target->rdma_cm.dst);
		} else {
			shost_printk(KERN_DEBUG, target->scsi_host, PFX
				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     be16_to_cpu(target->ib_cm.pkey),
				     be64_to_cpu(target->ib_cm.service_id),
				     target->sgid.raw,
				     target->ib_cm.orig_dgid.raw);
		}
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

put:
	scsi_host_put(target->scsi_host);
	if (ret < 0) {
		/*
		 * If a call to srp_remove_target() has not been scheduled,
		 * drop the network namespace reference that was obtained
		 * earlier in this function.
		 */
		if (target->state != SRP_TARGET_REMOVED)
			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
		scsi_host_put(target->scsi_host);
	}

	return ret;

err_disconnect:
	srp_disconnect_target(target);

free_ch:
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}

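/*
 * Writing a parameter string to the add_target attribute of a port device
 * under /sys/class/infiniband_srp creates a new target port. Illustrative
 * example only - the values are placeholders and the exact parameter set
 * depends on whether the IB CM or the RDMA CM is used:
 *
 *   echo "id_ext=<id_ext>,ioc_guid=<guid>,dgid=<dgid>,pkey=ffff,service_id=<id>" > \
 *	/sys/class/infiniband_srp/srp-<hca>-<port>/add_target
 */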
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

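/*
 * Allocate and register one srp_host, including its sysfs attributes, for a
 * single (HCA, port) pair.
 */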
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dev.parent;
	dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
		     port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

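/*
 * Keep the sysfs names of the per-port devices in sync when the underlying
 * IB device is renamed.
 */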
static void srp_rename_dev(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev = client_data;
	struct srp_host *host, *tmp_host;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		char name[IB_DEVICE_NAME_MAX + 8];

		snprintf(name, sizeof(name), "srp-%s-%d",
			 dev_name(&device->dev), host->port);
		device_rename(&host->dev, name);
	}
}

static int srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *attr = &device->attrs;
	struct srp_host *host;
	int mr_page_shift;
	unsigned int p;
	u64 max_pages_per_mr;
	unsigned int flags = 0;

	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
	if (!srp_dev)
		return -ENOMEM;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
	srp_dev->mr_page_size = 1 << mr_page_shift;
	srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr = attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
		 attr->max_mr_size, srp_dev->mr_page_size,
		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);

	srp_dev->has_fr = (attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!never_register && !srp_dev->has_fr)
		dev_warn(&device->dev, "FR is not supported\n");
	else if (!never_register &&
		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
		srp_dev->use_fast_reg = srp_dev->has_fr;

	if (never_register || !register_always || !srp_dev->has_fr)
		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;

	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size = srp_dev->mr_page_size *
			       srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
		 attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device, flags);
	if (IS_ERR(srp_dev->pd)) {
		int ret = PTR_ERR(srp_dev->pd);

		kfree(srp_dev);
		return ret;
	}

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
		WARN_ON_ONCE(srp_dev->global_rkey == 0);
	}

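	/* Create one srp_host, with its sysfs entries, for every HCA port. */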
	rdma_for_each_port (device, p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);
	return 0;
}

static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state = true,
	.reset_timer_if_blocked = true,
	.reconnect_delay = &srp_reconnect_delay,
	.fast_io_fail_tmo = &srp_fast_io_fail_tmo,
	.dev_loss_tmo = &srp_dev_loss_tmo,
	.reconnect = srp_rport_reconnect,
	.rport_delete = srp_rport_delete,
	.terminate_rport_io = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

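	/*
	 * These structures describe the SRP wire format; catch accidental
	 * size changes at build time.
	 */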
	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
		pr_warn("Clamping indirect_sg_entries to %u\n",
			SG_MAX_SEGMENTS);
		indirect_sg_entries = SG_MAX_SEGMENTS;
	}

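	/*
	 * Dedicated workqueue for target removal work; it is flushed in
	 * srp_remove_one() before the srp_host structures are freed.
	 */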
	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);