// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		    "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		    "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

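/*
 * Note on the PRLI decode above: the NVMe PRLI service-parameter bits
 * advertised by the remote port map one-to-one onto nvme-fc transport
 * roles.  A minimal sketch of the same decode in isolation follows;
 * qla_nvme_prli_to_roles() is a hypothetical helper, not part of this
 * driver:
 *
 *	static u32 qla_nvme_prli_to_roles(u16 prli_sp)
 *	{
 *		u32 roles = 0;
 *
 *		if (prli_sp & NVME_PRLI_SP_INITIATOR)
 *			roles |= FC_PORT_ROLE_NVME_INITIATOR;
 *		if (prli_sp & NVME_PRLI_SP_TARGET)
 *			roles |= FC_PORT_ROLE_NVME_TARGET;
 *		if (prli_sp & NVME_PRLI_SP_DISCOVERY)
 *			roles |= FC_PORT_ROLE_NVME_DISCOVERY;
 *
 *		return roles;
 *	}
 */
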
/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx=%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	/* Use base qpair if max_qpairs is 0 */
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			    "Returning existing qpair of %p for idx=%x\n",
			    *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			    "Failed to allocate qpair\n");
			return -EINVAL;
		}
	}
	*handle = qpair;

	return 0;
}

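/*
 * Queue index mapping used by qla_nvme_alloc_queue(): the transport's
 * admin queue (qidx 0) and first IO queue (qidx 1) share hardware
 * queue 0, and every later qidx N maps to hardware queue N - 1.
 * Illustrative table, assuming the template default of 8 hardware
 * queues:
 *
 *	transport qidx:   0  1  2  3  4  5  6  7  8
 *	hardware queue:   0  0  1  2  3  4  5  6  7
 */
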
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;

	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

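/*
 * Command lifetime, as implemented by the two release functions above
 * (informational summary): sp->cmd_kref is initialized to 1 on the
 * issue path; the completion path drops that reference, and an abort
 * path only ever drops a reference it first obtained with
 * kref_get_unless_zero().  The release function detaches sp and priv
 * from each other under priv->cmd_lock before invoking the
 * transport's done() callback, so a racing abort observes
 * priv->sp == NULL and backs off.  Roughly:
 *
 *	kref_init(&sp->cmd_kref);		issue path, count = 1
 *	kref_get_unless_zero(&sp->cmd_kref);	abort path, count = 2
 *	kref_put(&sp->cmd_kref, sp->put_fn);	once per path; the last
 *						put runs the release fn
 */
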
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

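/*
 * The take-a-reference-then-schedule pattern above (repeated in
 * qla_nvme_fcp_abort() below) guards against the command completing
 * while abort work is pending: kref_get_unless_zero() fails once the
 * completion path has dropped the last reference, and the abort is
 * then skipped.  Condensed sketch of the pattern:
 *
 *	spin_lock_irqsave(&priv->cmd_lock, flags);
 *	if (!priv->sp || !kref_get_unless_zero(&priv->sp->cmd_kref)) {
 *		spin_unlock_irqrestore(&priv->cmd_lock, flags);
 *		return;		// command already completed/freed
 *	}
 *	spin_unlock_irqrestore(&priv->cmd_lock, flags);
 *
 *	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
 *	schedule_work(&priv->abort_work);	// worker drops the ref
 */
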
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = fd->rqstdma;
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

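/*
 * Note on the LS path above: the nvme-fc transport hands the driver
 * request/response buffers that are already DMA-mapped (fd->rqstdma
 * and fd->rspdma), so qla_nvme_ls_req() only needs the
 * dma_sync_single_for_device() call before handing the request IU to
 * the firmware; no mapping or unmapping is done in this driver.
 */
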
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer; how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}
	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

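/*
 * Ring accounting in qla2x00_start_nvme_mq() above: the request ring
 * is a circular buffer of req->length entries and req->cnt caches the
 * number of free entries.  When the cached count may be too small
 * (fewer than req_cnt + 2 entries, keeping two entries of headroom),
 * it is recomputed from the chip's out pointer.  Worked example with
 * an assumed req->length = 2048:
 *
 *	ring_index = 100, cnt = 90  (ring_index >= cnt):
 *		req->cnt = 2048 - (100 - 90) = 2038 free entries
 *	ring_index = 90,  cnt = 100 (ring_index < cnt):
 *		req->cnt = 100 - 90 = 10 free entries
 */
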
/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (!qpair || !fcport)
		return -ENODEV;

	if (!qpair->fw_started || fcport->deleted)
		return -EBUSY;

	vha = fcport->vha;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    (qpair && !qpair->fw_started) || fcport->deleted)
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IO's, return busy back to stall the IO Q.  This happens
	 * when the link goes away and fw hasn't notified us yet, but IO's
	 * are being returned.  If the dev comes back quickly we won't
	 * exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.max_hw_queues = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

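/*
 * Sizing note for the template above: remote_priv_sz reserves room in
 * each nvme_fc_remote_port for the struct qla_nvme_rport that
 * qla_nvme_register_remote() reaches through rport->private, while
 * lsrqst_priv_sz and fcprqst_priv_sz reserve a struct nvme_private
 * per request, reached through fd->private in the ls_req/fcp_io and
 * abort handlers:
 *
 *	struct qla_nvme_rport *qla_rport = rport->private;
 *	struct nvme_private *priv = fd->private;
 */
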
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));

	/* Zero the stack-allocated port info before filling it in. */
	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}

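/*
 * Typical lifecycle (informational summary): qla_nvme_register_hba()
 * is called lazily from qla_nvme_register_remote() when the first
 * NVMe-capable fcport appears and no local port exists yet.  Teardown
 * runs in the opposite order: qla_nvme_unregister_remote_port() per
 * fcport, then qla_nvme_delete() for the local port, each waiting on
 * a completion that the corresponding *_delete() callback fires.
 */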