/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * cmd_checkout() - checks out an AFU command
 * @afu: AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
static struct afu_cmd *cmd_checkout(struct afu *afu)
{
	int k, dec = CXLFLASH_NUM_CMDS;
	struct afu_cmd *cmd;

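	/*
	 * Masking with (CXLFLASH_NUM_CMDS - 1) implements the round-robin
	 * wrap and assumes CXLFLASH_NUM_CMDS is a power of two; cmd_couts
	 * is a free-running cursor shared by all callers.
	 */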
	while (dec--) {
		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

		cmd = &afu->cmd[k];

		if (!atomic_dec_if_positive(&cmd->free)) {
			pr_devel("%s: returning found index=%d cmd=%p\n",
				 __func__, cmd->slot, cmd);
			memset(cmd->buf, 0, CMD_BUFSIZE);
			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
			return cmd;
		}
	}

	return NULL;
}

/**
 * cmd_checkin() - checks in an AFU command
 * @cmd: AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
static void cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}

/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then this error is handled elsewhere.
				 * If not, it must be handled here; this
				 * is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * Prepares and submits a command that has either completed or timed out
 * to the SCSI stack. Checks the AFU command back into the command pool
 * for non-internal (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
				     "ioasc=%d\n", __func__, scp, scp->result,
				     cmd->sa.ioasc);

		cmd_checkin(cmd); /* Don't use cmd after here */

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - timeout handler for AFU commands
 * @cmd: AFU command that timed out.
 *
 * Sends a reset to the AFU.
 */
static void context_reset(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	u64 room = 0;
	struct afu *afu = cmd->parent;
	ulong lock_flags;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	spin_lock_irqsave(&cmd->slock, lock_flags);

	/* Already completed? */
	if (cmd->sa.host_use_b[0] & B_DONE) {
		spin_unlock_irqrestore(&cmd->slock, lock_flags);
		return;
	}

	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	/*
	 * We really want to send this reset at all costs, so spread
	 * out wait time on successive retries for available room.
	 */
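	/*
	 * Each retry doubles the delay (1 << nretry microseconds), so the
	 * total worst-case wait before giving up is bounded by roughly
	 * 2^(MC_ROOM_RETRY_CNT + 1) microseconds.
	 */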
	do {
		room = readq_be(&afu->host_map->cmd_room);
		atomic64_set(&afu->room, room);
		if (room)
			goto write_rrin;
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	pr_err("%s: no cmd_room to send reset\n", __func__);
	return;

write_rrin:
	nretry = 0;
	writeq_be(rrin, &afu->host_map->ioarrin);
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);
}

/**
 * send_cmd() - sends an AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int nretry = 0;
	int rc = 0;
	u64 room;
	long newval;

	/*
	 * This routine is used by critical users such as AFU sync and to
	 * send a task management function (TMF). Thus we want to retry a
	 * bit before returning an error. To avoid the performance penalty
	 * of MMIO, we spread the update of 'room' over multiple commands.
	 */
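	/*
	 * 'room' acts as a cached credit count: each command consumes one
	 * credit via atomic64_dec_if_positive() and the MMIO register is
	 * only re-read once the cached value has been exhausted.
	 */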
retry:
	newval = atomic64_dec_if_positive(&afu->room);
	if (!newval) {
		do {
			room = readq_be(&afu->host_map->cmd_room);
			atomic64_set(&afu->room, room);
			if (room)
				goto write_ioarrin;
			udelay(1 << nretry);
		} while (nretry++ < MC_ROOM_RETRY_CNT);

		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
			__func__, cmd->rcb.cdb[0]);

		goto no_room;
	} else if (unlikely(newval < 0)) {
		/* This should be rare, i.e. only if two threads race and
		 * decrement before the MMIO read is done. In this case,
		 * just benefit from the other thread having updated
		 * afu->room.
		 */
		if (nretry++ < MC_ROOM_RETRY_CNT) {
			udelay(1 << nretry);
			goto retry;
		}

		goto no_room;
	}

write_ioarrin:
	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;

no_room:
	afu->read_room = true;
	kref_get(&cfg->afu->mapcount);
	schedule_work(&cfg->work_q);
	rc = SCSI_MLQUEUE_HOST_BUSY;
	goto out;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 */
static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		context_reset(cmd);

	if (unlikely(cmd->sa.ioasc != 0))
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu: AFU to checkout from.
 * @scp: SCSI command from stack.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd;

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

static void afu_unmap(struct kref *ref)
{
	struct afu *afu = container_of(ref, struct afu, mapcount);

	if (likely(afu->afu_map)) {
		cxl_psa_unmap((void __iomem *)afu->afu_map);
		afu->afu_map = NULL;
	}
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp: SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd;
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;
	int kref_got = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	kref_get(&cfg->afu->mapcount);
	kref_got = 1;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	ncount = scsi_sg_count(scp);
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}
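	/*
	 * The RCB describes a single data extent, so the loop above
	 * effectively records only the last scatter-gather element;
	 * presumably the host template restricts requests to a single
	 * element (sg_tablesize of 1) for this to be correct.
	 */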

	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		scsi_dma_unmap(scp);
	}

out:
	if (kref_got)
		kref_put(&afu->mapcount, afu_unmap);
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	int i;
	char *buf = NULL;
	struct afu *afu = cfg->afu;

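	/*
	 * Command buffers are packed several to a page (see alloc_mem()),
	 * so only the page-aligned buffer in each group owns a page and
	 * needs to be freed.
	 */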
	if (cfg->afu) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			buf = afu->cmd[i].buf;
			if (!((u64)buf & (PAGE_SIZE - 1)))
				free_page((ulong)buf);
		}

		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cleans up all state associated with the command queue, and unmaps
 * the MMIO space.
 *
 *  - complete() will take care of commands we initiated (they'll be checked
 *  in as part of the cleanup that occurs after the completion)
 *
 *  - cmd_checkin() will take care of entries that we did not initiate and that
 *  have not (and will not) complete because they are sitting on a [now stale]
 *  hardware queue
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	int i;
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	if (likely(afu)) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			cmd = &afu->cmd[i];
			complete(&cmd->cevent);
			if (!atomic_read(&cmd->free))
				cmd_checkin(cmd);
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
		kref_put(&afu->mapcount, afu_unmap);
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg: Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
			__func__);
		return;
	}

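	/*
	 * Deliberate fall-through waterfall: entering at a given level
	 * also performs the teardown of every level below it.
	 */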
	switch (level) {
	case UNDO_START:
		rc = cxl_stop_context(cfg->mcctx);
		BUG_ON(rc);
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
	case RELEASE_CONTEXT:
		cfg->mcctx = NULL;
	}
}

/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	term_mc(cfg, UNDO_START);

	if (cfg->afu)
		stop_afu(cfg);

	pr_debug("%s: returning\n", __func__);
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		cancel_work_sync(&cfg->work_q);
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_release_regions(cfg->dev);
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	int i;
	char *buf = NULL;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;

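	/*
	 * Command buffers are carved out of whole pages, CMD_BUFSIZE at a
	 * time: a fresh page is allocated whenever the cursor lands on a
	 * page boundary. This packing assumes CMD_BUFSIZE divides
	 * PAGE_SIZE evenly; free_mem() relies on the same layout.
	 */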
	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
		if (!((u64)buf & (PAGE_SIZE - 1))) {
			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
			if (unlikely(!buf)) {
				dev_err(dev,
					"%s: Allocate command buffers fail!\n",
					__func__);
				rc = -ENOMEM;
				free_mem(cfg);
				goto out;
			}
		}

		cfg->afu->cmd[i].buf = buf;
		atomic_set(&cfg->afu->cmd[i].free, 1);
		cfg->afu->cmd[i].slot = i;
	}

out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
	rc = pci_request_regions(pdev, CXLFLASH_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"%s: Couldn't register memory range of registers\n",
			__func__);
		goto out;
	}

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out_release_regions;
		}
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc < 0) {
		dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
			__func__);
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (rc < 0) {
		dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
			__func__);
		goto out_disable;
	}

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		cxlflash_wait_for_pci_err_recovery(cfg);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
			__func__);
		rc = -EIO;
		goto cleanup_nolog;
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

cleanup_nolog:
out_msi_disable:
	cxlflash_wait_for_pci_err_recovery(cfg);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	goto out;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
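		/*
		 * An all-ones read usually means the MMIO read itself
		 * failed (e.g. during error recovery), so burn down the
		 * remaining retries faster rather than polling a dead
		 * register at full budget.
		 */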
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs: Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry: Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn: The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return: 0 always; a port that fails to go offline is logged and skips the
 * WWPN write, and a failure to come back online is logged but not treated
 * as an error.
 */
static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			u64 wwpn)
{
	int rc = 0;

	set_port_offline(fc_regs);

	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
		rc = -1; /* but continue on to leave the port back online */
	}

	if (rc == 0)
		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	/* Always return success after programming WWPN */
	rc = 0;

	set_port_online(fc_regs);

	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);

	return rc;
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu: AFU associated with the host that owns the specified FC port.
 * @port: Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain the link with the device by switching the host to
 * use the alternate port exclusively while the reset takes place; a
 * failure of the port to come back online is logged but otherwise ignored.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status: Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu: AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

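	/*
	 * Each RRQ entry carries the completed command's pointer in its
	 * upper bits with the toggle bit folded into bit 0. The toggle
	 * flips on every wrap of the queue, so an entry is only valid
	 * when its toggle matches the current pass; a stale entry from
	 * the prior pass terminates the scan.
	 */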
	/* Process however many RRQ entries that are ready */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg: Internal structure associated with the host.
 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->parent_dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg: Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* Initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}

/**
 * init_global() - initialize AFU global registers
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);

	/* Set up RRQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0 &&
		    afu_set_wwpn(afu, i,
				 &afu->afu_map->global.fc_regs[i][0],
				 wwpn[i])) {
			dev_err(dev, "%s: failed to set WWPN on port %d\n",
				__func__, i);
			rc = -EIO;
			goto out;
		}
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);

out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg: Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	int i = 0;
	int rc = 0;

	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		cmd = &afu->cmd[i];

		init_completion(&cmd->cevent);
		spin_lock_init(&cmd->slock);
		cmd->parent = afu;
	}

	init_pcr(cfg);

	/* After an AFU reset, RRQ entries are stale, clear them */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));

	/* Initialize RRQ pointers */
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
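	/*
	 * The RRQ was just zeroed, so a toggle value of 1 distinguishes
	 * fresh entries from the cleared slots; cxlflash_rrq_irq() flips
	 * it on every wrap.
	 */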
	afu->toggle = 1;

	rc = init_global(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_mc() - create and register as the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx))
		return -ENOMEM;
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
			__func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
			__func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
			__func__);
		level = UNMAP_TWO;
		goto out;
	}

	rc = 0;

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_mc(cfg, level);
	goto ret;
}

/**
 * init_afu() - setup as master context and start AFU
 * @cfg: Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}
	kref_init(&afu->mapcount);

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		pr_err("Back level AFU, please upgrade. AFU version %s "
		       "interface version 0x%llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err2;
	}

	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
		 afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
			__func__, rc);
		goto err2;
	}

	afu_err_intr_init(cfg->afu);
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

err2:
	kref_put(&afu->mapcount, afu_unmap);
err1:
	term_mc(cfg, UNDO_START);
	goto out;
}

/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu: AFU associated with the host.
 * @ctx_hndl_u: Identifies context requesting sync.
 * @res_hndl_u: Identifies resource requesting sync.
 * @mode: Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires that callers not be in interrupt
 * context, due to the possibility of sleeping when sync operations are
 * concurrent.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success
 *	-1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	int rc = 0;
	int retry_cnt = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
retry:
	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		retry_cnt++;
		udelay(1000 * retry_cnt);
		if (retry_cnt < MC_RETRY_CNT)
			goto retry;
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = -1;
		goto out;
	}

	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.port_sel = 0x0;	/* NA */
	cmd->rcb.lun_id = 0x0;		/* NA */
	cmd->rcb.data_len = 0x0;
	cmd->rcb.data_ea = 0x0;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	wait_resp(afu, cmd);

	/* Set on timeout */
	if (unlikely((cmd->sa.ioasc != 0) ||
		     (cmd->sa.host_use_b[0] & B_ERROR)))
		rc = -1;
out:
	mutex_unlock(&sync_active);
	if (cmd)
		cmd_checkin(cmd);
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
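
/*
 * Usage sketch (not code from this file; assumes the AFU_LW_SYNC mode
 * constant from sislite.h): callers that update a context's translation
 * state typically follow with a lightweight sync, along the lines of:
 *
 *	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
 *	if (unlikely(rc))
 *		rc = -EAGAIN;
 */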

/**
 * afu_reset() - resets the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	/*
	 * Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset completes.
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp: SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp: SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else {
			cfg->state = STATE_NORMAL;
		}
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		/* fall through */
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev: SCSI device destined for queue depth change.
 * @qdepth: Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
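
/*
 * For example (assuming the standard SCSI sysfs layout rather than
 * anything specific to this driver), the depth can be tuned at runtime
 * and is silently capped by the routine above:
 *
 *	echo 64 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 */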

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
{
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_regs;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_regs = &afu->afu_map->global.fc_regs[port][0];
	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}
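
/*
 * The helper above backs the port0/port1 host attributes declared later
 * in this file, so the link state can be read via sysfs, e.g. (hostN
 * varies by system):
 *
 *	cat /sys/class/scsi_host/hostN/port0
 *	online
 */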

/**
 * port0_show() - queries and presents the current status of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(0, afu, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(1, afu, buf);
}

/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count: Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf (i.e. @count).
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be 2 (default).
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = NUM_FC_PORTS - 1;

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
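
/*
 * Usage sketch (hostN varies by system): switching to a single internal
 * 512B-block LUN and back again, each write triggering an AFU reset and
 * a host rescan:
 *
 *	echo 1 > /sys/class/scsi_host/hostN/lun_mode
 *	echo 0 > /sys/class/scsi_host/hostN/lun_mode
 */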

/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the ioctl version.
 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}

/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct afu *afu,
					    char *buf)
{
	int i;
	ssize_t bytes = 0;
	__be64 __iomem *fc_port;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_port = &afu->afu_map->global.fc_port[port][0];

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
	return bytes;
}

/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(0, afu, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(1, afu, buf);
}

/**
 * mode_show() - presents the current mode of the device
 * @dev: Generic device associated with the device.
 * @attr: Device attribute representing the device mode.
 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}

/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};

/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.this_id = -1,
	.sg_tablesize = SG_NONE,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);

/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work: Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed in interrupt context due to
 *   blocking up to a few seconds
 * - Read AFU command room
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			afu_link_reset(afu, port,
				       &afu->afu_map->global.fc_regs[port][0]);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	if (afu->read_room) {
		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
		afu->read_room = false;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
	kref_put(&afu->mapcount, afu_unmap);
}
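
/*
 * Reader's note (a summary of the surrounding flow, not new behavior):
 * the interrupt paths that request a link reset or a host rescan take a
 * reference on the MMIO mapping and queue this handler via
 * schedule_work(&cfg->work_q); the kref_put() above drops that
 * reference once the work has been serviced.
 */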

/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev: PCI device associated with the host.
 * @dev_id: PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *phys_dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
			__func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = NUM_FC_PORTS - 1;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = (struct cxlflash_cfg *)host->hostdata;
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
			__func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * The promoted LUNs move to the top of the LUN table. The rest stay
	 * on the bottom half. The bottom half grows from the end
	 * (index = 255), whereas the top half grows from the beginning
	 * (index = 0).
	 */
	cfg->promote_lun_index = 0;
	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
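	/*
	 * Illustration (assuming CXLFLASH_NUM_VLUNS == 512, i.e. 256
	 * entries per port): promoted entries fill indices 0, 1, 2, ...
	 * while per-port entries fill 255, 254, 253, ... so the two
	 * regions grow toward each other.
	 */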

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	/*
	 * Use the special service provided to look up the physical
	 * PCI device, since we are called on the probe of the virtual
	 * PCI host bus (vphb)
	 */
	phys_dev = cxl_get_phys_dev(pdev);
	if (!dev_is_pci(phys_dev)) {
		dev_err(&pdev->dev, "%s: not a pci dev\n", __func__);
		rc = -ENODEV;
		goto out_remove;
	}
	cfg->parent_dev = to_pci_dev(phys_dev);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_pci failed rc=%d!\n",
			__func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_afu failed rc=%d!\n",
			__func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_scsi failed rc=%d!\n",
			__func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}

/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg: Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}
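
/*
 * A sketch of the pairing (the ioctl entry points live elsewhere in this
 * driver): each ioctl holds the read side of ioctl_rwsem for its duration,
 * so the momentary write lock above cannot be acquired until every
 * in-flight ioctl has finished, and new ioctls queue behind the writer:
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	... service the ioctl ...
 *	up_read(&cfg->ioctl_rwsem);
 */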

/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev: PCI device struct.
 * @state: PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
				__func__, rc);
		term_mc(cfg, UNDO_START);
		stop_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev: PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev: PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}

static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};

/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};

/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	pr_info("%s: IBM Power CXL Flash Adapter: %s\n",
		__func__, CXLFLASH_DRIVER_DATE);

	cxlflash_list_init();

	return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);