1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2015 Linaro Ltd.
4 * Copyright (c) 2015 Hisilicon Limited.
5 */
6
7 #include "hisi_sas.h"
8 #define DRV_NAME "hisi_sas"
9
10 #define DEV_IS_GONE(dev) \
11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
12
13 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
14 u8 *lun, struct hisi_sas_tmf_task *tmf);
15 static int
16 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
17 struct domain_device *device,
18 int abort_flag, int tag);
19 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
20 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
21 void *funcdata);
22 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
23 struct domain_device *device);
24 static void hisi_sas_dev_gone(struct domain_device *device);
25
/*
 * hisi_sas_get_ata_protocol - classify an ATA command for the HW
 * command header.
 * @fis:       host-to-device register FIS carrying the command opcode
 * @direction: DMA data direction, consulted only for opcodes not
 *             listed explicitly below
 *
 * Returns one of HISI_SAS_SATA_PROTOCOL_{FPDMA,PIO,DMA,NONDATA}.
 */
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		/* SET MAX sub-commands are selected by the features field */
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		/* Unlisted opcode: decide from the data direction */
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
103
hisi_sas_sata_done(struct sas_task * task,struct hisi_sas_slot * slot)104 void hisi_sas_sata_done(struct sas_task *task,
105 struct hisi_sas_slot *slot)
106 {
107 struct task_status_struct *ts = &task->task_status;
108 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
109 struct hisi_sas_status_buffer *status_buf =
110 hisi_sas_status_buf_addr_mem(slot);
111 u8 *iu = &status_buf->iu[0];
112 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
113
114 resp->frame_len = sizeof(struct dev_to_host_fis);
115 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
116
117 ts->buf_valid_size = sizeof(*resp);
118 }
119 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
120
121 /*
122 * This function assumes linkrate mask fits in 8 bits, which it
123 * does for all HW versions supported.
124 */
hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)125 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
126 {
127 u8 rate = 0;
128 int i;
129
130 max -= SAS_LINK_RATE_1_5_GBPS;
131 for (i = 0; i <= max; i++)
132 rate |= 1 << (i * 2);
133 return rate;
134 }
135 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
136
dev_to_hisi_hba(struct domain_device * device)137 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
138 {
139 return device->port->ha->lldd_ha;
140 }
141
to_hisi_sas_port(struct asd_sas_port * sas_port)142 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
143 {
144 return container_of(sas_port, struct hisi_sas_port, sas_port);
145 }
146 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
147
hisi_sas_stop_phys(struct hisi_hba * hisi_hba)148 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
149 {
150 int phy_no;
151
152 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
153 hisi_sas_phy_enable(hisi_hba, phy_no, 0);
154 }
155 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
156
hisi_sas_slot_index_clear(struct hisi_hba * hisi_hba,int slot_idx)157 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
158 {
159 void *bitmap = hisi_hba->slot_index_tags;
160
161 clear_bit(slot_idx, bitmap);
162 }
163
hisi_sas_slot_index_free(struct hisi_hba * hisi_hba,int slot_idx)164 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
165 {
166 unsigned long flags;
167
168 if (hisi_hba->hw->slot_index_alloc ||
169 slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
170 spin_lock_irqsave(&hisi_hba->lock, flags);
171 hisi_sas_slot_index_clear(hisi_hba, slot_idx);
172 spin_unlock_irqrestore(&hisi_hba->lock, flags);
173 }
174 }
175
hisi_sas_slot_index_set(struct hisi_hba * hisi_hba,int slot_idx)176 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
177 {
178 void *bitmap = hisi_hba->slot_index_tags;
179
180 set_bit(slot_idx, bitmap);
181 }
182
/*
 * hisi_sas_slot_index_alloc - allocate an IPTT for a command.
 * @hisi_hba:  the controller
 * @scsi_cmnd: the SCSI command, or NULL for driver-internal commands
 *
 * Commands carrying a scsi_cmnd reuse the block-layer request tag as
 * their IPTT.  Internal commands take a tag from the reserved bitmap
 * region instead, scanning circularly from just past the last tag
 * handed out and wrapping back to HISI_SAS_UNRESERVED_IPTT.
 *
 * Returns the tag, or -SAS_QUEUE_FULL if the reserved region is full.
 */
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;
	unsigned long flags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		/* wrap around to the start of the reserved region */
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				HISI_SAS_UNRESERVED_IPTT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return index;
}
211
hisi_sas_slot_index_init(struct hisi_hba * hisi_hba)212 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
213 {
214 int i;
215
216 for (i = 0; i < hisi_hba->slot_index_count; ++i)
217 hisi_sas_slot_index_clear(hisi_hba, i);
218 }
219
/*
 * hisi_sas_slot_task_free - tear down a completed/aborted slot.
 * @hisi_hba: controller owning the slot
 * @task:     associated sas_task; may be NULL for internal slots
 * @slot:     the slot to release
 *
 * Unmaps the data and protection-information scatterlists for non-ATA
 * tasks (nothing is unmapped for ATA here), detaches the slot from its
 * device list, wipes the slot bookkeeping and returns its IPTT.
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	unsigned long flags;
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		/* slot no longer linked to the task: nothing left to do */
		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			struct sas_ssp_task *ssp_task = &task->ssp_task;
			struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif)
				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
		}
	}

	spin_lock_irqsave(&sas_dev->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	/* clear bookkeeping fields up to (not including) the buf pointer */
	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
259
hisi_sas_task_prep_smp(struct hisi_hba * hisi_hba,struct hisi_sas_slot * slot)260 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
261 struct hisi_sas_slot *slot)
262 {
263 hisi_hba->hw->prep_smp(hisi_hba, slot);
264 }
265
hisi_sas_task_prep_ssp(struct hisi_hba * hisi_hba,struct hisi_sas_slot * slot)266 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
267 struct hisi_sas_slot *slot)
268 {
269 hisi_hba->hw->prep_ssp(hisi_hba, slot);
270 }
271
hisi_sas_task_prep_ata(struct hisi_hba * hisi_hba,struct hisi_sas_slot * slot)272 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
273 struct hisi_sas_slot *slot)
274 {
275 hisi_hba->hw->prep_stp(hisi_hba, slot);
276 }
277
hisi_sas_task_prep_abort(struct hisi_hba * hisi_hba,struct hisi_sas_slot * slot,int device_id,int abort_flag,int tag_to_abort)278 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
279 struct hisi_sas_slot *slot,
280 int device_id, int abort_flag, int tag_to_abort)
281 {
282 hisi_hba->hw->prep_abort(hisi_hba, slot,
283 device_id, abort_flag, tag_to_abort);
284 }
285
hisi_sas_dma_unmap(struct hisi_hba * hisi_hba,struct sas_task * task,int n_elem,int n_elem_req)286 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
287 struct sas_task *task, int n_elem,
288 int n_elem_req)
289 {
290 struct device *dev = hisi_hba->dev;
291
292 if (!sas_protocol_ata(task->task_proto)) {
293 if (task->num_scatter) {
294 if (n_elem)
295 dma_unmap_sg(dev, task->scatter,
296 task->num_scatter,
297 task->data_dir);
298 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
299 if (n_elem_req)
300 dma_unmap_sg(dev, &task->smp_task.smp_req,
301 1, DMA_TO_DEVICE);
302 }
303 }
304 }
305
/*
 * hisi_sas_dma_map - map a task's request data for DMA.
 * @hisi_hba:   the controller
 * @task:       task whose buffers are to be mapped
 * @n_elem:     out: number of mapped data SG elements
 * @n_elem_req: out: number of mapped SMP request SG elements
 *
 * For ATA tasks only the element count is recorded here (no mapping is
 * performed in this function).  SSP-style tasks map the data
 * scatterlist; SMP tasks map the single request frame, whose DMA
 * length must be 4-byte aligned.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * mapped here is unmapped again.
 */
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req);
prep_out:
	return rc;
}
355
hisi_sas_dif_dma_unmap(struct hisi_hba * hisi_hba,struct sas_task * task,int n_elem_dif)356 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
357 struct sas_task *task, int n_elem_dif)
358 {
359 struct device *dev = hisi_hba->dev;
360
361 if (n_elem_dif) {
362 struct sas_ssp_task *ssp_task = &task->ssp_task;
363 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
364
365 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
366 scsi_prot_sg_count(scsi_cmnd),
367 task->data_dir);
368 }
369 }
370
/*
 * hisi_sas_dif_dma_map - map a SSP task's protection-information
 * scatterlist for DMA.
 * @hisi_hba:   the controller
 * @n_elem_dif: out: number of mapped PI SG elements
 * @task:       the task; only mapped when it has data (num_scatter)
 *              and the command carries a PI scatterlist
 *
 * Returns 0 on success or a negative errno.  The error label uses
 * ssp_task/scsi_cmnd, which is safe because it is only reachable from
 * inside the branch that initialises them.
 */
static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}
408
/*
 * hisi_sas_task_prep - allocate a slot and build the command for @task.
 * @task:       the sas_task to deliver
 * @dq_pointer: out: the delivery queue chosen for the task
 * @is_tmf:     whether this is a task management function
 * @tmf:        the TMF descriptor when @is_tmf
 * @pass:       incremented once the slot is queued, telling the caller
 *              to ring the delivery-queue doorbell
 *
 * Returns 0 on success or a negative errno; on failure all DMA
 * mappings taken here are undone.
 */
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	/* pick a DQ per current CPU when a reply map exists */
	if (hisi_hba->reply_map) {
		int cpu = raw_smp_processor_id();
		unsigned int dq_index = hisi_hba->reply_map[cpu];

		*dq_pointer = dq = &hisi_hba->dq[dq_index];
	} else {
		*dq_pointer = dq = sas_dev->dq;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	/* take an IPTT: HW allocator, or block-layer tag / reserved pool */
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else {
		struct scsi_cmnd *scsi_cmnd = NULL;

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scsi_cmnd = qc->scsicmd;
			} else {
				scsi_cmnd = task->uldd_task;
			}
		}
		rc  = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
	}
	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags);
	spin_lock_irqsave(&sas_dev->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	/* publish the slot as deliverable only after it is fully built */
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}
559
/*
 * hisi_sas_task_exec - prepare and deliver a sas_task to the hardware.
 * @task:      the task to execute
 * @gfp_flags: allocation context flags (unused here)
 * @is_tmf:    whether this is a task management function
 * @tmf:       the TMF descriptor when @is_tmf
 *
 * Returns 0 on success or a negative errno.
 */
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	/*
	 * rc must be signed: hisi_sas_task_prep() returns negative
	 * errnos and this function's return type is int (was u32).
	 */
	int rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		/*
		 * For IOs from upper layer, it may already disable preempt
		 * in the IO path, if disable preempt again in down(),
		 * function schedule() will report schedule_bug(), so check
		 * preemptible() before goto down().
		 */
		if (!preemptible())
			return -EINVAL;

		/* wait for any in-flight reset to release the semaphore */
		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock_irqsave(&dq->lock, flags);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock_irqrestore(&dq->lock, flags);
	}

	return rc;
}
616
/*
 * hisi_sas_bytes_dmaed - push an attached phy's identify data to libsas.
 *
 * Raises PHYE_OOB_DONE, refreshes the transport phy's linkrate
 * attributes, fills in the received IDENTIFY frame for SAS phys
 * (nothing extra is needed for SATA), and finally raises
 * PORTE_BYTES_DMAED so libsas can form the port.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
657
/*
 * hisi_sas_alloc_dev - claim a free driver device slot for @device.
 *
 * Scans hisi_hba->devices[] circularly, starting just past the slot
 * handed out last time.  A free slot is initialised and bound to a
 * delivery queue chosen by its index.  Returns NULL when no slot is
 * free.
 */
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	/* i is advanced in the body; the update expression only wraps it */
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}
691
692 #define HISI_SAS_DISK_RECOVER_CNT 3
/*
 * hisi_sas_init_device - bring a newly found device into a clean state.
 *
 * SSP end devices get a CLEAR TASK SET TMF (retried up to
 * HISI_SAS_DISK_RECOVER_CNT times) and their outstanding slots are
 * released on success.  SATA/STP devices first get a hard reset on the
 * remote link to clear any previous STP affiliation, then a software
 * reset, also retried.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success or the last failing
 * status/errno.
 */
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct sas_phy *local_phy;

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		while (retry-- > 0) {
			rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
							  &tmf_task);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * send HARD RESET to clear previous affiliation of
		 * STP target port
		 */
		local_phy = sas_get_local_phy(device);
		if (!scsi_is_sas_phy_local(local_phy) &&
		    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
			unsigned long deadline = ata_deadline(jiffies, 20000);
			struct sata_device *sata_dev = &device->sata_dev;
			struct ata_host *ata_host = sata_dev->ata_host;
			struct ata_port_operations *ops = ata_host->ops;
			struct ata_port *ap = sata_dev->ap;
			struct ata_link *link;
			unsigned int classes;

			ata_for_each_link(link, ap, EDGE)
				rc = ops->hardreset(link, &classes,
						    deadline);
		}
		sas_put_local_phy(local_phy);
		if (rc) {
			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
			return rc;
		}

		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}
758
/*
 * hisi_sas_dev_found - libsas hook: a new domain device was discovered.
 *
 * Allocates a driver device slot (through the HW-specific allocator if
 * one is provided), programs the ITCT, and for expander-attached
 * devices verifies the parent expander really has a phy attached to
 * this SAS address.  Finishes with hisi_sas_init_device(); any failure
 * tears the device down again via hisi_sas_dev_gone().
 *
 * Returns 0 on success or a negative errno.
 */
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		/* find the expander phy attached to this device's address */
		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}
815
/*
 * SCSI host template slave_configure: defer to libsas, then cap the
 * queue depth at 64 for non-SATA devices.
 */
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	int rc = sas_slave_configure(sdev);

	if (rc)
		return rc;

	if (!dev_is_sata(ddev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
829
hisi_sas_scan_start(struct Scsi_Host * shost)830 void hisi_sas_scan_start(struct Scsi_Host *shost)
831 {
832 struct hisi_hba *hisi_hba = shost_priv(shost);
833
834 hisi_hba->hw->phys_init(hisi_hba);
835 }
836 EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
837
hisi_sas_scan_finished(struct Scsi_Host * shost,unsigned long time)838 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
839 {
840 struct hisi_hba *hisi_hba = shost_priv(shost);
841 struct sas_ha_struct *sha = &hisi_hba->sha;
842
843 /* Wait for PHY up interrupt to occur */
844 if (time < HZ)
845 return 0;
846
847 sas_drain_work(sha);
848 return 1;
849 }
850 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
851
hisi_sas_phyup_work(struct work_struct * work)852 static void hisi_sas_phyup_work(struct work_struct *work)
853 {
854 struct hisi_sas_phy *phy =
855 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
856 struct hisi_hba *hisi_hba = phy->hisi_hba;
857 struct asd_sas_phy *sas_phy = &phy->sas_phy;
858 int phy_no = sas_phy->id;
859
860 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
861 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
862 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
863 }
864
hisi_sas_linkreset_work(struct work_struct * work)865 static void hisi_sas_linkreset_work(struct work_struct *work)
866 {
867 struct hisi_sas_phy *phy =
868 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
869 struct asd_sas_phy *sas_phy = &phy->sas_phy;
870
871 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
872 }
873
/* Work handlers, indexed by enum hisi_sas_phy_event. */
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};
878
hisi_sas_notify_phy_event(struct hisi_sas_phy * phy,enum hisi_sas_phy_event event)879 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
880 enum hisi_sas_phy_event event)
881 {
882 struct hisi_hba *hisi_hba = phy->hisi_hba;
883
884 if (WARN_ON(event >= HISI_PHYES_NUM))
885 return false;
886
887 return queue_work(hisi_hba->wq, &phy->works[event]);
888 }
889 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
890
hisi_sas_wait_phyup_timedout(struct timer_list * t)891 static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
892 {
893 struct hisi_sas_phy *phy = from_timer(phy, t, timer);
894 struct hisi_hba *hisi_hba = phy->hisi_hba;
895 struct device *dev = hisi_hba->dev;
896 int phy_no = phy->sas_phy.id;
897
898 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
899 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
900 }
901
hisi_sas_phy_oob_ready(struct hisi_hba * hisi_hba,int phy_no)902 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
903 {
904 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
905 struct device *dev = hisi_hba->dev;
906
907 if (!timer_pending(&phy->timer)) {
908 dev_dbg(dev, "phy%d OOB ready\n", phy_no);
909 phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
910 add_timer(&phy->timer);
911 }
912 }
913 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
914
/*
 * hisi_sas_phy_init - one-time initialisation of the driver-side and
 * libsas-side state for phy @phy_no: default linkrates, identify/role
 * fields, per-event work items and the phy-up wait timer.
 */
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	/* one work item per hisi_sas_phy_event */
	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	/* armed by hisi_sas_phy_oob_ready() while waiting for phy-up */
	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}
946
947 /* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	/* phy->lock serialises enable state against other phy updates */
	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		/* report the transport phy disabled, then stop it in HW */
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
970
hisi_sas_port_notify_formed(struct asd_sas_phy * sas_phy)971 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
972 {
973 struct sas_ha_struct *sas_ha = sas_phy->ha;
974 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
975 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
976 struct asd_sas_port *sas_port = sas_phy->port;
977 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
978 unsigned long flags;
979
980 if (!sas_port)
981 return;
982
983 spin_lock_irqsave(&hisi_hba->lock, flags);
984 port->port_attached = 1;
985 port->id = phy->port_id;
986 phy->port = port;
987 sas_port->lldd_port = port;
988 spin_unlock_irqrestore(&hisi_hba->lock, flags);
989 }
990
/*
 * hisi_sas_do_release_task - complete @slot back to libsas as aborted.
 *
 * Marks the task SAS_TASK_COMPLETE/SAS_ABORTED_TASK, clears its
 * pending/at-initiator flags (internal TMF and SMP tasks are not
 * marked DONE) and frees the slot.
 */
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}
1012
hisi_sas_release_task(struct hisi_hba * hisi_hba,struct domain_device * device)1013 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
1014 struct domain_device *device)
1015 {
1016 struct hisi_sas_slot *slot, *slot2;
1017 struct hisi_sas_device *sas_dev = device->lldd_dev;
1018
1019 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
1020 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
1021 }
1022
hisi_sas_release_tasks(struct hisi_hba * hisi_hba)1023 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
1024 {
1025 struct hisi_sas_device *sas_dev;
1026 struct domain_device *device;
1027 int i;
1028
1029 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1030 sas_dev = &hisi_hba->devices[i];
1031 device = sas_dev->sas_device;
1032
1033 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
1034 !device)
1035 continue;
1036
1037 hisi_sas_release_task(hisi_hba, device);
1038 }
1039 }
1040 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
1041
hisi_sas_dereg_device(struct hisi_hba * hisi_hba,struct domain_device * device)1042 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
1043 struct domain_device *device)
1044 {
1045 if (hisi_hba->hw->dereg_device)
1046 hisi_hba->hw->dereg_device(hisi_hba, device);
1047 }
1048
/*
 * libsas device-gone callback: tear down LLDD state for a departed
 * domain device.
 *
 * Runs under the host semaphore to serialize with controller reset.
 * Unless a reset is already in flight (which performs its own HW
 * cleanup), abort all of the device's commands in HW, deregister it,
 * clear its ITCT entry and drop the LLDD back-pointer. Finally the
 * device slot is marked unused (see DEV_IS_GONE) so it can be reused.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	/* Mark the per-host device slot free for reuse */
	sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}
1075
/*
 * libsas lldd_execute_task entry point: submit a normal (non-TMF)
 * task — no abort semantics, no TMF descriptor.
 */
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	const int is_tmf = 0;

	return hisi_sas_task_exec(task, gfp_flags, is_tmf, NULL);
}
1080
hisi_sas_phy_set_linkrate(struct hisi_hba * hisi_hba,int phy_no,struct sas_phy_linkrates * r)1081 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
1082 struct sas_phy_linkrates *r)
1083 {
1084 struct sas_phy_linkrates _r;
1085
1086 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1087 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1088 enum sas_linkrate min, max;
1089
1090 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
1091 return -EINVAL;
1092
1093 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1094 max = sas_phy->phy->maximum_linkrate;
1095 min = r->minimum_linkrate;
1096 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1097 max = r->maximum_linkrate;
1098 min = sas_phy->phy->minimum_linkrate;
1099 } else
1100 return -EINVAL;
1101
1102 _r.maximum_linkrate = max;
1103 _r.minimum_linkrate = min;
1104
1105 sas_phy->phy->maximum_linkrate = max;
1106 sas_phy->phy->minimum_linkrate = min;
1107
1108 hisi_sas_phy_enable(hisi_hba, phy_no, 0);
1109 msleep(100);
1110 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
1111 hisi_sas_phy_enable(hisi_hba, phy_no, 1);
1112
1113 return 0;
1114 }
1115
/*
 * libsas PHY-control callback: dispatch a PHY control function to the
 * HW layer.
 *
 * Returns 0 on success, -EOPNOTSUPP for functions this HW does not
 * implement, or the error from the link-rate setter.
 */
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		/* Bounce the PHY: disable, let it settle, re-enable */
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* No get_events hook: treat like an unsupported function */
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
1152
/*
 * Completion callback for internal slow tasks (TMF / internal abort):
 * cancel the watchdog timer and wake the waiting submitter.
 */
static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}
1158
hisi_sas_tmf_timedout(struct timer_list * t)1159 static void hisi_sas_tmf_timedout(struct timer_list *t)
1160 {
1161 struct sas_task_slow *slow = from_timer(slow, t, timer);
1162 struct sas_task *task = slow->task;
1163 unsigned long flags;
1164 bool is_completed = true;
1165
1166 spin_lock_irqsave(&task->task_state_lock, flags);
1167 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1168 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1169 is_completed = false;
1170 }
1171 spin_unlock_irqrestore(&task->task_state_lock, flags);
1172
1173 if (!is_completed)
1174 complete(&task->slow_task->completion);
1175 }
1176
/* Seconds to wait for an internal TMF task before the watchdog fires */
#define TASK_TIMEOUT 20
/* Attempts made by hisi_sas_exec_internal_tmf_task() before giving up */
#define TASK_RETRY 3
/* Seconds to wait for an internal abort command before timing out */
#define INTERNAL_ABORT_TIMEOUT 6
/*
 * Build and execute an internal TMF task, retrying up to TASK_RETRY
 * times on retriable failures.
 *
 * @parameter/@para_len carry either a host_to_dev_fis (SATA devices)
 * or a sas_ssp_task (SSP devices), copied into a freshly allocated
 * slow task. A TASK_TIMEOUT watchdog completes the task if the HW
 * never responds.
 *
 * Returns TMF_RESP_FUNC_COMPLETE/SUCC on success, the residual byte
 * count on DATA_UNDERRUN, or a negative errno / TMF_RESP_FUNC_FAILED.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		/* Arm the watchdog before submitting to HW */
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * flush tasklet to avoid free'ing task
					 * before using task in IO completion
					 */
					tasklet_kill(&cq->tasklet);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			dev_warn(dev, "abort tmf: open reject failed\n");
			res = -EIO;
		} else {
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
		}
		/* Retriable failure: free this attempt's task and loop */
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
1294
/*
 * Build a DEVICE RESET host-to-device FIS for @dev, asserting or
 * de-asserting SRST per @reset; @pmp selects the port-multiplier port.
 */
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_DEV_RESET;
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}
1308
/*
 * Issue an ATA software reset (SRST) sequence to a SATA disk through
 * internal TMF tasks: assert SRST on every edge link, then de-assert.
 * On full success, all outstanding tasks for the device are released.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success, else a TMF failure code.
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	/* Phase 1: assert SRST on every edge link */
	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	/* Phase 2: de-assert SRST, only if the assert phase succeeded */
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
1347
/*
 * Issue an SSP TMF to @device at @lun (8-byte SAS LUN).
 *
 * Returns TMF_RESP_FUNC_ESUPP for non-SSP devices, else the result of
 * hisi_sas_exec_internal_tmf_task().
 *
 * Note: ssp_task must be fully zeroed before setting the LUN — the
 * whole struct is memcpy'd into the slow task by
 * hisi_sas_exec_internal_tmf_task(), so any field left uninitialized
 * would propagate stack garbage into the outgoing frame.
 */
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memset(&ssp_task, 0, sizeof(ssp_task));
	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}
1361
/*
 * After a controller reset the HW may assign new port ids. For every
 * registered device, find a PHY of its port that is still up (per the
 * current phys-state bitmap), adopt that PHY's new port id and
 * re-program the device's ITCT. A port with no live PHY gets the
 * invalid id 0xff.
 */
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		/* Find any PHY of this port that is still up */
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}
1400
/*
 * After a reset, reconcile libsas with the new PHY state bitmap:
 * - PHY up and part of an expander-attached port: raise a broadcast
 *   event so libsas revalidates the domain. The _sas_port tracker
 *   limits this to one notification per port.
 * - PHY down: report it to libsas via hisi_sas_phy_down().
 * Disabled PHYs are skipped entirely.
 */
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		/* true for the first PHY we see of each port */
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
		}

	}
}
1433
hisi_sas_reset_init_all_devices(struct hisi_hba * hisi_hba)1434 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
1435 {
1436 struct hisi_sas_device *sas_dev;
1437 struct domain_device *device;
1438 int i;
1439
1440 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1441 sas_dev = &hisi_hba->devices[i];
1442 device = sas_dev->sas_device;
1443
1444 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1445 continue;
1446
1447 hisi_sas_init_device(device);
1448 }
1449 }
1450
/*
 * For an expander-attached SATA device whose STP links are rejected,
 * send an SRST sequence on every currently-up PHY of the port, using
 * force_phy TMF tasks so each reset goes out on a specific PHY.
 */
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		/* Skip PHYs that are currently down */
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}
1485
/*
 * Recovery path for HW that rejects STP links after reset
 * (reject_stp_links_msk): first abort every device's in-flight
 * commands in HW, then for each expander port containing a SATA
 * device run the per-PHY ATA reset sequence.
 */
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}
1525
/*
 * Quiesce the host ahead of a controller reset: take the host
 * semaphore (released again in hisi_sas_controller_reset_done()),
 * snapshot the PHY state, block new SCSI requests, give outstanding
 * commands a bounded time to drain, and finally reject anything
 * submitted after that point.
 */
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
1542
/*
 * Bring the host back after a controller reset: re-init PHYs, restore
 * port ids and ITCT entries, lift the reject/blocked state set in
 * hisi_sas_controller_reset_prepare() (and release its semaphore),
 * then rescan the topology for changes that happened while the
 * controller was down.
 */
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	up(&hisi_hba->sem);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
1565
/*
 * Full controller (soft) reset. HISI_SAS_RESET_BIT guards against
 * concurrent resets. If soft_reset fails, the quiesce state set by
 * the prepare step is rolled back by hand since the done step is
 * skipped.
 *
 * Returns 0 on success, -1 if unsupported or already in progress, or
 * the soft_reset error code.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	/* Queue debugfs dump work — presumably to capture pre-reset HW
	 * state; confirm against the debugfs_work handler. */
	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		/* Undo the prepare step since reset_done() is skipped */
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}
1599
hisi_sas_abort_task(struct sas_task * task)1600 static int hisi_sas_abort_task(struct sas_task *task)
1601 {
1602 struct scsi_lun lun;
1603 struct hisi_sas_tmf_task tmf_task;
1604 struct domain_device *device = task->dev;
1605 struct hisi_sas_device *sas_dev = device->lldd_dev;
1606 struct hisi_hba *hisi_hba;
1607 struct device *dev;
1608 int rc = TMF_RESP_FUNC_FAILED;
1609 unsigned long flags;
1610
1611 if (!sas_dev)
1612 return TMF_RESP_FUNC_FAILED;
1613
1614 hisi_hba = dev_to_hisi_hba(task->dev);
1615 dev = hisi_hba->dev;
1616
1617 spin_lock_irqsave(&task->task_state_lock, flags);
1618 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1619 struct hisi_sas_slot *slot = task->lldd_task;
1620 struct hisi_sas_cq *cq;
1621
1622 if (slot) {
1623 /*
1624 * flush tasklet to avoid free'ing task
1625 * before using task in IO completion
1626 */
1627 cq = &hisi_hba->cq[slot->dlvry_queue];
1628 tasklet_kill(&cq->tasklet);
1629 }
1630 spin_unlock_irqrestore(&task->task_state_lock, flags);
1631 rc = TMF_RESP_FUNC_COMPLETE;
1632 goto out;
1633 }
1634 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1635 spin_unlock_irqrestore(&task->task_state_lock, flags);
1636
1637 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1638 struct scsi_cmnd *cmnd = task->uldd_task;
1639 struct hisi_sas_slot *slot = task->lldd_task;
1640 u16 tag = slot->idx;
1641 int rc2;
1642
1643 int_to_scsilun(cmnd->device->lun, &lun);
1644 tmf_task.tmf = TMF_ABORT_TASK;
1645 tmf_task.tag_of_task_to_be_managed = tag;
1646
1647 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1648 &tmf_task);
1649
1650 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1651 HISI_SAS_INT_ABT_CMD, tag);
1652 if (rc2 < 0) {
1653 dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1654 return TMF_RESP_FUNC_FAILED;
1655 }
1656
1657 /*
1658 * If the TMF finds that the IO is not in the device and also
1659 * the internal abort does not succeed, then it is safe to
1660 * free the slot.
1661 * Note: if the internal abort succeeds then the slot
1662 * will have already been completed
1663 */
1664 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1665 if (task->lldd_task)
1666 hisi_sas_do_release_task(hisi_hba, task, slot);
1667 }
1668 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1669 task->task_proto & SAS_PROTOCOL_STP) {
1670 if (task->dev->dev_type == SAS_SATA_DEV) {
1671 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1672 HISI_SAS_INT_ABT_DEV,
1673 0);
1674 if (rc < 0) {
1675 dev_err(dev, "abort task: internal abort failed\n");
1676 goto out;
1677 }
1678 hisi_sas_dereg_device(hisi_hba, device);
1679 rc = hisi_sas_softreset_ata_disk(device);
1680 }
1681 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1682 /* SMP */
1683 struct hisi_sas_slot *slot = task->lldd_task;
1684 u32 tag = slot->idx;
1685 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
1686
1687 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1688 HISI_SAS_INT_ABT_CMD, tag);
1689 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1690 task->lldd_task) {
1691 /*
1692 * flush tasklet to avoid free'ing task
1693 * before using task in IO completion
1694 */
1695 tasklet_kill(&cq->tasklet);
1696 slot->task = NULL;
1697 }
1698 }
1699
1700 out:
1701 if (rc != TMF_RESP_FUNC_COMPLETE)
1702 dev_notice(dev, "abort task: rc=%d\n", rc);
1703 return rc;
1704 }
1705
/*
 * libsas abort-task-set handler: abort the device's in-flight
 * commands in HW, deregister it, then issue an ABORT TASK SET TMF.
 * On TMF success, release all remaining tasks for the device.
 */
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
1729
/*
 * libsas clear-ACA handler: issue a CLEAR ACA TMF to @device/@lun.
 *
 * The designated initializer zeroes every other tmf_task field
 * (force_phy, phy_id, ...) — matching hisi_sas_lu_reset() — instead
 * of leaving stack garbage in them.
 */
static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_CLEAR_ACA };

	return hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
}
1740
/*
 * Reset the PHY on the path to @device. For a local (HBA) PHY, arm a
 * completion so we can wait for the reset to finish, and report the
 * PHY down if it times out. For remote PHYs past init, sleep to let
 * the link settle.
 *
 * reset_type: hard reset, except for SATA devices past init state
 * (those only get a link reset).
 */
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	DECLARE_COMPLETION_ONSTACK(phyreset);
	int rc, reset_type;

	if (!local_phy->enabled) {
		sas_put_local_phy(local_phy);
		return -ENODEV;
	}

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? true : false;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	/*
	 * NOTE(review): local_phy is still dereferenced below after
	 * sas_put_local_phy() dropped our reference — verify the phy
	 * cannot be freed at this point (or move the put after this
	 * block).
	 */
	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
		/*
		 * If in init state, we rely on caller to wait for link to be
		 * ready; otherwise, except phy reset is fail, delay.
		 */
		if (!rc)
			msleep(2000);
	}

	return rc;
}
1797
/*
 * libsas I_T nexus reset handler: abort the device's commands in HW,
 * deregister it, soft-reset SATA disks, then reset the PHY. Tasks are
 * released when the reset completed (or the PHY was found disabled).
 */
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		rc = hisi_sas_softreset_ata_disk(device);
		if (rc == TMF_RESP_FUNC_FAILED)
			return TMF_RESP_FUNC_FAILED;
	}

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
1825
/*
 * libsas LU reset handler: clear the device's in-flight IO in HW and
 * deregister it, then reset the LU — via local PHY reset for SATA, or
 * a LU_RESET TMF for SSP. Outstanding tasks are released on success.
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Clear internal IO and then lu reset */
	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "lu_reset: internal abort failed\n");
		goto out;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, true);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}
1865
/*
 * libsas clear-nexus-HA handler: run a controller reset on the host
 * workqueue and wait for it, then I_T-reset every non-expander device
 * and release all outstanding tasks.
 */
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}
1896
hisi_sas_query_task(struct sas_task * task)1897 static int hisi_sas_query_task(struct sas_task *task)
1898 {
1899 struct scsi_lun lun;
1900 struct hisi_sas_tmf_task tmf_task;
1901 int rc = TMF_RESP_FUNC_FAILED;
1902
1903 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1904 struct scsi_cmnd *cmnd = task->uldd_task;
1905 struct domain_device *device = task->dev;
1906 struct hisi_sas_slot *slot = task->lldd_task;
1907 u32 tag = slot->idx;
1908
1909 int_to_scsilun(cmnd->device->lun, &lun);
1910 tmf_task.tmf = TMF_QUERY_TASK;
1911 tmf_task.tag_of_task_to_be_managed = tag;
1912
1913 rc = hisi_sas_debug_issue_ssp_tmf(device,
1914 lun.scsi_lun,
1915 &tmf_task);
1916 switch (rc) {
1917 /* The task is still in Lun, release it then */
1918 case TMF_RESP_FUNC_SUCC:
1919 /* The task is not in Lun or failed, reset the phy */
1920 case TMF_RESP_FUNC_FAILED:
1921 case TMF_RESP_FUNC_COMPLETE:
1922 break;
1923 default:
1924 rc = TMF_RESP_FUNC_FAILED;
1925 break;
1926 }
1927 }
1928 return rc;
1929 }
1930
/*
 * Allocate a slot and deliver an internal abort command to the chip.
 *
 * @device_id: HW device id the abort targets
 * @abort_flag: single-command or whole-device abort
 *              (HISI_SAS_INT_ABT_CMD / HISI_SAS_INT_ABT_DEV)
 * @task_tag: tag of the IO to abort (single-command mode only)
 * @dq: delivery queue to send the abort command on
 *
 * Returns 0 on successful delivery, -EINVAL while the host rejects
 * commands, -1 without a port, or the slot-allocation error.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag, struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;
	int wr_q_index;

	/* Host is quiesced for reset: refuse new commands */
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	/* Claim the next write slot on the delivery queue */
	spin_lock_irqsave(&dq->lock, flags);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags);
	spin_lock_irqsave(&sas_dev->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	/* Clear the command header, command table and status buffer */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	/* Publish: the slot is fully initialised */
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
2010
/**
 * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 * @dq: delivery queue for this internal abort command
 *
 * Return: TMF_RESP_FUNC_COMPLETE/TMF_RESP_FUNC_SUCC on success,
 * TMF_RESP_FUNC_FAILED, -ENOMEM or -EIO otherwise.
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	/* Arm a timeout so a lost abort still completes the slow task */
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag, dq);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		/* Trigger a HW snapshot for post-mortem if debugfs is set up */
		if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
			queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * flush tasklet to avoid free'ing task
				 * before using task in IO completion
				 */
				tasklet_kill(&cq->tasklet);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");

			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
2109
2110 static int
hisi_sas_internal_task_abort(struct hisi_hba * hisi_hba,struct domain_device * device,int abort_flag,int tag)2111 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
2112 struct domain_device *device,
2113 int abort_flag, int tag)
2114 {
2115 struct hisi_sas_slot *slot;
2116 struct device *dev = hisi_hba->dev;
2117 struct hisi_sas_dq *dq;
2118 int i, rc;
2119
2120 switch (abort_flag) {
2121 case HISI_SAS_INT_ABT_CMD:
2122 slot = &hisi_hba->slot_info[tag];
2123 dq = &hisi_hba->dq[slot->dlvry_queue];
2124 return _hisi_sas_internal_task_abort(hisi_hba, device,
2125 abort_flag, tag, dq);
2126 case HISI_SAS_INT_ABT_DEV:
2127 for (i = 0; i < hisi_hba->cq_nvecs; i++) {
2128 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2129 const struct cpumask *mask = cq->pci_irq_mask;
2130
2131 if (mask && !cpumask_intersects(cpu_online_mask, mask))
2132 continue;
2133 dq = &hisi_hba->dq[i];
2134 rc = _hisi_sas_internal_task_abort(hisi_hba, device,
2135 abort_flag, tag,
2136 dq);
2137 if (rc)
2138 return rc;
2139 }
2140 break;
2141 default:
2142 dev_err(dev, "Unrecognised internal abort flag (%d)\n",
2143 abort_flag);
2144 return -EINVAL;
2145 }
2146
2147 return 0;
2148 }
2149
/* libsas port-formed callback: thin wrapper around the notify helper. */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}
2154
/*
 * libsas GPIO write callback: forward to the HW-specific implementation,
 * or report -EOPNOTSUPP when this HW generation provides none.
 */
static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	const struct hisi_sas_hw *hw = hisi_hba->hw;

	if (!hw->write_gpio)
		return -EOPNOTSUPP;

	return hw->write_gpio(hisi_hba, reg_type, reg_index, reg_count,
			      write_data);
}
2166
hisi_sas_phy_disconnected(struct hisi_sas_phy * phy)2167 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
2168 {
2169 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2170 struct sas_phy *sphy = sas_phy->phy;
2171 unsigned long flags;
2172
2173 phy->phy_attached = 0;
2174 phy->phy_type = 0;
2175 phy->port = NULL;
2176
2177 spin_lock_irqsave(&phy->lock, flags);
2178 if (phy->enable)
2179 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
2180 else
2181 sphy->negotiated_linkrate = SAS_PHY_DISABLED;
2182 spin_unlock_irqrestore(&phy->lock, flags);
2183 }
2184
/*
 * hisi_sas_phy_down() - handle a PHY-down event from the HW ISR
 * @hisi_hba: host controller struct
 * @phy_no: index of the PHY that went down
 * @rdy: non-zero if the PHY is down but immediately ready again
 *       (link flutter/renegotiation), zero for a real loss of signal
 */
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* During reset the PHY bounces; don't tear the port down */
		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				/* Wide port stays attached while any PHY remains */
				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
2222
hisi_sas_kill_tasklets(struct hisi_hba * hisi_hba)2223 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
2224 {
2225 int i;
2226
2227 for (i = 0; i < hisi_hba->cq_nvecs; i++) {
2228 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2229
2230 tasklet_kill(&cq->tasklet);
2231 }
2232 }
2233 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
2234
/*
 * SCSI host reset entry point: only adapter-level reset is supported;
 * the actual reset runs asynchronously on the driver workqueue.
 */
int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
		return -EOPNOTSUPP;

	queue_work(hisi_hba->wq, &hisi_hba->rst_work);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
2247
/* SAS transport template shared by all hisi_sas HW generations */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

/* libsas callbacks implemented by this LLDD */
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};
2266
hisi_sas_init_mem(struct hisi_hba * hisi_hba)2267 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
2268 {
2269 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
2270 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;
2271
2272 for (i = 0; i < hisi_hba->queue_count; i++) {
2273 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2274 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2275 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];
2276
2277 s = sizeof(struct hisi_sas_cmd_hdr);
2278 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
2279 memset(&cmd_hdr[j], 0, s);
2280
2281 dq->wr_point = 0;
2282
2283 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2284 memset(hisi_hba->complete_hdr[i], 0, s);
2285 cq->rd_point = 0;
2286 }
2287
2288 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
2289 memset(hisi_hba->initial_fis, 0, s);
2290
2291 s = max_command_entries * sizeof(struct hisi_sas_iost);
2292 memset(hisi_hba->iost, 0, s);
2293
2294 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2295 memset(hisi_hba->breakpoint, 0, s);
2296
2297 s = sizeof(struct hisi_sas_sata_breakpoint);
2298 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
2299 memset(&sata_breakpoint[j], 0, s);
2300 }
2301 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
2302
/*
 * hisi_sas_alloc() - allocate all host and DMA memory for the HBA
 * @hisi_hba: host controller struct
 *
 * Allocates (via devm/dmam, so teardown on driver detach is automatic)
 * the delivery/completion queues, ITCT, slot table plus per-slot DMA
 * buffers, IOST, breakpoint tables, initial-FIS area, tag bitmap and
 * the driver workqueue.
 *
 * Return: 0 on success, -ENOMEM on any allocation failure.
 */
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	/* DIF/DIX protection needs the larger per-slot buffer layout */
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	/* Carve slot buffers out of large coherent blocks, LCM-sized so
	 * each block holds a whole number of slots */
	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;

	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	/* One bit per IPTT for the tag allocator */
	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
					&hisi_hba->initial_fis_dma,
					GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
					&hisi_hba->sata_breakpoint_dma,
					GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_sas_slot_index_init(hisi_hba);
	hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	/* devm/dmam allocations are released automatically on detach */
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
2448
hisi_sas_free(struct hisi_hba * hisi_hba)2449 void hisi_sas_free(struct hisi_hba *hisi_hba)
2450 {
2451 int i;
2452
2453 for (i = 0; i < hisi_hba->n_phy; i++) {
2454 struct hisi_sas_phy *phy = &hisi_hba->phy[i];
2455
2456 del_timer_sync(&phy->timer);
2457 }
2458
2459 if (hisi_hba->wq)
2460 destroy_workqueue(hisi_hba->wq);
2461 }
2462 EXPORT_SYMBOL_GPL(hisi_sas_free);
2463
/* Workqueue handler: run an asynchronous controller reset. */
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2472
/*
 * Workqueue handler for a synchronous controller reset: record success
 * in rst->done and signal the waiter via the completion.
 */
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2483
hisi_sas_get_fw_info(struct hisi_hba * hisi_hba)2484 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2485 {
2486 struct device *dev = hisi_hba->dev;
2487 struct platform_device *pdev = hisi_hba->platform_dev;
2488 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2489 struct clk *refclk;
2490
2491 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2492 SAS_ADDR_SIZE)) {
2493 dev_err(dev, "could not get property sas-addr\n");
2494 return -ENOENT;
2495 }
2496
2497 if (np) {
2498 /*
2499 * These properties are only required for platform device-based
2500 * controller with DT firmware.
2501 */
2502 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2503 "hisilicon,sas-syscon");
2504 if (IS_ERR(hisi_hba->ctrl)) {
2505 dev_err(dev, "could not get syscon\n");
2506 return -ENOENT;
2507 }
2508
2509 if (device_property_read_u32(dev, "ctrl-reset-reg",
2510 &hisi_hba->ctrl_reset_reg)) {
2511 dev_err(dev, "could not get property ctrl-reset-reg\n");
2512 return -ENOENT;
2513 }
2514
2515 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2516 &hisi_hba->ctrl_reset_sts_reg)) {
2517 dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
2518 return -ENOENT;
2519 }
2520
2521 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2522 &hisi_hba->ctrl_clock_ena_reg)) {
2523 dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
2524 return -ENOENT;
2525 }
2526 }
2527
2528 refclk = devm_clk_get(dev, NULL);
2529 if (IS_ERR(refclk))
2530 dev_dbg(dev, "no ref clk property\n");
2531 else
2532 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2533
2534 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2535 dev_err(dev, "could not get property phy-count\n");
2536 return -ENOENT;
2537 }
2538
2539 if (device_property_read_u32(dev, "queue-count",
2540 &hisi_hba->queue_count)) {
2541 dev_err(dev, "could not get property queue-count\n");
2542 return -ENOENT;
2543 }
2544
2545 return 0;
2546 }
2547 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2548
/*
 * hisi_sas_shost_alloc() - allocate and set up the SCSI host for a
 * platform-device-based controller
 * @pdev: platform device
 * @hw: HW-generation-specific operations table
 *
 * Reads firmware config, sets the DMA mask (64-bit with 32-bit
 * fallback), maps the register spaces and allocates driver memory.
 *
 * Return: the allocated Scsi_Host, or NULL on failure (the host
 * reference is dropped on every error path).
 */
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	/* Prefer 64-bit DMA, fall back to 32-bit */
	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	/* Second memory resource (SGPIO registers) is optional */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
2608
/*
 * hisi_sas_probe() - common probe routine for platform-device-based
 * controllers
 * @pdev: platform device
 * @hw: HW-generation-specific operations table
 *
 * Allocates the SCSI host, wires up the libsas HA structure, registers
 * with the SCSI midlayer and libsas, initialises the HW and kicks off
 * the initial scan.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	/* libsas wants arrays of pointers to the per-PHY/port structs */
	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	/* Without a HW tag allocator the reserved IPTTs are held back */
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_debugfs_exit(hisi_hba);
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
2691
2692 struct dentry *hisi_sas_debugfs_dir;
2693
hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba * hisi_hba)2694 static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
2695 {
2696 int queue_entry_size = hisi_hba->hw->complete_hdr_size;
2697 int i;
2698
2699 for (i = 0; i < hisi_hba->queue_count; i++)
2700 memcpy(hisi_hba->debugfs_complete_hdr[i],
2701 hisi_hba->complete_hdr[i],
2702 HISI_SAS_QUEUE_SLOTS * queue_entry_size);
2703 }
2704
hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba * hisi_hba)2705 static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
2706 {
2707 int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
2708 int i;
2709
2710 for (i = 0; i < hisi_hba->queue_count; i++) {
2711 struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
2712 int j;
2713
2714 debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
2715 cmd_hdr = hisi_hba->cmd_hdr[i];
2716
2717 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
2718 memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
2719 queue_entry_size);
2720 }
2721 }
2722
hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba * hisi_hba)2723 static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
2724 {
2725 const struct hisi_sas_debugfs_reg *port =
2726 hisi_hba->hw->debugfs_reg_port;
2727 int i, phy_cnt;
2728 u32 offset;
2729 u32 *databuf;
2730
2731 for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
2732 databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
2733 for (i = 0; i < port->count; i++, databuf++) {
2734 offset = port->base_off + 4 * i;
2735 *databuf = port->read_port_reg(hisi_hba, phy_cnt,
2736 offset);
2737 }
2738 }
2739 }
2740
hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba * hisi_hba)2741 static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
2742 {
2743 u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_GLOBAL];
2744 const struct hisi_sas_hw *hw = hisi_hba->hw;
2745 const struct hisi_sas_debugfs_reg *global =
2746 hw->debugfs_reg_array[DEBUGFS_GLOBAL];
2747 int i;
2748
2749 for (i = 0; i < global->count; i++, databuf++)
2750 *databuf = global->read_global_reg(hisi_hba, 4 * i);
2751 }
2752
hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba * hisi_hba)2753 static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
2754 {
2755 u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_AXI];
2756 const struct hisi_sas_hw *hw = hisi_hba->hw;
2757 const struct hisi_sas_debugfs_reg *axi =
2758 hw->debugfs_reg_array[DEBUGFS_AXI];
2759 int i;
2760
2761 for (i = 0; i < axi->count; i++, databuf++)
2762 *databuf = axi->read_global_reg(hisi_hba,
2763 4 * i + axi->base_off);
2764 }
2765
hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba * hisi_hba)2766 static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
2767 {
2768 u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_RAS];
2769 const struct hisi_sas_hw *hw = hisi_hba->hw;
2770 const struct hisi_sas_debugfs_reg *ras =
2771 hw->debugfs_reg_array[DEBUGFS_RAS];
2772 int i;
2773
2774 for (i = 0; i < ras->count; i++, databuf++)
2775 *databuf = ras->read_global_reg(hisi_hba,
2776 4 * i + ras->base_off);
2777 }
2778
hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba * hisi_hba)2779 static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
2780 {
2781 void *cachebuf = hisi_hba->debugfs_itct_cache;
2782 void *databuf = hisi_hba->debugfs_itct;
2783 struct hisi_sas_itct *itct;
2784 int i;
2785
2786 hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_ITCT_CACHE,
2787 cachebuf);
2788
2789 itct = hisi_hba->itct;
2790
2791 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
2792 memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
2793 databuf += sizeof(struct hisi_sas_itct);
2794 }
2795 }
2796
hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba * hisi_hba)2797 static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
2798 {
2799 int max_command_entries = HISI_SAS_MAX_COMMANDS;
2800 void *cachebuf = hisi_hba->debugfs_iost_cache;
2801 void *databuf = hisi_hba->debugfs_iost;
2802 struct hisi_sas_iost *iost;
2803 int i;
2804
2805 hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_IOST_CACHE,
2806 cachebuf);
2807
2808 iost = hisi_hba->iost;
2809
2810 for (i = 0; i < max_command_entries; i++, iost++) {
2811 memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
2812 databuf += sizeof(struct hisi_sas_iost);
2813 }
2814 }
2815
2816 static const char *
hisi_sas_debugfs_to_reg_name(int off,int base_off,const struct hisi_sas_debugfs_reg_lu * lu)2817 hisi_sas_debugfs_to_reg_name(int off, int base_off,
2818 const struct hisi_sas_debugfs_reg_lu *lu)
2819 {
2820 for (; lu->name; lu++) {
2821 if (off == lu->off - base_off)
2822 return lu->name;
2823 }
2824
2825 return NULL;
2826 }
2827
/*
 * Print a snapshotted register file to a seq_file, one
 * "offset value [name]" line per 32-bit register.
 */
static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
				       struct seq_file *s)
{
	const struct hisi_sas_debugfs_reg *reg = ptr;
	int idx;

	for (idx = 0; idx < reg->count; idx++) {
		int off = 4 * idx;
		const char *name = hisi_sas_debugfs_to_reg_name(off,
								reg->base_off,
								reg->lu);

		if (name)
			seq_printf(s, "0x%08x 0x%08x %s\n", off,
				   regs_val[idx], name);
		else
			seq_printf(s, "0x%08x 0x%08x\n", off,
				   regs_val[idx]);
	}
}
2849
/* seq_file show: dump the snapshotted global registers. */
static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];

	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_GLOBAL],
				   reg_global, s);

	return 0;
}

/* debugfs open: bind the show routine to the hisi_hba in i_private. */
static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_global_show,
			   inode->i_private);
}

/* File operations for the "global" debugfs register dump */
static const struct file_operations hisi_sas_debugfs_global_fops = {
	.open = hisi_sas_debugfs_global_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
2875
/* seq_file show: dump the snapshotted AXI registers. */
static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];

	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_AXI],
				   reg_axi, s);

	return 0;
}

/* debugfs open: bind the show routine to the hisi_hba in i_private. */
static int hisi_sas_debugfs_axi_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_axi_show,
			   inode->i_private);
}

/* File operations for the "axi" debugfs register dump */
static const struct file_operations hisi_sas_debugfs_axi_fops = {
	.open = hisi_sas_debugfs_axi_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
2901
/* seq_file show: dump the snapshotted RAS registers. */
static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];

	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_RAS],
				   reg_ras, s);

	return 0;
}

/* debugfs open: bind the show routine to the hisi_hba in i_private. */
static int hisi_sas_debugfs_ras_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_ras_show,
			   inode->i_private);
}

/* File operations for the "ras" debugfs register dump */
static const struct file_operations hisi_sas_debugfs_ras_fops = {
	.open = hisi_sas_debugfs_ras_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
2927
/* seq_file show: dump the snapshotted registers of one PHY's port. */
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
	struct hisi_sas_phy *phy = s->private;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
	u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];

	hisi_sas_debugfs_print_reg(databuf, reg_port, s);

	return 0;
}

/* debugfs open: bind the show routine to the hisi_sas_phy in i_private. */
static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
}

/* File operations for the per-port debugfs register dump */
static const struct file_operations hisi_sas_debugfs_port_fops = {
	.open = hisi_sas_debugfs_port_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
2953
/* Dump one table entry as 64-bit hex words, two per output line. */
static void hisi_sas_show_row_64(struct seq_file *s, int index,
				 int sz, __le64 *ptr)
{
	int qword;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (qword = 0; qword < sz / 8; qword++) {
		seq_printf(s, " 0x%016llx", le64_to_cpu(ptr[qword]));
		/* Break the line after every second qword */
		if (qword % 2)
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");
}
2969
/* Dump one table entry as 32-bit hex words, four per output line. */
static void hisi_sas_show_row_32(struct seq_file *s, int index,
				 int sz, __le32 *ptr)
{
	int dword;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (dword = 0; dword < sz / 4; dword++) {
		seq_printf(s, " 0x%08x", le32_to_cpu(ptr[dword]));
		/* Break the line after every fourth dword */
		if (dword % 4 == 3)
			seq_puts(s, "\n\t");
	}
	seq_puts(s, "\n");
}
2984
/* Dump one snapshotted completion-queue slot (size is HW-dependent). */
static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
{
	struct hisi_sas_cq *cq = cq_ptr;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
	__le32 *complete_hdr = complete_queue +
			(hisi_hba->hw->complete_hdr_size * slot);

	hisi_sas_show_row_32(s, slot,
			     hisi_hba->hw->complete_hdr_size,
			     complete_hdr);
}

/* seq_file show: dump every slot of one completion queue. */
static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
	struct hisi_sas_cq *cq = s->private;
	int slot;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		hisi_sas_cq_show_slot(s, slot, cq);
	}
	return 0;
}

/* debugfs open: bind the show routine to the hisi_sas_cq in i_private. */
static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
}

/* File operations for the per-CQ debugfs dump */
static const struct file_operations hisi_sas_debugfs_cq_fops = {
	.open = hisi_sas_debugfs_cq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
3021
hisi_sas_dq_show_slot(struct seq_file * s,int slot,void * dq_ptr)3022 static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
3023 {
3024 struct hisi_sas_dq *dq = dq_ptr;
3025 struct hisi_hba *hisi_hba = dq->hisi_hba;
3026 void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
3027 __le32 *cmd_hdr = cmd_queue +
3028 sizeof(struct hisi_sas_cmd_hdr) * slot;
3029
3030 hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr);
3031 }
3032
hisi_sas_debugfs_dq_show(struct seq_file * s,void * p)3033 static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
3034 {
3035 int slot;
3036
3037 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
3038 hisi_sas_dq_show_slot(s, slot, s->private);
3039 }
3040 return 0;
3041 }
3042
hisi_sas_debugfs_dq_open(struct inode * inode,struct file * filp)3043 static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
3044 {
3045 return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
3046 }
3047
3048 static const struct file_operations hisi_sas_debugfs_dq_fops = {
3049 .open = hisi_sas_debugfs_dq_open,
3050 .read = seq_read,
3051 .llseek = seq_lseek,
3052 .release = single_release,
3053 .owner = THIS_MODULE,
3054 };
3055
hisi_sas_debugfs_iost_show(struct seq_file * s,void * p)3056 static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
3057 {
3058 struct hisi_hba *hisi_hba = s->private;
3059 struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
3060 int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
3061
3062 for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
3063 __le64 *iost = &debugfs_iost->qw0;
3064
3065 hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), iost);
3066 }
3067
3068 return 0;
3069 }
3070
hisi_sas_debugfs_iost_open(struct inode * inode,struct file * filp)3071 static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
3072 {
3073 return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
3074 }
3075
3076 static const struct file_operations hisi_sas_debugfs_iost_fops = {
3077 .open = hisi_sas_debugfs_iost_open,
3078 .read = seq_read,
3079 .llseek = seq_lseek,
3080 .release = single_release,
3081 .owner = THIS_MODULE,
3082 };
3083
/*
 * seq_file show: dump the snapshotted IOST cache.  Each cache slot is
 * printed as raw 64-bit words, keyed by the table index embedded in the
 * slot itself (not by the loop counter).
 */
static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	struct hisi_sas_iost_itct_cache *iost_cache =
		(struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_iost_cache;
	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; /* dwords -> bytes */
	int i, tab_idx;
	__le64 *iost;

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
		/*
		 * Data struct of IOST cache:
		 * Data[1]: BIT0~15: Table index
		 *          Bit16: Valid mask
		 * Data[2]~[9]: IOST table
		 */
		tab_idx = (iost_cache->data[1] & 0xffff);
		iost = (__le64 *)iost_cache;

		/*
		 * NOTE(review): the Bit16 valid mask documented above is not
		 * checked, so slots that were never populated are dumped as
		 * well — confirm whether that is intentional.
		 */
		hisi_sas_show_row_64(s, tab_idx, cache_size, iost);
	}

	return 0;
}
3108
hisi_sas_debugfs_iost_cache_open(struct inode * inode,struct file * filp)3109 static int hisi_sas_debugfs_iost_cache_open(struct inode *inode,
3110 struct file *filp)
3111 {
3112 return single_open(filp, hisi_sas_debugfs_iost_cache_show,
3113 inode->i_private);
3114 }
3115
3116 static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
3117 .open = hisi_sas_debugfs_iost_cache_open,
3118 .read = seq_read,
3119 .llseek = seq_lseek,
3120 .release = single_release,
3121 .owner = THIS_MODULE,
3122 };
3123
hisi_sas_debugfs_itct_show(struct seq_file * s,void * p)3124 static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
3125 {
3126 int i;
3127 struct hisi_hba *hisi_hba = s->private;
3128 struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
3129
3130 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
3131 __le64 *itct = &debugfs_itct->qw0;
3132
3133 hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), itct);
3134 }
3135
3136 return 0;
3137 }
3138
hisi_sas_debugfs_itct_open(struct inode * inode,struct file * filp)3139 static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
3140 {
3141 return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
3142 }
3143
3144 static const struct file_operations hisi_sas_debugfs_itct_fops = {
3145 .open = hisi_sas_debugfs_itct_open,
3146 .read = seq_read,
3147 .llseek = seq_lseek,
3148 .release = single_release,
3149 .owner = THIS_MODULE,
3150 };
3151
/*
 * seq_file show: dump the snapshotted ITCT cache.  Mirrors
 * hisi_sas_debugfs_iost_cache_show(); each slot is printed as raw
 * 64-bit words keyed by the embedded table index.
 */
static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	struct hisi_sas_iost_itct_cache *itct_cache =
		(struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_itct_cache;
	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; /* dwords -> bytes */
	int i, tab_idx;
	__le64 *itct;

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
		/*
		 * Data struct of ITCT cache:
		 * Data[1]: BIT0~15: Table index
		 *          Bit16: Valid mask
		 * Data[2]~[9]: ITCT table
		 */
		tab_idx = itct_cache->data[1] & 0xffff;
		itct = (__le64 *)itct_cache;

		/*
		 * NOTE(review): as with the IOST cache, the Bit16 valid mask
		 * is not checked before dumping — confirm intent.
		 */
		hisi_sas_show_row_64(s, tab_idx, cache_size, itct);
	}

	return 0;
}
3176
hisi_sas_debugfs_itct_cache_open(struct inode * inode,struct file * filp)3177 static int hisi_sas_debugfs_itct_cache_open(struct inode *inode,
3178 struct file *filp)
3179 {
3180 return single_open(filp, hisi_sas_debugfs_itct_cache_show,
3181 inode->i_private);
3182 }
3183
3184 static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
3185 .open = hisi_sas_debugfs_itct_cache_open,
3186 .read = seq_read,
3187 .llseek = seq_lseek,
3188 .release = single_release,
3189 .owner = THIS_MODULE,
3190 };
3191
hisi_sas_debugfs_create_files(struct hisi_hba * hisi_hba)3192 static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
3193 {
3194 struct dentry *dump_dentry;
3195 struct dentry *dentry;
3196 char name[256];
3197 int p;
3198 int c;
3199 int d;
3200
3201 /* Create dump dir inside device dir */
3202 dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
3203 hisi_hba->debugfs_dump_dentry = dump_dentry;
3204
3205 debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
3206 &hisi_sas_debugfs_global_fops);
3207
3208 /* Create port dir and files */
3209 dentry = debugfs_create_dir("port", dump_dentry);
3210 for (p = 0; p < hisi_hba->n_phy; p++) {
3211 snprintf(name, 256, "%d", p);
3212
3213 debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
3214 &hisi_sas_debugfs_port_fops);
3215 }
3216
3217 /* Create CQ dir and files */
3218 dentry = debugfs_create_dir("cq", dump_dentry);
3219 for (c = 0; c < hisi_hba->queue_count; c++) {
3220 snprintf(name, 256, "%d", c);
3221
3222 debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
3223 &hisi_sas_debugfs_cq_fops);
3224 }
3225
3226 /* Create DQ dir and files */
3227 dentry = debugfs_create_dir("dq", dump_dentry);
3228 for (d = 0; d < hisi_hba->queue_count; d++) {
3229 snprintf(name, 256, "%d", d);
3230
3231 debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
3232 &hisi_sas_debugfs_dq_fops);
3233 }
3234
3235 debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
3236 &hisi_sas_debugfs_iost_fops);
3237
3238 debugfs_create_file("iost_cache", 0400, dump_dentry, hisi_hba,
3239 &hisi_sas_debugfs_iost_cache_fops);
3240
3241 debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
3242 &hisi_sas_debugfs_itct_fops);
3243
3244 debugfs_create_file("itct_cache", 0400, dump_dentry, hisi_hba,
3245 &hisi_sas_debugfs_itct_cache_fops);
3246
3247 debugfs_create_file("axi", 0400, dump_dentry, hisi_hba,
3248 &hisi_sas_debugfs_axi_fops);
3249
3250 debugfs_create_file("ras", 0400, dump_dentry, hisi_hba,
3251 &hisi_sas_debugfs_ras_fops);
3252
3253 return;
3254 }
3255
/*
 * Capture a full register/queue/table snapshot into the debugfs buffers.
 *
 * The HW-specific snapshot_prepare()/snapshot_restore() hooks bracket
 * the copy so controller state is stable while it is read.  The dump
 * files are created here, after the data is in place.
 */
static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
{
	hisi_hba->hw->snapshot_prepare(hisi_hba);

	hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_axi_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_ras_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);

	hisi_sas_debugfs_create_files(hisi_hba);

	hisi_hba->hw->snapshot_restore(hisi_hba);
}
3273
hisi_sas_debugfs_trigger_dump_write(struct file * file,const char __user * user_buf,size_t count,loff_t * ppos)3274 static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
3275 const char __user *user_buf,
3276 size_t count, loff_t *ppos)
3277 {
3278 struct hisi_hba *hisi_hba = file->f_inode->i_private;
3279 char buf[8];
3280
3281 /* A bit racy, but don't care too much since it's only debugfs */
3282 if (hisi_hba->debugfs_snapshot)
3283 return -EFAULT;
3284
3285 if (count > 8)
3286 return -EFAULT;
3287
3288 if (copy_from_user(buf, user_buf, count))
3289 return -EFAULT;
3290
3291 if (buf[0] != '1')
3292 return -EFAULT;
3293
3294 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
3295
3296 return count;
3297 }
3298
3299 static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
3300 .write = &hisi_sas_debugfs_trigger_dump_write,
3301 .owner = THIS_MODULE,
3302 };
3303
/* BIST loopback point selection, in register-encoding order (0-based). */
enum {
	HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
	HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
	HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
};
3309
/*
 * BIST test-pattern (code mode) selection, in register-encoding order.
 * NOTE(review): "SCRAMBED" looks like a typo of "SCRAMBLED", but the
 * identifier may be referenced by the HW-version files — rename only
 * together with all users.
 */
enum {
	HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
	HISI_SAS_BIST_CODE_MODE_PRBS23,
	HISI_SAS_BIST_CODE_MODE_PRBS31,
	HISI_SAS_BIST_CODE_MODE_JTPAT,
	HISI_SAS_BIST_CODE_MODE_CJTPAT,
	HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
	HISI_SAS_BIST_CODE_MODE_TRAIN,
	HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
	HISI_SAS_BIST_CODE_MODE_HFTP,
	HISI_SAS_BIST_CODE_MODE_MFTP,
	HISI_SAS_BIST_CODE_MODE_LFTP,
	HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
};
3324
/* Link-rate names accepted by / shown in the bist "link_rate" file. */
static const struct {
	int value;
	char *name;
} hisi_sas_debugfs_loop_linkrate[] = {
	{ SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
	{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
	{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
	{ SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
};
3334
hisi_sas_debugfs_bist_linkrate_show(struct seq_file * s,void * p)3335 static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p)
3336 {
3337 struct hisi_hba *hisi_hba = s->private;
3338 int i;
3339
3340 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
3341 int match = (hisi_hba->debugfs_bist_linkrate ==
3342 hisi_sas_debugfs_loop_linkrate[i].value);
3343
3344 seq_printf(s, "%s%s%s ", match ? "[" : "",
3345 hisi_sas_debugfs_loop_linkrate[i].name,
3346 match ? "]" : "");
3347 }
3348 seq_puts(s, "\n");
3349
3350 return 0;
3351 }
3352
hisi_sas_debugfs_bist_linkrate_write(struct file * filp,const char __user * buf,size_t count,loff_t * ppos)3353 static ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp,
3354 const char __user *buf,
3355 size_t count, loff_t *ppos)
3356 {
3357 struct seq_file *m = filp->private_data;
3358 struct hisi_hba *hisi_hba = m->private;
3359 char kbuf[16] = {}, *pkbuf;
3360 bool found = false;
3361 int i;
3362
3363 if (hisi_hba->debugfs_bist_enable)
3364 return -EPERM;
3365
3366 if (count >= sizeof(kbuf))
3367 return -EOVERFLOW;
3368
3369 if (copy_from_user(kbuf, buf, count))
3370 return -EINVAL;
3371
3372 pkbuf = strstrip(kbuf);
3373
3374 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
3375 if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name,
3376 pkbuf, 16)) {
3377 hisi_hba->debugfs_bist_linkrate =
3378 hisi_sas_debugfs_loop_linkrate[i].value;
3379 found = true;
3380 break;
3381 }
3382 }
3383
3384 if (!found)
3385 return -EINVAL;
3386
3387 return count;
3388 }
3389
hisi_sas_debugfs_bist_linkrate_open(struct inode * inode,struct file * filp)3390 static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode,
3391 struct file *filp)
3392 {
3393 return single_open(filp, hisi_sas_debugfs_bist_linkrate_show,
3394 inode->i_private);
3395 }
3396
3397 static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = {
3398 .open = hisi_sas_debugfs_bist_linkrate_open,
3399 .read = seq_read,
3400 .write = hisi_sas_debugfs_bist_linkrate_write,
3401 .llseek = seq_lseek,
3402 .release = single_release,
3403 .owner = THIS_MODULE,
3404 };
3405
/* Pattern names accepted by / shown in the bist "code_mode" file. */
static const struct {
	int value;
	char *name;
} hisi_sas_debugfs_loop_code_mode[] = {
	{ HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
	{ HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
	{ HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
	{ HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
	{ HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
	{ HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
};
3423
hisi_sas_debugfs_bist_code_mode_show(struct seq_file * s,void * p)3424 static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p)
3425 {
3426 struct hisi_hba *hisi_hba = s->private;
3427 int i;
3428
3429 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
3430 int match = (hisi_hba->debugfs_bist_code_mode ==
3431 hisi_sas_debugfs_loop_code_mode[i].value);
3432
3433 seq_printf(s, "%s%s%s ", match ? "[" : "",
3434 hisi_sas_debugfs_loop_code_mode[i].name,
3435 match ? "]" : "");
3436 }
3437 seq_puts(s, "\n");
3438
3439 return 0;
3440 }
3441
hisi_sas_debugfs_bist_code_mode_write(struct file * filp,const char __user * buf,size_t count,loff_t * ppos)3442 static ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp,
3443 const char __user *buf,
3444 size_t count,
3445 loff_t *ppos)
3446 {
3447 struct seq_file *m = filp->private_data;
3448 struct hisi_hba *hisi_hba = m->private;
3449 char kbuf[16] = {}, *pkbuf;
3450 bool found = false;
3451 int i;
3452
3453 if (hisi_hba->debugfs_bist_enable)
3454 return -EPERM;
3455
3456 if (count >= sizeof(kbuf))
3457 return -EINVAL;
3458
3459 if (copy_from_user(kbuf, buf, count))
3460 return -EOVERFLOW;
3461
3462 pkbuf = strstrip(kbuf);
3463
3464 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
3465 if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name,
3466 pkbuf, 16)) {
3467 hisi_hba->debugfs_bist_code_mode =
3468 hisi_sas_debugfs_loop_code_mode[i].value;
3469 found = true;
3470 break;
3471 }
3472 }
3473
3474 if (!found)
3475 return -EINVAL;
3476
3477 return count;
3478 }
3479
hisi_sas_debugfs_bist_code_mode_open(struct inode * inode,struct file * filp)3480 static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode,
3481 struct file *filp)
3482 {
3483 return single_open(filp, hisi_sas_debugfs_bist_code_mode_show,
3484 inode->i_private);
3485 }
3486
3487 static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = {
3488 .open = hisi_sas_debugfs_bist_code_mode_open,
3489 .read = seq_read,
3490 .write = hisi_sas_debugfs_bist_code_mode_write,
3491 .llseek = seq_lseek,
3492 .release = single_release,
3493 .owner = THIS_MODULE,
3494 };
3495
hisi_sas_debugfs_bist_phy_write(struct file * filp,const char __user * buf,size_t count,loff_t * ppos)3496 static ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp,
3497 const char __user *buf,
3498 size_t count, loff_t *ppos)
3499 {
3500 struct seq_file *m = filp->private_data;
3501 struct hisi_hba *hisi_hba = m->private;
3502 unsigned int phy_no;
3503 int val;
3504
3505 if (hisi_hba->debugfs_bist_enable)
3506 return -EPERM;
3507
3508 val = kstrtouint_from_user(buf, count, 0, &phy_no);
3509 if (val)
3510 return val;
3511
3512 if (phy_no >= hisi_hba->n_phy)
3513 return -EINVAL;
3514
3515 hisi_hba->debugfs_bist_phy_no = phy_no;
3516
3517 return count;
3518 }
3519
hisi_sas_debugfs_bist_phy_show(struct seq_file * s,void * p)3520 static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p)
3521 {
3522 struct hisi_hba *hisi_hba = s->private;
3523
3524 seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);
3525
3526 return 0;
3527 }
3528
hisi_sas_debugfs_bist_phy_open(struct inode * inode,struct file * filp)3529 static int hisi_sas_debugfs_bist_phy_open(struct inode *inode,
3530 struct file *filp)
3531 {
3532 return single_open(filp, hisi_sas_debugfs_bist_phy_show,
3533 inode->i_private);
3534 }
3535
3536 static const struct file_operations hisi_sas_debugfs_bist_phy_ops = {
3537 .open = hisi_sas_debugfs_bist_phy_open,
3538 .read = seq_read,
3539 .write = hisi_sas_debugfs_bist_phy_write,
3540 .llseek = seq_lseek,
3541 .release = single_release,
3542 .owner = THIS_MODULE,
3543 };
3544
3545 static const struct {
3546 int value;
3547 char *name;
3548 } hisi_sas_debugfs_loop_modes[] = {
3549 { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digial" },
3550 { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
3551 { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
3552 };
3553
hisi_sas_debugfs_bist_mode_show(struct seq_file * s,void * p)3554 static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p)
3555 {
3556 struct hisi_hba *hisi_hba = s->private;
3557 int i;
3558
3559 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
3560 int match = (hisi_hba->debugfs_bist_mode ==
3561 hisi_sas_debugfs_loop_modes[i].value);
3562
3563 seq_printf(s, "%s%s%s ", match ? "[" : "",
3564 hisi_sas_debugfs_loop_modes[i].name,
3565 match ? "]" : "");
3566 }
3567 seq_puts(s, "\n");
3568
3569 return 0;
3570 }
3571
hisi_sas_debugfs_bist_mode_write(struct file * filp,const char __user * buf,size_t count,loff_t * ppos)3572 static ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp,
3573 const char __user *buf,
3574 size_t count, loff_t *ppos)
3575 {
3576 struct seq_file *m = filp->private_data;
3577 struct hisi_hba *hisi_hba = m->private;
3578 char kbuf[16] = {}, *pkbuf;
3579 bool found = false;
3580 int i;
3581
3582 if (hisi_hba->debugfs_bist_enable)
3583 return -EPERM;
3584
3585 if (count >= sizeof(kbuf))
3586 return -EINVAL;
3587
3588 if (copy_from_user(kbuf, buf, count))
3589 return -EOVERFLOW;
3590
3591 pkbuf = strstrip(kbuf);
3592
3593 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
3594 if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) {
3595 hisi_hba->debugfs_bist_mode =
3596 hisi_sas_debugfs_loop_modes[i].value;
3597 found = true;
3598 break;
3599 }
3600 }
3601
3602 if (!found)
3603 return -EINVAL;
3604
3605 return count;
3606 }
3607
hisi_sas_debugfs_bist_mode_open(struct inode * inode,struct file * filp)3608 static int hisi_sas_debugfs_bist_mode_open(struct inode *inode,
3609 struct file *filp)
3610 {
3611 return single_open(filp, hisi_sas_debugfs_bist_mode_show,
3612 inode->i_private);
3613 }
3614
3615 static const struct file_operations hisi_sas_debugfs_bist_mode_ops = {
3616 .open = hisi_sas_debugfs_bist_mode_open,
3617 .read = seq_read,
3618 .write = hisi_sas_debugfs_bist_mode_write,
3619 .llseek = seq_lseek,
3620 .release = single_release,
3621 .owner = THIS_MODULE,
3622 };
3623
hisi_sas_debugfs_bist_enable_write(struct file * filp,const char __user * buf,size_t count,loff_t * ppos)3624 static ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp,
3625 const char __user *buf,
3626 size_t count, loff_t *ppos)
3627 {
3628 struct seq_file *m = filp->private_data;
3629 struct hisi_hba *hisi_hba = m->private;
3630 unsigned int enable;
3631 int val;
3632
3633 val = kstrtouint_from_user(buf, count, 0, &enable);
3634 if (val)
3635 return val;
3636
3637 if (enable > 1)
3638 return -EINVAL;
3639
3640 if (enable == hisi_hba->debugfs_bist_enable)
3641 return count;
3642
3643 if (!hisi_hba->hw->set_bist)
3644 return -EPERM;
3645
3646 val = hisi_hba->hw->set_bist(hisi_hba, enable);
3647 if (val < 0)
3648 return val;
3649
3650 hisi_hba->debugfs_bist_enable = enable;
3651
3652 return count;
3653 }
3654
hisi_sas_debugfs_bist_enable_show(struct seq_file * s,void * p)3655 static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p)
3656 {
3657 struct hisi_hba *hisi_hba = s->private;
3658
3659 seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);
3660
3661 return 0;
3662 }
3663
hisi_sas_debugfs_bist_enable_open(struct inode * inode,struct file * filp)3664 static int hisi_sas_debugfs_bist_enable_open(struct inode *inode,
3665 struct file *filp)
3666 {
3667 return single_open(filp, hisi_sas_debugfs_bist_enable_show,
3668 inode->i_private);
3669 }
3670
3671 static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
3672 .open = hisi_sas_debugfs_bist_enable_open,
3673 .read = seq_read,
3674 .write = hisi_sas_debugfs_bist_enable_write,
3675 .llseek = seq_lseek,
3676 .release = single_release,
3677 .owner = THIS_MODULE,
3678 };
3679
/*
 * Workqueue handler that captures a one-shot register snapshot.
 *
 * Queued from hisi_sas_debugfs_trigger_dump_write().  debugfs_snapshot
 * acts as a latch: only the first trigger ever takes a snapshot (the
 * flag is never cleared here), and the check/set pair is deliberately
 * unsynchronized — acceptable for debugfs-only use, as noted at the
 * trigger site.
 */
void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, debugfs_work);

	if (hisi_hba->debugfs_snapshot)
		return;
	hisi_hba->debugfs_snapshot = true;

	hisi_sas_debugfs_snapshot_regs(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
3692
/*
 * Free the debugfs snapshot buffers allocated by hisi_sas_debugfs_alloc().
 *
 * Only called on the allocation-failure path of that function.
 * debugfs_itct is deliberately not freed here: it is allocated last in
 * hisi_sas_debugfs_alloc() (see the comment there), so it can never
 * have been allocated by the time this runs.  For the other buffers,
 * any pointer that was never allocated is presumably NULL (hisi_hba is
 * assumed zero-initialized — TODO confirm), which devm_kfree() accepts.
 */
static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i;

	devm_kfree(dev, hisi_hba->debugfs_iost_cache);
	devm_kfree(dev, hisi_hba->debugfs_itct_cache);
	devm_kfree(dev, hisi_hba->debugfs_iost);

	for (i = 0; i < hisi_hba->queue_count; i++)
		devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);

	for (i = 0; i < hisi_hba->queue_count; i++)
		devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);

	for (i = 0; i < DEBUGFS_REGS_NUM; i++)
		devm_kfree(dev, hisi_hba->debugfs_regs[i]);

	for (i = 0; i < hisi_hba->n_phy; i++)
		devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
}
3714
/*
 * Allocate every buffer the debugfs snapshot will be copied into.
 *
 * All allocations are device-managed (devm), so on probe teardown they
 * are released automatically.  On partial failure everything allocated
 * so far is freed via hisi_sas_debugfs_release() and -ENOMEM returned.
 * The ITCT buffer must stay the LAST allocation (see the comment above
 * it): hisi_sas_debugfs_release() relies on it never having been
 * allocated when the failure path runs.
 */
static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
{
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	struct device *dev = hisi_hba->dev;
	int p, c, d;
	size_t sz;

	/* register dumps: each register is 4 bytes, hence count * 4 */
	sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4;
	hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] =
		devm_kmalloc(dev, sz, GFP_KERNEL);

	if (!hisi_hba->debugfs_regs[DEBUGFS_GLOBAL])
		goto fail;

	/* one per-port register buffer per PHY */
	sz = hw->debugfs_reg_port->count * 4;
	for (p = 0; p < hisi_hba->n_phy; p++) {
		hisi_hba->debugfs_port_reg[p] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_port_reg[p])
			goto fail;
	}

	sz = hw->debugfs_reg_array[DEBUGFS_AXI]->count * 4;
	hisi_hba->debugfs_regs[DEBUGFS_AXI] =
		devm_kmalloc(dev, sz, GFP_KERNEL);

	if (!hisi_hba->debugfs_regs[DEBUGFS_AXI])
		goto fail;

	sz = hw->debugfs_reg_array[DEBUGFS_RAS]->count * 4;
	hisi_hba->debugfs_regs[DEBUGFS_RAS] =
		devm_kmalloc(dev, sz, GFP_KERNEL);

	if (!hisi_hba->debugfs_regs[DEBUGFS_RAS])
		goto fail;

	/* completion headers: size depends on HW version */
	sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
	for (c = 0; c < hisi_hba->queue_count; c++) {
		hisi_hba->debugfs_complete_hdr[c] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_complete_hdr[c])
			goto fail;
	}

	/* command headers: fixed-size struct */
	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
	for (d = 0; d < hisi_hba->queue_count; d++) {
		hisi_hba->debugfs_cmd_hdr[d] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_cmd_hdr[d])
			goto fail;
	}

	sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);

	hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_iost_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost_cache)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_itct_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct_cache)
		goto fail;

	/* New memory allocation must be locate before itct */
	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);

	hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct)
		goto fail;

	return 0;
fail:
	hisi_sas_debugfs_release(hisi_hba);
	return -ENOMEM;
}
3802
hisi_sas_debugfs_bist_init(struct hisi_hba * hisi_hba)3803 static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
3804 {
3805 hisi_hba->debugfs_bist_dentry =
3806 debugfs_create_dir("bist", hisi_hba->debugfs_dir);
3807 debugfs_create_file("link_rate", 0600,
3808 hisi_hba->debugfs_bist_dentry, hisi_hba,
3809 &hisi_sas_debugfs_bist_linkrate_ops);
3810
3811 debugfs_create_file("code_mode", 0600,
3812 hisi_hba->debugfs_bist_dentry, hisi_hba,
3813 &hisi_sas_debugfs_bist_code_mode_ops);
3814
3815 debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
3816 hisi_hba, &hisi_sas_debugfs_bist_phy_ops);
3817
3818 debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
3819 &hisi_hba->debugfs_bist_cnt);
3820
3821 debugfs_create_file("loopback_mode", 0600,
3822 hisi_hba->debugfs_bist_dentry,
3823 hisi_hba, &hisi_sas_debugfs_bist_mode_ops);
3824
3825 debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
3826 hisi_hba, &hisi_sas_debugfs_bist_enable_ops);
3827
3828 hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
3829 }
3830
/*
 * Create this HBA's debugfs directory (named after the device) with the
 * "trigger_dump" control file and the BIST sub-directory, then allocate
 * the snapshot buffers.  On allocation failure the whole directory is
 * removed again — debugfs is simply unavailable; the driver still works.
 */
void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;

	hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
						   hisi_sas_debugfs_dir);
	debugfs_create_file("trigger_dump", 0600,
			    hisi_hba->debugfs_dir,
			    hisi_hba,
			    &hisi_sas_debugfs_trigger_dump_fops);

	/* create bist structures */
	hisi_sas_debugfs_bist_init(hisi_hba);

	if (hisi_sas_debugfs_alloc(hisi_hba)) {
		debugfs_remove_recursive(hisi_hba->debugfs_dir);
		dev_dbg(dev, "failed to init debugfs!\n");
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
3851
/* Tear down this HBA's entire debugfs tree (no-op if it was never created). */
void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba)
{
	debugfs_remove_recursive(hisi_hba->debugfs_dir);
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit);
3857
hisi_sas_remove(struct platform_device * pdev)3858 int hisi_sas_remove(struct platform_device *pdev)
3859 {
3860 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
3861 struct hisi_hba *hisi_hba = sha->lldd_ha;
3862 struct Scsi_Host *shost = sha->core.shost;
3863
3864 if (timer_pending(&hisi_hba->timer))
3865 del_timer(&hisi_hba->timer);
3866
3867 sas_unregister_ha(sha);
3868 sas_remove_host(sha->core.shost);
3869
3870 hisi_sas_free(hisi_hba);
3871 scsi_host_put(shost);
3872 return 0;
3873 }
3874 EXPORT_SYMBOL_GPL(hisi_sas_remove);
3875
3876 bool hisi_sas_debugfs_enable;
3877 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
3878 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
3879 MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");
3880
/*
 * Module init: register the libsas transport template and, when the
 * debugfs_enable module parameter is set, create the top-level
 * "hisi_sas" debugfs directory used by all HBA instances.
 */
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable)
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);

	return 0;
}
3892
/*
 * Module exit: release the libsas transport template and remove the
 * top-level debugfs dir (debugfs_remove() is a no-op on a NULL/unset
 * dentry, so this is safe when debugfs was never enabled).
 */
static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}
3899
/* Module registration and metadata. */
module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);
3907