Lines Matching refs:ctrl

215 		q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);  in dma_intr_coal_auto_tune()
224 static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, in rsxx_complete_dma() argument
229 ctrl->stats.dma_sw_err++; in rsxx_complete_dma()
231 ctrl->stats.dma_hw_fault++; in rsxx_complete_dma()
233 ctrl->stats.dma_cancelled++; in rsxx_complete_dma()
236 pci_unmap_page(ctrl->card->dev, dma->dma_addr, in rsxx_complete_dma()
243 dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0); in rsxx_complete_dma()
248 static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl, in rsxx_requeue_dma() argument
255 spin_lock(&ctrl->queue_lock); in rsxx_requeue_dma()
256 list_add(&dma->list, &ctrl->queue); in rsxx_requeue_dma()
257 spin_unlock(&ctrl->queue_lock); in rsxx_requeue_dma()
260 static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl, in rsxx_handle_dma_error() argument
267 dev_dbg(CARD_TO_DEV(ctrl->card), in rsxx_handle_dma_error()
272 ctrl->stats.crc_errors++; in rsxx_handle_dma_error()
274 ctrl->stats.hard_errors++; in rsxx_handle_dma_error()
276 ctrl->stats.soft_errors++; in rsxx_handle_dma_error()
281 if (ctrl->card->scrub_hard) { in rsxx_handle_dma_error()
284 ctrl->stats.reads_retried++; in rsxx_handle_dma_error()
287 ctrl->stats.reads_failed++; in rsxx_handle_dma_error()
291 ctrl->stats.reads_failed++; in rsxx_handle_dma_error()
299 ctrl->stats.reads_failed++; in rsxx_handle_dma_error()
305 ctrl->stats.writes_failed++; in rsxx_handle_dma_error()
310 ctrl->stats.discards_failed++; in rsxx_handle_dma_error()
314 dev_err(CARD_TO_DEV(ctrl->card), in rsxx_handle_dma_error()
324 rsxx_requeue_dma(ctrl, dma); in rsxx_handle_dma_error()
326 rsxx_complete_dma(ctrl, dma, status); in rsxx_handle_dma_error()
331 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; in dma_engine_stalled() local
333 if (atomic_read(&ctrl->stats.hw_q_depth) == 0 || in dma_engine_stalled()
334 unlikely(ctrl->card->eeh_state)) in dma_engine_stalled()
337 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) { in dma_engine_stalled()
342 dev_warn(CARD_TO_DEV(ctrl->card), in dma_engine_stalled()
344 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); in dma_engine_stalled()
345 mod_timer(&ctrl->activity_timer, in dma_engine_stalled()
348 dev_warn(CARD_TO_DEV(ctrl->card), in dma_engine_stalled()
350 ctrl->id); in dma_engine_stalled()
351 ctrl->card->dma_fault = 1; in dma_engine_stalled()
357 struct rsxx_dma_ctrl *ctrl; in rsxx_issue_dmas() local
363 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); in rsxx_issue_dmas()
364 hw_cmd_buf = ctrl->cmd.buf; in rsxx_issue_dmas()
366 if (unlikely(ctrl->card->halt) || in rsxx_issue_dmas()
367 unlikely(ctrl->card->eeh_state)) in rsxx_issue_dmas()
371 spin_lock(&ctrl->queue_lock); in rsxx_issue_dmas()
372 if (list_empty(&ctrl->queue)) { in rsxx_issue_dmas()
373 spin_unlock(&ctrl->queue_lock); in rsxx_issue_dmas()
376 spin_unlock(&ctrl->queue_lock); in rsxx_issue_dmas()
378 tag = pop_tracker(ctrl->trackers); in rsxx_issue_dmas()
382 spin_lock(&ctrl->queue_lock); in rsxx_issue_dmas()
383 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list); in rsxx_issue_dmas()
385 ctrl->stats.sw_q_depth--; in rsxx_issue_dmas()
386 spin_unlock(&ctrl->queue_lock); in rsxx_issue_dmas()
393 if (unlikely(ctrl->card->dma_fault)) { in rsxx_issue_dmas()
394 push_tracker(ctrl->trackers, tag); in rsxx_issue_dmas()
395 rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); in rsxx_issue_dmas()
399 set_tracker_dma(ctrl->trackers, tag, dma); in rsxx_issue_dmas()
400 hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd; in rsxx_issue_dmas()
401 hw_cmd_buf[ctrl->cmd.idx].tag = tag; in rsxx_issue_dmas()
402 hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0; in rsxx_issue_dmas()
403 hw_cmd_buf[ctrl->cmd.idx].sub_page = in rsxx_issue_dmas()
407 hw_cmd_buf[ctrl->cmd.idx].device_addr = in rsxx_issue_dmas()
410 hw_cmd_buf[ctrl->cmd.idx].host_addr = in rsxx_issue_dmas()
413 dev_dbg(CARD_TO_DEV(ctrl->card), in rsxx_issue_dmas()
415 ctrl->id, dma->laddr, tag, ctrl->cmd.idx); in rsxx_issue_dmas()
417 ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK; in rsxx_issue_dmas()
421 ctrl->stats.writes_issued++; in rsxx_issue_dmas()
423 ctrl->stats.discards_issued++; in rsxx_issue_dmas()
425 ctrl->stats.reads_issued++; in rsxx_issue_dmas()
430 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth); in rsxx_issue_dmas()
431 mod_timer(&ctrl->activity_timer, in rsxx_issue_dmas()
434 if (unlikely(ctrl->card->eeh_state)) { in rsxx_issue_dmas()
435 del_timer_sync(&ctrl->activity_timer); in rsxx_issue_dmas()
439 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); in rsxx_issue_dmas()
445 struct rsxx_dma_ctrl *ctrl; in rsxx_dma_done() local
453 ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work); in rsxx_dma_done()
454 hw_st_buf = ctrl->status.buf; in rsxx_dma_done()
456 if (unlikely(ctrl->card->halt) || in rsxx_dma_done()
457 unlikely(ctrl->card->dma_fault) || in rsxx_dma_done()
458 unlikely(ctrl->card->eeh_state)) in rsxx_dma_done()
461 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); in rsxx_dma_done()
463 while (count == ctrl->e_cnt) { in rsxx_dma_done()
473 status = hw_st_buf[ctrl->status.idx].status; in rsxx_dma_done()
474 tag = hw_st_buf[ctrl->status.idx].tag; in rsxx_dma_done()
476 dma = get_tracker_dma(ctrl->trackers, tag); in rsxx_dma_done()
478 spin_lock_irqsave(&ctrl->card->irq_lock, flags); in rsxx_dma_done()
479 rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL); in rsxx_dma_done()
480 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags); in rsxx_dma_done()
482 dev_err(CARD_TO_DEV(ctrl->card), in rsxx_dma_done()
485 tag, ctrl->status.idx, ctrl->id); in rsxx_dma_done()
489 dev_dbg(CARD_TO_DEV(ctrl->card), in rsxx_dma_done()
492 ctrl->id, dma->laddr, tag, status, count, in rsxx_dma_done()
493 ctrl->status.idx); in rsxx_dma_done()
495 atomic_dec(&ctrl->stats.hw_q_depth); in rsxx_dma_done()
497 mod_timer(&ctrl->activity_timer, in rsxx_dma_done()
501 rsxx_handle_dma_error(ctrl, dma, status); in rsxx_dma_done()
503 rsxx_complete_dma(ctrl, dma, 0); in rsxx_dma_done()
505 push_tracker(ctrl->trackers, tag); in rsxx_dma_done()
507 ctrl->status.idx = (ctrl->status.idx + 1) & in rsxx_dma_done()
509 ctrl->e_cnt++; in rsxx_dma_done()
511 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); in rsxx_dma_done()
514 dma_intr_coal_auto_tune(ctrl->card); in rsxx_dma_done()
516 if (atomic_read(&ctrl->stats.hw_q_depth) == 0) in rsxx_dma_done()
517 del_timer_sync(&ctrl->activity_timer); in rsxx_dma_done()
519 spin_lock_irqsave(&ctrl->card->irq_lock, flags); in rsxx_dma_done()
520 rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id)); in rsxx_dma_done()
521 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags); in rsxx_dma_done()
523 spin_lock(&ctrl->queue_lock); in rsxx_dma_done()
524 if (ctrl->stats.sw_q_depth) in rsxx_dma_done()
525 queue_work(ctrl->issue_wq, &ctrl->issue_dma_work); in rsxx_dma_done()
526 spin_unlock(&ctrl->queue_lock); in rsxx_dma_done()
701 spin_lock(&card->ctrl[i].queue_lock); in rsxx_dma_queue_bio()
702 card->ctrl[i].stats.sw_q_depth += dma_cnt[i]; in rsxx_dma_queue_bio()
703 list_splice_tail(&dma_list[i], &card->ctrl[i].queue); in rsxx_dma_queue_bio()
704 spin_unlock(&card->ctrl[i].queue_lock); in rsxx_dma_queue_bio()
706 queue_work(card->ctrl[i].issue_wq, in rsxx_dma_queue_bio()
707 &card->ctrl[i].issue_dma_work); in rsxx_dma_queue_bio()
722 int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl) in rsxx_hw_buffers_init() argument
724 ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8, in rsxx_hw_buffers_init()
725 &ctrl->status.dma_addr); in rsxx_hw_buffers_init()
726 ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8, in rsxx_hw_buffers_init()
727 &ctrl->cmd.dma_addr); in rsxx_hw_buffers_init()
728 if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL) in rsxx_hw_buffers_init()
731 memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); in rsxx_hw_buffers_init()
732 iowrite32(lower_32_bits(ctrl->status.dma_addr), in rsxx_hw_buffers_init()
733 ctrl->regmap + SB_ADD_LO); in rsxx_hw_buffers_init()
734 iowrite32(upper_32_bits(ctrl->status.dma_addr), in rsxx_hw_buffers_init()
735 ctrl->regmap + SB_ADD_HI); in rsxx_hw_buffers_init()
737 memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8); in rsxx_hw_buffers_init()
738 iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO); in rsxx_hw_buffers_init()
739 iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI); in rsxx_hw_buffers_init()
741 ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT); in rsxx_hw_buffers_init()
742 if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) { in rsxx_hw_buffers_init()
744 ctrl->status.idx); in rsxx_hw_buffers_init()
747 iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT); in rsxx_hw_buffers_init()
748 iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT); in rsxx_hw_buffers_init()
750 ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX); in rsxx_hw_buffers_init()
751 if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) { in rsxx_hw_buffers_init()
753 ctrl->status.idx); in rsxx_hw_buffers_init()
756 iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX); in rsxx_hw_buffers_init()
757 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); in rsxx_hw_buffers_init()
763 struct rsxx_dma_ctrl *ctrl) in rsxx_dma_ctrl_init() argument
768 memset(&ctrl->stats, 0, sizeof(ctrl->stats)); in rsxx_dma_ctrl_init()
770 ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8); in rsxx_dma_ctrl_init()
771 if (!ctrl->trackers) in rsxx_dma_ctrl_init()
774 ctrl->trackers->head = 0; in rsxx_dma_ctrl_init()
776 ctrl->trackers->list[i].next_tag = i + 1; in rsxx_dma_ctrl_init()
777 ctrl->trackers->list[i].dma = NULL; in rsxx_dma_ctrl_init()
779 ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1; in rsxx_dma_ctrl_init()
780 spin_lock_init(&ctrl->trackers->lock); in rsxx_dma_ctrl_init()
782 spin_lock_init(&ctrl->queue_lock); in rsxx_dma_ctrl_init()
783 INIT_LIST_HEAD(&ctrl->queue); in rsxx_dma_ctrl_init()
785 setup_timer(&ctrl->activity_timer, dma_engine_stalled, in rsxx_dma_ctrl_init()
786 (unsigned long)ctrl); in rsxx_dma_ctrl_init()
788 ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0); in rsxx_dma_ctrl_init()
789 if (!ctrl->issue_wq) in rsxx_dma_ctrl_init()
792 ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0); in rsxx_dma_ctrl_init()
793 if (!ctrl->done_wq) in rsxx_dma_ctrl_init()
796 INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); in rsxx_dma_ctrl_init()
797 INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); in rsxx_dma_ctrl_init()
799 st = rsxx_hw_buffers_init(dev, ctrl); in rsxx_dma_ctrl_init()
861 card->ctrl[i].regmap = card->regmap + (i * 4096); in rsxx_dma_setup()
870 st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]); in rsxx_dma_setup()
874 card->ctrl[i].card = card; in rsxx_dma_setup()
875 card->ctrl[i].id = i; in rsxx_dma_setup()
894 struct rsxx_dma_ctrl *ctrl = &card->ctrl[i]; in rsxx_dma_setup() local
896 if (ctrl->issue_wq) { in rsxx_dma_setup()
897 destroy_workqueue(ctrl->issue_wq); in rsxx_dma_setup()
898 ctrl->issue_wq = NULL; in rsxx_dma_setup()
901 if (ctrl->done_wq) { in rsxx_dma_setup()
902 destroy_workqueue(ctrl->done_wq); in rsxx_dma_setup()
903 ctrl->done_wq = NULL; in rsxx_dma_setup()
906 if (ctrl->trackers) in rsxx_dma_setup()
907 vfree(ctrl->trackers); in rsxx_dma_setup()
909 if (ctrl->status.buf) in rsxx_dma_setup()
911 ctrl->status.buf, in rsxx_dma_setup()
912 ctrl->status.dma_addr); in rsxx_dma_setup()
913 if (ctrl->cmd.buf) in rsxx_dma_setup()
915 ctrl->cmd.buf, ctrl->cmd.dma_addr); in rsxx_dma_setup()
924 struct rsxx_dma_ctrl *ctrl; in rsxx_dma_destroy() local
930 ctrl = &card->ctrl[i]; in rsxx_dma_destroy()
932 if (ctrl->issue_wq) { in rsxx_dma_destroy()
933 destroy_workqueue(ctrl->issue_wq); in rsxx_dma_destroy()
934 ctrl->issue_wq = NULL; in rsxx_dma_destroy()
937 if (ctrl->done_wq) { in rsxx_dma_destroy()
938 destroy_workqueue(ctrl->done_wq); in rsxx_dma_destroy()
939 ctrl->done_wq = NULL; in rsxx_dma_destroy()
942 if (timer_pending(&ctrl->activity_timer)) in rsxx_dma_destroy()
943 del_timer_sync(&ctrl->activity_timer); in rsxx_dma_destroy()
946 spin_lock(&ctrl->queue_lock); in rsxx_dma_destroy()
947 cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue); in rsxx_dma_destroy()
948 spin_unlock(&ctrl->queue_lock); in rsxx_dma_destroy()
957 dma = get_tracker_dma(ctrl->trackers, j); in rsxx_dma_destroy()
974 vfree(ctrl->trackers); in rsxx_dma_destroy()
977 ctrl->status.buf, ctrl->status.dma_addr); in rsxx_dma_destroy()
979 ctrl->cmd.buf, ctrl->cmd.dma_addr); in rsxx_dma_destroy()
1000 dma = get_tracker_dma(card->ctrl[i].trackers, j); in rsxx_eeh_save_issued_dmas()
1005 card->ctrl[i].stats.writes_issued--; in rsxx_eeh_save_issued_dmas()
1007 card->ctrl[i].stats.discards_issued--; in rsxx_eeh_save_issued_dmas()
1009 card->ctrl[i].stats.reads_issued--; in rsxx_eeh_save_issued_dmas()
1012 push_tracker(card->ctrl[i].trackers, j); in rsxx_eeh_save_issued_dmas()
1016 spin_lock(&card->ctrl[i].queue_lock); in rsxx_eeh_save_issued_dmas()
1017 list_splice(&issued_dmas[i], &card->ctrl[i].queue); in rsxx_eeh_save_issued_dmas()
1019 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); in rsxx_eeh_save_issued_dmas()
1020 card->ctrl[i].stats.sw_q_depth += cnt; in rsxx_eeh_save_issued_dmas()
1021 card->ctrl[i].e_cnt = 0; in rsxx_eeh_save_issued_dmas()
1023 list_for_each_entry(dma, &card->ctrl[i].queue, list) { in rsxx_eeh_save_issued_dmas()
1031 spin_unlock(&card->ctrl[i].queue_lock); in rsxx_eeh_save_issued_dmas()
1046 spin_lock(&card->ctrl[i].queue_lock); in rsxx_eeh_cancel_dmas()
1047 list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) { in rsxx_eeh_cancel_dmas()
1050 rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED); in rsxx_eeh_cancel_dmas()
1052 spin_unlock(&card->ctrl[i].queue_lock); in rsxx_eeh_cancel_dmas()
1062 spin_lock(&card->ctrl[i].queue_lock); in rsxx_eeh_remap_dmas()
1063 list_for_each_entry(dma, &card->ctrl[i].queue, list) { in rsxx_eeh_remap_dmas()
1070 spin_unlock(&card->ctrl[i].queue_lock); in rsxx_eeh_remap_dmas()
1075 spin_unlock(&card->ctrl[i].queue_lock); in rsxx_eeh_remap_dmas()
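
The pop_tracker()/push_tracker()/get_tracker_dma()/set_tracker_dma() calls above (lines 378, 394, 399, 476, 505, 957, 1000, 1012) together with the initialization at lines 770-780 imply a per-controller tag allocator built as a singly linked free list. The sketch below is a minimal illustration of that structure, using only the fields visible in the listing (head, next_tag, dma, lock); it is an assumption-based reconstruction, not the driver's authoritative definitions.

	#include <linux/spinlock.h>

	struct rsxx_dma;			/* defined elsewhere in the driver */

	struct dma_tracker {
		int			next_tag;	/* next free tag; -1 terminates the free list */
		struct rsxx_dma		*dma;		/* DMA currently holding this tag, if any */
	};

	struct dma_tracker_list {
		spinlock_t		lock;
		int			head;		/* first free tag; -1 when all tags are in flight */
		struct dma_tracker	list[];
	};

	/* Reserve a tag for a command about to be written into hw_cmd_buf[]. */
	static int pop_tracker(struct dma_tracker_list *trackers)
	{
		int tag;

		spin_lock(&trackers->lock);
		tag = trackers->head;
		if (tag != -1) {
			trackers->head = trackers->list[tag].next_tag;
			trackers->list[tag].next_tag = -1;
		}
		spin_unlock(&trackers->lock);

		return tag;
	}

	/* Return a tag once its status entry has been consumed. */
	static void push_tracker(struct dma_tracker_list *trackers, int tag)
	{
		spin_lock(&trackers->lock);
		trackers->list[tag].next_tag = trackers->head;
		trackers->head = tag;
		trackers->list[tag].dma = NULL;
		spin_unlock(&trackers->lock);
	}

This is consistent with the flow in the matched lines: rsxx_issue_dmas() pops a tag before programming hw_cmd_buf[ctrl->cmd.idx], and rsxx_dma_done() pushes the tag back after the matching status entry has been processed.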
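Similarly, the hw_cmd_buf[ctrl->cmd.idx].* stores at lines 400-410 and the hw_st_buf[ctrl->status.idx].* loads at lines 461-474 point to fixed-layout command and status ring entries shared with the hardware. A hypothetical layout consistent with those accesses is sketched below; the field names come straight from the listing, while the widths and endianness annotations (beyond the __le16 count implied by le16_to_cpu() at line 461) are assumptions.

	#include <linux/types.h>

	struct hw_cmd {
		u8	command;	/* read/write/discard opcode (dma->cmd) */
		u8	tag;		/* tracker tag echoed back in the status entry */
		u8	_rsvd;
		u8	sub_page;	/* sub-page offset/count within the page */
		__le32	device_addr;	/* card-side address derived from dma->laddr */
		__le64	host_addr;	/* host DMA address (dma->dma_addr) */
	};

	struct hw_status {
		u8	status;		/* 0 on success; error bits otherwise */
		u8	tag;		/* identifies the completed command */
		__le16	count;		/* running completion count, matched against ctrl->e_cnt */
		__le32	_rsvd;
	};

Both indices wrap under a mask (RSXX_CS_IDX_MASK at line 417), so the command and status buffers allocated in rsxx_hw_buffers_init() (COMMAND_BUFFER_SIZE8 and STATUS_BUFFER_SIZE8) behave as rings, with SW_CMD_IDX telling the hardware how far software has filled the command ring and HW_STATUS_CNT/SW_STATUS_CNT tracking the status side.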