Lines matching refs: udev
106 struct tcmu_dev *udev; member
305 struct tcmu_dev *udev = nl_cmd->udev; in tcmu_fail_netlink_cmd() local
313 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); in tcmu_fail_netlink_cmd()
374 struct tcmu_dev *udev = NULL; in tcmu_genl_cmd_done() local
389 if (nl_cmd->udev->se_dev.dev_index == dev_id) { in tcmu_genl_cmd_done()
390 udev = nl_cmd->udev; in tcmu_genl_cmd_done()
395 if (!udev) { in tcmu_genl_cmd_done()
404 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, in tcmu_genl_cmd_done()
409 udev->name, completed_cmd, nl_cmd->cmd); in tcmu_genl_cmd_done()
498 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in tcmu_cmd_free_data() local
502 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); in tcmu_cmd_free_data()
505 static inline int tcmu_get_empty_block(struct tcmu_dev *udev, in tcmu_get_empty_block() argument
509 XA_STATE(xas, &udev->data_pages, 0); in tcmu_get_empty_block()
514 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); in tcmu_get_empty_block()
515 if (dbi == udev->dbi_thresh) in tcmu_get_empty_block()
518 dpi = dbi * udev->data_pages_per_blk; in tcmu_get_empty_block()
532 if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) { in tcmu_get_empty_block()
541 if (i && dbi > udev->dbi_max) in tcmu_get_empty_block()
542 udev->dbi_max = dbi; in tcmu_get_empty_block()
544 set_bit(dbi, udev->data_bitmap); in tcmu_get_empty_block()
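
The tcmu_get_empty_block() hits above show the data-area allocator: a free block index (dbi) is taken from data_bitmap below dbi_thresh, and its backing pages live in the data_pages XArray starting at dpi = dbi * data_pages_per_blk. Below is a minimal, runnable userspace model of that index arithmetic only; MAX_BLOCKS, PAGES_PER_BLOCK and the byte-per-block bitmap are illustrative stand-ins, not the driver's own types.

    #include <stdio.h>

    #define MAX_BLOCKS       64    /* stand-in for udev->max_blocks         */
    #define PAGES_PER_BLOCK   4    /* stand-in for udev->data_pages_per_blk */

    static unsigned char bitmap[MAX_BLOCKS];   /* stand-in for udev->data_bitmap */

    /* Find the first free block index below 'thresh', mark it used, and
     * return the index of its first backing data page (the dbi -> dpi
     * mapping seen above); -1 means no block is free below the threshold. */
    static int get_empty_block(int thresh)
    {
        for (int dbi = 0; dbi < thresh; dbi++) {
            if (!bitmap[dbi]) {
                bitmap[dbi] = 1;
                return dbi * PAGES_PER_BLOCK;
            }
        }
        return -1;
    }

    int main(void)
    {
        printf("first dpi: %d\n", get_empty_block(8));    /* 0 */
        printf("second dpi: %d\n", get_empty_block(8));   /* 4 */
        return 0;
    }
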
553 static int tcmu_get_empty_blocks(struct tcmu_dev *udev, in tcmu_get_empty_blocks() argument
559 uint32_t blk_size = udev->data_blk_size; in tcmu_get_empty_blocks()
563 dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, in tcmu_get_empty_blocks()
595 static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd, in new_block_to_iov() argument
602 len = min_t(int, len, udev->data_blk_size); in new_block_to_iov()
614 (udev->data_off + dbi * udev->data_blk_size); in new_block_to_iov()
621 static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, in tcmu_setup_iovs() argument
628 for (; data_length > 0; data_length -= udev->data_blk_size) in tcmu_setup_iovs()
629 dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); in tcmu_setup_iovs()
635 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_alloc_cmd() local
644 tcmu_cmd->tcmu_dev = udev; in tcmu_alloc_cmd()
701 static inline void tcmu_copy_data(struct tcmu_dev *udev, in tcmu_copy_data() argument
723 dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi, in tcmu_copy_data()
729 if (page_cnt > udev->data_pages_per_blk) in tcmu_copy_data()
730 page_cnt = udev->data_pages_per_blk; in tcmu_copy_data()
732 dpi = dbi * udev->data_pages_per_blk; in tcmu_copy_data()
735 page = xa_load(&udev->data_pages, dpi); in tcmu_copy_data()
773 static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, in scatter_data_area() argument
778 tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg, in scatter_data_area()
782 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, in gather_data_area() argument
805 tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg, in gather_data_area()
819 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) in is_ring_space_avail() argument
821 struct tcmu_mailbox *mb = udev->mb_addr; in is_ring_space_avail()
827 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in is_ring_space_avail()
833 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) in is_ring_space_avail()
836 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); in is_ring_space_avail()
838 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); in is_ring_space_avail()
841 udev->cmdr_last_cleaned, udev->cmdr_size); in is_ring_space_avail()
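
is_ring_space_avail() is circular-buffer arithmetic: if the command does not fit between cmd_head and the physical end of the ring, the bytes that a PAD entry would consume are added to the requirement before comparing against the free space between head and cmdr_last_cleaned. A runnable sketch of that arithmetic follows, reusing the helper names for readability; the one-byte-reserved convention inside spc_free() is an assumption about the usual full-vs-empty disambiguation, not taken from this listing.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Bytes from 'head' to the physical end of the ring. */
    static size_t head_to_end(size_t head, size_t size)
    {
        return size - head;
    }

    /* Free bytes in a circular buffer, keeping one byte unused so a full
     * ring can be told apart from an empty one. */
    static size_t spc_free(size_t head, size_t tail, size_t size)
    {
        size_t used = (head >= tail) ? head - tail : size + head - tail;

        return size - used - 1;
    }

    /* Can a command of cmd_size bytes be queued?  If it would cross the
     * end of the ring, the bytes burned on a PAD entry are charged too. */
    static bool ring_space_avail(size_t head, size_t tail, size_t size,
                                 size_t cmd_size)
    {
        size_t needed = cmd_size;

        if (head_to_end(head, size) < cmd_size)
            needed += head_to_end(head, size);

        return spc_free(head, tail, size) >= needed;
    }

    int main(void)
    {
        /* 4 KiB ring with the head near the end: a 512-byte entry needs a pad. */
        printf("fits: %d\n", ring_space_avail(4000, 1024, 4096, 512));
        return 0;
    }
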
853 static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, in tcmu_alloc_data_space() argument
862 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); in tcmu_alloc_data_space()
865 (udev->max_blocks - udev->dbi_thresh) + space; in tcmu_alloc_data_space()
869 blocks_left * udev->data_blk_size, in tcmu_alloc_data_space()
870 cmd->dbi_cnt * udev->data_blk_size); in tcmu_alloc_data_space()
874 udev->dbi_thresh += cmd->dbi_cnt; in tcmu_alloc_data_space()
875 if (udev->dbi_thresh > udev->max_blocks) in tcmu_alloc_data_space()
876 udev->dbi_thresh = udev->max_blocks; in tcmu_alloc_data_space()
879 iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); in tcmu_alloc_data_space()
884 ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); in tcmu_alloc_data_space()
930 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in add_to_qfull_queue() local
937 if (!udev->qfull_time_out) in add_to_qfull_queue()
939 else if (udev->qfull_time_out > 0) in add_to_qfull_queue()
940 tmo = udev->qfull_time_out; in add_to_qfull_queue()
941 else if (udev->cmd_time_out) in add_to_qfull_queue()
942 tmo = udev->cmd_time_out; in add_to_qfull_queue()
946 tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); in add_to_qfull_queue()
948 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); in add_to_qfull_queue()
950 tcmu_cmd, udev->name); in add_to_qfull_queue()
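
The add_to_qfull_queue() hits show the timeout precedence for commands parked on the queue-full list: a qfull_time_out of 0 refuses to queue at all, a positive value is used as-is, otherwise cmd_time_out (or a default) applies. A small model of that fallback; the -1 return and DEFAULT_TMO_MS are illustrative stand-ins, since the actual error path and TCMU_TIME_OUT value are not part of this listing.

    #include <stdio.h>

    #define DEFAULT_TMO_MS 30000   /* illustrative stand-in for TCMU_TIME_OUT */

    /* Pick the queue-full timeout with the precedence shown above; -1 marks
     * the "queueing disabled, fail the command" case (illustrative only). */
    static long pick_qfull_tmo(long qfull_time_out, long cmd_time_out)
    {
        if (!qfull_time_out)
            return -1;
        if (qfull_time_out > 0)
            return qfull_time_out;
        if (cmd_time_out)
            return cmd_time_out;
        return DEFAULT_TMO_MS;
    }

    int main(void)
    {
        printf("%ld\n", pick_qfull_tmo(-1, 5000));   /* 5000: falls back to cmd timeout */
        return 0;
    }
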
954 static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size) in ring_insert_padding() argument
957 struct tcmu_mailbox *mb = udev->mb_addr; in ring_insert_padding()
958 uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in ring_insert_padding()
961 if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) { in ring_insert_padding()
962 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); in ring_insert_padding()
964 hdr = udev->cmdr + cmd_head; in ring_insert_padding()
972 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); in ring_insert_padding()
975 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in ring_insert_padding()
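
ring_insert_padding() keeps every command entry contiguous: when the next entry would cross the physical end of the ring, a PAD entry of head_to_end() bytes is written first and the head wraps to offset 0. A runnable model of the head update; update_head() stands in for the driver's UPDATE_HEAD(), minus the memory barriers.

    #include <stdio.h>

    /* Advance a ring head by 'used' bytes, wrapping at 'size'. */
    static unsigned int update_head(unsigned int head, unsigned int used,
                                    unsigned int size)
    {
        return (head + used) % size;
    }

    int main(void)
    {
        unsigned int size = 4096, head = 4000, cmd_size = 512;
        unsigned int to_end = size - head;

        if (to_end < cmd_size) {
            /* A PAD entry of 'to_end' bytes is written at the old head ... */
            head = update_head(head, to_end, size);   /* ... and head wraps to 0 */
        }
        head = update_head(head, cmd_size, size);     /* then the command itself */
        printf("new head: %u\n", head);               /* prints 512 */
        return 0;
    }
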
985 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_unplug_device() local
987 clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags); in tcmu_unplug_device()
988 uio_event_notify(&udev->uio_info); in tcmu_unplug_device()
993 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_plug_device() local
995 if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) in tcmu_plug_device()
996 return &udev->se_plug; in tcmu_plug_device()
1013 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in queue_cmd_ring() local
1016 struct tcmu_mailbox *mb = udev->mb_addr; in queue_cmd_ring()
1022 uint32_t blk_size = udev->data_blk_size; in queue_cmd_ring()
1028 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { in queue_cmd_ring()
1033 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { in queue_cmd_ring()
1038 if (!list_empty(&udev->qfull_queue)) in queue_cmd_ring()
1041 if (data_length > (size_t)udev->max_blocks * blk_size) { in queue_cmd_ring()
1043 data_length, (size_t)udev->max_blocks * blk_size); in queue_cmd_ring()
1048 iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); in queue_cmd_ring()
1059 if (command_size > (udev->cmdr_size / 2)) { in queue_cmd_ring()
1061 command_size, udev->cmdr_size); in queue_cmd_ring()
1067 if (!is_ring_space_avail(udev, command_size)) in queue_cmd_ring()
1074 if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff), in queue_cmd_ring()
1085 tcmu_cmd, udev->name); in queue_cmd_ring()
1087 cmd_head = ring_insert_padding(udev, command_size); in queue_cmd_ring()
1089 entry = udev->cmdr + cmd_head; in queue_cmd_ring()
1099 scatter_data_area(udev, tcmu_cmd, &iov); in queue_cmd_ring()
1101 tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); in queue_cmd_ring()
1108 tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); in queue_cmd_ring()
1112 tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); in queue_cmd_ring()
1124 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); in queue_cmd_ring()
1127 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); in queue_cmd_ring()
1129 if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) in queue_cmd_ring()
1130 uio_event_notify(&udev->uio_info); in queue_cmd_ring()
1157 queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr) in queue_tmr_ring() argument
1162 struct tcmu_mailbox *mb = udev->mb_addr; in queue_tmr_ring()
1165 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) in queue_tmr_ring()
1171 if (!list_empty(&udev->tmr_queue) || in queue_tmr_ring()
1172 !is_ring_space_avail(udev, cmd_size)) { in queue_tmr_ring()
1173 list_add_tail(&tmr->queue_entry, &udev->tmr_queue); in queue_tmr_ring()
1175 tmr, udev->name); in queue_tmr_ring()
1179 cmd_head = ring_insert_padding(udev, cmd_size); in queue_tmr_ring()
1181 entry = udev->cmdr + cmd_head; in queue_tmr_ring()
1190 UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size); in queue_tmr_ring()
1193 uio_event_notify(&udev->uio_info); in queue_tmr_ring()
1205 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_queue_cmd() local
1214 mutex_lock(&udev->cmdr_lock); in tcmu_queue_cmd()
1221 mutex_unlock(&udev->cmdr_lock); in tcmu_queue_cmd()
1263 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_tmr_notify() local
1265 mutex_lock(&udev->cmdr_lock); in tcmu_tmr_notify()
1279 cmd, udev->name); in tcmu_tmr_notify()
1288 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in tcmu_tmr_notify()
1290 if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)) in tcmu_tmr_notify()
1294 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); in tcmu_tmr_notify()
1314 queue_tmr_ring(udev, tmr); in tcmu_tmr_notify()
1317 mutex_unlock(&udev->cmdr_lock); in tcmu_tmr_notify()
1324 struct tcmu_dev *udev = cmd->tcmu_dev; in tcmu_handle_completion() local
1340 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_handle_completion()
1373 gather_data_area(udev, cmd, true, read_len); in tcmu_handle_completion()
1375 gather_data_area(udev, cmd, false, read_len); in tcmu_handle_completion()
1411 static int tcmu_run_tmr_queue(struct tcmu_dev *udev) in tcmu_run_tmr_queue() argument
1416 if (list_empty(&udev->tmr_queue)) in tcmu_run_tmr_queue()
1419 pr_debug("running %s's tmr queue\n", udev->name); in tcmu_run_tmr_queue()
1421 list_splice_init(&udev->tmr_queue, &tmrs); in tcmu_run_tmr_queue()
1427 tmr, udev->name); in tcmu_run_tmr_queue()
1429 if (queue_tmr_ring(udev, tmr)) { in tcmu_run_tmr_queue()
1435 list_splice_tail(&tmrs, &udev->tmr_queue); in tcmu_run_tmr_queue()
1443 static bool tcmu_handle_completions(struct tcmu_dev *udev) in tcmu_handle_completions() argument
1449 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { in tcmu_handle_completions()
1454 mb = udev->mb_addr; in tcmu_handle_completions()
1457 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { in tcmu_handle_completions()
1459 struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned; in tcmu_handle_completions()
1466 size_t ring_left = head_to_end(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1467 udev->cmdr_size); in tcmu_handle_completions()
1475 UPDATE_HEAD(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1477 udev->cmdr_size); in tcmu_handle_completions()
1484 cmd = xa_load(&udev->commands, entry->hdr.cmd_id); in tcmu_handle_completions()
1486 cmd = xa_erase(&udev->commands, entry->hdr.cmd_id); in tcmu_handle_completions()
1490 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_handle_completions()
1497 UPDATE_HEAD(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1499 udev->cmdr_size); in tcmu_handle_completions()
1502 free_space = tcmu_run_tmr_queue(udev); in tcmu_handle_completions()
1505 xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { in tcmu_handle_completions()
1512 if (udev->cmd_time_out) in tcmu_handle_completions()
1513 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); in tcmu_handle_completions()
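
tcmu_handle_completions() advances cmdr_last_cleaned toward mb->cmd_tail, stepping over PAD entries and completing each command looked up by cmd_id, then re-runs the TMR queue with the space it freed. Below is a deliberately simplified model of that walk; it uses fixed-size slots, whereas the real ring holds variable-length byte entries, so only the consume-until-tail/skip-PAD structure carries over.

    #include <stdio.h>

    enum op { OP_PAD, OP_CMD };

    struct slot {                     /* toy, fixed-size stand-in for a ring entry */
        enum op op;
        unsigned int cmd_id;
    };

    /* Consume completed slots from 'last_cleaned' up to 'tail' (exclusive),
     * skipping PAD slots and completing CMD slots; return the new position. */
    static unsigned int drain(const struct slot *ring, unsigned int last_cleaned,
                              unsigned int tail, unsigned int nslots)
    {
        while (last_cleaned != tail) {
            const struct slot *s = &ring[last_cleaned];

            if (s->op == OP_CMD)
                printf("completing cmd_id %u\n", s->cmd_id);
            /* PAD slots are simply stepped over. */

            last_cleaned = (last_cleaned + 1) % nslots;
        }
        return last_cleaned;
    }

    int main(void)
    {
        const struct slot ring[4] = {
            { OP_CMD, 1 }, { OP_PAD, 0 }, { OP_CMD, 2 }, { OP_PAD, 0 },
        };

        drain(ring, 0, 3, 4);         /* completes cmd 1 and cmd 2 */
        return 0;
    }
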
1555 static void tcmu_device_timedout(struct tcmu_dev *udev) in tcmu_device_timedout() argument
1558 if (list_empty(&udev->timedout_entry)) in tcmu_device_timedout()
1559 list_add_tail(&udev->timedout_entry, &timed_out_udevs); in tcmu_device_timedout()
1567 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer); in tcmu_cmd_timedout() local
1569 pr_debug("%s cmd timeout has expired\n", udev->name); in tcmu_cmd_timedout()
1570 tcmu_device_timedout(udev); in tcmu_cmd_timedout()
1575 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer); in tcmu_qfull_timedout() local
1577 pr_debug("%s qfull timeout has expired\n", udev->name); in tcmu_qfull_timedout()
1578 tcmu_device_timedout(udev); in tcmu_qfull_timedout()
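
Both timer callbacks recover the owning device from the timer pointer with from_timer(), i.e. a container_of() over the embedded timer field that timer_setup() registers in tcmu_alloc_device() further down. A userspace sketch of that pattern with hypothetical struct names:

    #include <stdio.h>
    #include <stddef.h>

    struct timer { int pending; };

    struct dev {                       /* hypothetical stand-in for struct tcmu_dev */
        const char  *name;
        struct timer cmd_timer;
    };

    /* Recover the enclosing structure from a pointer to one of its members,
     * the same trick from_timer() relies on in the kernel. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void cmd_timedout(struct timer *t)
    {
        struct dev *d = container_of(t, struct dev, cmd_timer);

        printf("%s cmd timeout has expired\n", d->name);
    }

    int main(void)
    {
        struct dev d = { .name = "tcmu0" };

        cmd_timedout(&d.cmd_timer);
        return 0;
    }
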
1603 struct tcmu_dev *udev; in tcmu_alloc_device() local
1605 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); in tcmu_alloc_device()
1606 if (!udev) in tcmu_alloc_device()
1608 kref_init(&udev->kref); in tcmu_alloc_device()
1610 udev->name = kstrdup(name, GFP_KERNEL); in tcmu_alloc_device()
1611 if (!udev->name) { in tcmu_alloc_device()
1612 kfree(udev); in tcmu_alloc_device()
1616 udev->hba = hba; in tcmu_alloc_device()
1617 udev->cmd_time_out = TCMU_TIME_OUT; in tcmu_alloc_device()
1618 udev->qfull_time_out = -1; in tcmu_alloc_device()
1620 udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; in tcmu_alloc_device()
1621 udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; in tcmu_alloc_device()
1622 udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); in tcmu_alloc_device()
1624 mutex_init(&udev->cmdr_lock); in tcmu_alloc_device()
1626 INIT_LIST_HEAD(&udev->node); in tcmu_alloc_device()
1627 INIT_LIST_HEAD(&udev->timedout_entry); in tcmu_alloc_device()
1628 INIT_LIST_HEAD(&udev->qfull_queue); in tcmu_alloc_device()
1629 INIT_LIST_HEAD(&udev->tmr_queue); in tcmu_alloc_device()
1630 INIT_LIST_HEAD(&udev->inflight_queue); in tcmu_alloc_device()
1631 xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1); in tcmu_alloc_device()
1633 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); in tcmu_alloc_device()
1634 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); in tcmu_alloc_device()
1636 xa_init(&udev->data_pages); in tcmu_alloc_device()
1638 return &udev->se_dev; in tcmu_alloc_device()
1644 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_dev_call_rcu() local
1646 kfree(udev->uio_info.name); in tcmu_dev_call_rcu()
1647 kfree(udev->name); in tcmu_dev_call_rcu()
1648 kfree(udev); in tcmu_dev_call_rcu()
1661 static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first, in tcmu_blocks_release() argument
1668 first = first * udev->data_pages_per_blk; in tcmu_blocks_release()
1669 last = (last + 1) * udev->data_pages_per_blk - 1; in tcmu_blocks_release()
1670 xa_for_each_range(&udev->data_pages, dpi, page, first, last) { in tcmu_blocks_release()
1671 xa_erase(&udev->data_pages, dpi); in tcmu_blocks_release()
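
tcmu_blocks_release() converts an inclusive block range into the inclusive range of backing data-page indices before erasing those pages from the XArray, exactly the first/last arithmetic visible above. A runnable model of just that conversion; PAGES_PER_BLOCK is an illustrative stand-in.

    #include <stdio.h>

    #define PAGES_PER_BLOCK 4   /* stand-in for udev->data_pages_per_blk */

    /* Convert an inclusive block range [first, last] into the inclusive
     * range of backing data-page indices. */
    static void block_range_to_pages(unsigned long first, unsigned long last,
                                     unsigned long *pfirst, unsigned long *plast)
    {
        *pfirst = first * PAGES_PER_BLOCK;
        *plast  = (last + 1) * PAGES_PER_BLOCK - 1;
    }

    int main(void)
    {
        unsigned long pf, pl;

        block_range_to_pages(2, 3, &pf, &pl);
        printf("pages %lu..%lu\n", pf, pl);   /* pages 8..15 */
        return 0;
    }
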
1701 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev) in tcmu_remove_all_queued_tmr() argument
1705 list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) { in tcmu_remove_all_queued_tmr()
1713 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); in tcmu_dev_kref_release() local
1714 struct se_device *dev = &udev->se_dev; in tcmu_dev_kref_release()
1719 vfree(udev->mb_addr); in tcmu_dev_kref_release()
1720 udev->mb_addr = NULL; in tcmu_dev_kref_release()
1723 if (!list_empty(&udev->timedout_entry)) in tcmu_dev_kref_release()
1724 list_del(&udev->timedout_entry); in tcmu_dev_kref_release()
1728 mutex_lock(&udev->cmdr_lock); in tcmu_dev_kref_release()
1729 xa_for_each(&udev->commands, i, cmd) { in tcmu_dev_kref_release()
1734 tcmu_remove_all_queued_tmr(udev); in tcmu_dev_kref_release()
1735 if (!list_empty(&udev->qfull_queue)) in tcmu_dev_kref_release()
1737 xa_destroy(&udev->commands); in tcmu_dev_kref_release()
1740 tcmu_blocks_release(udev, 0, udev->dbi_max); in tcmu_dev_kref_release()
1741 bitmap_free(udev->data_bitmap); in tcmu_dev_kref_release()
1742 mutex_unlock(&udev->cmdr_lock); in tcmu_dev_kref_release()
1749 static void run_qfull_queue(struct tcmu_dev *udev, bool fail) in run_qfull_queue() argument
1756 if (list_empty(&udev->qfull_queue)) in run_qfull_queue()
1759 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); in run_qfull_queue()
1761 list_splice_init(&udev->qfull_queue, &cmds); in run_qfull_queue()
1767 tcmu_cmd, udev->name); in run_qfull_queue()
1786 tcmu_cmd, udev->name, scsi_ret); in run_qfull_queue()
1801 list_splice_tail(&cmds, &udev->qfull_queue); in run_qfull_queue()
1806 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in run_qfull_queue()
1811 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_irqcontrol() local
1813 mutex_lock(&udev->cmdr_lock); in tcmu_irqcontrol()
1814 if (tcmu_handle_completions(udev)) in tcmu_irqcontrol()
1815 run_qfull_queue(udev, false); in tcmu_irqcontrol()
1816 mutex_unlock(&udev->cmdr_lock); in tcmu_irqcontrol()
1827 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_find_mem_index() local
1828 struct uio_info *info = &udev->uio_info; in tcmu_find_mem_index()
1838 static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) in tcmu_try_get_data_page() argument
1842 mutex_lock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1843 page = xa_load(&udev->data_pages, dpi); in tcmu_try_get_data_page()
1847 mutex_unlock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1856 dpi, udev->name); in tcmu_try_get_data_page()
1857 mutex_unlock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1864 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_vma_open() local
1868 kref_get(&udev->kref); in tcmu_vma_open()
1873 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_vma_close() local
1878 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_vma_close()
1883 struct tcmu_dev *udev = vmf->vma->vm_private_data; in tcmu_vma_fault() local
1884 struct uio_info *info = &udev->uio_info; in tcmu_vma_fault()
1900 if (offset < udev->data_off) { in tcmu_vma_fault()
1909 dpi = (offset - udev->data_off) / PAGE_SIZE; in tcmu_vma_fault()
1910 page = tcmu_try_get_data_page(udev, dpi); in tcmu_vma_fault()
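
The tcmu_vma_fault() hits show the mmap layout split: offsets below data_off belong to the mailbox and command ring, while larger offsets are turned into a data-page index (dpi) and resolved through the data_pages XArray via tcmu_try_get_data_page(). A model of that translation; the 4 KiB page size and the 8 MiB DATA_OFF are assumptions for the example, not values taken from this listing.

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DATA_OFF  (8UL * 1024 * 1024)   /* assumed start of the data area */

    /* Classify a faulting mmap offset: below DATA_OFF it is the mailbox or
     * command ring, above it the offset maps to a data-page index (dpi). */
    static long offset_to_dpi(unsigned long offset)
    {
        if (offset < DATA_OFF)
            return -1;                      /* mailbox / command ring region */
        return (long)((offset - DATA_OFF) / PAGE_SIZE);
    }

    int main(void)
    {
        printf("%ld\n", offset_to_dpi(PAGE_SIZE));                  /* -1: ring      */
        printf("%ld\n", offset_to_dpi(DATA_OFF + 3 * PAGE_SIZE));   /*  3: data page */
        return 0;
    }
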
1928 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_mmap() local
1933 vma->vm_private_data = udev; in tcmu_mmap()
1936 if (vma_pages(vma) != udev->mmap_pages) in tcmu_mmap()
1946 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_open() local
1949 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) in tcmu_open()
1952 udev->inode = inode; in tcmu_open()
1961 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_release() local
1966 mutex_lock(&udev->cmdr_lock); in tcmu_release()
1968 xa_for_each(&udev->commands, i, cmd) { in tcmu_release()
1979 cmd->cmd_id, udev->name); in tcmu_release()
1982 xa_erase(&udev->commands, i); in tcmu_release()
1990 if (freed && list_empty(&udev->tmr_queue)) in tcmu_release()
1991 run_qfull_queue(udev, false); in tcmu_release()
1993 mutex_unlock(&udev->cmdr_lock); in tcmu_release()
1995 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); in tcmu_release()
2002 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) in tcmu_init_genl_cmd_reply() argument
2004 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_init_genl_cmd_reply()
2009 if (udev->nl_reply_supported <= 0) in tcmu_init_genl_cmd_reply()
2017 udev->name); in tcmu_init_genl_cmd_reply()
2024 nl_cmd->cmd, udev->name); in tcmu_init_genl_cmd_reply()
2030 nl_cmd->udev = udev; in tcmu_init_genl_cmd_reply()
2040 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev) in tcmu_destroy_genl_cmd_reply() argument
2042 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_destroy_genl_cmd_reply()
2047 if (udev->nl_reply_supported <= 0) in tcmu_destroy_genl_cmd_reply()
2058 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) in tcmu_wait_genl_cmd_reply() argument
2060 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_wait_genl_cmd_reply()
2066 if (udev->nl_reply_supported <= 0) in tcmu_wait_genl_cmd_reply()
2080 static int tcmu_netlink_event_init(struct tcmu_dev *udev, in tcmu_netlink_event_init() argument
2096 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); in tcmu_netlink_event_init()
2100 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); in tcmu_netlink_event_init()
2104 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); in tcmu_netlink_event_init()
2117 static int tcmu_netlink_event_send(struct tcmu_dev *udev, in tcmu_netlink_event_send() argument
2125 ret = tcmu_init_genl_cmd_reply(udev, cmd); in tcmu_netlink_event_send()
2137 return tcmu_wait_genl_cmd_reply(udev); in tcmu_netlink_event_send()
2139 tcmu_destroy_genl_cmd_reply(udev); in tcmu_netlink_event_send()
2144 static int tcmu_send_dev_add_event(struct tcmu_dev *udev) in tcmu_send_dev_add_event() argument
2150 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb, in tcmu_send_dev_add_event()
2154 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb, in tcmu_send_dev_add_event()
2158 static int tcmu_send_dev_remove_event(struct tcmu_dev *udev) in tcmu_send_dev_remove_event() argument
2164 ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE, in tcmu_send_dev_remove_event()
2168 return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE, in tcmu_send_dev_remove_event()
2172 static int tcmu_update_uio_info(struct tcmu_dev *udev) in tcmu_update_uio_info() argument
2174 struct tcmu_hba *hba = udev->hba->hba_ptr; in tcmu_update_uio_info()
2178 info = &udev->uio_info; in tcmu_update_uio_info()
2180 if (udev->dev_config[0]) in tcmu_update_uio_info()
2182 udev->name, udev->dev_config); in tcmu_update_uio_info()
2185 udev->name); in tcmu_update_uio_info()
2198 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_configure_device() local
2204 ret = tcmu_update_uio_info(udev); in tcmu_configure_device()
2208 info = &udev->uio_info; in tcmu_configure_device()
2210 mutex_lock(&udev->cmdr_lock); in tcmu_configure_device()
2211 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); in tcmu_configure_device()
2212 mutex_unlock(&udev->cmdr_lock); in tcmu_configure_device()
2213 if (!udev->data_bitmap) { in tcmu_configure_device()
2225 udev->mb_addr = mb; in tcmu_configure_device()
2226 udev->cmdr = (void *)mb + CMDR_OFF; in tcmu_configure_device()
2227 udev->cmdr_size = CMDR_SIZE; in tcmu_configure_device()
2228 udev->data_off = MB_CMDR_SIZE; in tcmu_configure_device()
2229 data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT; in tcmu_configure_device()
2230 udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT; in tcmu_configure_device()
2231 udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE; in tcmu_configure_device()
2232 udev->dbi_thresh = 0; /* Default in Idle state */ in tcmu_configure_device()
2241 mb->cmdr_size = udev->cmdr_size; in tcmu_configure_device()
2243 WARN_ON(!PAGE_ALIGNED(udev->data_off)); in tcmu_configure_device()
2249 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; in tcmu_configure_device()
2277 if (udev->nl_reply_supported >= 0) in tcmu_configure_device()
2278 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; in tcmu_configure_device()
2284 kref_get(&udev->kref); in tcmu_configure_device()
2286 ret = tcmu_send_dev_add_event(udev); in tcmu_configure_device()
2291 list_add(&udev->node, &root_udev); in tcmu_configure_device()
2297 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_configure_device()
2298 uio_unregister_device(&udev->uio_info); in tcmu_configure_device()
2300 vfree(udev->mb_addr); in tcmu_configure_device()
2301 udev->mb_addr = NULL; in tcmu_configure_device()
2303 bitmap_free(udev->data_bitmap); in tcmu_configure_device()
2304 udev->data_bitmap = NULL; in tcmu_configure_device()
2314 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_free_device() local
2317 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_free_device()
2322 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_destroy_device() local
2324 del_timer_sync(&udev->cmd_timer); in tcmu_destroy_device()
2325 del_timer_sync(&udev->qfull_timer); in tcmu_destroy_device()
2328 list_del(&udev->node); in tcmu_destroy_device()
2331 tcmu_send_dev_remove_event(udev); in tcmu_destroy_device()
2333 uio_unregister_device(&udev->uio_info); in tcmu_destroy_device()
2336 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_destroy_device()
2339 static void tcmu_unblock_dev(struct tcmu_dev *udev) in tcmu_unblock_dev() argument
2341 mutex_lock(&udev->cmdr_lock); in tcmu_unblock_dev()
2342 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); in tcmu_unblock_dev()
2343 mutex_unlock(&udev->cmdr_lock); in tcmu_unblock_dev()
2346 static void tcmu_block_dev(struct tcmu_dev *udev) in tcmu_block_dev() argument
2348 mutex_lock(&udev->cmdr_lock); in tcmu_block_dev()
2350 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) in tcmu_block_dev()
2354 tcmu_handle_completions(udev); in tcmu_block_dev()
2356 run_qfull_queue(udev, true); in tcmu_block_dev()
2359 mutex_unlock(&udev->cmdr_lock); in tcmu_block_dev()
2362 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) in tcmu_reset_ring() argument
2368 mutex_lock(&udev->cmdr_lock); in tcmu_reset_ring()
2370 xa_for_each(&udev->commands, i, cmd) { in tcmu_reset_ring()
2372 cmd->cmd_id, udev->name, in tcmu_reset_ring()
2378 xa_erase(&udev->commands, i); in tcmu_reset_ring()
2400 mb = udev->mb_addr; in tcmu_reset_ring()
2402 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, in tcmu_reset_ring()
2405 udev->cmdr_last_cleaned = 0; in tcmu_reset_ring()
2409 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_reset_ring()
2411 del_timer(&udev->cmd_timer); in tcmu_reset_ring()
2420 tcmu_remove_all_queued_tmr(udev); in tcmu_reset_ring()
2422 run_qfull_queue(udev, false); in tcmu_reset_ring()
2424 mutex_unlock(&udev->cmdr_lock); in tcmu_reset_ring()
2464 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg) in tcmu_set_max_blocks_param() argument
2467 uint32_t pages_per_blk = udev->data_pages_per_blk; in tcmu_set_max_blocks_param()
2490 mutex_lock(&udev->cmdr_lock); in tcmu_set_max_blocks_param()
2491 if (udev->data_bitmap) { in tcmu_set_max_blocks_param()
2497 udev->data_area_mb = val; in tcmu_set_max_blocks_param()
2498 udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk; in tcmu_set_max_blocks_param()
2501 mutex_unlock(&udev->cmdr_lock); in tcmu_set_max_blocks_param()
2505 static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg) in tcmu_set_data_pages_per_blk() argument
2516 if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) { in tcmu_set_data_pages_per_blk()
2518 val, udev->data_area_mb, in tcmu_set_data_pages_per_blk()
2519 TCMU_MBS_TO_PAGES(udev->data_area_mb)); in tcmu_set_data_pages_per_blk()
2523 mutex_lock(&udev->cmdr_lock); in tcmu_set_data_pages_per_blk()
2524 if (udev->data_bitmap) { in tcmu_set_data_pages_per_blk()
2530 udev->data_pages_per_blk = val; in tcmu_set_data_pages_per_blk()
2531 udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val; in tcmu_set_data_pages_per_blk()
2534 mutex_unlock(&udev->cmdr_lock); in tcmu_set_data_pages_per_blk()
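
Both parameter setters above feed one derived value: max_blocks = pages(data_area_mb) / data_pages_per_blk, with the data block size being data_pages_per_blk pages. A worked example of that arithmetic; the MB-to-pages shift and the 4 KiB page size are assumptions for illustration.

    #include <stdio.h>

    #define PAGE_SHIFT 12                        /* assume 4 KiB pages */
    #define MBS_TO_PAGES(mbs) ((size_t)(mbs) << (20 - PAGE_SHIFT))

    int main(void)
    {
        unsigned int data_area_mb  = 1024;       /* 1 GiB data area         */
        unsigned int pages_per_blk = 1;          /* one page per data block */

        size_t max_blocks = MBS_TO_PAGES(data_area_mb) / pages_per_blk;
        size_t blk_size   = (size_t)pages_per_blk << PAGE_SHIFT;

        printf("max_blocks = %zu, block size = %zu bytes\n",
               max_blocks, blk_size);            /* 262144 blocks of 4096 bytes */
        return 0;
    }
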
2541 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_set_configfs_dev_params() local
2559 if (match_strlcpy(udev->dev_config, &args[0], in tcmu_set_configfs_dev_params()
2564 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); in tcmu_set_configfs_dev_params()
2567 ret = match_u64(&args[0], &udev->dev_size); in tcmu_set_configfs_dev_params()
2581 ret = match_int(&args[0], &udev->nl_reply_supported); in tcmu_set_configfs_dev_params()
2587 ret = tcmu_set_max_blocks_param(udev, &args[0]); in tcmu_set_configfs_dev_params()
2590 ret = tcmu_set_data_pages_per_blk(udev, &args[0]); in tcmu_set_configfs_dev_params()
2606 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_show_configfs_dev_params() local
2610 udev->dev_config[0] ? udev->dev_config : "NULL"); in tcmu_show_configfs_dev_params()
2611 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size); in tcmu_show_configfs_dev_params()
2612 bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb); in tcmu_show_configfs_dev_params()
2613 bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk); in tcmu_show_configfs_dev_params()
2620 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_get_blocks() local
2622 return div_u64(udev->dev_size - dev->dev_attrib.block_size, in tcmu_get_blocks()
2636 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_cmd_time_out_show() local
2638 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); in tcmu_cmd_time_out_show()
2646 struct tcmu_dev *udev = container_of(da->da_dev, in tcmu_cmd_time_out_store() local
2660 udev->cmd_time_out = val * MSEC_PER_SEC; in tcmu_cmd_time_out_store()
2669 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_qfull_time_out_show() local
2671 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? in tcmu_qfull_time_out_show()
2672 udev->qfull_time_out : in tcmu_qfull_time_out_show()
2673 udev->qfull_time_out / MSEC_PER_SEC); in tcmu_qfull_time_out_show()
2681 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_qfull_time_out_store() local
2690 udev->qfull_time_out = val * MSEC_PER_SEC; in tcmu_qfull_time_out_store()
2692 udev->qfull_time_out = val; in tcmu_qfull_time_out_store()
2705 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_max_data_area_mb_show() local
2707 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb); in tcmu_max_data_area_mb_show()
2716 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_data_pages_per_blk_show() local
2718 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk); in tcmu_data_pages_per_blk_show()
2726 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_config_show() local
2728 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); in tcmu_dev_config_show()
2731 static int tcmu_send_dev_config_event(struct tcmu_dev *udev, in tcmu_send_dev_config_event() argument
2738 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_config_event()
2747 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_config_event()
2757 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_config_store() local
2765 if (target_dev_configured(&udev->se_dev)) { in tcmu_dev_config_store()
2766 ret = tcmu_send_dev_config_event(udev, page); in tcmu_dev_config_store()
2771 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); in tcmu_dev_config_store()
2773 ret = tcmu_update_uio_info(udev); in tcmu_dev_config_store()
2778 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); in tcmu_dev_config_store()
2788 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_size_show() local
2790 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size); in tcmu_dev_size_show()
2793 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) in tcmu_send_dev_size_event() argument
2799 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_size_event()
2809 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_size_event()
2818 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_size_store() local
2827 if (target_dev_configured(&udev->se_dev)) { in tcmu_dev_size_store()
2828 ret = tcmu_send_dev_size_event(udev, val); in tcmu_dev_size_store()
2834 udev->dev_size = val; in tcmu_dev_size_store()
2844 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_nl_reply_supported_show() local
2846 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); in tcmu_nl_reply_supported_show()
2854 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_nl_reply_supported_store() local
2862 udev->nl_reply_supported = val; in tcmu_nl_reply_supported_store()
2876 static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val) in tcmu_send_emulate_write_cache() argument
2882 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_emulate_write_cache()
2891 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_emulate_write_cache()
2900 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_emulate_write_cache_store() local
2909 if (target_dev_configured(&udev->se_dev)) { in tcmu_emulate_write_cache_store()
2910 ret = tcmu_send_emulate_write_cache(udev, val); in tcmu_emulate_write_cache_store()
2926 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_tmr_notification_show() local
2929 test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)); in tcmu_tmr_notification_show()
2937 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_tmr_notification_store() local
2948 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); in tcmu_tmr_notification_store()
2950 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); in tcmu_tmr_notification_store()
2960 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_block_dev_show() local
2962 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) in tcmu_block_dev_show()
2974 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_block_dev_store() local
2978 if (!target_dev_configured(&udev->se_dev)) { in tcmu_block_dev_store()
2993 tcmu_unblock_dev(udev); in tcmu_block_dev_store()
2995 tcmu_block_dev(udev); in tcmu_block_dev_store()
3006 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_reset_ring_store() local
3010 if (!target_dev_configured(&udev->se_dev)) { in tcmu_reset_ring_store()
3024 tcmu_reset_ring(udev, val); in tcmu_reset_ring_store()
3035 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_free_kept_buf_store() local
3040 if (!target_dev_configured(&udev->se_dev)) { in tcmu_free_kept_buf_store()
3049 mutex_lock(&udev->cmdr_lock); in tcmu_free_kept_buf_store()
3052 XA_STATE(xas, &udev->commands, cmd_id); in tcmu_free_kept_buf_store()
3079 if (list_empty(&udev->tmr_queue)) in tcmu_free_kept_buf_store()
3080 run_qfull_queue(udev, false); in tcmu_free_kept_buf_store()
3083 mutex_unlock(&udev->cmdr_lock); in tcmu_free_kept_buf_store()
3135 struct tcmu_dev *udev; in find_free_blocks() local
3144 list_for_each_entry(udev, &root_udev, node) { in find_free_blocks()
3145 mutex_lock(&udev->cmdr_lock); in find_free_blocks()
3147 if (!target_dev_configured(&udev->se_dev)) { in find_free_blocks()
3148 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3153 if (tcmu_handle_completions(udev)) in find_free_blocks()
3154 run_qfull_queue(udev, false); in find_free_blocks()
3157 if (!udev->dbi_thresh) { in find_free_blocks()
3158 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3162 end = udev->dbi_max + 1; in find_free_blocks()
3163 block = find_last_bit(udev->data_bitmap, end); in find_free_blocks()
3164 if (block == udev->dbi_max) { in find_free_blocks()
3169 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3173 udev->dbi_thresh = start = 0; in find_free_blocks()
3174 udev->dbi_max = 0; in find_free_blocks()
3176 udev->dbi_thresh = start = block + 1; in find_free_blocks()
3177 udev->dbi_max = block; in find_free_blocks()
3190 pages_freed = tcmu_blocks_release(udev, start, end - 1); in find_free_blocks()
3193 off = udev->data_off + (loff_t)start * udev->data_blk_size; in find_free_blocks()
3194 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); in find_free_blocks()
3196 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3202 total_blocks_freed, udev->name); in find_free_blocks()
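
find_free_blocks() reclaims unused data blocks: it finds the highest block still set in data_bitmap, lowers dbi_thresh to just above it, and releases everything from there up to dbi_max before unmapping the corresponding range. A model of that threshold decision; last_used() stands in for find_last_bit(), and the -1 return marks the "highest block still busy, nothing to reclaim" case.

    #include <stdio.h>

    #define NBLOCKS 16

    /* Highest set index in used[0..limit-1], or -1 if none is set; a
     * stand-in for find_last_bit() on the data bitmap. */
    static int last_used(const unsigned char *used, int limit)
    {
        for (int i = limit - 1; i >= 0; i--)
            if (used[i])
                return i;
        return -1;
    }

    /* Pick the new allocation threshold: everything above the last block
     * still in use, up to dbi_max, can be released. */
    static int shrink_thresh(const unsigned char *used, int dbi_max)
    {
        int block = last_used(used, dbi_max + 1);

        if (block == dbi_max)
            return -1;
        return block + 1;
    }

    int main(void)
    {
        unsigned char used[NBLOCKS] = { 1, 1, 0, 0, 0 };      /* blocks 0 and 1 busy */

        printf("new thresh: %d\n", shrink_thresh(used, 4));   /* 2 */
        return 0;
    }
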
3212 struct tcmu_dev *udev, *tmp_dev; in check_timedout_devices() local
3219 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) { in check_timedout_devices()
3220 list_del_init(&udev->timedout_entry); in check_timedout_devices()
3223 mutex_lock(&udev->cmdr_lock); in check_timedout_devices()
3229 if (udev->cmd_time_out) { in check_timedout_devices()
3231 &udev->inflight_queue, in check_timedout_devices()
3235 tcmu_set_next_deadline(&udev->inflight_queue, in check_timedout_devices()
3236 &udev->cmd_timer); in check_timedout_devices()
3238 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, in check_timedout_devices()
3242 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in check_timedout_devices()
3244 mutex_unlock(&udev->cmdr_lock); in check_timedout_devices()