Lines Matching refs: udev
105 struct tcmu_dev *udev; member
304 struct tcmu_dev *udev = nl_cmd->udev; in tcmu_fail_netlink_cmd() local
312 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); in tcmu_fail_netlink_cmd()
373 struct tcmu_dev *udev = NULL; in tcmu_genl_cmd_done() local
388 if (nl_cmd->udev->se_dev.dev_index == dev_id) { in tcmu_genl_cmd_done()
389 udev = nl_cmd->udev; in tcmu_genl_cmd_done()
394 if (!udev) { in tcmu_genl_cmd_done()
403 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, in tcmu_genl_cmd_done()
408 udev->name, completed_cmd, nl_cmd->cmd); in tcmu_genl_cmd_done()
497 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in tcmu_cmd_free_data() local
501 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); in tcmu_cmd_free_data()
504 static inline int tcmu_get_empty_block(struct tcmu_dev *udev, in tcmu_get_empty_block() argument
508 XA_STATE(xas, &udev->data_pages, 0); in tcmu_get_empty_block()
513 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); in tcmu_get_empty_block()
514 if (dbi == udev->dbi_thresh) in tcmu_get_empty_block()
517 dpi = dbi * udev->data_pages_per_blk; in tcmu_get_empty_block()
531 if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) { in tcmu_get_empty_block()
540 if (i && dbi > udev->dbi_max) in tcmu_get_empty_block()
541 udev->dbi_max = dbi; in tcmu_get_empty_block()
543 set_bit(dbi, udev->data_bitmap); in tcmu_get_empty_block()
552 static int tcmu_get_empty_blocks(struct tcmu_dev *udev, in tcmu_get_empty_blocks() argument
558 uint32_t blk_size = udev->data_blk_size; in tcmu_get_empty_blocks()
562 dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, in tcmu_get_empty_blocks()
594 static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd, in new_block_to_iov() argument
601 len = min_t(int, len, udev->data_blk_size); in new_block_to_iov()
613 (udev->data_off + dbi * udev->data_blk_size); in new_block_to_iov()
620 static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, in tcmu_setup_iovs() argument
627 for (; data_length > 0; data_length -= udev->data_blk_size) in tcmu_setup_iovs()
628 dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); in tcmu_setup_iovs()
634 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_alloc_cmd() local
643 tcmu_cmd->tcmu_dev = udev; in tcmu_alloc_cmd()
700 static inline void tcmu_copy_data(struct tcmu_dev *udev, in tcmu_copy_data() argument
722 dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi, in tcmu_copy_data()
728 if (page_cnt > udev->data_pages_per_blk) in tcmu_copy_data()
729 page_cnt = udev->data_pages_per_blk; in tcmu_copy_data()
731 dpi = dbi * udev->data_pages_per_blk; in tcmu_copy_data()
734 page = xa_load(&udev->data_pages, dpi); in tcmu_copy_data()
772 static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, in scatter_data_area() argument
777 tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg, in scatter_data_area()
781 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, in gather_data_area() argument
804 tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg, in gather_data_area()
818 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) in is_ring_space_avail() argument
820 struct tcmu_mailbox *mb = udev->mb_addr; in is_ring_space_avail()
826 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in is_ring_space_avail()
832 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) in is_ring_space_avail()
835 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); in is_ring_space_avail()
837 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); in is_ring_space_avail()
840 udev->cmdr_last_cleaned, udev->cmdr_size); in is_ring_space_avail()
852 static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, in tcmu_alloc_data_space() argument
861 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); in tcmu_alloc_data_space()
864 (udev->max_blocks - udev->dbi_thresh) + space; in tcmu_alloc_data_space()
868 blocks_left * udev->data_blk_size, in tcmu_alloc_data_space()
869 cmd->dbi_cnt * udev->data_blk_size); in tcmu_alloc_data_space()
873 udev->dbi_thresh += cmd->dbi_cnt; in tcmu_alloc_data_space()
874 if (udev->dbi_thresh > udev->max_blocks) in tcmu_alloc_data_space()
875 udev->dbi_thresh = udev->max_blocks; in tcmu_alloc_data_space()
878 iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); in tcmu_alloc_data_space()
883 ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); in tcmu_alloc_data_space()
929 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in add_to_qfull_queue() local
936 if (!udev->qfull_time_out) in add_to_qfull_queue()
938 else if (udev->qfull_time_out > 0) in add_to_qfull_queue()
939 tmo = udev->qfull_time_out; in add_to_qfull_queue()
940 else if (udev->cmd_time_out) in add_to_qfull_queue()
941 tmo = udev->cmd_time_out; in add_to_qfull_queue()
945 tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); in add_to_qfull_queue()
947 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); in add_to_qfull_queue()
949 tcmu_cmd, udev->name); in add_to_qfull_queue()
953 static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size) in ring_insert_padding() argument
956 struct tcmu_mailbox *mb = udev->mb_addr; in ring_insert_padding()
957 uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in ring_insert_padding()
960 if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) { in ring_insert_padding()
961 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); in ring_insert_padding()
963 hdr = udev->cmdr + cmd_head; in ring_insert_padding()
971 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); in ring_insert_padding()
974 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in ring_insert_padding()
984 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_unplug_device() local
986 clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags); in tcmu_unplug_device()
987 uio_event_notify(&udev->uio_info); in tcmu_unplug_device()
992 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_plug_device() local
994 if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) in tcmu_plug_device()
995 return &udev->se_plug; in tcmu_plug_device()
1012 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in queue_cmd_ring() local
1015 struct tcmu_mailbox *mb = udev->mb_addr; in queue_cmd_ring()
1021 uint32_t blk_size = udev->data_blk_size; in queue_cmd_ring()
1027 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { in queue_cmd_ring()
1032 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { in queue_cmd_ring()
1037 if (!list_empty(&udev->qfull_queue)) in queue_cmd_ring()
1040 if (data_length > (size_t)udev->max_blocks * blk_size) { in queue_cmd_ring()
1042 data_length, (size_t)udev->max_blocks * blk_size); in queue_cmd_ring()
1047 iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); in queue_cmd_ring()
1058 if (command_size > (udev->cmdr_size / 2)) { in queue_cmd_ring()
1060 command_size, udev->cmdr_size); in queue_cmd_ring()
1066 if (!is_ring_space_avail(udev, command_size)) in queue_cmd_ring()
1073 if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff), in queue_cmd_ring()
1084 tcmu_cmd, udev->name); in queue_cmd_ring()
1086 cmd_head = ring_insert_padding(udev, command_size); in queue_cmd_ring()
1088 entry = udev->cmdr + cmd_head; in queue_cmd_ring()
1098 scatter_data_area(udev, tcmu_cmd, &iov); in queue_cmd_ring()
1100 tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); in queue_cmd_ring()
1107 tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); in queue_cmd_ring()
1111 tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); in queue_cmd_ring()
1123 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); in queue_cmd_ring()
1126 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); in queue_cmd_ring()
1128 if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) in queue_cmd_ring()
1129 uio_event_notify(&udev->uio_info); in queue_cmd_ring()
1156 queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr) in queue_tmr_ring() argument
1161 struct tcmu_mailbox *mb = udev->mb_addr; in queue_tmr_ring()
1164 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) in queue_tmr_ring()
1170 if (!list_empty(&udev->tmr_queue) || in queue_tmr_ring()
1171 !is_ring_space_avail(udev, cmd_size)) { in queue_tmr_ring()
1172 list_add_tail(&tmr->queue_entry, &udev->tmr_queue); in queue_tmr_ring()
1174 tmr, udev->name); in queue_tmr_ring()
1178 cmd_head = ring_insert_padding(udev, cmd_size); in queue_tmr_ring()
1180 entry = udev->cmdr + cmd_head; in queue_tmr_ring()
1189 UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size); in queue_tmr_ring()
1192 uio_event_notify(&udev->uio_info); in queue_tmr_ring()
1204 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_queue_cmd() local
1213 mutex_lock(&udev->cmdr_lock); in tcmu_queue_cmd()
1220 mutex_unlock(&udev->cmdr_lock); in tcmu_queue_cmd()
1261 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_tmr_notify() local
1263 mutex_lock(&udev->cmdr_lock); in tcmu_tmr_notify()
1277 cmd, udev->name); in tcmu_tmr_notify()
1286 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in tcmu_tmr_notify()
1288 if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)) in tcmu_tmr_notify()
1292 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); in tcmu_tmr_notify()
1312 queue_tmr_ring(udev, tmr); in tcmu_tmr_notify()
1315 mutex_unlock(&udev->cmdr_lock); in tcmu_tmr_notify()
1322 struct tcmu_dev *udev = cmd->tcmu_dev; in tcmu_handle_completion() local
1338 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_handle_completion()
1371 gather_data_area(udev, cmd, true, read_len); in tcmu_handle_completion()
1373 gather_data_area(udev, cmd, false, read_len); in tcmu_handle_completion()
1409 static int tcmu_run_tmr_queue(struct tcmu_dev *udev) in tcmu_run_tmr_queue() argument
1414 if (list_empty(&udev->tmr_queue)) in tcmu_run_tmr_queue()
1417 pr_debug("running %s's tmr queue\n", udev->name); in tcmu_run_tmr_queue()
1419 list_splice_init(&udev->tmr_queue, &tmrs); in tcmu_run_tmr_queue()
1425 tmr, udev->name); in tcmu_run_tmr_queue()
1427 if (queue_tmr_ring(udev, tmr)) { in tcmu_run_tmr_queue()
1433 list_splice_tail(&tmrs, &udev->tmr_queue); in tcmu_run_tmr_queue()
1441 static bool tcmu_handle_completions(struct tcmu_dev *udev) in tcmu_handle_completions() argument
1447 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { in tcmu_handle_completions()
1452 mb = udev->mb_addr; in tcmu_handle_completions()
1455 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { in tcmu_handle_completions()
1457 struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned; in tcmu_handle_completions()
1464 size_t ring_left = head_to_end(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1465 udev->cmdr_size); in tcmu_handle_completions()
1473 UPDATE_HEAD(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1475 udev->cmdr_size); in tcmu_handle_completions()
1482 cmd = xa_load(&udev->commands, entry->hdr.cmd_id); in tcmu_handle_completions()
1484 cmd = xa_erase(&udev->commands, entry->hdr.cmd_id); in tcmu_handle_completions()
1488 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_handle_completions()
1495 UPDATE_HEAD(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1497 udev->cmdr_size); in tcmu_handle_completions()
1500 free_space = tcmu_run_tmr_queue(udev); in tcmu_handle_completions()
1503 xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { in tcmu_handle_completions()
1510 if (udev->cmd_time_out) in tcmu_handle_completions()
1511 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); in tcmu_handle_completions()
1553 static void tcmu_device_timedout(struct tcmu_dev *udev) in tcmu_device_timedout() argument
1556 if (list_empty(&udev->timedout_entry)) in tcmu_device_timedout()
1557 list_add_tail(&udev->timedout_entry, &timed_out_udevs); in tcmu_device_timedout()
1565 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer); in tcmu_cmd_timedout() local
1567 pr_debug("%s cmd timeout has expired\n", udev->name); in tcmu_cmd_timedout()
1568 tcmu_device_timedout(udev); in tcmu_cmd_timedout()
1573 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer); in tcmu_qfull_timedout() local
1575 pr_debug("%s qfull timeout has expired\n", udev->name); in tcmu_qfull_timedout()
1576 tcmu_device_timedout(udev); in tcmu_qfull_timedout()
1601 struct tcmu_dev *udev; in tcmu_alloc_device() local
1603 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); in tcmu_alloc_device()
1604 if (!udev) in tcmu_alloc_device()
1606 kref_init(&udev->kref); in tcmu_alloc_device()
1608 udev->name = kstrdup(name, GFP_KERNEL); in tcmu_alloc_device()
1609 if (!udev->name) { in tcmu_alloc_device()
1610 kfree(udev); in tcmu_alloc_device()
1614 udev->hba = hba; in tcmu_alloc_device()
1615 udev->cmd_time_out = TCMU_TIME_OUT; in tcmu_alloc_device()
1616 udev->qfull_time_out = -1; in tcmu_alloc_device()
1618 udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; in tcmu_alloc_device()
1619 udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; in tcmu_alloc_device()
1620 udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); in tcmu_alloc_device()
1622 mutex_init(&udev->cmdr_lock); in tcmu_alloc_device()
1624 INIT_LIST_HEAD(&udev->node); in tcmu_alloc_device()
1625 INIT_LIST_HEAD(&udev->timedout_entry); in tcmu_alloc_device()
1626 INIT_LIST_HEAD(&udev->qfull_queue); in tcmu_alloc_device()
1627 INIT_LIST_HEAD(&udev->tmr_queue); in tcmu_alloc_device()
1628 INIT_LIST_HEAD(&udev->inflight_queue); in tcmu_alloc_device()
1629 xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1); in tcmu_alloc_device()
1631 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); in tcmu_alloc_device()
1632 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); in tcmu_alloc_device()
1634 xa_init(&udev->data_pages); in tcmu_alloc_device()
1636 return &udev->se_dev; in tcmu_alloc_device()
1642 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_dev_call_rcu() local
1644 kfree(udev->uio_info.name); in tcmu_dev_call_rcu()
1645 kfree(udev->name); in tcmu_dev_call_rcu()
1646 kfree(udev); in tcmu_dev_call_rcu()
1659 static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first, in tcmu_blocks_release() argument
1662 XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk); in tcmu_blocks_release()
1667 xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) { in tcmu_blocks_release()
1679 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev) in tcmu_remove_all_queued_tmr() argument
1683 list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) { in tcmu_remove_all_queued_tmr()
1691 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); in tcmu_dev_kref_release() local
1692 struct se_device *dev = &udev->se_dev; in tcmu_dev_kref_release()
1697 vfree(udev->mb_addr); in tcmu_dev_kref_release()
1698 udev->mb_addr = NULL; in tcmu_dev_kref_release()
1701 if (!list_empty(&udev->timedout_entry)) in tcmu_dev_kref_release()
1702 list_del(&udev->timedout_entry); in tcmu_dev_kref_release()
1706 mutex_lock(&udev->cmdr_lock); in tcmu_dev_kref_release()
1707 xa_for_each(&udev->commands, i, cmd) { in tcmu_dev_kref_release()
1712 tcmu_remove_all_queued_tmr(udev); in tcmu_dev_kref_release()
1713 if (!list_empty(&udev->qfull_queue)) in tcmu_dev_kref_release()
1715 xa_destroy(&udev->commands); in tcmu_dev_kref_release()
1718 tcmu_blocks_release(udev, 0, udev->dbi_max); in tcmu_dev_kref_release()
1719 bitmap_free(udev->data_bitmap); in tcmu_dev_kref_release()
1720 mutex_unlock(&udev->cmdr_lock); in tcmu_dev_kref_release()
1727 static void run_qfull_queue(struct tcmu_dev *udev, bool fail) in run_qfull_queue() argument
1734 if (list_empty(&udev->qfull_queue)) in run_qfull_queue()
1737 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); in run_qfull_queue()
1739 list_splice_init(&udev->qfull_queue, &cmds); in run_qfull_queue()
1745 tcmu_cmd, udev->name); in run_qfull_queue()
1764 tcmu_cmd, udev->name, scsi_ret); in run_qfull_queue()
1779 list_splice_tail(&cmds, &udev->qfull_queue); in run_qfull_queue()
1784 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in run_qfull_queue()
1789 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_irqcontrol() local
1791 mutex_lock(&udev->cmdr_lock); in tcmu_irqcontrol()
1792 if (tcmu_handle_completions(udev)) in tcmu_irqcontrol()
1793 run_qfull_queue(udev, false); in tcmu_irqcontrol()
1794 mutex_unlock(&udev->cmdr_lock); in tcmu_irqcontrol()
1805 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_find_mem_index() local
1806 struct uio_info *info = &udev->uio_info; in tcmu_find_mem_index()
1816 static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) in tcmu_try_get_data_page() argument
1820 mutex_lock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1821 page = xa_load(&udev->data_pages, dpi); in tcmu_try_get_data_page()
1823 mutex_unlock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1832 dpi, udev->name); in tcmu_try_get_data_page()
1833 mutex_unlock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1840 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_vma_open() local
1844 kref_get(&udev->kref); in tcmu_vma_open()
1849 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_vma_close() local
1854 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_vma_close()
1859 struct tcmu_dev *udev = vmf->vma->vm_private_data; in tcmu_vma_fault() local
1860 struct uio_info *info = &udev->uio_info; in tcmu_vma_fault()
1875 if (offset < udev->data_off) { in tcmu_vma_fault()
1883 dpi = (offset - udev->data_off) / PAGE_SIZE; in tcmu_vma_fault()
1884 page = tcmu_try_get_data_page(udev, dpi); in tcmu_vma_fault()
1902 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_mmap() local
1907 vma->vm_private_data = udev; in tcmu_mmap()
1910 if (vma_pages(vma) != udev->mmap_pages) in tcmu_mmap()
1920 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_open() local
1923 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) in tcmu_open()
1926 udev->inode = inode; in tcmu_open()
1935 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_release() local
1940 mutex_lock(&udev->cmdr_lock); in tcmu_release()
1942 xa_for_each(&udev->commands, i, cmd) { in tcmu_release()
1953 cmd->cmd_id, udev->name); in tcmu_release()
1956 xa_erase(&udev->commands, i); in tcmu_release()
1964 if (freed && list_empty(&udev->tmr_queue)) in tcmu_release()
1965 run_qfull_queue(udev, false); in tcmu_release()
1967 mutex_unlock(&udev->cmdr_lock); in tcmu_release()
1969 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); in tcmu_release()
1976 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) in tcmu_init_genl_cmd_reply() argument
1978 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_init_genl_cmd_reply()
1983 if (udev->nl_reply_supported <= 0) in tcmu_init_genl_cmd_reply()
1991 udev->name); in tcmu_init_genl_cmd_reply()
1998 nl_cmd->cmd, udev->name); in tcmu_init_genl_cmd_reply()
2004 nl_cmd->udev = udev; in tcmu_init_genl_cmd_reply()
2014 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev) in tcmu_destroy_genl_cmd_reply() argument
2016 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_destroy_genl_cmd_reply()
2021 if (udev->nl_reply_supported <= 0) in tcmu_destroy_genl_cmd_reply()
2032 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) in tcmu_wait_genl_cmd_reply() argument
2034 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_wait_genl_cmd_reply()
2040 if (udev->nl_reply_supported <= 0) in tcmu_wait_genl_cmd_reply()
2054 static int tcmu_netlink_event_init(struct tcmu_dev *udev, in tcmu_netlink_event_init() argument
2070 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); in tcmu_netlink_event_init()
2074 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); in tcmu_netlink_event_init()
2078 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); in tcmu_netlink_event_init()
2091 static int tcmu_netlink_event_send(struct tcmu_dev *udev, in tcmu_netlink_event_send() argument
2099 ret = tcmu_init_genl_cmd_reply(udev, cmd); in tcmu_netlink_event_send()
2111 return tcmu_wait_genl_cmd_reply(udev); in tcmu_netlink_event_send()
2113 tcmu_destroy_genl_cmd_reply(udev); in tcmu_netlink_event_send()
2118 static int tcmu_send_dev_add_event(struct tcmu_dev *udev) in tcmu_send_dev_add_event() argument
2124 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb, in tcmu_send_dev_add_event()
2128 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb, in tcmu_send_dev_add_event()
2132 static int tcmu_send_dev_remove_event(struct tcmu_dev *udev) in tcmu_send_dev_remove_event() argument
2138 ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE, in tcmu_send_dev_remove_event()
2142 return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE, in tcmu_send_dev_remove_event()
2146 static int tcmu_update_uio_info(struct tcmu_dev *udev) in tcmu_update_uio_info() argument
2148 struct tcmu_hba *hba = udev->hba->hba_ptr; in tcmu_update_uio_info()
2152 info = &udev->uio_info; in tcmu_update_uio_info()
2154 if (udev->dev_config[0]) in tcmu_update_uio_info()
2156 udev->name, udev->dev_config); in tcmu_update_uio_info()
2159 udev->name); in tcmu_update_uio_info()
2172 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_configure_device() local
2178 ret = tcmu_update_uio_info(udev); in tcmu_configure_device()
2182 info = &udev->uio_info; in tcmu_configure_device()
2184 mutex_lock(&udev->cmdr_lock); in tcmu_configure_device()
2185 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); in tcmu_configure_device()
2186 mutex_unlock(&udev->cmdr_lock); in tcmu_configure_device()
2187 if (!udev->data_bitmap) { in tcmu_configure_device()
2199 udev->mb_addr = mb; in tcmu_configure_device()
2200 udev->cmdr = (void *)mb + CMDR_OFF; in tcmu_configure_device()
2201 udev->cmdr_size = CMDR_SIZE; in tcmu_configure_device()
2202 udev->data_off = MB_CMDR_SIZE; in tcmu_configure_device()
2203 data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT; in tcmu_configure_device()
2204 udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT; in tcmu_configure_device()
2205 udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE; in tcmu_configure_device()
2206 udev->dbi_thresh = 0; /* Default in Idle state */ in tcmu_configure_device()
2215 mb->cmdr_size = udev->cmdr_size; in tcmu_configure_device()
2217 WARN_ON(!PAGE_ALIGNED(udev->data_off)); in tcmu_configure_device()
2223 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; in tcmu_configure_device()
2251 if (udev->nl_reply_supported >= 0) in tcmu_configure_device()
2252 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; in tcmu_configure_device()
2258 kref_get(&udev->kref); in tcmu_configure_device()
2260 ret = tcmu_send_dev_add_event(udev); in tcmu_configure_device()
2265 list_add(&udev->node, &root_udev); in tcmu_configure_device()
2271 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_configure_device()
2272 uio_unregister_device(&udev->uio_info); in tcmu_configure_device()
2274 vfree(udev->mb_addr); in tcmu_configure_device()
2275 udev->mb_addr = NULL; in tcmu_configure_device()
2277 bitmap_free(udev->data_bitmap); in tcmu_configure_device()
2278 udev->data_bitmap = NULL; in tcmu_configure_device()
2288 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_free_device() local
2291 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_free_device()
2296 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_destroy_device() local
2298 del_timer_sync(&udev->cmd_timer); in tcmu_destroy_device()
2299 del_timer_sync(&udev->qfull_timer); in tcmu_destroy_device()
2302 list_del(&udev->node); in tcmu_destroy_device()
2305 tcmu_send_dev_remove_event(udev); in tcmu_destroy_device()
2307 uio_unregister_device(&udev->uio_info); in tcmu_destroy_device()
2310 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_destroy_device()
2313 static void tcmu_unblock_dev(struct tcmu_dev *udev) in tcmu_unblock_dev() argument
2315 mutex_lock(&udev->cmdr_lock); in tcmu_unblock_dev()
2316 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); in tcmu_unblock_dev()
2317 mutex_unlock(&udev->cmdr_lock); in tcmu_unblock_dev()
2320 static void tcmu_block_dev(struct tcmu_dev *udev) in tcmu_block_dev() argument
2322 mutex_lock(&udev->cmdr_lock); in tcmu_block_dev()
2324 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) in tcmu_block_dev()
2328 tcmu_handle_completions(udev); in tcmu_block_dev()
2330 run_qfull_queue(udev, true); in tcmu_block_dev()
2333 mutex_unlock(&udev->cmdr_lock); in tcmu_block_dev()
2336 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) in tcmu_reset_ring() argument
2342 mutex_lock(&udev->cmdr_lock); in tcmu_reset_ring()
2344 xa_for_each(&udev->commands, i, cmd) { in tcmu_reset_ring()
2346 cmd->cmd_id, udev->name, in tcmu_reset_ring()
2352 xa_erase(&udev->commands, i); in tcmu_reset_ring()
2374 mb = udev->mb_addr; in tcmu_reset_ring()
2376 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, in tcmu_reset_ring()
2379 udev->cmdr_last_cleaned = 0; in tcmu_reset_ring()
2383 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_reset_ring()
2385 del_timer(&udev->cmd_timer); in tcmu_reset_ring()
2394 tcmu_remove_all_queued_tmr(udev); in tcmu_reset_ring()
2396 run_qfull_queue(udev, false); in tcmu_reset_ring()
2398 mutex_unlock(&udev->cmdr_lock); in tcmu_reset_ring()
2438 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg) in tcmu_set_max_blocks_param() argument
2441 uint32_t pages_per_blk = udev->data_pages_per_blk; in tcmu_set_max_blocks_param()
2464 mutex_lock(&udev->cmdr_lock); in tcmu_set_max_blocks_param()
2465 if (udev->data_bitmap) { in tcmu_set_max_blocks_param()
2471 udev->data_area_mb = val; in tcmu_set_max_blocks_param()
2472 udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk; in tcmu_set_max_blocks_param()
2475 mutex_unlock(&udev->cmdr_lock); in tcmu_set_max_blocks_param()
2479 static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg) in tcmu_set_data_pages_per_blk() argument
2490 if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) { in tcmu_set_data_pages_per_blk()
2492 val, udev->data_area_mb, in tcmu_set_data_pages_per_blk()
2493 TCMU_MBS_TO_PAGES(udev->data_area_mb)); in tcmu_set_data_pages_per_blk()
2497 mutex_lock(&udev->cmdr_lock); in tcmu_set_data_pages_per_blk()
2498 if (udev->data_bitmap) { in tcmu_set_data_pages_per_blk()
2504 udev->data_pages_per_blk = val; in tcmu_set_data_pages_per_blk()
2505 udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val; in tcmu_set_data_pages_per_blk()
2508 mutex_unlock(&udev->cmdr_lock); in tcmu_set_data_pages_per_blk()
2515 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_set_configfs_dev_params() local
2533 if (match_strlcpy(udev->dev_config, &args[0], in tcmu_set_configfs_dev_params()
2538 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); in tcmu_set_configfs_dev_params()
2541 ret = match_u64(&args[0], &udev->dev_size); in tcmu_set_configfs_dev_params()
2555 ret = match_int(&args[0], &udev->nl_reply_supported); in tcmu_set_configfs_dev_params()
2561 ret = tcmu_set_max_blocks_param(udev, &args[0]); in tcmu_set_configfs_dev_params()
2564 ret = tcmu_set_data_pages_per_blk(udev, &args[0]); in tcmu_set_configfs_dev_params()
2580 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_show_configfs_dev_params() local
2584 udev->dev_config[0] ? udev->dev_config : "NULL"); in tcmu_show_configfs_dev_params()
2585 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size); in tcmu_show_configfs_dev_params()
2586 bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb); in tcmu_show_configfs_dev_params()
2587 bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk); in tcmu_show_configfs_dev_params()
2594 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_get_blocks() local
2596 return div_u64(udev->dev_size - dev->dev_attrib.block_size, in tcmu_get_blocks()
2610 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_cmd_time_out_show() local
2612 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); in tcmu_cmd_time_out_show()
2620 struct tcmu_dev *udev = container_of(da->da_dev, in tcmu_cmd_time_out_store() local
2634 udev->cmd_time_out = val * MSEC_PER_SEC; in tcmu_cmd_time_out_store()
2643 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_qfull_time_out_show() local
2645 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? in tcmu_qfull_time_out_show()
2646 udev->qfull_time_out : in tcmu_qfull_time_out_show()
2647 udev->qfull_time_out / MSEC_PER_SEC); in tcmu_qfull_time_out_show()
2655 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_qfull_time_out_store() local
2664 udev->qfull_time_out = val * MSEC_PER_SEC; in tcmu_qfull_time_out_store()
2666 udev->qfull_time_out = val; in tcmu_qfull_time_out_store()
2679 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_max_data_area_mb_show() local
2681 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb); in tcmu_max_data_area_mb_show()
2690 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_data_pages_per_blk_show() local
2692 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk); in tcmu_data_pages_per_blk_show()
2700 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_config_show() local
2702 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); in tcmu_dev_config_show()
2705 static int tcmu_send_dev_config_event(struct tcmu_dev *udev, in tcmu_send_dev_config_event() argument
2712 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_config_event()
2721 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_config_event()
2731 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_config_store() local
2739 if (target_dev_configured(&udev->se_dev)) { in tcmu_dev_config_store()
2740 ret = tcmu_send_dev_config_event(udev, page); in tcmu_dev_config_store()
2745 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); in tcmu_dev_config_store()
2747 ret = tcmu_update_uio_info(udev); in tcmu_dev_config_store()
2752 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); in tcmu_dev_config_store()
2762 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_size_show() local
2764 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size); in tcmu_dev_size_show()
2767 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) in tcmu_send_dev_size_event() argument
2773 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_size_event()
2783 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_size_event()
2792 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_size_store() local
2801 if (target_dev_configured(&udev->se_dev)) { in tcmu_dev_size_store()
2802 ret = tcmu_send_dev_size_event(udev, val); in tcmu_dev_size_store()
2808 udev->dev_size = val; in tcmu_dev_size_store()
2818 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_nl_reply_supported_show() local
2820 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); in tcmu_nl_reply_supported_show()
2828 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_nl_reply_supported_store() local
2836 udev->nl_reply_supported = val; in tcmu_nl_reply_supported_store()
2850 static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val) in tcmu_send_emulate_write_cache() argument
2856 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_emulate_write_cache()
2865 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_emulate_write_cache()
2874 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_emulate_write_cache_store() local
2883 if (target_dev_configured(&udev->se_dev)) { in tcmu_emulate_write_cache_store()
2884 ret = tcmu_send_emulate_write_cache(udev, val); in tcmu_emulate_write_cache_store()
2900 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_tmr_notification_show() local
2903 test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)); in tcmu_tmr_notification_show()
2911 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_tmr_notification_store() local
2922 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); in tcmu_tmr_notification_store()
2924 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); in tcmu_tmr_notification_store()
2934 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_block_dev_show() local
2936 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) in tcmu_block_dev_show()
2948 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_block_dev_store() local
2952 if (!target_dev_configured(&udev->se_dev)) { in tcmu_block_dev_store()
2967 tcmu_unblock_dev(udev); in tcmu_block_dev_store()
2969 tcmu_block_dev(udev); in tcmu_block_dev_store()
2980 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_reset_ring_store() local
2984 if (!target_dev_configured(&udev->se_dev)) { in tcmu_reset_ring_store()
2998 tcmu_reset_ring(udev, val); in tcmu_reset_ring_store()
3009 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_free_kept_buf_store() local
3014 if (!target_dev_configured(&udev->se_dev)) { in tcmu_free_kept_buf_store()
3023 mutex_lock(&udev->cmdr_lock); in tcmu_free_kept_buf_store()
3026 XA_STATE(xas, &udev->commands, cmd_id); in tcmu_free_kept_buf_store()
3053 if (list_empty(&udev->tmr_queue)) in tcmu_free_kept_buf_store()
3054 run_qfull_queue(udev, false); in tcmu_free_kept_buf_store()
3057 mutex_unlock(&udev->cmdr_lock); in tcmu_free_kept_buf_store()
3109 struct tcmu_dev *udev; in find_free_blocks() local
3118 list_for_each_entry(udev, &root_udev, node) { in find_free_blocks()
3119 mutex_lock(&udev->cmdr_lock); in find_free_blocks()
3121 if (!target_dev_configured(&udev->se_dev)) { in find_free_blocks()
3122 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3127 if (tcmu_handle_completions(udev)) in find_free_blocks()
3128 run_qfull_queue(udev, false); in find_free_blocks()
3131 if (!udev->dbi_thresh) { in find_free_blocks()
3132 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3136 end = udev->dbi_max + 1; in find_free_blocks()
3137 block = find_last_bit(udev->data_bitmap, end); in find_free_blocks()
3138 if (block == udev->dbi_max) { in find_free_blocks()
3143 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3147 udev->dbi_thresh = start = 0; in find_free_blocks()
3148 udev->dbi_max = 0; in find_free_blocks()
3150 udev->dbi_thresh = start = block + 1; in find_free_blocks()
3151 udev->dbi_max = block; in find_free_blocks()
3155 off = udev->data_off + (loff_t)start * udev->data_blk_size; in find_free_blocks()
3156 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); in find_free_blocks()
3159 pages_freed = tcmu_blocks_release(udev, start, end - 1); in find_free_blocks()
3160 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3166 total_blocks_freed, udev->name); in find_free_blocks()
3176 struct tcmu_dev *udev, *tmp_dev; in check_timedout_devices() local
3183 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) { in check_timedout_devices()
3184 list_del_init(&udev->timedout_entry); in check_timedout_devices()
3187 mutex_lock(&udev->cmdr_lock); in check_timedout_devices()
3193 if (udev->cmd_time_out) { in check_timedout_devices()
3195 &udev->inflight_queue, in check_timedout_devices()
3199 tcmu_set_next_deadline(&udev->inflight_queue, in check_timedout_devices()
3200 &udev->cmd_timer); in check_timedout_devices()
3202 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, in check_timedout_devices()
3206 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in check_timedout_devices()
3208 mutex_unlock(&udev->cmdr_lock); in check_timedout_devices()
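
The is_ring_space_avail(), ring_insert_padding() and tcmu_handle_completions() matches above all turn on the same circular command-ring arithmetic: head_to_end() gives the bytes left before the ring wraps, spc_free() gives the space between the last cleaned position and the head, and UPDATE_HEAD() advances an index modulo the ring size. Below is a minimal, self-contained user-space sketch of that arithmetic; the helper definitions are inferred from the call sites listed here and are illustrative assumptions, not copies of the driver's code.

/*
 * User-space sketch (not kernel code) of the ring-space arithmetic used by
 * is_ring_space_avail() and ring_insert_padding() in the listing above.
 */
#include <stdbool.h>
#include <stdio.h>

/* bytes from 'head' to the physical end of the ring */
static size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

/* free bytes between 'tail' (last cleaned) and 'head' in a ring of 'size' */
static size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* keep one byte unused so an empty ring and a full ring differ */
	return (tail + size - head - 1) % size;
}

/* advance an index by 'len' modulo the ring size */
#define UPDATE_HEAD(idx, len, size) ((idx) = ((idx) + (len)) % (size))

static bool is_ring_space_avail(size_t head, size_t tail, size_t size,
				size_t cmd_size)
{
	size_t cmd_needed;

	/*
	 * If the command fits before the wrap point only cmd_size bytes are
	 * needed; otherwise a PAD entry must first cover the slack to the end.
	 */
	if (head_to_end(head, size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(head, size);

	return spc_free(head, tail, size) >= cmd_needed;
}

int main(void)
{
	size_t size = 4096, head = 3900, tail = 512;

	printf("fits: %d\n", is_ring_space_avail(head, tail, size, 256));
	UPDATE_HEAD(head, head_to_end(head, size), size);	/* pad to wrap */
	printf("head after pad: %zu\n", head);
	return 0;
}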
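
A second recurring pattern in the matches is the data-area block accounting in tcmu_get_empty_block(), tcmu_alloc_data_space() and find_free_blocks(): free blocks live in a bitmap, only indices below dbi_thresh are handed out, and dbi_max records the highest block ever used so release paths only walk that far. The sketch below is a simplified user-space illustration of that bookkeeping with a plain array standing in for the kernel bitmap API; it is an assumption about the pattern, not the driver's implementation.

/*
 * User-space sketch of the data-block bitmap bookkeeping referenced above.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_BLOCKS 64

static bool data_bitmap[MAX_BLOCKS];	/* true = block in use */
static unsigned int dbi_thresh;		/* blocks currently allowed */
static unsigned int dbi_max;		/* highest block index handed out */

/* grab the first free block below dbi_thresh, or -1 if none is left */
static int get_empty_block(void)
{
	for (unsigned int dbi = 0; dbi < dbi_thresh; dbi++) {
		if (!data_bitmap[dbi]) {
			data_bitmap[dbi] = true;
			if (dbi > dbi_max)
				dbi_max = dbi;
			return (int)dbi;
		}
	}
	return -1;
}

/* release every block at or above 'first' and shrink the accounting */
static void blocks_release(unsigned int first)
{
	for (unsigned int dbi = first; dbi <= dbi_max && dbi < MAX_BLOCKS; dbi++)
		data_bitmap[dbi] = false;
	dbi_thresh = first;
	dbi_max = first ? first - 1 : 0;
}

int main(void)
{
	dbi_thresh = 8;	/* pretend a command grew the threshold to 8 blocks */

	for (int i = 0; i < 3; i++)
		printf("allocated block %d\n", get_empty_block());

	blocks_release(0);	/* idle: give the whole data area back */
	printf("dbi_thresh=%u dbi_max=%u\n", dbi_thresh, dbi_max);
	return 0;
}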