Lines matching refs: head. All hits below are from the Linux NVMe multipath driver (drivers/nvme/host/multipath.c); each entry shows the source line number, the matching source line, and the enclosing function.
156 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
158 bio_set_dev(bio, ns->head->disk->part0); in nvme_failover_req()
172 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
173 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
177 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
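
The nvme_failover_req() hits above outline the failover flow: under the head's requeue_lock, the failed request's bios are retargeted at the multipath node and stolen onto the requeue list, then the requeue worker is kicked. A condensed sketch of that flow follows; the loop over req->bio and the blk_mq_end_request() completion are assumptions about the parts the hits do not show.

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	for (bio = req->bio; bio; bio = bio->bi_next)
		bio_set_dev(bio, ns->head->disk->part0);   /* retarget at the mpath node */
	blk_steal_bios(&ns->head->requeue_list, req);      /* detach bios from the failed request */
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

	blk_mq_end_request(req, 0);                        /* assumed: complete the failed request */
	kblockd_schedule_work(&ns->head->requeue_work);    /* resubmit via another path */
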
183 struct gendisk *disk = ns->head->disk; in nvme_mpath_start_request()
185 if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) { in nvme_mpath_start_request()
208 bdev_end_io_acct(ns->head->disk->part0, req_op(rq), in nvme_mpath_end_request()
221 if (!ns->head->disk) in nvme_kick_requeue_lists()
223 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
225 disk_uevent(ns->head->disk, KOBJ_CHANGE); in nvme_kick_requeue_lists()
241 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path() local
245 if (!head) in nvme_mpath_clear_current_path()
249 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
250 rcu_assign_pointer(head->current_path[node], NULL); in nvme_mpath_clear_current_path()
267 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
274 struct nvme_ns_head *head = ns->head; in nvme_mpath_revalidate_paths() local
275 sector_t capacity = get_capacity(head->disk); in nvme_mpath_revalidate_paths()
279 srcu_idx = srcu_read_lock(&head->srcu); in nvme_mpath_revalidate_paths()
280 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_mpath_revalidate_paths()
281 srcu_read_lock_held(&head->srcu)) { in nvme_mpath_revalidate_paths()
285 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_mpath_revalidate_paths()
288 rcu_assign_pointer(head->current_path[node], NULL); in nvme_mpath_revalidate_paths()
289 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_revalidate_paths()
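
The nvme_mpath_revalidate_paths() hits suggest the revalidation pattern: the head disk's capacity is compared against each sibling namespace under SRCU, the cached per-node path is dropped, and the requeue worker is kicked so path selection runs again. A minimal sketch, assuming a mismatch is recorded by clearing an NVME_NS_READY-style per-namespace flag (that flag handling is not visible in the hits above):

	sector_t capacity = get_capacity(head->disk);

	srcu_idx = srcu_read_lock(&head->srcu);
	list_for_each_entry_srcu(ns, &head->list, siblings,
				 srcu_read_lock_held(&head->srcu)) {
		if (capacity != get_capacity(ns->disk))
			clear_bit(NVME_NS_READY, &ns->flags);  /* assumed flag */
	}
	srcu_read_unlock(&head->srcu, srcu_idx);

	for_each_node(node)
		rcu_assign_pointer(head->current_path[node], NULL);
	kblockd_schedule_work(&head->requeue_work);
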
309 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) in __nvme_find_path() argument
314 list_for_each_entry_srcu(ns, &head->list, siblings, in __nvme_find_path()
315 srcu_read_lock_held(&head->srcu)) { in __nvme_find_path()
320 READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA) in __nvme_find_path()
346 rcu_assign_pointer(head->current_path[node], found); in __nvme_find_path()
350 static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, in nvme_next_ns() argument
353 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
357 return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); in nvme_next_ns()
360 static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head) in nvme_round_robin_path() argument
364 struct nvme_ns *old = srcu_dereference(head->current_path[node], in nvme_round_robin_path()
365 &head->srcu); in nvme_round_robin_path()
368 return __nvme_find_path(head, node); in nvme_round_robin_path()
370 if (list_is_singular(&head->list)) { in nvme_round_robin_path()
376 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
378 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
404 rcu_assign_pointer(head->current_path[node], found); in nvme_round_robin_path()
408 static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head) in nvme_queue_depth_path() argument
414 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_queue_depth_path()
415 srcu_read_lock_held(&head->srcu)) { in nvme_queue_depth_path()
451 static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head) in nvme_numa_path() argument
456 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_numa_path()
458 return __nvme_find_path(head, node); in nvme_numa_path()
460 return __nvme_find_path(head, node); in nvme_numa_path()
464 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) in nvme_find_path() argument
466 switch (READ_ONCE(head->subsys->iopolicy)) { in nvme_find_path()
468 return nvme_queue_depth_path(head); in nvme_find_path()
470 return nvme_round_robin_path(head); in nvme_find_path()
472 return nvme_numa_path(head); in nvme_find_path()
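
The nvme_find_path() hits show a single dispatch point: the subsystem's iopolicy is read once per call and routed to one of three selectors. A minimal reconstruction of that switch (the exact default/fallthrough arrangement upstream may differ slightly):

	inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
	{
		switch (READ_ONCE(head->subsys->iopolicy)) {
		case NVME_IOPOLICY_QD:
			return nvme_queue_depth_path(head);  /* fewest outstanding I/Os */
		case NVME_IOPOLICY_RR:
			return nvme_round_robin_path(head);  /* rotate through usable paths */
		default:
			return nvme_numa_path(head);         /* NUMA-local path (default policy) */
		}
	}
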
476 static bool nvme_available_path(struct nvme_ns_head *head) in nvme_available_path() argument
480 if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) in nvme_available_path()
483 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_available_path()
484 srcu_read_lock_held(&head->srcu)) { in nvme_available_path()
506 return nvme_mpath_queue_if_no_path(head); in nvme_available_path()
511 struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data; in nvme_ns_head_submit_bio() local
512 struct device *dev = disk_to_dev(head->disk); in nvme_ns_head_submit_bio()
525 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_submit_bio()
526 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
530 trace_block_bio_remap(bio, disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
533 } else if (nvme_available_path(head)) { in nvme_ns_head_submit_bio()
536 spin_lock_irq(&head->requeue_lock); in nvme_ns_head_submit_bio()
537 bio_list_add(&head->requeue_list, bio); in nvme_ns_head_submit_bio()
538 spin_unlock_irq(&head->requeue_lock); in nvme_ns_head_submit_bio()
545 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_submit_bio()
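
Read together, the nvme_ns_head_submit_bio() hits describe the I/O entry point for the multipath node: a path is chosen under an SRCU read lock; if one exists the bio is retargeted and submitted; if no path is usable but one may return, the bio is parked on the requeue list; otherwise it is failed. A condensed sketch; the submit_bio_noacct() and bio_io_error() calls are assumptions about the parts the hits do not show:

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (ns) {
		bio_set_dev(bio, ns->disk->part0);
		trace_block_bio_remap(bio, disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);                 /* assumed: send down the chosen path */
	} else if (nvme_available_path(head)) {
		/* no usable path right now, but one may come back: park the bio */
		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		bio_io_error(bio);                      /* assumed: no path and none expected */
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
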
563 struct nvme_ns_head *head = disk->private_data; in nvme_ns_head_get_unique_id() local
567 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_get_unique_id()
568 ns = nvme_find_path(head); in nvme_ns_head_get_unique_id()
571 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_get_unique_id()
579 struct nvme_ns_head *head = disk->private_data; in nvme_ns_head_report_zones() local
583 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_report_zones()
584 ns = nvme_find_path(head); in nvme_ns_head_report_zones()
587 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_report_zones()
635 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) in nvme_add_ns_head_cdev() argument
639 head->cdev_device.parent = &head->subsys->dev; in nvme_add_ns_head_cdev()
640 ret = dev_set_name(&head->cdev_device, "ng%dn%d", in nvme_add_ns_head_cdev()
641 head->subsys->instance, head->instance); in nvme_add_ns_head_cdev()
644 ret = nvme_cdev_add(&head->cdev, &head->cdev_device, in nvme_add_ns_head_cdev()
651 struct nvme_ns_head *head = in nvme_partition_scan_work() local
655 &head->disk->state))) in nvme_partition_scan_work()
658 mutex_lock(&head->disk->open_mutex); in nvme_partition_scan_work()
659 bdev_disk_changed(head->disk, false); in nvme_partition_scan_work()
660 mutex_unlock(&head->disk->open_mutex); in nvme_partition_scan_work()
665 struct nvme_ns_head *head = in nvme_requeue_work() local
669 spin_lock_irq(&head->requeue_lock); in nvme_requeue_work()
670 next = bio_list_get(&head->requeue_list); in nvme_requeue_work()
671 spin_unlock_irq(&head->requeue_lock); in nvme_requeue_work()
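
The nvme_requeue_work() hits show the drain side of the requeue list: the whole bio list is detached under requeue_lock, then each bio is resubmitted so path selection runs again. A short sketch; the resubmission loop via submit_bio_noacct() is an assumption about the part the hits do not show:

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);   /* atomically take the whole list */
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;
		submit_bio_noacct(bio);             /* re-runs nvme_ns_head_submit_bio() */
	}
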
681 static void nvme_remove_head(struct nvme_ns_head *head) in nvme_remove_head() argument
683 if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { in nvme_remove_head()
688 kblockd_schedule_work(&head->requeue_work); in nvme_remove_head()
690 nvme_cdev_del(&head->cdev, &head->cdev_device); in nvme_remove_head()
691 synchronize_srcu(&head->srcu); in nvme_remove_head()
692 del_gendisk(head->disk); in nvme_remove_head()
694 nvme_put_ns_head(head); in nvme_remove_head()
699 struct nvme_ns_head *head = container_of(to_delayed_work(work), in nvme_remove_head_work() local
703 mutex_lock(&head->subsys->lock); in nvme_remove_head_work()
704 if (list_empty(&head->list)) { in nvme_remove_head_work()
705 list_del_init(&head->entry); in nvme_remove_head_work()
708 mutex_unlock(&head->subsys->lock); in nvme_remove_head_work()
710 nvme_remove_head(head); in nvme_remove_head_work()
715 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) in nvme_mpath_alloc_disk() argument
719 mutex_init(&head->lock); in nvme_mpath_alloc_disk()
720 bio_list_init(&head->requeue_list); in nvme_mpath_alloc_disk()
721 spin_lock_init(&head->requeue_lock); in nvme_mpath_alloc_disk()
722 INIT_WORK(&head->requeue_work, nvme_requeue_work); in nvme_mpath_alloc_disk()
723 INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work); in nvme_mpath_alloc_disk()
724 INIT_DELAYED_WORK(&head->remove_work, nvme_remove_head_work); in nvme_mpath_alloc_disk()
725 head->delayed_removal_secs = 0; in nvme_mpath_alloc_disk()
742 if (!nvme_is_unique_nsid(ctrl, head)) in nvme_mpath_alloc_disk()
749 if (head->ids.csi == NVME_CSI_ZNS) in nvme_mpath_alloc_disk()
752 head->disk = blk_alloc_disk(&lim, ctrl->numa_node); in nvme_mpath_alloc_disk()
753 if (IS_ERR(head->disk)) in nvme_mpath_alloc_disk()
754 return PTR_ERR(head->disk); in nvme_mpath_alloc_disk()
755 head->disk->fops = &nvme_ns_head_ops; in nvme_mpath_alloc_disk()
756 head->disk->private_data = head; in nvme_mpath_alloc_disk()
766 set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state); in nvme_mpath_alloc_disk()
767 sprintf(head->disk->disk_name, "nvme%dn%d", in nvme_mpath_alloc_disk()
768 ctrl->subsys->instance, head->instance); in nvme_mpath_alloc_disk()
769 nvme_tryget_ns_head(head); in nvme_mpath_alloc_disk()
775 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live() local
778 if (!head->disk) in nvme_mpath_set_live()
786 if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { in nvme_mpath_set_live()
787 rc = device_add_disk(&head->subsys->dev, head->disk, in nvme_mpath_set_live()
790 clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); in nvme_mpath_set_live()
793 nvme_add_ns_head_cdev(head); in nvme_mpath_set_live()
794 kblockd_schedule_work(&head->partition_scan_work); in nvme_mpath_set_live()
797 nvme_mpath_add_sysfs_link(ns->head); in nvme_mpath_set_live()
799 mutex_lock(&head->lock); in nvme_mpath_set_live()
803 srcu_idx = srcu_read_lock(&head->srcu); in nvme_mpath_set_live()
805 __nvme_find_path(head, node); in nvme_mpath_set_live()
806 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_mpath_set_live()
808 mutex_unlock(&head->lock); in nvme_mpath_set_live()
810 synchronize_srcu(&head->srcu); in nvme_mpath_set_live()
811 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_set_live()
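
The nvme_mpath_set_live() hits cover the first-live-path transition: NVME_NSHEAD_DISK_LIVE is set once, the multipath gendisk and its char device are registered, a partition scan is scheduled, and after an SRCU grace period the requeue worker flushes any bios parked while the head had no path. A hedged sketch of that transition; the NULL attribute-group argument and the error handling are simplifications:

	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		rc = device_add_disk(&head->subsys->dev, head->disk,
				     NULL /* attribute groups elided in this sketch */);
		if (rc) {
			clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
			return;
		}
		nvme_add_ns_head_cdev(head);                      /* /dev/ngXnY char device */
		kblockd_schedule_work(&head->partition_scan_work);
	}

	synchronize_srcu(&head->srcu);               /* let readers see the new path */
	kblockd_schedule_work(&head->requeue_work);  /* resubmit bios parked while pathless */

The hits at source lines 799 to 806 above additionally show the per-node path cache being re-primed via __nvme_find_path() under head->lock and an SRCU read lock; that step sits between the two halves of this sketch.
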
897 if (test_bit(NVME_NSHEAD_DISK_LIVE, &ns->head->flags)) in nvme_update_ns_ana_state()
898 nvme_mpath_add_sysfs_link(ns->head); in nvme_update_ns_ana_state()
926 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
928 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
932 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
1092 if (ns->head->subsys->iopolicy != NVME_IOPOLICY_QD) in queue_depth_show()
1106 struct nvme_ns_head *head = ns->head; in numa_nodes_show() local
1108 if (head->subsys->iopolicy != NVME_IOPOLICY_NUMA) in numa_nodes_show()
1113 srcu_idx = srcu_read_lock(&head->srcu); in numa_nodes_show()
1115 current_ns = srcu_dereference(head->current_path[node], in numa_nodes_show()
1116 &head->srcu); in numa_nodes_show()
1120 srcu_read_unlock(&head->srcu, srcu_idx); in numa_nodes_show()
1130 struct nvme_ns_head *head = disk->private_data; in delayed_removal_secs_show() local
1133 mutex_lock(&head->subsys->lock); in delayed_removal_secs_show()
1134 ret = sysfs_emit(buf, "%u\n", head->delayed_removal_secs); in delayed_removal_secs_show()
1135 mutex_unlock(&head->subsys->lock); in delayed_removal_secs_show()
1143 struct nvme_ns_head *head = disk->private_data; in delayed_removal_secs_store() local
1151 mutex_lock(&head->subsys->lock); in delayed_removal_secs_store()
1152 head->delayed_removal_secs = sec; in delayed_removal_secs_store()
1154 set_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); in delayed_removal_secs_store()
1156 clear_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); in delayed_removal_secs_store()
1157 mutex_unlock(&head->subsys->lock); in delayed_removal_secs_store()
1162 synchronize_srcu(&head->srcu); in delayed_removal_secs_store()
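
The delayed_removal_secs store hits show the write side of the delayed-removal knob: the new delay is recorded under the subsystem lock, the queue-if-no-path flag is toggled to match, and an SRCU grace period makes the change visible to in-flight path lookups. A minimal sketch of that core (input parsing and the sysfs return value are omitted):

	mutex_lock(&head->subsys->lock);
	head->delayed_removal_secs = sec;
	if (sec)
		set_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags);
	else
		clear_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags);
	mutex_unlock(&head->subsys->lock);

	/*
	 * Make sure readers such as nvme_available_path() observe the flag
	 * change before the sysfs write returns.
	 */
	synchronize_srcu(&head->srcu);
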
1181 void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head) in nvme_mpath_add_sysfs_link() argument
1192 if (!test_bit(GD_ADDED, &head->disk->state)) in nvme_mpath_add_sysfs_link()
1195 kobj = &disk_to_dev(head->disk)->kobj; in nvme_mpath_add_sysfs_link()
1201 srcu_idx = srcu_read_lock(&head->srcu); in nvme_mpath_add_sysfs_link()
1203 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_mpath_add_sysfs_link()
1204 srcu_read_lock_held(&head->srcu)) { in nvme_mpath_add_sysfs_link()
1235 dev_err(disk_to_dev(ns->head->disk), in nvme_mpath_add_sysfs_link()
1242 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_mpath_add_sysfs_link()
1254 kobj = &disk_to_dev(ns->head->disk)->kobj; in nvme_mpath_remove_sysfs_link()
1286 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
1287 ns->head->disk->nr_zones = ns->disk->nr_zones; in nvme_mpath_add_disk()
1291 void nvme_mpath_remove_disk(struct nvme_ns_head *head) in nvme_mpath_remove_disk() argument
1295 if (!head->disk) in nvme_mpath_remove_disk()
1298 mutex_lock(&head->subsys->lock); in nvme_mpath_remove_disk()
1308 if (!list_empty(&head->list)) in nvme_mpath_remove_disk()
1311 if (head->delayed_removal_secs) { in nvme_mpath_remove_disk()
1318 mod_delayed_work(nvme_wq, &head->remove_work, in nvme_mpath_remove_disk()
1319 head->delayed_removal_secs * HZ); in nvme_mpath_remove_disk()
1321 list_del_init(&head->entry); in nvme_mpath_remove_disk()
1325 mutex_unlock(&head->subsys->lock); in nvme_mpath_remove_disk()
1327 nvme_remove_head(head); in nvme_mpath_remove_disk()
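
Finally, the nvme_mpath_remove_disk() hits sketch the removal decision: nothing happens while sibling paths remain; with delayed_removal_secs set, teardown is deferred via remove_work so a quickly-returning path can reuse the head; otherwise the head is unlinked and removed immediately. A condensed sketch, with the remove bookkeeping added for illustration:

	bool remove = false;

	mutex_lock(&head->subsys->lock);
	if (list_empty(&head->list)) {            /* no paths left for this head */
		if (head->delayed_removal_secs) {
			/* defer teardown; remove_work re-checks head->list later */
			mod_delayed_work(nvme_wq, &head->remove_work,
					 head->delayed_removal_secs * HZ);
		} else {
			list_del_init(&head->entry);
			remove = true;
		}
	}
	mutex_unlock(&head->subsys->lock);

	if (remove)
		nvme_remove_head(head);
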
1330 void nvme_mpath_put_disk(struct nvme_ns_head *head) in nvme_mpath_put_disk() argument
1332 if (!head->disk) in nvme_mpath_put_disk()
1335 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_put_disk()
1336 flush_work(&head->requeue_work); in nvme_mpath_put_disk()
1337 flush_work(&head->partition_scan_work); in nvme_mpath_put_disk()
1338 put_disk(head->disk); in nvme_mpath_put_disk()