Lines matching references to "ns":
88 struct nvme_ns *ns = req->q->queuedata; in nvme_failover_req() local
93 nvme_mpath_clear_current_path(ns); in nvme_failover_req()
100 if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) { in nvme_failover_req()
101 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_failover_req()
102 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_failover_req()
105 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
107 bio_set_dev(bio, ns->head->disk->part0); in nvme_failover_req()
121 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
122 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
126 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
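
The failover fragment above clears the cached path, kicks ANA handling for ANA errors, and moves the failed request's bios onto the shared head's requeue list under head->requeue_lock. The following is a minimal userspace sketch of just that last step, not the kernel code: struct io, struct ns_head and failover_requeue() are hypothetical stand-ins, and a pthread mutex stands in for the spinlock.

#include <pthread.h>
#include <stdio.h>

struct io { struct io *next; int lba; };

struct ns_head {
        pthread_mutex_t requeue_lock;
        struct io *requeue_list;        /* singly linked, LIFO for brevity */
};

/* Steal every pending I/O from the failed path onto the head's requeue list. */
static void failover_requeue(struct ns_head *head, struct io **failed_list)
{
        pthread_mutex_lock(&head->requeue_lock);
        while (*failed_list) {
                struct io *io = *failed_list;

                *failed_list = io->next;
                io->next = head->requeue_list;
                head->requeue_list = io;
        }
        pthread_mutex_unlock(&head->requeue_lock);
        /* the kernel kicks the head's requeue work here to retry the I/O */
}

int main(void)
{
        struct ns_head head = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct io a = { NULL, 100 }, b = { &a, 200 };
        struct io *failed = &b;

        failover_requeue(&head, &failed);
        for (struct io *io = head.requeue_list; io; io = io->next)
                printf("requeued lba %d\n", io->lba);
        return 0;
}
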
131 struct nvme_ns *ns = rq->q->queuedata; in nvme_mpath_start_request() local
132 struct gendisk *disk = ns->head->disk; in nvme_mpath_start_request()
134 if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) { in nvme_mpath_start_request()
135 atomic_inc(&ns->ctrl->nr_active); in nvme_mpath_start_request()
150 struct nvme_ns *ns = rq->q->queuedata; in nvme_mpath_end_request() local
153 atomic_dec_if_positive(&ns->ctrl->nr_active); in nvme_mpath_end_request()
157 bdev_end_io_acct(ns->head->disk->part0, req_op(rq), in nvme_mpath_end_request()
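
The two fragments above bump a per-controller nr_active counter when a request starts under the queue-depth iopolicy and drop it on completion without going negative. A small userspace sketch of that accounting, assuming hypothetical types and using C11 atomics in place of the kernel atomic_t API:

#include <stdatomic.h>
#include <stdio.h>

struct ctrl { atomic_int nr_active; };

static void mpath_start_request(struct ctrl *c)
{
        atomic_fetch_add(&c->nr_active, 1);
}

/* Mirrors the atomic_dec_if_positive() idea: only decrement while > 0. */
static void mpath_end_request(struct ctrl *c)
{
        int cur = atomic_load(&c->nr_active);

        while (cur > 0 &&
               !atomic_compare_exchange_weak(&c->nr_active, &cur, cur - 1))
                ;       /* cur is reloaded by the failed CAS */
}

int main(void)
{
        struct ctrl c = { 0 };

        mpath_start_request(&c);
        mpath_start_request(&c);
        mpath_end_request(&c);
        printf("nr_active = %d\n", atomic_load(&c.nr_active)); /* prints 1 */
        return 0;
}
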
164 struct nvme_ns *ns; in nvme_kick_requeue_lists() local
168 list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { in nvme_kick_requeue_lists()
169 if (!ns->head->disk) in nvme_kick_requeue_lists()
171 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
172 if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE) in nvme_kick_requeue_lists()
173 disk_uevent(ns->head->disk, KOBJ_CHANGE); in nvme_kick_requeue_lists()
187 bool nvme_mpath_clear_current_path(struct nvme_ns *ns) in nvme_mpath_clear_current_path() argument
189 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path()
197 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
208 struct nvme_ns *ns; in nvme_mpath_clear_ctrl_paths() local
212 list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { in nvme_mpath_clear_ctrl_paths()
213 nvme_mpath_clear_current_path(ns); in nvme_mpath_clear_ctrl_paths()
214 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
219 void nvme_mpath_revalidate_paths(struct nvme_ns *ns) in nvme_mpath_revalidate_paths() argument
221 struct nvme_ns_head *head = ns->head; in nvme_mpath_revalidate_paths()
227 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_mpath_revalidate_paths()
228 if (capacity != get_capacity(ns->disk)) in nvme_mpath_revalidate_paths()
229 clear_bit(NVME_NS_READY, &ns->flags); in nvme_mpath_revalidate_paths()
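
Path revalidation above marks any sibling whose reported capacity no longer matches the shared capacity as not ready, so the selector stops using it. A minimal sketch of that check with hypothetical userspace types:

#include <stdbool.h>
#include <stdio.h>

struct path {
        const char *name;
        unsigned long long capacity;    /* sectors */
        bool ready;
};

static void revalidate_paths(struct path *paths, int n,
                             unsigned long long head_capacity)
{
        for (int i = 0; i < n; i++)
                if (paths[i].capacity != head_capacity)
                        paths[i].ready = false;     /* drop mismatched path */
}

int main(void)
{
        struct path paths[] = {
                { "path0", 1000, true },
                { "path1",  800, true },            /* stale size */
        };

        revalidate_paths(paths, 2, 1000);
        for (int i = 0; i < 2; i++)
                printf("%s ready=%d\n", paths[i].name, paths[i].ready);
        return 0;
}
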
238 static bool nvme_path_is_disabled(struct nvme_ns *ns) in nvme_path_is_disabled() argument
240 enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl); in nvme_path_is_disabled()
249 if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || in nvme_path_is_disabled()
250 !test_bit(NVME_NS_READY, &ns->flags)) in nvme_path_is_disabled()
258 struct nvme_ns *found = NULL, *fallback = NULL, *ns; in __nvme_find_path() local
260 list_for_each_entry_rcu(ns, &head->list, siblings) { in __nvme_find_path()
261 if (nvme_path_is_disabled(ns)) in __nvme_find_path()
264 if (ns->ctrl->numa_node != NUMA_NO_NODE && in __nvme_find_path()
266 distance = node_distance(node, ns->ctrl->numa_node); in __nvme_find_path()
270 switch (ns->ana_state) { in __nvme_find_path()
274 found = ns; in __nvme_find_path()
280 fallback = ns; in __nvme_find_path()
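
The __nvme_find_path() fragment implements the NUMA iopolicy: skip unusable paths, prefer the ANA-optimized path with the smallest NUMA distance, and keep the closest non-optimized path as a fallback. A self-contained userspace sketch of that selection; the types, the distance table, and LOCAL_DISTANCE are made-up stand-ins:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };

struct path {
        const char *name;
        bool disabled;          /* controller down, ANA pending, not ready */
        int numa_node;          /* -1: controller reported no node */
        enum ana_state ana_state;
};

#define LOCAL_DISTANCE 10       /* stand-in for the "same node" distance */

/* made-up two-node distance table standing in for node_distance() */
static int node_distance(int a, int b)
{
        return a == b ? LOCAL_DISTANCE : 20;
}

static const struct path *find_path(const struct path *paths, int n, int node)
{
        const struct path *found = NULL, *fallback = NULL;
        int best_found = INT_MAX, best_fallback = INT_MAX;

        for (int i = 0; i < n; i++) {
                const struct path *p = &paths[i];
                int distance = LOCAL_DISTANCE;

                if (p->disabled)
                        continue;
                if (p->numa_node >= 0)
                        distance = node_distance(node, p->numa_node);

                if (p->ana_state == ANA_OPTIMIZED && distance < best_found) {
                        best_found = distance;
                        found = p;
                } else if (p->ana_state == ANA_NONOPTIMIZED &&
                           distance < best_fallback) {
                        best_fallback = distance;
                        fallback = p;
                }
        }
        return found ? found : fallback;
}

int main(void)
{
        const struct path paths[] = {
                { "nvme0n1", false, 1, ANA_OPTIMIZED },
                { "nvme1n1", false, 0, ANA_OPTIMIZED },
                { "nvme2n1", false, 0, ANA_NONOPTIMIZED },
        };
        const struct path *p = find_path(paths, 3, 0);

        printf("node 0 uses %s\n", p ? p->name : "no path"); /* nvme1n1 */
        return 0;
}
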
296 struct nvme_ns *ns) in nvme_next_ns() argument
298 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
300 if (ns) in nvme_next_ns()
301 return ns; in nvme_next_ns()
307 struct nvme_ns *ns, *found = NULL; in nvme_round_robin_path() local
321 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
322 ns && ns != old; in nvme_round_robin_path()
323 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
324 if (nvme_path_is_disabled(ns)) in nvme_round_robin_path()
327 if (ns->ana_state == NVME_ANA_OPTIMIZED) { in nvme_round_robin_path()
328 found = ns; in nvme_round_robin_path()
331 if (ns->ana_state == NVME_ANA_NONOPTIMIZED) in nvme_round_robin_path()
332 found = ns; in nvme_round_robin_path()
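
The round-robin fragment starts just after the previously used path, walks the siblings once with wrap-around, skips disabled paths, takes the first ANA-optimized path, and falls back to a non-optimized one. A simplified userspace sketch over an array instead of the kernel's RCU list; all names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };

struct path {
        const char *name;
        bool disabled;
        enum ana_state ana_state;
};

static int round_robin_next(const struct path *paths, int n, int old)
{
        int fallback = -1;

        for (int step = 1; step <= n; step++) {
                int i = (old + step) % n;       /* wrap around the list */
                const struct path *p = &paths[i];

                if (p->disabled)
                        continue;
                if (p->ana_state == ANA_OPTIMIZED)
                        return i;               /* best case: stop here */
                if (p->ana_state == ANA_NONOPTIMIZED && fallback < 0)
                        fallback = i;
        }
        return fallback;                        /* -1 if nothing usable */
}

int main(void)
{
        const struct path paths[] = {
                { "path0", false, ANA_OPTIMIZED },
                { "path1", false, ANA_OPTIMIZED },
                { "path2", true,  ANA_OPTIMIZED },  /* disabled, skipped */
        };
        int last = 0;

        for (int i = 0; i < 3; i++) {
                last = round_robin_next(paths, 3, last);
                printf("I/O %d -> %s\n", i, paths[last].name);
        }
        return 0;       /* alternates path1, path0, path1 */
}
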
355 struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns; in nvme_queue_depth_path() local
359 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_queue_depth_path()
360 if (nvme_path_is_disabled(ns)) in nvme_queue_depth_path()
363 depth = atomic_read(&ns->ctrl->nr_active); in nvme_queue_depth_path()
365 switch (ns->ana_state) { in nvme_queue_depth_path()
369 best_opt = ns; in nvme_queue_depth_path()
375 best_nonopt = ns; in nvme_queue_depth_path()
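
The queue-depth fragment picks, among usable paths, the ANA-optimized path whose controller has the fewest outstanding requests (nr_active), remembering the shallowest non-optimized path as a backup. A userspace sketch of that choice with hypothetical types and a plain int standing in for the atomic counter:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };

struct path {
        const char *name;
        bool disabled;
        int nr_active;          /* in-flight requests on this controller */
        enum ana_state ana_state;
};

static const struct path *queue_depth_path(const struct path *paths, int n)
{
        const struct path *best_opt = NULL, *best_nonopt = NULL;
        int min_opt = INT_MAX, min_nonopt = INT_MAX;

        for (int i = 0; i < n; i++) {
                const struct path *p = &paths[i];

                if (p->disabled)
                        continue;
                if (p->ana_state == ANA_OPTIMIZED && p->nr_active < min_opt) {
                        min_opt = p->nr_active;
                        best_opt = p;
                } else if (p->ana_state == ANA_NONOPTIMIZED &&
                           p->nr_active < min_nonopt) {
                        min_nonopt = p->nr_active;
                        best_nonopt = p;
                }
        }
        return best_opt ? best_opt : best_nonopt;
}

int main(void)
{
        const struct path paths[] = {
                { "pathA", false, 12, ANA_OPTIMIZED },
                { "pathB", false,  3, ANA_OPTIMIZED },
                { "pathC", false,  0, ANA_NONOPTIMIZED },
        };

        printf("selected %s\n", queue_depth_path(paths, 3)->name); /* pathB */
        return 0;
}
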
389 static inline bool nvme_path_is_optimized(struct nvme_ns *ns) in nvme_path_is_optimized() argument
391 return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE && in nvme_path_is_optimized()
392 ns->ana_state == NVME_ANA_OPTIMIZED; in nvme_path_is_optimized()
398 struct nvme_ns *ns; in nvme_numa_path() local
400 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_numa_path()
401 if (unlikely(!ns)) in nvme_numa_path()
403 if (unlikely(!nvme_path_is_optimized(ns))) in nvme_numa_path()
405 return ns; in nvme_numa_path()
422 struct nvme_ns *ns; in nvme_available_path() local
427 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_available_path()
428 if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) in nvme_available_path()
430 switch (nvme_ctrl_state(ns->ctrl)) { in nvme_available_path()
446 struct nvme_ns *ns; in nvme_ns_head_submit_bio() local
459 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
460 if (likely(ns)) { in nvme_ns_head_submit_bio()
461 bio_set_dev(bio, ns->disk->part0); in nvme_ns_head_submit_bio()
463 trace_block_bio_remap(bio, disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
497 struct nvme_ns *ns; in nvme_ns_head_get_unique_id() local
501 ns = nvme_find_path(head); in nvme_ns_head_get_unique_id()
502 if (ns) in nvme_ns_head_get_unique_id()
503 ret = nvme_ns_get_unique_id(ns, id, type); in nvme_ns_head_get_unique_id()
513 struct nvme_ns *ns; in nvme_ns_head_report_zones() local
517 ns = nvme_find_path(head); in nvme_ns_head_report_zones()
518 if (ns) in nvme_ns_head_report_zones()
519 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); in nvme_ns_head_report_zones()
661 static void nvme_mpath_set_live(struct nvme_ns *ns) in nvme_mpath_set_live() argument
663 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live()
686 if (nvme_path_is_optimized(ns)) { in nvme_mpath_set_live()
750 struct nvme_ns *ns) in nvme_update_ns_ana_state() argument
752 ns->ana_grpid = le32_to_cpu(desc->grpid); in nvme_update_ns_ana_state()
753 ns->ana_state = desc->state; in nvme_update_ns_ana_state()
754 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_update_ns_ana_state()
764 if (nvme_state_is_live(ns->ana_state) && in nvme_update_ns_ana_state()
765 nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE) in nvme_update_ns_ana_state()
766 nvme_mpath_set_live(ns); in nvme_update_ns_ana_state()
774 struct nvme_ns *ns; in nvme_update_ana_state() local
788 list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { in nvme_update_ana_state()
792 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
794 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
795 nvme_update_ns_ana_state(desc, ns); in nvme_update_ana_state()
798 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
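
nvme_update_ana_state() above walks the controller's namespace list (sorted by NSID) against one ANA group descriptor's sorted NSID array, applying the group's state to every matching namespace. A small userspace sketch of that two-pointer merge; struct namespace and struct ana_group_desc are hypothetical stand-ins for the kernel structures:

#include <stdio.h>

struct namespace {
        unsigned int nsid;
        unsigned int ana_grpid;
        int ana_state;          /* e.g. 1 = optimized, 3 = inaccessible */
};

struct ana_group_desc {
        unsigned int grpid;
        int state;
        int nr_nsids;
        unsigned int nsids[8];  /* sorted ascending */
};

static void update_ana_state(struct namespace *nss, int nr_ns,
                             const struct ana_group_desc *desc)
{
        int n = 0;

        for (int i = 0; i < nr_ns && n < desc->nr_nsids; i++) {
                while (n < desc->nr_nsids && desc->nsids[n] < nss[i].nsid)
                        n++;    /* descriptor NSID not present locally */
                if (n < desc->nr_nsids && desc->nsids[n] == nss[i].nsid) {
                        nss[i].ana_grpid = desc->grpid;
                        nss[i].ana_state = desc->state;
                        n++;
                }
        }
}

int main(void)
{
        struct namespace nss[] = { { 1 }, { 2 }, { 5 } };
        const struct ana_group_desc desc = { .grpid = 7, .state = 1,
                                             .nr_nsids = 2, .nsids = { 2, 5 } };

        update_ana_state(nss, 3, &desc);
        for (int i = 0; i < 3; i++)
                printf("nsid %u: grp %u state %d\n",
                       nss[i].nsid, nss[i].ana_grpid, nss[i].ana_state);
        return 0;
}
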
947 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); in ana_state_show() local
949 return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); in ana_state_show()
965 void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) in nvme_mpath_add_disk() argument
967 if (nvme_ctrl_use_ana(ns->ctrl)) { in nvme_mpath_add_disk()
973 mutex_lock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
974 ns->ana_grpid = le32_to_cpu(anagrpid); in nvme_mpath_add_disk()
975 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); in nvme_mpath_add_disk()
976 mutex_unlock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
979 nvme_update_ns_ana_state(&desc, ns); in nvme_mpath_add_disk()
982 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_mpath_add_disk()
983 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_mpath_add_disk()
986 ns->ana_state = NVME_ANA_OPTIMIZED; in nvme_mpath_add_disk()
987 nvme_mpath_set_live(ns); in nvme_mpath_add_disk()
991 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
992 ns->head->disk->nr_zones = ns->disk->nr_zones; in nvme_mpath_add_disk()