Lines matching refs:ns — references to the identifier ns in the NVMe multipath driver (the function names indicate drivers/nvme/host/multipath.c). The leading number on each match is the line number in that source file.

85 struct nvme_ns *ns = req->q->queuedata; in nvme_failover_req() local
90 nvme_mpath_clear_current_path(ns); in nvme_failover_req()
97 if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) { in nvme_failover_req()
98 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_failover_req()
99 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_failover_req()
102 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
104 bio_set_dev(bio, ns->head->disk->part0); in nvme_failover_req()
110 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
111 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
114 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
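
These matches trace nvme_failover_req(): a failed path first drops the cached current-path pointer; an ANA error additionally marks the namespace pending and queues a re-read of the ANA log page; finally the request's bios are retargeted at the head disk and stolen onto the head's requeue list so they can be retried on another path. A condensed sketch reconstructed from the matches above (simplified and version-dependent, not the verbatim kernel source):

    void nvme_failover_req(struct request *req)
    {
        struct nvme_ns *ns = req->q->queuedata;
        u16 status = nvme_req(req)->status & 0x7ff;
        unsigned long flags;
        struct bio *bio;

        nvme_mpath_clear_current_path(ns);

        /* An ANA error means the controller is alive but this path's
         * state changed: kick off a re-read of the ANA log page. */
        if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
            set_bit(NVME_NS_ANA_PENDING, &ns->flags);
            queue_work(nvme_wq, &ns->ctrl->ana_work);
        }

        /* Re-own the bios under the head's requeue lock, then complete
         * the now bio-less request successfully. */
        spin_lock_irqsave(&ns->head->requeue_lock, flags);
        for (bio = req->bio; bio; bio = bio->bi_next)
            bio_set_dev(bio, ns->head->disk->part0);
        blk_steal_bios(&ns->head->requeue_list, req);
        spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

        blk_mq_end_request(req, 0);
        kblockd_schedule_work(&ns->head->requeue_work);
    }
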
119 struct nvme_ns *ns = rq->q->queuedata; in nvme_mpath_start_request() local
120 struct gendisk *disk = ns->head->disk; in nvme_mpath_start_request()
134 struct nvme_ns *ns = rq->q->queuedata; in nvme_mpath_end_request() local
138 bdev_end_io_acct(ns->head->disk->part0, req_op(rq), in nvme_mpath_end_request()
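
nvme_mpath_start_request() and nvme_mpath_end_request() charge I/O statistics to the head disk's part0 rather than to the per-path device, so iostat on the multipath node sees traffic from all paths. A hedged sketch of the pairing (the real hooks also skip passthrough requests and queues without I/O accounting enabled, and the bdev_*_io_acct() signatures have shifted between kernel versions):

    void nvme_mpath_start_request(struct request *rq)
    {
        struct nvme_ns *ns = rq->q->queuedata;
        struct gendisk *disk = ns->head->disk;

        /* Remember that this request was accounted on the head disk. */
        nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
        nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0,
                                                      req_op(rq), jiffies);
    }

    void nvme_mpath_end_request(struct request *rq)
    {
        struct nvme_ns *ns = rq->q->queuedata;

        if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
            return;
        bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
                         blk_rq_bytes(rq) >> SECTOR_SHIFT,
                         nvme_req(rq)->start_time);
    }
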
144 struct nvme_ns *ns; in nvme_kick_requeue_lists() local
147 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_kick_requeue_lists()
148 if (!ns->head->disk) in nvme_kick_requeue_lists()
150 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
152 disk_uevent(ns->head->disk, KOBJ_CHANGE); in nvme_kick_requeue_lists()
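
nvme_kick_requeue_lists() is how a controller event (a path coming back, for instance) restarts queued I/O: it walks the controller's namespaces, schedules each head's requeue worker, and emits a disk uevent for live controllers. Sketch (shown with the namespaces rwsem; newer kernels protect this list differently):

    void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
    {
        struct nvme_ns *ns;

        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
            if (!ns->head->disk)
                continue;
            kblockd_schedule_work(&ns->head->requeue_work);
            if (ctrl->state == NVME_CTRL_LIVE)
                disk_uevent(ns->head->disk, KOBJ_CHANGE);
        }
        up_read(&ctrl->namespaces_rwsem);
    }
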
166 bool nvme_mpath_clear_current_path(struct nvme_ns *ns) in nvme_mpath_clear_current_path() argument
168 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path()
176 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
187 struct nvme_ns *ns; in nvme_mpath_clear_ctrl_paths() local
190 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_mpath_clear_ctrl_paths()
191 nvme_mpath_clear_current_path(ns); in nvme_mpath_clear_ctrl_paths()
192 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
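
The two clear-path helpers invalidate cached path choices: nvme_mpath_clear_current_path() NULLs every per-node current_path slot that still points at the given namespace, and nvme_mpath_clear_ctrl_paths() applies that to all of a controller's namespaces and kicks the requeue workers so waiting I/O reselects. Sketch of the first helper, mirroring the matches:

    bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
    {
        struct nvme_ns_head *head = ns->head;
        bool changed = false;
        int node;

        if (!head)
            return changed;

        for_each_node(node) {
            if (ns == rcu_access_pointer(head->current_path[node])) {
                rcu_assign_pointer(head->current_path[node], NULL);
                changed = true;
            }
        }
        return changed;
    }
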
197 void nvme_mpath_revalidate_paths(struct nvme_ns *ns) in nvme_mpath_revalidate_paths() argument
199 struct nvme_ns_head *head = ns->head; in nvme_mpath_revalidate_paths()
205 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_mpath_revalidate_paths()
206 if (capacity != get_capacity(ns->disk)) in nvme_mpath_revalidate_paths()
207 clear_bit(NVME_NS_READY, &ns->flags); in nvme_mpath_revalidate_paths()
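
nvme_mpath_revalidate_paths() deals with capacity changes: any sibling whose gendisk capacity disagrees with the head disk loses its NVME_NS_READY bit, which makes nvme_path_is_disabled() filter it out, and the cached per-node paths are dropped. Sketch (reconstructed; srcu read-side locking abbreviated):

    void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
    {
        struct nvme_ns_head *head = ns->head;
        sector_t capacity = get_capacity(head->disk);
        int node;

        list_for_each_entry_rcu(ns, &head->list, siblings) {
            if (capacity != get_capacity(ns->disk))
                clear_bit(NVME_NS_READY, &ns->flags);
        }

        /* Force reselection on every node. */
        for_each_node(node)
            rcu_assign_pointer(head->current_path[node], NULL);
    }
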
216 static bool nvme_path_is_disabled(struct nvme_ns *ns) in nvme_path_is_disabled() argument
223 if (ns->ctrl->state != NVME_CTRL_LIVE && in nvme_path_is_disabled()
224 ns->ctrl->state != NVME_CTRL_DELETING) in nvme_path_is_disabled()
226 if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || in nvme_path_is_disabled()
227 !test_bit(NVME_NS_READY, &ns->flags)) in nvme_path_is_disabled()
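
nvme_path_is_disabled() is the common filter every path selector applies. Reassembled from the matched lines:

    static bool nvme_path_is_disabled(struct nvme_ns *ns)
    {
        /* DELETING is still usable here so that queued I/O can drain
         * while a controller is being torn down. */
        if (ns->ctrl->state != NVME_CTRL_LIVE &&
            ns->ctrl->state != NVME_CTRL_DELETING)
            return true;
        if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
            !test_bit(NVME_NS_READY, &ns->flags))
            return true;
        return false;
    }
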
235 struct nvme_ns *found = NULL, *fallback = NULL, *ns; in __nvme_find_path() local
237 list_for_each_entry_rcu(ns, &head->list, siblings) { in __nvme_find_path()
238 if (nvme_path_is_disabled(ns)) in __nvme_find_path()
242 distance = node_distance(node, ns->ctrl->numa_node); in __nvme_find_path()
246 switch (ns->ana_state) { in __nvme_find_path()
250 found = ns; in __nvme_find_path()
256 fallback = ns; in __nvme_find_path()
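
__nvme_find_path() does the full scan: among enabled siblings it prefers ANA-optimized paths, keeps a non-optimized one as fallback, and under the NUMA I/O policy weighs candidates by node_distance() from the submitting node. The winner is cached in head->current_path[node]. Condensed sketch, reconstructed from the matches:

    static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
    {
        int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
        struct nvme_ns *found = NULL, *fallback = NULL, *ns;

        list_for_each_entry_rcu(ns, &head->list, siblings) {
            if (nvme_path_is_disabled(ns))
                continue;

            if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
                distance = node_distance(node, ns->ctrl->numa_node);
            else
                distance = LOCAL_DISTANCE;

            switch (ns->ana_state) {
            case NVME_ANA_OPTIMIZED:
                if (distance < found_distance) {
                    found_distance = distance;
                    found = ns;
                }
                break;
            case NVME_ANA_NONOPTIMIZED:
                if (distance < fallback_distance) {
                    fallback_distance = distance;
                    fallback = ns;
                }
                break;
            default:
                break;
            }
        }

        if (!found)
            found = fallback;
        if (found)
            rcu_assign_pointer(head->current_path[node], found);
        return found;
    }
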
272 struct nvme_ns *ns) in nvme_next_ns() argument
274 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
276 if (ns) in nvme_next_ns()
277 return ns; in nvme_next_ns()
284 struct nvme_ns *ns, *found = NULL; in nvme_round_robin_path() local
292 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
293 ns && ns != old; in nvme_round_robin_path()
294 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
295 if (nvme_path_is_disabled(ns)) in nvme_round_robin_path()
298 if (ns->ana_state == NVME_ANA_OPTIMIZED) { in nvme_round_robin_path()
299 found = ns; in nvme_round_robin_path()
302 if (ns->ana_state == NVME_ANA_NONOPTIMIZED) in nvme_round_robin_path()
303 found = ns; in nvme_round_robin_path()
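
nvme_next_ns() walks the sibling list with wrap-around, and nvme_round_robin_path() uses it to resume scanning just after the previously used path, so consecutive I/Os spread across paths: the first optimized path wins, and a non-optimized one is remembered as fallback. Sketch (the real function also special-cases a single-entry list and may fall back to the old path itself):

    static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
                                        struct nvme_ns *ns)
    {
        ns = list_next_or_null_rcu(&head->list, &ns->siblings,
                                   struct nvme_ns, siblings);
        if (ns)
            return ns;
        /* Wrapped past the tail: continue from the head of the list. */
        return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
    }

    static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
                                                 int node, struct nvme_ns *old)
    {
        struct nvme_ns *ns, *found = NULL;

        for (ns = nvme_next_ns(head, old);
             ns && ns != old;
             ns = nvme_next_ns(head, ns)) {
            if (nvme_path_is_disabled(ns))
                continue;
            if (ns->ana_state == NVME_ANA_OPTIMIZED) {
                found = ns;
                break;
            }
            if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
                found = ns;
        }

        if (found)
            rcu_assign_pointer(head->current_path[node], found);
        return found;
    }
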
324 static inline bool nvme_path_is_optimized(struct nvme_ns *ns) in nvme_path_is_optimized() argument
326 return ns->ctrl->state == NVME_CTRL_LIVE && in nvme_path_is_optimized()
327 ns->ana_state == NVME_ANA_OPTIMIZED; in nvme_path_is_optimized()
333 struct nvme_ns *ns; in nvme_find_path() local
335 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_find_path()
336 if (unlikely(!ns)) in nvme_find_path()
340 return nvme_round_robin_path(head, node, ns); in nvme_find_path()
341 if (unlikely(!nvme_path_is_optimized(ns))) in nvme_find_path()
343 return ns; in nvme_find_path()
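
nvme_find_path() is the per-I/O fast path: srcu-dereference the per-node cache and only rescan when the cache is empty or the cached path is no longer optimized; the round-robin policy always advances instead. Sketch matching the lines above:

    inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
    {
        int node = numa_node_id();
        struct nvme_ns *ns;

        ns = srcu_dereference(head->current_path[node], &head->srcu);
        if (unlikely(!ns))
            return __nvme_find_path(head, node);

        if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
            return nvme_round_robin_path(head, node, ns);
        if (unlikely(!nvme_path_is_optimized(ns)))
            return __nvme_find_path(head, node);
        return ns;
    }
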
348 struct nvme_ns *ns; in nvme_available_path() local
350 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_available_path()
351 if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) in nvme_available_path()
353 switch (ns->ctrl->state) { in nvme_available_path()
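
nvme_available_path() answers a different question than nvme_find_path(): not "which path now" but "could any path come back". Controllers whose failfast timer has expired are skipped; LIVE, RESETTING, and CONNECTING controllers all count, since their paths may (re)appear. Sketch:

    static bool nvme_available_path(struct nvme_ns_head *head)
    {
        struct nvme_ns *ns;

        list_for_each_entry_rcu(ns, &head->list, siblings) {
            if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
                continue;
            switch (ns->ctrl->state) {
            case NVME_CTRL_LIVE:
            case NVME_CTRL_RESETTING:
            case NVME_CTRL_CONNECTING:
                /* This path may become usable again: worth waiting. */
                return true;
            default:
                break;
            }
        }
        return false;
    }
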
370 struct nvme_ns *ns; in nvme_ns_head_submit_bio() local
383 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
384 if (likely(ns)) { in nvme_ns_head_submit_bio()
385 bio_set_dev(bio, ns->disk->part0); in nvme_ns_head_submit_bio()
387 trace_block_bio_remap(bio, disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
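
nvme_ns_head_submit_bio() ties the selectors together: pick a path, remap the bio onto that path's gendisk (the trace_block_bio_remap() match above records the remap), and submit; with no path, either park the bio on the head's requeue list or fail it, depending on nvme_available_path(). Condensed sketch (bio splitting and the ratelimited warnings omitted):

    void nvme_ns_head_submit_bio(struct bio *bio)
    {
        struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
        struct nvme_ns *ns;
        int srcu_idx;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (likely(ns)) {
            bio_set_dev(bio, ns->disk->part0);
            trace_block_bio_remap(bio, disk_devt(ns->head->disk),
                                  bio->bi_iter.bi_sector);
            submit_bio_noacct(bio);
        } else if (nvme_available_path(head)) {
            /* No usable path right now, but one may recover: requeue. */
            spin_lock_irq(&head->requeue_lock);
            bio_list_add(&head->requeue_list, bio);
            spin_unlock_irq(&head->requeue_lock);
        } else {
            /* No path will come back: fail the I/O. */
            bio_io_error(bio);
        }
        srcu_read_unlock(&head->srcu, srcu_idx);
    }
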
422 struct nvme_ns *ns; in nvme_ns_head_report_zones() local
426 ns = nvme_find_path(head); in nvme_ns_head_report_zones()
427 if (ns) in nvme_ns_head_report_zones()
428 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); in nvme_ns_head_report_zones()
559 static void nvme_mpath_set_live(struct nvme_ns *ns) in nvme_mpath_set_live() argument
561 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live()
576 clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); in nvme_mpath_set_live()
583 if (nvme_path_is_optimized(ns)) { in nvme_mpath_set_live()
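
nvme_mpath_set_live() publishes the shared head disk once a path becomes usable: test_and_set_bit() on NVME_NSHEAD_DISK_LIVE (a bit that lives in head->flags, not ns->flags) guarantees only one of several racing paths calls device_add_disk(); if the new path is optimized, the per-node path caches are pre-populated. Sketch (attribute groups and cdev registration vary across kernel versions):

    static void nvme_mpath_set_live(struct nvme_ns *ns)
    {
        struct nvme_ns_head *head = ns->head;
        int rc;

        if (!head->disk)
            return;

        /* Only the first path to get here registers the head disk. */
        if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
            rc = device_add_disk(&head->subsys->dev, head->disk, NULL);
            if (rc) {
                clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
                return;
            }
        }

        if (nvme_path_is_optimized(ns)) {
            int node, srcu_idx;

            /* Warm the per-node caches with the new optimized path. */
            srcu_idx = srcu_read_lock(&head->srcu);
            for_each_node(node)
                __nvme_find_path(head, node);
            srcu_read_unlock(&head->srcu, srcu_idx);
        }

        kblockd_schedule_work(&head->requeue_work);
    }
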
647 struct nvme_ns *ns) in nvme_update_ns_ana_state() argument
649 ns->ana_grpid = le32_to_cpu(desc->grpid); in nvme_update_ns_ana_state()
650 ns->ana_state = desc->state; in nvme_update_ns_ana_state()
651 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_update_ns_ana_state()
661 if (nvme_state_is_live(ns->ana_state) && in nvme_update_ns_ana_state()
662 ns->ctrl->state == NVME_CTRL_LIVE) in nvme_update_ns_ana_state()
663 nvme_mpath_set_live(ns); in nvme_update_ns_ana_state()
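
nvme_update_ns_ana_state() copies a group descriptor's state into the namespace and clears the pending bit; the path is surfaced via nvme_mpath_set_live() only when both the ANA state and the controller state say I/O can flow. Sketch matching the lines above:

    static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
                                         struct nvme_ns *ns)
    {
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

        /* Surface the path only if both ANA and the controller are live. */
        if (nvme_state_is_live(ns->ana_state) &&
            ns->ctrl->state == NVME_CTRL_LIVE)
            nvme_mpath_set_live(ns);
    }
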
671 struct nvme_ns *ns; in nvme_update_ana_state() local
684 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_update_ana_state()
688 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
690 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
691 nvme_update_ns_ana_state(desc, ns); in nvme_update_ana_state()
694 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
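
nvme_update_ana_state() applies one ANA group descriptor to a controller. Both ctrl->namespaces and the descriptor's nsids array are sorted by NSID, so a single forward merge pass matches them; the three comparisons in the matches above (<, ==, >) implement that merge. Sketch (rwsem variant; change counting and callback plumbing trimmed):

    static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
                                     struct nvme_ana_group_desc *desc)
    {
        u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
        struct nvme_ns *ns;

        if (!nr_nsids)
            return 0;

        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
            unsigned int nsid;
    again:
            nsid = le32_to_cpu(desc->nsids[n]);
            if (ns->head->ns_id < nsid)
                continue;          /* namespace not in this group */
            if (ns->head->ns_id == nsid)
                nvme_update_ns_ana_state(desc, ns);
            if (++n == nr_nsids)
                break;
            if (ns->head->ns_id > nsid)
                goto again;        /* descriptor nsid has no local ns */
        }
        up_read(&ctrl->namespaces_rwsem);
        return 0;
    }
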
820 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); in ana_state_show() local
822 return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); in ana_state_show()
838 void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) in nvme_mpath_add_disk() argument
840 if (nvme_ctrl_use_ana(ns->ctrl)) { in nvme_mpath_add_disk()
846 mutex_lock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
847 ns->ana_grpid = le32_to_cpu(anagrpid); in nvme_mpath_add_disk()
848 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); in nvme_mpath_add_disk()
849 mutex_unlock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
852 nvme_update_ns_ana_state(&desc, ns); in nvme_mpath_add_disk()
855 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_mpath_add_disk()
856 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_mpath_add_disk()
859 ns->ana_state = NVME_ANA_OPTIMIZED; in nvme_mpath_add_disk()
860 nvme_mpath_set_live(ns); in nvme_mpath_add_disk()
863 if (blk_queue_stable_writes(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
865 ns->head->disk->queue); in nvme_mpath_add_disk()
867 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
868 ns->head->disk->nr_zones = ns->disk->nr_zones; in nvme_mpath_add_disk()
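
nvme_mpath_add_disk() wires a new namespace into multipath. With ANA, it first tries to resolve the group's state from the cached ANA log under ana_lock (nvme_lookup_ana_group_desc being the matching callback passed to nvme_parse_ana_log); if the group is not found, it marks the namespace pending and lets ana_work re-read the log. Without ANA, the path is simply treated as optimized and set live. Finally, queue properties (stable writes, zone count) are propagated to the head disk. Sketch, reconstructed and version-dependent:

    void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
    {
        if (nvme_ctrl_use_ana(ns->ctrl)) {
            struct nvme_ana_group_desc desc = {
                .grpid = anagrpid,
                .state = 0,
            };

            mutex_lock(&ns->ctrl->ana_lock);
            ns->ana_grpid = le32_to_cpu(anagrpid);
            nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
            mutex_unlock(&ns->ctrl->ana_lock);
            if (desc.state) {
                /* Found in the cached log: apply it directly. */
                nvme_update_ns_ana_state(&desc, ns);
            } else {
                /* Not found: re-read the ANA log asynchronously. */
                set_bit(NVME_NS_ANA_PENDING, &ns->flags);
                queue_work(nvme_wq, &ns->ctrl->ana_work);
            }
        } else {
            ns->ana_state = NVME_ANA_OPTIMIZED;
            nvme_mpath_set_live(ns);
        }

        if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
            blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
                               ns->head->disk->queue);
    #ifdef CONFIG_BLK_DEV_ZONED
        if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
            ns->head->disk->nr_zones = ns->disk->nr_zones;
    #endif
    }
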