Lines matching refs:sdev (drivers/scsi/scsi_lib.c)
294 int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd, in scsi_execute_cmd() argument
311 req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags); in scsi_execute_cmd()
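
A minimal caller sketch for scsi_execute_cmd() as declared above; the INQUIRY command, timeout, and retry count are illustrative, and example_inquiry() is a hypothetical helper, not part of the source:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_proto.h>

    /* Hypothetical caller: read standard INQUIRY data through the
     * passthrough helper. args.sshdr receives decoded sense data. */
    static int example_inquiry(struct scsi_device *sdev, void *buf,
                               unsigned int len)
    {
            const unsigned char cmd[6] = {
                    INQUIRY, 0, 0, len >> 8, len & 0xff, 0
            };
            struct scsi_sense_hdr sshdr;
            const struct scsi_exec_args args = { .sshdr = &sshdr };

            /* REQ_OP_DRV_IN: data moves from the device into buf. */
            return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, len,
                                    30 * HZ, 3, &args);
    }
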
389 void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd) in scsi_device_unbusy() argument
391 struct Scsi_Host *shost = sdev->host; in scsi_device_unbusy()
392 struct scsi_target *starget = scsi_target(sdev); in scsi_device_unbusy()
399 sbitmap_put(&sdev->budget_map, cmd->budget_token); in scsi_device_unbusy()
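
scsi_device_unbusy() returns the command's budget token to the per-device sbitmap; the matching sbitmap_get() appears in scsi_dev_queue_ready() further down. A sketch of the pattern, with a hypothetical function name:

    #include <linux/sbitmap.h>

    /* One sbitmap bit per in-flight command bounds the effective queue
     * depth without taking a per-device lock. */
    static int example_budget_cycle(struct scsi_device *sdev)
    {
            int token = sbitmap_get(&sdev->budget_map);

            if (token < 0)          /* all queue_depth tokens in use */
                    return -EBUSY;
            /* ... dispatch, stashing token in cmd->budget_token ... */
            sbitmap_put(&sdev->budget_map, token);  /* on completion */
            return 0;
    }
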
407 static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data) in scsi_kick_sdev_queue() argument
411 if (sdev != current_sdev) in scsi_kick_sdev_queue()
412 blk_mq_run_hw_queues(sdev->request_queue, true); in scsi_kick_sdev_queue()
448 static inline bool scsi_device_is_busy(struct scsi_device *sdev) in scsi_device_is_busy() argument
450 if (scsi_device_busy(sdev) >= sdev->queue_depth) in scsi_device_is_busy()
452 if (atomic_read(&sdev->device_blocked) > 0) in scsi_device_is_busy()
480 struct scsi_device *sdev; in scsi_starved_list_run() local
502 sdev = list_entry(starved_list.next, in scsi_starved_list_run()
504 list_del_init(&sdev->starved_entry); in scsi_starved_list_run()
505 if (scsi_target_is_busy(scsi_target(sdev))) { in scsi_starved_list_run()
506 list_move_tail(&sdev->starved_entry, in scsi_starved_list_run()
521 slq = sdev->request_queue; in scsi_starved_list_run()
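
scsi_starved_list_run() drains shost->starved_list under the host lock, re-parking devices whose target is still saturated. A sketch mirroring that discipline (not the exact source; scsi_target_is_busy() is a file-local helper):

    #include <linux/list.h>

    static struct scsi_device *example_pop_starved(struct Scsi_Host *shost,
                                                   struct list_head *still_busy)
    {
            struct scsi_device *sdev;

            if (list_empty(&shost->starved_list))
                    return NULL;
            sdev = list_first_entry(&shost->starved_list,
                                    struct scsi_device, starved_entry);
            if (scsi_target_is_busy(scsi_target(sdev))) {
                    /* target still saturated: park behind the others */
                    list_move_tail(&sdev->starved_entry, still_busy);
                    return NULL;
            }
            list_del_init(&sdev->starved_entry);    /* ready to run */
            return sdev;
    }
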
544 struct scsi_device *sdev = q->queuedata; in scsi_run_queue() local
546 if (scsi_target(sdev)->single_lun) in scsi_run_queue()
547 scsi_single_lun_run(sdev); in scsi_run_queue()
548 if (!list_empty(&sdev->host->starved_list)) in scsi_run_queue()
549 scsi_starved_list_run(sdev->host); in scsi_run_queue()
557 struct scsi_device *sdev; in scsi_requeue_run_queue() local
560 sdev = container_of(work, struct scsi_device, requeue_work); in scsi_requeue_run_queue()
561 q = sdev->request_queue; in scsi_requeue_run_queue()
567 struct scsi_device *sdev; in scsi_run_host_queues() local
569 shost_for_each_device(sdev, shost) in scsi_run_host_queues()
570 scsi_run_queue(sdev->request_queue); in scsi_run_host_queues()
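
scsi_run_host_queues() uses the standard iterator, which holds a reference on each device across the loop body. A minimal sketch of the same walk:

    /* shost_for_each_device() manages the sdev reference itself;
     * breaking out of the loop early requires a scsi_device_put(). */
    static void example_walk_devices(struct Scsi_Host *shost)
    {
            struct scsi_device *sdev;

            shost_for_each_device(sdev, shost)
                    blk_mq_run_hw_queues(sdev->request_queue, false);
    }
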
600 static void scsi_run_queue_async(struct scsi_device *sdev) in scsi_run_queue_async() argument
602 if (scsi_host_in_recovery(sdev->host)) in scsi_run_queue_async()
605 if (scsi_target(sdev)->single_lun || in scsi_run_queue_async()
606 !list_empty(&sdev->host->starved_list)) { in scsi_run_queue_async()
607 kblockd_schedule_work(&sdev->requeue_work); in scsi_run_queue_async()
614 int old = atomic_read(&sdev->restarts); in scsi_run_queue_async()
624 if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old) in scsi_run_queue_async()
625 blk_mq_run_hw_queues(sdev->request_queue, true); in scsi_run_queue_async()
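
The restarts counter closes a lost-wakeup race with scsi_mq_get_budget(), which increments it (line 1770) after a failed budget grab: the cmpxchg back to zero only reruns the queue when no new failure slipped in between. The generic form of the handshake, with hypothetical names:

    #include <linux/atomic.h>

    static void example_waker(atomic_t *restarts, struct request_queue *q)
    {
            int old = atomic_read(restarts);

            /* If another waiter bumped the counter after the read, the
             * cmpxchg fails and that waiter's own completion path will
             * rerun the queue instead, so no wakeup is lost. */
            if (old && atomic_cmpxchg(restarts, old, 0) == old)
                    blk_mq_run_hw_queues(q, true);
    }
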
634 struct scsi_device *sdev = cmd->device; in scsi_end_request() local
635 struct request_queue *q = sdev->request_queue; in scsi_end_request()
672 scsi_run_queue_async(sdev); in scsi_end_request()
1100 static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev, in scsi_cmd_needs_dma_drain() argument
1103 return sdev->dma_drain_len && blk_rq_is_passthrough(rq) && in scsi_cmd_needs_dma_drain()
1105 sdev->host->hostt->dma_need_drain(rq); in scsi_cmd_needs_dma_drain()
1122 struct scsi_device *sdev = cmd->device; in scsi_alloc_sgtables() local
1127 bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq); in scsi_alloc_sgtables()
1164 sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len); in scsi_alloc_sgtables()
1167 cmd->extra_len += sdev->dma_drain_len; in scsi_alloc_sgtables()
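
When the host's dma_need_drain() callback asks for it, scsi_alloc_sgtables() appends the device's drain buffer as one extra scatterlist element and charges it to extra_len. A sketch of the append, following the fields shown above:

    #include <linux/scatterlist.h>

    static void example_append_drain(struct scatterlist *last_sg,
                                     struct scsi_cmnd *cmd,
                                     struct scsi_device *sdev)
    {
            sg_unmark_end(last_sg);                 /* reopen the table */
            last_sg = sg_next(last_sg);
            sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
            sg_mark_end(last_sg);                   /* new terminator */
            cmd->extra_len += sdev->dma_drain_len;
    }
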
1280 static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, in scsi_setup_scsi_cmnd() argument
1306 scsi_device_state_check(struct scsi_device *sdev, struct request *req) in scsi_device_state_check() argument
1308 switch (sdev->sdev_state) { in scsi_device_state_check()
1318 if (!sdev->offline_already) { in scsi_device_state_check()
1319 sdev->offline_already = true; in scsi_device_state_check()
1320 sdev_printk(KERN_ERR, sdev, in scsi_device_state_check()
1329 sdev_printk(KERN_ERR, sdev, in scsi_device_state_check()
1359 struct scsi_device *sdev) in scsi_dev_queue_ready() argument
1363 token = sbitmap_get(&sdev->budget_map); in scsi_dev_queue_ready()
1367 if (!atomic_read(&sdev->device_blocked)) in scsi_dev_queue_ready()
1374 if (scsi_device_busy(sdev) > 1 || in scsi_dev_queue_ready()
1375 atomic_dec_return(&sdev->device_blocked) > 0) { in scsi_dev_queue_ready()
1376 sbitmap_put(&sdev->budget_map, token); in scsi_dev_queue_ready()
1380 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, in scsi_dev_queue_ready()
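
scsi_dev_queue_ready() pairs the budget token with the device_blocked counter: while the device is blocked, at most one command trickles through at a time, and each pass decrements the counter until normal queueing resumes. The decision condensed into a sketch:

    /* Returns true when the caller may keep its budget token and
     * dispatch; mirrors lines 1367-1376 above. */
    static bool example_may_dispatch(struct scsi_device *sdev)
    {
            if (!atomic_read(&sdev->device_blocked))
                    return true;                    /* fast path */
            if (scsi_device_busy(sdev) > 1 ||
                atomic_dec_return(&sdev->device_blocked) > 0)
                    return false;                   /* keep throttling */
            return true;                            /* block drained */
    }
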
1391 struct scsi_device *sdev) in scsi_target_queue_ready() argument
1393 struct scsi_target *starget = scsi_target(sdev); in scsi_target_queue_ready()
1399 starget->starget_sdev_user != sdev) { in scsi_target_queue_ready()
1403 starget->starget_sdev_user = sdev; in scsi_target_queue_ready()
1432 list_move_tail(&sdev->starved_entry, &shost->starved_list); in scsi_target_queue_ready()
1447 struct scsi_device *sdev, in scsi_host_queue_ready() argument
1469 if (!list_empty(&sdev->starved_entry)) { in scsi_host_queue_ready()
1471 if (!list_empty(&sdev->starved_entry)) in scsi_host_queue_ready()
1472 list_del_init(&sdev->starved_entry); in scsi_host_queue_ready()
1482 if (list_empty(&sdev->starved_entry)) in scsi_host_queue_ready()
1483 list_add_tail(&sdev->starved_entry, &shost->starved_list); in scsi_host_queue_ready()
1504 struct scsi_device *sdev = q->queuedata; in scsi_mq_lld_busy() local
1510 shost = sdev->host; in scsi_mq_lld_busy()
1518 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) in scsi_mq_lld_busy()
1654 struct scsi_device *sdev = req->q->queuedata; in scsi_prepare_cmd() local
1655 struct Scsi_Host *shost = sdev->host; in scsi_prepare_cmd()
1659 scsi_init_command(sdev, cmd); in scsi_prepare_cmd()
1696 return scsi_setup_scsi_cmnd(sdev, req); in scsi_prepare_cmd()
1698 if (sdev->handler && sdev->handler->prep_fn) { in scsi_prepare_cmd()
1699 blk_status_t ret = sdev->handler->prep_fn(sdev, req); in scsi_prepare_cmd()
1750 struct scsi_device *sdev = q->queuedata; in scsi_mq_put_budget() local
1752 sbitmap_put(&sdev->budget_map, budget_token); in scsi_mq_put_budget()
1764 struct scsi_device *sdev = q->queuedata; in scsi_mq_get_budget() local
1765 int token = scsi_dev_queue_ready(q, sdev); in scsi_mq_get_budget()
1770 atomic_inc(&sdev->restarts); in scsi_mq_get_budget()
1787 if (unlikely(scsi_device_busy(sdev) == 0 && in scsi_mq_get_budget()
1788 !scsi_device_blocked(sdev))) in scsi_mq_get_budget()
1789 blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY); in scsi_mq_get_budget()
1812 struct scsi_device *sdev = q->queuedata; in scsi_queue_rq() local
1813 struct Scsi_Host *shost = sdev->host; in scsi_queue_rq()
1824 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { in scsi_queue_rq()
1825 ret = scsi_device_state_check(sdev, req); in scsi_queue_rq()
1831 if (!scsi_target_queue_ready(shost, sdev)) in scsi_queue_rq()
1838 if (!scsi_host_queue_ready(q, shost, sdev, cmd)) in scsi_queue_rq()
1858 if (sdev->simple_tags) in scsi_queue_rq()
1880 if (scsi_target(sdev)->can_queue > 0) in scsi_queue_rq()
1881 atomic_dec(&scsi_target(sdev)->target_busy); in scsi_queue_rq()
1889 if (scsi_device_blocked(sdev)) in scsi_queue_rq()
1898 if (unlikely(!scsi_device_online(sdev))) in scsi_queue_rq()
1909 scsi_run_queue_async(sdev); in scsi_queue_rq()
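
scsi_queue_rq() climbs a three-level readiness ladder (device state, target, host) before handing the command to the LLD; the unwind at lines 1880-1881 backs out the target accounting on failure. A condensed shape, where example_target_ready() and example_host_ready() are hypothetical stand-ins for the static helpers in the listing:

    static bool example_target_ready(struct scsi_device *sdev);
    static bool example_host_ready(struct scsi_device *sdev);

    static blk_status_t example_ladder(struct scsi_device *sdev)
    {
            if (unlikely(sdev->sdev_state != SDEV_RUNNING))
                    return BLK_STS_IOERR;   /* scsi_device_state_check() */
            if (!example_target_ready(sdev))
                    return BLK_STS_RESOURCE;        /* target saturated */
            if (!example_host_ready(sdev))
                    return BLK_STS_RESOURCE;        /* host saturated */
            return BLK_STS_OK;              /* queue to the LLD */
    }
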
2118 struct scsi_device *sdev = NULL; in scsi_device_from_queue() local
2122 sdev = q->queuedata; in scsi_device_from_queue()
2123 if (!sdev || !get_device(&sdev->sdev_gendev)) in scsi_device_from_queue()
2124 sdev = NULL; in scsi_device_from_queue()
2126 return sdev; in scsi_device_from_queue()
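
The returned device carries a reference taken with get_device(), so callers must balance it. A usage sketch:

    static void example_peek_sdev(struct request_queue *q)
    {
            struct scsi_device *sdev = scsi_device_from_queue(q);

            if (!sdev)
                    return;         /* not a SCSI queue */
            sdev_printk(KERN_INFO, sdev, "queue depth %d\n",
                        sdev->queue_depth);
            put_device(&sdev->sdev_gendev);         /* drop the reference */
    }
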
2190 int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, in scsi_mode_select() argument
2209 if (sdev->use_10_for_ms || in scsi_mode_select()
2248 ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len, in scsi_mode_select()
2272 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage, in scsi_mode_sense() argument
2303 dbd = sdev->set_dbd_for_ms ? 8 : dbd; in scsi_mode_sense()
2311 use_10_for_ms = sdev->use_10_for_ms || len > 255; in scsi_mode_sense()
2331 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, in scsi_mode_sense()
2355 sdev->use_10_for_ms = 0; in scsi_mode_sense()
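
Line 2355 shows the fallback: when MODE SENSE(10) is rejected, use_10_for_ms is cleared and the six-byte form is used from then on. A hypothetical caller, with illustrative page, timeout, and retry values:

    /* Fetch the Caching mode page (0x08, no subpage). */
    static int example_caching_page(struct scsi_device *sdev,
                                    unsigned char *buf, int len)
    {
            struct scsi_mode_data data;
            struct scsi_sense_hdr sshdr;

            return scsi_mode_sense(sdev, 0 /* dbd */, 0x08, 0 /* subpage */,
                                   buf, len, 30 * HZ, 3, &data, &sshdr);
    }
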
2400 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, in scsi_test_unit_ready() argument
2413 result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, in scsi_test_unit_ready()
2415 if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) && in scsi_test_unit_ready()
2417 sdev->changed = 1; in scsi_test_unit_ready()
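
A usage sketch; note the side effect above, where a removable device that reports a check condition is flagged as changed:

    static bool example_ready(struct scsi_device *sdev)
    {
            struct scsi_sense_hdr sshdr;

            /* Illustrative timeout and retry count. */
            return scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0;
    }
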
2434 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) in scsi_device_set_state() argument
2436 enum scsi_device_state oldstate = sdev->sdev_state; in scsi_device_set_state()
2538 sdev->offline_already = false; in scsi_device_set_state()
2539 sdev->sdev_state = state; in scsi_device_set_state()
2544 sdev_printk(KERN_ERR, sdev, in scsi_device_set_state()
2560 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) in scsi_evt_emit() argument
2570 scsi_rescan_device(sdev); in scsi_evt_emit()
2598 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); in scsi_evt_emit()
2610 struct scsi_device *sdev; in scsi_evt_thread() local
2614 sdev = container_of(work, struct scsi_device, event_work); in scsi_evt_thread()
2617 if (test_and_clear_bit(evt_type, sdev->pending_events)) in scsi_evt_thread()
2618 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); in scsi_evt_thread()
2625 spin_lock_irqsave(&sdev->list_lock, flags); in scsi_evt_thread()
2626 list_splice_init(&sdev->event_list, &event_list); in scsi_evt_thread()
2627 spin_unlock_irqrestore(&sdev->list_lock, flags); in scsi_evt_thread()
2635 scsi_evt_emit(sdev, evt); in scsi_evt_thread()
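
scsi_evt_thread() uses the classic drain idiom: splice the whole pending list out in O(1) under the lock, then emit each event without holding it. The idiom in isolation:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static void example_drain_events(struct scsi_device *sdev)
    {
            LIST_HEAD(event_list);
            struct scsi_event *evt, *next;
            unsigned long flags;

            spin_lock_irqsave(&sdev->list_lock, flags);
            list_splice_init(&sdev->event_list, &event_list);
            spin_unlock_irqrestore(&sdev->list_lock, flags);

            list_for_each_entry_safe(evt, next, &event_list, node) {
                    list_del(&evt->node);
                    /* ... emit evt (see scsi_evt_emit above) ... */
                    kfree(evt);
            }
    }
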
2648 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) in sdev_evt_send() argument
2656 if (!test_bit(evt->evt_type, sdev->supported_events)) { in sdev_evt_send()
2662 spin_lock_irqsave(&sdev->list_lock, flags); in sdev_evt_send()
2663 list_add_tail(&evt->node, &sdev->event_list); in sdev_evt_send()
2664 schedule_work(&sdev->event_work); in sdev_evt_send()
2665 spin_unlock_irqrestore(&sdev->list_lock, flags); in sdev_evt_send()
2713 void sdev_evt_send_simple(struct scsi_device *sdev, in sdev_evt_send_simple() argument
2718 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", in sdev_evt_send_simple()
2723 sdev_evt_send(sdev, evt); in sdev_evt_send_simple()
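
Typical call, assuming the event type is in the device's supported_events mask; allocation failure under memory pressure is logged rather than propagated (line 2718):

    sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
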
2741 scsi_device_quiesce(struct scsi_device *sdev) in scsi_device_quiesce() argument
2743 struct request_queue *q = sdev->request_queue; in scsi_device_quiesce()
2752 WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current); in scsi_device_quiesce()
2754 if (sdev->quiesced_by == current) in scsi_device_quiesce()
2769 mutex_lock(&sdev->state_mutex); in scsi_device_quiesce()
2770 err = scsi_device_set_state(sdev, SDEV_QUIESCE); in scsi_device_quiesce()
2772 sdev->quiesced_by = current; in scsi_device_quiesce()
2775 mutex_unlock(&sdev->state_mutex); in scsi_device_quiesce()
2790 void scsi_device_resume(struct scsi_device *sdev) in scsi_device_resume() argument
2796 mutex_lock(&sdev->state_mutex); in scsi_device_resume()
2797 if (sdev->sdev_state == SDEV_QUIESCE) in scsi_device_resume()
2798 scsi_device_set_state(sdev, SDEV_RUNNING); in scsi_device_resume()
2799 if (sdev->quiesced_by) { in scsi_device_resume()
2800 sdev->quiesced_by = NULL; in scsi_device_resume()
2801 blk_clear_pm_only(sdev->request_queue); in scsi_device_resume()
2803 mutex_unlock(&sdev->state_mutex); in scsi_device_resume()
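
Quiesce and resume must run in the same task; the quiesced_by checks above enforce that. A pairing sketch:

    static int example_quiesced_section(struct scsi_device *sdev)
    {
            int err = scsi_device_quiesce(sdev);

            if (err)
                    return err;     /* device not in a quiesceable state */
            /* ... only PM/internal requests are processed here ... */
            scsi_device_resume(sdev);
            return 0;
    }
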
2808 device_quiesce_fn(struct scsi_device *sdev, void *data) in device_quiesce_fn() argument
2810 scsi_device_quiesce(sdev); in device_quiesce_fn()
2821 device_resume_fn(struct scsi_device *sdev, void *data) in device_resume_fn() argument
2823 scsi_device_resume(sdev); in device_resume_fn()
2833 static int __scsi_internal_device_block_nowait(struct scsi_device *sdev) in __scsi_internal_device_block_nowait() argument
2835 if (scsi_device_set_state(sdev, SDEV_BLOCK)) in __scsi_internal_device_block_nowait()
2836 return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); in __scsi_internal_device_block_nowait()
2841 void scsi_start_queue(struct scsi_device *sdev) in scsi_start_queue() argument
2843 if (cmpxchg(&sdev->queue_stopped, 1, 0)) in scsi_start_queue()
2844 blk_mq_unquiesce_queue(sdev->request_queue); in scsi_start_queue()
2847 static void scsi_stop_queue(struct scsi_device *sdev) in scsi_stop_queue() argument
2855 if (!cmpxchg(&sdev->queue_stopped, 0, 1)) in scsi_stop_queue()
2856 blk_mq_quiesce_queue_nowait(sdev->request_queue); in scsi_stop_queue()
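
The queue_stopped flag plus cmpxchg makes stop and start idempotent: only the caller that actually flips the flag performs the blk-mq call, so quiesce and unquiesce can never be unbalanced. The idiom in generic form:

    static void example_stop_once(unsigned int *stopped,
                                  struct request_queue *q)
    {
            if (!cmpxchg(stopped, 0, 1))    /* we made the 0 -> 1 edge */
                    blk_mq_quiesce_queue_nowait(q);
    }

    static void example_start_once(unsigned int *stopped,
                                   struct request_queue *q)
    {
            if (cmpxchg(stopped, 1, 0))     /* we made the 1 -> 0 edge */
                    blk_mq_unquiesce_queue(q);
    }
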
2873 int scsi_internal_device_block_nowait(struct scsi_device *sdev) in scsi_internal_device_block_nowait() argument
2875 int ret = __scsi_internal_device_block_nowait(sdev); in scsi_internal_device_block_nowait()
2883 scsi_stop_queue(sdev); in scsi_internal_device_block_nowait()
2903 static void scsi_device_block(struct scsi_device *sdev, void *data) in scsi_device_block() argument
2908 mutex_lock(&sdev->state_mutex); in scsi_device_block()
2909 err = __scsi_internal_device_block_nowait(sdev); in scsi_device_block()
2910 state = sdev->sdev_state; in scsi_device_block()
2917 scsi_stop_queue(sdev); in scsi_device_block()
2919 mutex_unlock(&sdev->state_mutex); in scsi_device_block()
2922 __func__, dev_name(&sdev->sdev_gendev), state); in scsi_device_block()
2940 int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, in scsi_internal_device_unblock_nowait() argument
2955 switch (sdev->sdev_state) { in scsi_internal_device_unblock_nowait()
2958 sdev->sdev_state = new_state; in scsi_internal_device_unblock_nowait()
2963 sdev->sdev_state = new_state; in scsi_internal_device_unblock_nowait()
2965 sdev->sdev_state = SDEV_CREATED; in scsi_internal_device_unblock_nowait()
2973 scsi_start_queue(sdev); in scsi_internal_device_unblock_nowait()
2993 static int scsi_internal_device_unblock(struct scsi_device *sdev, in scsi_internal_device_unblock() argument
2998 mutex_lock(&sdev->state_mutex); in scsi_internal_device_unblock()
2999 ret = scsi_internal_device_unblock_nowait(sdev, new_state); in scsi_internal_device_unblock()
3000 mutex_unlock(&sdev->state_mutex); in scsi_internal_device_unblock()
3036 device_unblock(struct scsi_device *sdev, void *data) in device_unblock() argument
3038 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); in device_unblock()
3073 struct scsi_device *sdev; in scsi_host_block() local
3080 shost_for_each_device(sdev, shost) { in scsi_host_block()
3081 mutex_lock(&sdev->state_mutex); in scsi_host_block()
3082 ret = scsi_internal_device_block_nowait(sdev); in scsi_host_block()
3083 mutex_unlock(&sdev->state_mutex); in scsi_host_block()
3085 scsi_device_put(sdev); in scsi_host_block()
3100 struct scsi_device *sdev; in scsi_host_unblock() local
3103 shost_for_each_device(sdev, shost) { in scsi_host_unblock()
3104 ret = scsi_internal_device_unblock(sdev, new_state); in scsi_host_unblock()
3106 scsi_device_put(sdev); in scsi_host_unblock()
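
A usage sketch for the pair: block every device on a host around a disruptive operation, then return them to SDEV_RUNNING:

    static int example_freeze_host(struct Scsi_Host *shost)
    {
            int ret = scsi_host_block(shost);

            if (ret)
                    return ret;
            /* ... reset or reconfigure the host ... */
            return scsi_host_unblock(shost, SDEV_RUNNING);
    }
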
3174 void sdev_disable_disk_events(struct scsi_device *sdev) in sdev_disable_disk_events() argument
3176 atomic_inc(&sdev->disk_events_disable_depth); in sdev_disable_disk_events()
3180 void sdev_enable_disk_events(struct scsi_device *sdev) in sdev_enable_disk_events() argument
3182 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) in sdev_enable_disk_events()
3184 atomic_dec(&sdev->disk_events_disable_depth); in sdev_enable_disk_events()
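
disk_events_disable_depth is a nesting counter, so paired calls may nest; only the outermost enable re-arms event polling:

    sdev_disable_disk_events(sdev);
    sdev_disable_disk_events(sdev); /* nested: depth == 2 */
    sdev_enable_disk_events(sdev);  /* still disabled, depth == 1 */
    sdev_enable_disk_events(sdev);  /* re-enabled at depth == 0 */
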
3274 int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) in scsi_vpd_lun_id() argument
3283 vpd_pg83 = rcu_dereference(sdev->vpd_pg83); in scsi_vpd_lun_id()
3400 int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) in scsi_vpd_tpg_id() argument
3407 vpd_pg83 = rcu_dereference(sdev->vpd_pg83); in scsi_vpd_tpg_id()
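
Both VPD helpers read the cached page 0x83 under RCU; the pointer is only valid inside the read-side critical section. A sketch of the required discipline for any reader:

    #include <linux/rcupdate.h>

    static int example_vpd_pg83_len(struct scsi_device *sdev)
    {
            struct scsi_vpd *vpd;
            int len = -ENXIO;

            rcu_read_lock();
            vpd = rcu_dereference(sdev->vpd_pg83);
            if (vpd)
                    len = vpd->len; /* copy out while protected */
            rcu_read_unlock();
            return len;
    }
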