Lines matching refs: vblk (Linux virtio_blk block driver)
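For orientation, the references below touch the following struct virtio_blk members. This is a sketch inferred from the listing, not the driver's authoritative definition; member order, comments, and the exact types of fields whose declarations are not listed are assumptions.

struct virtio_blk {
        /* Serializes vdev access and guards against hot-unplug (see virtblk_getgeo/virtblk_remove). */
        struct mutex vdev_mutex;
        struct virtio_device *vdev;

        /* The gendisk exposed to the block layer. */
        struct gendisk *disk;

        /* blk-mq tag set configured in virtblk_probe(). */
        struct blk_mq_tag_set tag_set;

        /* Worker for config-space change notifications. */
        struct work_struct config_work;

        /* Index allocated from vd_index_ida, used for the minor number. */
        int index;

        /* Request virtqueues and their split across hctx types. */
        int num_vqs;
        int io_queues[HCTX_MAX_TYPES];
        struct virtio_blk_vq *vqs;

        /* Zone size in sectors, for zoned devices. */
        unsigned int zone_sectors;
};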
136 struct virtio_blk *vblk = hctx->queue->queuedata; in get_virtio_blk_vq() local
137 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in get_virtio_blk_vq()
369 struct virtio_blk *vblk = vq->vdev->priv; in virtblk_done() local
370 struct virtio_blk_vq *vblk_vq = &vblk->vqs[vq->index]; in virtblk_done()
389 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); in virtblk_done()
396 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_commit_rqs() local
397 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
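Lines 136-137 above reduce the hctx-to-virtqueue mapping to a two-step lookup; reconstructed as a sketch (the return statement is assumed, only the two locals appear in the listing):

static struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];

        return vq;
}

The same queuedata/priv back-pointers let virtblk_done() (line 369) and virtio_commit_rqs() (line 396) recover the virtio_blk instance from a completed virtqueue or a hardware context, respectively.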
422 struct virtio_blk *vblk, in virtblk_prep_rq() argument
429 status = virtblk_setup_cmd(vblk->vdev, req, vbr); in virtblk_prep_rq()
446 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_queue_rq() local
455 status = virtblk_prep_rq(hctx, vblk, req, vbr); in virtio_queue_rq()
459 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
460 err = virtblk_add_req(vblk->vqs[qid].vq, vbr); in virtio_queue_rq()
462 virtqueue_kick(vblk->vqs[qid].vq); in virtio_queue_rq()
468 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
473 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) in virtio_queue_rq()
475 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
478 virtqueue_notify(vblk->vqs[qid].vq); in virtio_queue_rq()
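Lines 459-478 show the per-virtqueue submission discipline in virtio_queue_rq(): the vq spinlock covers the descriptor add and the kick_prepare decision, while the actual device notification is issued after the lock is dropped. A condensed sketch of that body (declarations elided; the error-branch handling and status codes are assumptions):

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
        if (err) {
                /* Ring likely full: kick what is already queued, stop the hw queue (assumed). */
                virtqueue_kick(vblk->vqs[qid].vq);
                blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                return BLK_STS_DEV_RESOURCE;
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;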
484 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; in virtblk_prep_rq_batch() local
489 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK; in virtblk_prep_rq_batch()
551 static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk, in virtblk_alloc_report_buffer() argument
556 struct request_queue *q = vblk->disk->queue; in virtblk_alloc_report_buffer()
561 get_capacity(vblk->disk) >> ilog2(zone_sectors)); in virtblk_alloc_report_buffer()
581 static int virtblk_submit_zone_report(struct virtio_blk *vblk, in virtblk_submit_zone_report() argument
585 struct request_queue *q = vblk->disk->queue; in virtblk_submit_zone_report()
596 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT); in virtblk_submit_zone_report()
597 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector); in virtblk_submit_zone_report()
610 static int virtblk_parse_zone(struct virtio_blk *vblk, in virtblk_parse_zone() argument
620 dev_err(&vblk->vdev->dev, "invalid zone type %#x\n", in virtblk_parse_zone()
642 struct virtio_blk *vblk = disk->private_data; in virtblk_report_zones() local
644 unsigned int zone_sectors = vblk->zone_sectors; in virtblk_report_zones()
649 if (WARN_ON_ONCE(!vblk->zone_sectors)) in virtblk_report_zones()
652 report = virtblk_alloc_report_buffer(vblk, nr_zones, in virtblk_report_zones()
657 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) { in virtblk_report_zones()
660 ret = virtblk_submit_zone_report(vblk, (char *)report, in virtblk_report_zones()
672 ret = virtblk_parse_zone(vblk, &report->zones[i], in virtblk_report_zones()
690 static void virtblk_revalidate_zones(struct virtio_blk *vblk) in virtblk_revalidate_zones() argument
694 if (!vblk->zone_sectors) in virtblk_revalidate_zones()
697 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_revalidate_zones()
699 if (!blk_revalidate_disk_zones(vblk->disk, NULL)) in virtblk_revalidate_zones()
700 set_capacity_and_notify(vblk->disk, 0); in virtblk_revalidate_zones()
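Lines 690-700 stitch back into the zone-revalidation helper roughly as below. The virtio_cread() on line 697 continues onto a line not shown in the listing, so the config field name here is an assumption:

static void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
        u8 model;

        if (!vblk->zone_sectors)
                return;

        /* Field name assumed; only the call site appears in the listing. */
        virtio_cread(vblk->vdev, struct virtio_blk_config, zoned.model, &model);
        if (!blk_revalidate_disk_zones(vblk->disk, NULL))
                set_capacity_and_notify(vblk->disk, 0);
}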
704 struct virtio_blk *vblk, in virtblk_probe_zoned_device() argument
733 disk_set_zoned(vblk->disk, BLK_ZONED_HM); in virtblk_probe_zoned_device()
738 disk_set_max_open_zones(vblk->disk, le32_to_cpu(v)); in virtblk_probe_zoned_device()
744 disk_set_max_active_zones(vblk->disk, le32_to_cpu(v)); in virtblk_probe_zoned_device()
763 vblk->zone_sectors = le32_to_cpu(v); in virtblk_probe_zoned_device()
764 if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) { in virtblk_probe_zoned_device()
767 vblk->zone_sectors); in virtblk_probe_zoned_device()
770 dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors); in virtblk_probe_zoned_device()
773 dev_warn(&vblk->vdev->dev, in virtblk_probe_zoned_device()
778 ret = blk_revalidate_disk_zones(vblk->disk, NULL); in virtblk_probe_zoned_device()
804 static inline void virtblk_revalidate_zones(struct virtio_blk *vblk) in virtblk_revalidate_zones() argument
808 struct virtio_blk *vblk, struct request_queue *q) in virtblk_probe_zoned_device() argument
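Lines 804 and 808 are the fallbacks built when zoned support is compiled out; only the signatures are visible, so the bodies, the #ifdef symbol, and the return value below are assumptions:

#else /* presumably !CONFIG_BLK_DEV_ZONED */

static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
}

static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
                        struct virtio_blk *vblk, struct request_queue *q)
{
        return -EOPNOTSUPP;     /* assumed: zoned devices cannot be driven without zoned support */
}

#endif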
823 struct virtio_blk *vblk = disk->private_data; in virtblk_get_id() local
824 struct request_queue *q = vblk->disk->queue; in virtblk_get_id()
835 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); in virtblk_get_id()
852 struct virtio_blk *vblk = bd->bd_disk->private_data; in virtblk_getgeo() local
855 mutex_lock(&vblk->vdev_mutex); in virtblk_getgeo()
857 if (!vblk->vdev) { in virtblk_getgeo()
863 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { in virtblk_getgeo()
864 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
866 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
868 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
877 mutex_unlock(&vblk->vdev_mutex); in virtblk_getgeo()
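Lines 852-877 are the reason vdev_mutex exists: virtblk_getgeo() can race with virtblk_remove(), which clears vblk->vdev (line 1547), so every config access checks for NULL under the mutex. A sketch of the pattern (declarations elided; the error value and the geometry field names are filled in from the virtio_blk_config layout and should be treated as assumptions):

        mutex_lock(&vblk->vdev_mutex);

        if (!vblk->vdev) {
                /* Device was hot-unplugged while the disk node was still open. */
                ret = -ENXIO;
                goto out;
        }

        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        }
out:
        mutex_unlock(&vblk->vdev_mutex);
        return ret;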
883 struct virtio_blk *vblk = disk->private_data; in virtblk_free_disk() local
885 ida_free(&vd_index_ida, vblk->index); in virtblk_free_disk()
886 mutex_destroy(&vblk->vdev_mutex); in virtblk_free_disk()
887 kfree(vblk); in virtblk_free_disk()
930 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize) in virtblk_update_capacity() argument
932 struct virtio_device *vdev = vblk->vdev; in virtblk_update_capacity()
933 struct request_queue *q = vblk->disk->queue; in virtblk_update_capacity()
950 vblk->disk->disk_name, in virtblk_update_capacity()
957 set_capacity_and_notify(vblk->disk, capacity); in virtblk_update_capacity()
962 struct virtio_blk *vblk = in virtblk_config_changed_work() local
965 virtblk_revalidate_zones(vblk); in virtblk_config_changed_work()
966 virtblk_update_capacity(vblk, true); in virtblk_config_changed_work()
971 struct virtio_blk *vblk = vdev->priv; in virtblk_config_changed() local
973 queue_work(virtblk_wq, &vblk->config_work); in virtblk_config_changed()
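Lines 962-973 show the config-change plumbing: the transport callback only queues work on virtblk_wq, and the worker re-reads the zone layout and capacity in process context. Reassembled (the container_of() continuation of line 962 is assumed):

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);

        virtblk_revalidate_zones(vblk);
        virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}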
976 static int init_vq(struct virtio_blk *vblk) in init_vq() argument
985 struct virtio_device *vdev = vblk->vdev; in init_vq()
1005 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs; in init_vq()
1006 vblk->io_queues[HCTX_TYPE_READ] = 0; in init_vq()
1007 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs; in init_vq()
1010 vblk->io_queues[HCTX_TYPE_DEFAULT], in init_vq()
1011 vblk->io_queues[HCTX_TYPE_READ], in init_vq()
1012 vblk->io_queues[HCTX_TYPE_POLL]); in init_vq()
1014 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); in init_vq()
1015 if (!vblk->vqs) in init_vq()
1028 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i); in init_vq()
1029 names[i] = vblk->vqs[i].name; in init_vq()
1034 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i); in init_vq()
1035 names[i] = vblk->vqs[i].name; in init_vq()
1044 spin_lock_init(&vblk->vqs[i].lock); in init_vq()
1045 vblk->vqs[i].vq = vqs[i]; in init_vq()
1047 vblk->num_vqs = num_vqs; in init_vq()
1054 kfree(vblk->vqs); in init_vq()
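Lines 1014-1054 give the shape of init_vq(): allocate the per-vq array, name default and poll queues separately, hand the name/callback arrays to the transport, then pair each returned virtqueue with its lock. A trimmed sketch (the callbacks array, the virtio_find_vqs() call, and the cleanup at out are assumptions around the listed lines):

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        for (i = 0; i < num_vqs - num_poll_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        for (; i < num_vqs; i++) {
                callbacks[i] = NULL;    /* poll queues take no interrupt callback */
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        if (err)
                kfree(vblk->vqs);
        return err;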
1108 struct virtio_blk *vblk = vdev->priv; in virtblk_update_cache_mode() local
1110 blk_queue_write_cache(vblk->disk->queue, writeback, false); in virtblk_update_cache_mode()
1122 struct virtio_blk *vblk = disk->private_data; in cache_type_store() local
1123 struct virtio_device *vdev = vblk->vdev; in cache_type_store()
1126 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); in cache_type_store()
1140 struct virtio_blk *vblk = disk->private_data; in cache_type_show() local
1141 u8 writeback = virtblk_get_cache_mode(vblk->vdev); in cache_type_show()
1160 struct virtio_blk *vblk = disk->private_data; in virtblk_attrs_are_visible() local
1161 struct virtio_device *vdev = vblk->vdev; in virtblk_attrs_are_visible()
1182 struct virtio_blk *vblk = set->driver_data; in virtblk_map_queues() local
1188 map->nr_queues = vblk->io_queues[i]; in virtblk_map_queues()
1203 blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0); in virtblk_map_queues()
1209 struct virtio_blk *vblk = hctx->queue->queuedata; in virtblk_poll() local
1218 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); in virtblk_poll()
1239 struct virtio_blk *vblk; in virtblk_probe() local
1274 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); in virtblk_probe()
1275 if (!vblk) { in virtblk_probe()
1280 mutex_init(&vblk->vdev_mutex); in virtblk_probe()
1282 vblk->vdev = vdev; in virtblk_probe()
1284 INIT_WORK(&vblk->config_work, virtblk_config_changed_work); in virtblk_probe()
1286 err = init_vq(vblk); in virtblk_probe()
1292 queue_depth = vblk->vqs[0].vq->num_free; in virtblk_probe()
1300 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); in virtblk_probe()
1301 vblk->tag_set.ops = &virtio_mq_ops; in virtblk_probe()
1302 vblk->tag_set.queue_depth = queue_depth; in virtblk_probe()
1303 vblk->tag_set.numa_node = NUMA_NO_NODE; in virtblk_probe()
1304 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in virtblk_probe()
1305 vblk->tag_set.cmd_size = in virtblk_probe()
1308 vblk->tag_set.driver_data = vblk; in virtblk_probe()
1309 vblk->tag_set.nr_hw_queues = vblk->num_vqs; in virtblk_probe()
1310 vblk->tag_set.nr_maps = 1; in virtblk_probe()
1311 if (vblk->io_queues[HCTX_TYPE_POLL]) in virtblk_probe()
1312 vblk->tag_set.nr_maps = 3; in virtblk_probe()
1314 err = blk_mq_alloc_tag_set(&vblk->tag_set); in virtblk_probe()
1318 vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk); in virtblk_probe()
1319 if (IS_ERR(vblk->disk)) { in virtblk_probe()
1320 err = PTR_ERR(vblk->disk); in virtblk_probe()
1323 q = vblk->disk->queue; in virtblk_probe()
1325 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); in virtblk_probe()
1327 vblk->disk->major = major; in virtblk_probe()
1328 vblk->disk->first_minor = index_to_minor(index); in virtblk_probe()
1329 vblk->disk->minors = 1 << PART_BITS; in virtblk_probe()
1330 vblk->disk->private_data = vblk; in virtblk_probe()
1331 vblk->disk->fops = &virtblk_fops; in virtblk_probe()
1332 vblk->index = index; in virtblk_probe()
1339 set_disk_ro(vblk->disk, 1); in virtblk_probe()
1498 virtblk_update_capacity(vblk, false); in virtblk_probe()
1502 err = virtblk_probe_zoned_device(vdev, vblk, q); in virtblk_probe()
1510 err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); in virtblk_probe()
1517 put_disk(vblk->disk); in virtblk_probe()
1519 blk_mq_free_tag_set(&vblk->tag_set); in virtblk_probe()
1522 kfree(vblk->vqs); in virtblk_probe()
1524 kfree(vblk); in virtblk_probe()
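Lines 1517-1524 are the tail of virtblk_probe()'s error unwinding, releasing resources in reverse order of setup. The goto labels, the del_vqs() call, and the index release are assumptions; only the release calls shown above are certain:

out_cleanup_disk:
        put_disk(vblk->disk);
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_free(&vd_index_ida, index);
out:
        return err;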
1533 struct virtio_blk *vblk = vdev->priv; in virtblk_remove() local
1536 flush_work(&vblk->config_work); in virtblk_remove()
1538 del_gendisk(vblk->disk); in virtblk_remove()
1539 blk_mq_free_tag_set(&vblk->tag_set); in virtblk_remove()
1541 mutex_lock(&vblk->vdev_mutex); in virtblk_remove()
1547 vblk->vdev = NULL; in virtblk_remove()
1550 kfree(vblk->vqs); in virtblk_remove()
1552 mutex_unlock(&vblk->vdev_mutex); in virtblk_remove()
1554 put_disk(vblk->disk); in virtblk_remove()
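Lines 1533-1554 outline virtblk_remove(): flush any pending config work, tear down the gendisk and tag set, then clear vblk->vdev under vdev_mutex so open handles (e.g. virtblk_getgeo()) see the device as gone before the vq array is freed. The device reset/del_vqs step between lines 1541 and 1547 is not visible in the listing and is assumed below:

        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_mq_free_tag_set(&vblk->tag_set);

        mutex_lock(&vblk->vdev_mutex);

        /* Assumed: stop all virtqueues before dropping the device pointer. */
        virtio_reset_device(vdev);
        vdev->config->del_vqs(vdev);

        /* The disk can still be open here; NULL marks the device as gone. */
        vblk->vdev = NULL;

        kfree(vblk->vqs);

        mutex_unlock(&vblk->vdev_mutex);

        put_disk(vblk->disk);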
1560 struct virtio_blk *vblk = vdev->priv; in virtblk_freeze() local
1566 flush_work(&vblk->config_work); in virtblk_freeze()
1568 blk_mq_quiesce_queue(vblk->disk->queue); in virtblk_freeze()
1571 kfree(vblk->vqs); in virtblk_freeze()
1578 struct virtio_blk *vblk = vdev->priv; in virtblk_restore() local
1587 blk_mq_unquiesce_queue(vblk->disk->queue); in virtblk_restore()
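Lines 1560-1587 pair up as the suspend/resume hooks: freeze quiets the device, flushes config work, quiesces the block queue, and frees the vq array; restore re-creates the virtqueues with init_vq() and unquiesces. The reset, del_vqs(), and virtio_device_ready() calls are assumptions filled in around the listed lines:

static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        virtio_reset_device(vdev);              /* assumed: stop interrupts before teardown */

        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);            /* assumed */
        kfree(vblk->vqs);

        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vblk);
        if (ret)
                return ret;

        virtio_device_ready(vdev);              /* assumed */

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}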