Lines matching refs:nullb — cross-reference of struct nullb usage in the null_blk driver (drivers/block/null_blk/main.c). Each entry shows the source line number, the matching code, and the enclosing function; a trailing "argument" or "local" marks the match as a function parameter or a local declaration.

275 static void null_del_dev(struct nullb *nullb);
277 static struct nullb *null_find_dev_by_name(const char *name);
382 if (!dev->nullb) in nullb_update_nr_hw_queues()
407 set = dev->nullb->tag_set; in nullb_update_nr_hw_queues()
513 null_del_dev(dev->nullb); in nullb_device_power_store()
702 null_del_dev(dev->nullb); in nullb_group_drop_item()
762 static inline int null_cache_active(struct nullb *nullb) in null_cache_active() argument
764 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_cache_active()
890 static void null_free_sector(struct nullb *nullb, sector_t sector, in null_free_sector() argument
898 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_free_sector()
911 nullb->dev->curr_cache -= PAGE_SIZE; in null_free_sector()
916 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx, in null_radix_tree_insert() argument
921 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in null_radix_tree_insert()
928 nullb->dev->curr_cache += PAGE_SIZE; in null_radix_tree_insert()
962 static struct nullb_page *__null_lookup_page(struct nullb *nullb, in __null_lookup_page() argument
973 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; in __null_lookup_page()
983 static struct nullb_page *null_lookup_page(struct nullb *nullb, in null_lookup_page() argument
989 page = __null_lookup_page(nullb, sector, for_write, true); in null_lookup_page()
992 return __null_lookup_page(nullb, sector, for_write, false); in null_lookup_page()
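The storage helpers above keep two radix trees per device: dev->data holds the persistent pages and dev->cache holds dirty write-back pages, with dev->curr_cache tracking the cache's byte footprint (lines 911, 928). null_lookup_page() probes the cache first and falls back to the data tree. A minimal userspace model of that two-level lookup, assuming a flat table in place of the radix trees (struct backing, NPAGES, and lookup_page are hypothetical names, not driver code):

#include <stddef.h>

#define NPAGES 1024                      /* hypothetical capacity */

struct backing {
	void *data[NPAGES];              /* models dev->data      */
	void *cache[NPAGES];             /* models dev->cache     */
};

/* Cache hit wins; otherwise fall back to the data tree, mirroring
 * null_lookup_page() with ignore_cache == false (lines 989-992). */
static void *lookup_page(const struct backing *b, size_t idx)
{
	void *p = b->cache[idx];

	return p ? p : b->data[idx];
}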
995 static struct nullb_page *null_insert_page(struct nullb *nullb, in null_insert_page() argument
997 __releases(&nullb->lock) in null_insert_page()
998 __acquires(&nullb->lock) in null_insert_page()
1003 t_page = null_lookup_page(nullb, sector, true, ignore_cache); in null_insert_page()
1007 spin_unlock_irq(&nullb->lock); in null_insert_page()
1016 spin_lock_irq(&nullb->lock); in null_insert_page()
1019 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache); in null_insert_page()
1026 spin_lock_irq(&nullb->lock); in null_insert_page()
1027 return null_lookup_page(nullb, sector, true, ignore_cache); in null_insert_page()
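null_insert_page() carries __releases()/__acquires() sparse annotations because it drops nullb->lock around the page allocation, which may sleep, then re-takes the lock and looks the sector up again in case a concurrent caller inserted the page first. A sketch of that drop-allocate-recheck pattern with a pthread mutex (slot and insert_page_locked are hypothetical stand-ins):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;                   /* models one radix-tree slot */

/* Called with 'lock' held; returns with 'lock' held. */
static void *insert_page_locked(void)
{
	void *page;

	if (slot)
		return slot;         /* fast path: already present  */

	pthread_mutex_unlock(&lock); /* __releases(&nullb->lock)    */
	page = malloc(4096);         /* allocation may block        */
	pthread_mutex_lock(&lock);   /* __acquires(&nullb->lock)    */

	if (!page)
		return slot;         /* alloc failed: fall back to a
	                              * racing insert, as line 1027 */
	if (slot) {                  /* lost the race               */
		free(page);
		return slot;
	}
	slot = page;
	return slot;
}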
1030 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) in null_flush_cache_page() argument
1040 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); in null_flush_cache_page()
1046 ret = radix_tree_delete_item(&nullb->dev->data, in null_flush_cache_page()
1060 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { in null_flush_cache_page()
1064 nullb->dev->blocksize); in null_flush_cache_page()
1072 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); in null_flush_cache_page()
1074 nullb->dev->curr_cache -= PAGE_SIZE; in null_flush_cache_page()
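null_flush_cache_page() writes one dirty cache page back: it looks up or inserts the matching data page, copies only the sectors the cache page has marked dirty, stepping blocksize sectors at a time (the loop at lines 1060-1064), deletes the cache entry, and subtracts PAGE_SIZE from dev->curr_cache. A simplified model of the dirty-sector copy, assuming a plain uint64_t bitmap (the driver keeps the per-sector state inside its nullb_page):

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE   4096u
#define SECTOR_SIZE 512u

/* Copy every block whose first sector is marked dirty, mirroring the
 * blocksize-stepped loop in null_flush_cache_page(). */
static void flush_dirty(uint8_t *dst, const uint8_t *src,
			const uint64_t *dirty_bitmap, unsigned blocksize)
{
	unsigned sectors_per_block = blocksize / SECTOR_SIZE;

	for (unsigned s = 0; s < PAGE_SIZE / SECTOR_SIZE;
	     s += sectors_per_block) {
		if (dirty_bitmap[s / 64] & (1ull << (s % 64)))
			memcpy(dst + s * SECTOR_SIZE,
			       src + s * SECTOR_SIZE, blocksize);
	}
}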
1079 static int null_make_cache_space(struct nullb *nullb, unsigned long n) in null_make_cache_space() argument
1086 if ((nullb->dev->cache_size * 1024 * 1024) > in null_make_cache_space()
1087 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) in null_make_cache_space()
1090 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, in null_make_cache_space()
1091 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); in null_make_cache_space()
1097 nullb->cache_flush_pos = c_pages[i]->page->private; in null_make_cache_space()
1112 err = null_flush_cache_page(nullb, c_pages[i]); in null_make_cache_space()
1121 nullb->cache_flush_pos = 0; in null_make_cache_space()
1124 spin_unlock_irq(&nullb->lock); in null_make_cache_space()
1125 spin_lock_irq(&nullb->lock); in null_make_cache_space()
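null_make_cache_space() is the eviction path: it returns at once while curr_cache + n still fits under the configured cache_size (in megabytes), otherwise it gang-looks-up up to FREE_BATCH cache pages starting at the nullb->cache_flush_pos cursor, flushes them, and wraps the cursor back to 0 when the scan comes up empty (line 1121). The admission test at lines 1086-1087 is easy to get backwards, so here it is as a standalone predicate (cache_has_room is a hypothetical name):

#include <stdbool.h>

/* Mirrors lines 1086-1087: room remains while the new bytes fit under
 * the limit, and an empty cache never needs eviction. */
static bool cache_has_room(unsigned long cache_size_mb,
			   unsigned long curr_cache, unsigned long n)
{
	return cache_size_mb * 1024 * 1024 > curr_cache + n ||
	       curr_cache == 0;
}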
1132 static int copy_to_nullb(struct nullb *nullb, struct page *source, in copy_to_nullb() argument
1140 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_to_nullb()
1142 if (null_cache_active(nullb) && !is_fua) in copy_to_nullb()
1143 null_make_cache_space(nullb, PAGE_SIZE); in copy_to_nullb()
1146 t_page = null_insert_page(nullb, sector, in copy_to_nullb()
1147 !null_cache_active(nullb) || is_fua); in copy_to_nullb()
1156 null_free_sector(nullb, sector, true); in copy_to_nullb()
1164 static int copy_from_nullb(struct nullb *nullb, struct page *dest, in copy_from_nullb() argument
1172 temp = min_t(size_t, nullb->dev->blocksize, n - count); in copy_from_nullb()
1175 t_page = null_lookup_page(nullb, sector, false, in copy_from_nullb()
1176 !null_cache_active(nullb)); in copy_from_nullb()
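copy_to_nullb() and copy_from_nullb() move data between the bio pages and the backing pages one block at a time, clamping each step to min(blocksize, bytes remaining); a write bypasses the cache tree when the cache is inactive or the request is FUA, and a FUA write also drops any stale cached copy of the sector (line 1156). The chunking loop, modeled in plain C (copy_chunks is hypothetical):

#include <stddef.h>
#include <string.h>

/* Models the loop shape shared by copy_to_nullb()/copy_from_nullb():
 * each pass handles at most one block, clamped to what is left, as in
 * min_t(size_t, nullb->dev->blocksize, n - count). */
static void copy_chunks(unsigned char *dst, const unsigned char *src,
			size_t n, size_t blocksize)
{
	size_t count = 0;

	while (count < n) {
		size_t temp = blocksize < n - count ? blocksize : n - count;

		memcpy(dst + count, src + count, temp);
		count += temp;
	}
}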
1190 static void nullb_fill_pattern(struct nullb *nullb, struct page *page, in nullb_fill_pattern() argument
1199 struct nullb *nullb = dev->nullb; in null_handle_discard() local
1203 spin_lock_irq(&nullb->lock); in null_handle_discard()
1206 null_free_sector(nullb, sector, false); in null_handle_discard()
1207 if (null_cache_active(nullb)) in null_handle_discard()
1208 null_free_sector(nullb, sector, true); in null_handle_discard()
1212 spin_unlock_irq(&nullb->lock); in null_handle_discard()
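null_handle_discard() walks the discarded range under nullb->lock and frees each sector from the data tree; when the write-back cache is active it frees the cached copy too, so no stale data can be flushed back later. The per-sector step, modeled on the flat-table scheme from the earlier sketch (discard_idx is hypothetical):

#include <stdbool.h>
#include <stddef.h>

/* Models lines 1206-1208: drop the sector from the data store, and
 * from the cache store as well when the cache is active (the driver
 * also credits dev->curr_cache when a cache page goes away). */
static void discard_idx(void **data, void **cache, size_t idx,
			bool cache_active)
{
	data[idx] = NULL;
	if (cache_active)
		cache[idx] = NULL;
}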
1217 static blk_status_t null_handle_flush(struct nullb *nullb) in null_handle_flush() argument
1221 if (!null_cache_active(nullb)) in null_handle_flush()
1224 spin_lock_irq(&nullb->lock); in null_handle_flush()
1226 err = null_make_cache_space(nullb, in null_handle_flush()
1227 nullb->dev->cache_size * 1024 * 1024); in null_handle_flush()
1228 if (err || nullb->dev->curr_cache == 0) in null_handle_flush()
1232 WARN_ON(!radix_tree_empty(&nullb->dev->cache)); in null_handle_flush()
1233 spin_unlock_irq(&nullb->lock); in null_handle_flush()
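null_handle_flush() implements a flush by repeatedly asking null_make_cache_space() for the entire configured cache size, which forces every dirty page through the flush path; the loop ends when the cache is empty or an error surfaces, and the WARN_ON at line 1232 checks that the cache tree really drained. The loop shape, with a hypothetical evict_some() standing in for null_make_cache_space():

/* Models the writeback loop at lines 1224-1232: demand the whole
 * cache's worth of space until the cache drains or eviction fails. */
static int flush_all(unsigned long cache_size_mb,
		     unsigned long *curr_cache,
		     int (*evict_some)(unsigned long want,
				       unsigned long *curr))
{
	unsigned long want = cache_size_mb * 1024 * 1024;
	int err;

	do {
		err = evict_some(want, curr_cache);
	} while (!err && *curr_cache != 0);

	return err;
}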
1237 static int null_transfer(struct nullb *nullb, struct page *page, in null_transfer() argument
1241 struct nullb_device *dev = nullb->dev; in null_transfer()
1247 valid_len = null_zone_valid_read_len(nullb, in null_transfer()
1251 err = copy_from_nullb(nullb, page, off, in null_transfer()
1258 nullb_fill_pattern(nullb, page, len, off); in null_transfer()
1262 err = copy_to_nullb(nullb, page, off, sector, len, is_fua); in null_transfer()
1276 struct nullb *nullb = cmd->nq->dev->nullb; in null_handle_data_transfer() local
1285 spin_lock_irq(&nullb->lock); in null_handle_data_transfer()
1290 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, in null_handle_data_transfer()
1300 spin_unlock_irq(&nullb->lock); in null_handle_data_transfer()
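null_handle_data_transfer() walks the request's bio_vec segments under a single hold of nullb->lock, handing each segment to null_transfer() with its own page, offset, and length; reads may be clipped to null_zone_valid_read_len() and the remainder pattern-filled (lines 1247-1258). The segment walk, as a generic model (struct seg, for_each_segment, and xfer are hypothetical):

#include <stddef.h>

struct seg {
	unsigned char *buf;
	size_t len;
};

/* Models the bvec walk: every segment goes through the transfer
 * helper in turn; the first error stops the walk, as the driver
 * records cmd->error and breaks out. */
static int for_each_segment(struct seg *segs, size_t nsegs,
			    int (*xfer)(unsigned char *buf, size_t len))
{
	size_t i;

	for (i = 0; i < nsegs; i++) {
		int err = xfer(segs[i].buf, segs[i].len);

		if (err)
			return err;
	}
	return 0;
}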
1308 struct nullb *nullb = dev->nullb; in null_handle_throttled() local
1312 if (!hrtimer_active(&nullb->bw_timer)) in null_handle_throttled()
1313 hrtimer_restart(&nullb->bw_timer); in null_handle_throttled()
1315 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) { in null_handle_throttled()
1316 blk_mq_stop_hw_queues(nullb->q); in null_handle_throttled()
1318 if (atomic_long_read(&nullb->cur_bytes) > 0) in null_handle_throttled()
1319 blk_mq_start_stopped_hw_queues(nullb->q, true); in null_handle_throttled()
1436 struct nullb *nullb = dev->nullb; in null_handle_cmd() local
1440 cmd->error = null_handle_flush(nullb); in null_handle_cmd()
1459 struct nullb *nullb = container_of(timer, struct nullb, bw_timer); in nullb_bwtimer_fn() local
1461 unsigned int mbps = nullb->dev->mbps; in nullb_bwtimer_fn()
1463 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) in nullb_bwtimer_fn()
1466 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); in nullb_bwtimer_fn()
1467 blk_mq_start_stopped_hw_queues(nullb->q, true); in nullb_bwtimer_fn()
1469 hrtimer_forward_now(&nullb->bw_timer, timer_interval); in nullb_bwtimer_fn()
1474 static void nullb_setup_bwtimer(struct nullb *nullb) in nullb_setup_bwtimer() argument
1478 hrtimer_setup(&nullb->bw_timer, nullb_bwtimer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
1479 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); in nullb_setup_bwtimer()
1480 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); in nullb_setup_bwtimer()
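Lines 1308-1319 and 1459-1480 together implement the mbps throttle: null_handle_throttled() arms the hrtimer if needed and subtracts blk_rq_bytes() from the atomic cur_bytes budget, stopping the hardware queues when it goes negative (with a re-read at line 1318 to close the race against the timer refilling in between); nullb_bwtimer_fn() then resets the budget to mb_per_tick(mbps) once per tick and restarts the stopped queues. A userspace model of the budget arithmetic with C11 atomics (charge_request and bw_tick are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long cur_bytes;        /* models nullb->cur_bytes */

/* Models line 1315: atomic_long_sub_return() yields the new value,
 * so subtract again from the fetched old value to match it here.
 * A negative budget means this tick's quota is spent and the caller
 * must stop the hardware queues. */
static bool charge_request(long rq_bytes)
{
	return atomic_fetch_sub(&cur_bytes, rq_bytes) - rq_bytes >= 0;
}

/* Models nullb_bwtimer_fn(): once per tick the budget snaps back to
 * the full per-tick allowance and stopped queues may run again. */
static void bw_tick(long budget_per_tick)
{
	atomic_store(&cur_bytes, budget_per_tick);
	/* driver: blk_mq_start_stopped_hw_queues(nullb->q, true); */
}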
1527 struct nullb *nullb = set->driver_data; in null_map_queues() local
1532 if (nullb) { in null_map_queues()
1533 struct nullb_device *dev = nullb->dev; in null_map_queues()
1715 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) in null_init_queue() argument
1717 nq->dev = nullb->dev; in null_init_queue()
1725 struct nullb *nullb = hctx->queue->queuedata; in null_init_hctx() local
1728 if (should_init_hctx_fail(nullb->dev)) in null_init_hctx()
1731 nq = &nullb->queues[hctx_idx]; in null_init_hctx()
1733 null_init_queue(nullb, nq); in null_init_hctx()
1748 static void null_del_dev(struct nullb *nullb) in null_del_dev() argument
1752 if (!nullb) in null_del_dev()
1755 dev = nullb->dev; in null_del_dev()
1757 ida_free(&nullb_indexes, nullb->index); in null_del_dev()
1759 list_del_init(&nullb->list); in null_del_dev()
1761 del_gendisk(nullb->disk); in null_del_dev()
1763 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { in null_del_dev()
1764 hrtimer_cancel(&nullb->bw_timer); in null_del_dev()
1765 atomic_long_set(&nullb->cur_bytes, LONG_MAX); in null_del_dev()
1766 blk_mq_start_stopped_hw_queues(nullb->q, true); in null_del_dev()
1769 put_disk(nullb->disk); in null_del_dev()
1770 if (nullb->tag_set == &nullb->__tag_set) in null_del_dev()
1771 blk_mq_free_tag_set(nullb->tag_set); in null_del_dev()
1772 kfree(nullb->queues); in null_del_dev()
1773 if (null_cache_active(nullb)) in null_del_dev()
1774 null_free_device_storage(nullb->dev, true); in null_del_dev()
1775 kfree(nullb); in null_del_dev()
1776 dev->nullb = NULL; in null_del_dev()
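null_del_dev() unwinds in strict reverse order of construction: release the ida index, unlink from nullb_list, hide the disk with del_gendisk() before anything else is torn down, quiesce the throttle timer and unblock any stopped queues, then put_disk(), free the tag set only if it is the per-device one, free the queues, and drop the cached pages. The one subtle ownership rule is the tag set, modeled here (release_tag_set is hypothetical):

#include <stdlib.h>

/* Models lines 1770-1771: a per-device tag set (&nullb->__tag_set) is
 * freed with its device, but the module-wide shared set must survive
 * the teardown of any one device. */
static void release_tag_set(void *tag_set, const void *shared_set)
{
	if (tag_set != shared_set)
		free(tag_set);       /* blk_mq_free_tag_set() here */
}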
1779 static void null_config_discard(struct nullb *nullb, struct queue_limits *lim) in null_config_discard() argument
1781 if (nullb->dev->discard == false) in null_config_discard()
1784 if (!nullb->dev->memory_backed) { in null_config_discard()
1785 nullb->dev->discard = false; in null_config_discard()
1790 if (nullb->dev->zoned) { in null_config_discard()
1791 nullb->dev->discard = false; in null_config_discard()
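null_config_discard() only leaves discard enabled when it can mean something: without memory backing there are no pages to free, and zoned devices take a different path, so both cases flip dev->discard back off before the queue limits are set. As a predicate (discard_allowed is a hypothetical name):

#include <stdbool.h>

/* Mirrors lines 1781-1791: discard stays on only for a memory-backed,
 * non-zoned device. */
static bool discard_allowed(bool discard, bool memory_backed, bool zoned)
{
	return discard && memory_backed && !zoned;
}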
1804 static int setup_queues(struct nullb *nullb) in setup_queues() argument
1811 nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue), in setup_queues()
1813 if (!nullb->queues) in setup_queues()
1855 static int null_setup_tagset(struct nullb *nullb) in null_setup_tagset() argument
1857 if (nullb->dev->shared_tags) { in null_setup_tagset()
1858 nullb->tag_set = &tag_set; in null_setup_tagset()
1862 nullb->tag_set = &nullb->__tag_set; in null_setup_tagset()
1863 nullb->tag_set->driver_data = nullb; in null_setup_tagset()
1864 nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues; in null_setup_tagset()
1865 nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth; in null_setup_tagset()
1866 nullb->tag_set->numa_node = nullb->dev->home_node; in null_setup_tagset()
1867 if (nullb->dev->no_sched) in null_setup_tagset()
1868 nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT; in null_setup_tagset()
1869 if (nullb->dev->shared_tag_bitmap) in null_setup_tagset()
1870 nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; in null_setup_tagset()
1871 if (nullb->dev->blocking) in null_setup_tagset()
1872 nullb->tag_set->flags |= BLK_MQ_F_BLOCKING; in null_setup_tagset()
1873 return null_init_tag_set(nullb->tag_set, nullb->dev->poll_queues); in null_setup_tagset()
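null_setup_tagset() either points the device at the module-wide shared tag_set (shared_tags) or fills in the per-device __tag_set, translating configfs knobs into blk-mq flags: no_sched sets BLK_MQ_F_NO_SCHED_BY_DEFAULT, shared_tag_bitmap sets BLK_MQ_F_TAG_HCTX_SHARED, and blocking sets BLK_MQ_F_BLOCKING. The flag assembly, modeled with stand-in bits (the F_* values here are hypothetical, not the kernel's):

#include <stdbool.h>

#define F_NO_SCHED    (1u << 0)  /* models BLK_MQ_F_NO_SCHED_BY_DEFAULT */
#define F_HCTX_SHARED (1u << 1)  /* models BLK_MQ_F_TAG_HCTX_SHARED     */
#define F_BLOCKING    (1u << 2)  /* models BLK_MQ_F_BLOCKING            */

/* Models lines 1867-1872: each configfs knob maps to one tag-set
 * flag; shared_tags short-circuits all of this by reusing the
 * module-wide set instead. */
static unsigned int tagset_flags(bool no_sched, bool shared_tag_bitmap,
				 bool blocking)
{
	unsigned int flags = 0;

	if (no_sched)
		flags |= F_NO_SCHED;
	if (shared_tag_bitmap)
		flags |= F_HCTX_SHARED;
	if (blocking)
		flags |= F_BLOCKING;
	return flags;
}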
1954 struct nullb *nullb; in null_add_dev() local
1961 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); in null_add_dev()
1962 if (!nullb) { in null_add_dev()
1966 nullb->dev = dev; in null_add_dev()
1967 dev->nullb = nullb; in null_add_dev()
1969 spin_lock_init(&nullb->lock); in null_add_dev()
1971 rv = setup_queues(nullb); in null_add_dev()
1975 rv = null_setup_tagset(nullb); in null_add_dev()
1981 null_config_discard(nullb, &lim); in null_add_dev()
1989 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); in null_add_dev()
1998 nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb); in null_add_dev()
1999 if (IS_ERR(nullb->disk)) { in null_add_dev()
2000 rv = PTR_ERR(nullb->disk); in null_add_dev()
2003 nullb->q = nullb->disk->queue; in null_add_dev()
2007 nullb_setup_bwtimer(nullb); in null_add_dev()
2010 nullb->q->queuedata = nullb; in null_add_dev()
2016 nullb->index = rv; in null_add_dev()
2021 snprintf(nullb->disk_name, sizeof(nullb->disk_name), in null_add_dev()
2024 sprintf(nullb->disk_name, "nullb%d", nullb->index); in null_add_dev()
2027 set_capacity(nullb->disk, in null_add_dev()
2028 ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT); in null_add_dev()
2029 nullb->disk->major = null_major; in null_add_dev()
2030 nullb->disk->first_minor = nullb->index; in null_add_dev()
2031 nullb->disk->minors = 1; in null_add_dev()
2032 nullb->disk->fops = &null_ops; in null_add_dev()
2033 nullb->disk->private_data = nullb; in null_add_dev()
2034 strscpy(nullb->disk->disk_name, nullb->disk_name); in null_add_dev()
2036 if (nullb->dev->zoned) { in null_add_dev()
2037 rv = null_register_zoned_dev(nullb); in null_add_dev()
2042 rv = add_disk(nullb->disk); in null_add_dev()
2046 list_add_tail(&nullb->list, &nullb_list); in null_add_dev()
2048 pr_info("disk %s created\n", nullb->disk_name); in null_add_dev()
2053 ida_free(&nullb_indexes, nullb->index); in null_add_dev()
2055 put_disk(nullb->disk); in null_add_dev()
2059 if (nullb->tag_set == &nullb->__tag_set) in null_add_dev()
2060 blk_mq_free_tag_set(nullb->tag_set); in null_add_dev()
2062 kfree(nullb->queues); in null_add_dev()
2064 kfree(nullb); in null_add_dev()
2065 dev->nullb = NULL; in null_add_dev()
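null_add_dev() builds the device in exactly the order null_del_dev() undoes: allocate and link the nullb, set up queues and the tag set, apply queue limits (discard, cache flag), allocate the disk, arm the bandwidth timer if throttled, reserve an ida index, name the disk (the configfs item name, or nullbN at line 2024), set capacity and disk ops, register zones if zoned, then add_disk() and join nullb_list. Every failure at lines 2053-2064 jumps to a label that unwinds only the steps already completed. A compressed model of that goto-unwind pattern (add_dev and its steps are hypothetical):

#include <stdlib.h>

/* Models the error unwinding in null_add_dev(): each setup step has a
 * matching cleanup label, so a failure at step N undoes exactly steps
 * N-1..1 in reverse order. */
static int add_dev(void **out_queues, void **out_disk)
{
	void *queues, *disk;

	queues = malloc(64);             /* setup_queues()      */
	if (!queues)
		return -1;
	disk = malloc(64);               /* blk_mq_alloc_disk() */
	if (!disk)
		goto out_cleanup_queues;

	/* ... add_disk(), list_add_tail() ... */
	*out_queues = queues;
	*out_disk = disk;
	return 0;

out_cleanup_queues:
	free(queues);
	return -1;
}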
2070 static struct nullb *null_find_dev_by_name(const char *name) in null_find_dev_by_name()
2072 struct nullb *nullb = NULL, *nb; in null_find_dev_by_name() local
2077 nullb = nb; in null_find_dev_by_name()
2083 return nullb; in null_find_dev_by_name()
2106 static void null_destroy_dev(struct nullb *nullb) in null_destroy_dev() argument
2108 struct nullb_device *dev = nullb->dev; in null_destroy_dev()
2110 null_del_dev(nullb); in null_destroy_dev()
2119 struct nullb *nullb; in null_init() local
2178 nullb = list_entry(nullb_list.next, struct nullb, list); in null_init()
2179 null_destroy_dev(nullb); in null_init()
2189 struct nullb *nullb; in null_exit() local
2197 nullb = list_entry(nullb_list.next, struct nullb, list); in null_exit()
2198 null_destroy_dev(nullb); in null_exit()