/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BLK_NULL_BLK_H
#define __BLK_NULL_BLK_H

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
#include <linux/fault-inject.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

/*
 * Per-I/O command context. Exactly one of @rq or @bio is valid,
 * depending on the device queue mode (request-based vs. NULL_Q_BIO).
 */
struct nullb_cmd {
	union {
		struct request *rq;	/* request-based queue modes */
		struct bio *bio;	/* NULL_Q_BIO queue mode */
	};
	unsigned int tag;		/* tag allocated from nq->tag_map */
	blk_status_t error;		/* completion status for the block layer */
	bool fake_timeout;		/* NOTE(review): presumably marks a simulated timeout - confirm against cmd completion path */
	struct nullb_queue *nq;		/* queue this command belongs to */
	struct hrtimer timer;		/* timer-delayed completion - see nullb_device->completion_nsec */
};

/*
 * Per-hardware-queue state.
 */
struct nullb_queue {
	unsigned long *tag_map;		/* bitmap of in-use command tags */
	wait_queue_head_t wait;		/* waiters blocked until a tag frees up */
	unsigned int queue_depth;	/* number of commands/tags in this queue */
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* NOTE(review): presumably selects a target queue on requeue - confirm */

	struct list_head poll_list;	/* commands awaiting poll completion */
	spinlock_t poll_lock;		/* protects poll_list */

	struct nullb_cmd *cmds;		/* preallocated command array (queue_depth entries - TODO confirm sizing) */
};

struct nullb_zone {
	/*
	 * Zone lock to prevent concurrent modification of a zone write
	 * pointer position and condition: with memory backing, a write
	 * command execution may sleep on memory allocation. For this case,
	 * use mutex as the zone lock. Otherwise, use the spinlock for
	 * locking the zone.
	 */
	union {
		spinlock_t spinlock;	/* zone lock when writes cannot sleep */
		struct mutex mutex;	/* zone lock for memory-backed devices (writes may sleep) */
	};
	enum blk_zone_type type;	/* conventional vs. sequential-write zone */
	enum blk_zone_cond cond;	/* current zone condition (empty/open/closed/full/...) */
	sector_t start;			/* first sector of the zone */
	sector_t wp;			/* current write pointer position */
	unsigned int len;		/* zone length - presumably in sectors, confirm */
	unsigned int capacity;		/* writable capacity (<= len) - presumably in sectors, confirm */
};

/* Queue modes */
enum {
	NULL_Q_BIO	= 0,	/* bio-based: commands carry a struct bio */
	NULL_Q_RQ	= 1,	/* legacy request mode */
	NULL_Q_MQ	= 2,	/* blk-mq: commands carry a struct request */
};

/*
 * Per-device configuration and state. An instance is exposed through
 * configfs (@item); @nullb points at the live disk instance, if any.
 */
struct nullb_device {
	struct nullb *nullb;
	struct config_item item;
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache; /* current cache usage - presumably bytes, confirm against cache_size (MB) */
	struct badblocks badblocks;

	/* Zoned device state */
	unsigned int nr_zones;
	unsigned int nr_zones_imp_open;
	unsigned int nr_zones_exp_open;
	unsigned int nr_zones_closed;
	unsigned int imp_close_zone_no; /* NOTE(review): presumably next implicitly-open zone to close - confirm */
	struct nullb_zone *zones;
	sector_t zone_size_sects; /* zone size in sectors */
	bool need_zone_res_mgmt;
	spinlock_t zone_res_lock; /* protects the zone resource (open/active) counters above - confirm */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int prev_submit_queues; /* number of submission queues before change */
	unsigned int poll_queues; /* number of IOPOLL submission queues */
	unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int max_sectors; /* Max sectors per command */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
	bool virt_boundary; /* virtual boundary on/off for the device */
	bool no_sched; /* no IO scheduler for the device */
	bool shared_tag_bitmap; /* use hostwide shared tags */
};

/*
 * A live null block device instance, created from a struct nullb_device
 * configuration.
 */
struct nullb {
	struct nullb_device *dev;	/* configuration this instance was built from */
	struct list_head list;		/* entry in the driver's device list - confirm */
	unsigned int index;		/* device index */
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set *tag_set;	/* active tag set - presumably &__tag_set unless shared, confirm */
	struct blk_mq_tag_set __tag_set;	/* embedded per-device tag set */
	unsigned int queue_depth;
	atomic_long_t cur_bytes;	/* throttling budget - presumably bytes left in the current window, confirm */
	struct hrtimer bw_timer;	/* bandwidth throttle timer - see dev->mbps */
	unsigned long cache_flush_pos;	/* cache writeback position - confirm semantics */
	spinlock_t lock;

	struct nullb_queue *queues;	/* array of nr_queues queues */
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

/* Common command handling (implemented in the main null_blk source). */
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
				 sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
			      sector_t sector, unsigned int nr_sectors);

#ifdef CONFIG_BLK_DEV_ZONED
/* Zoned device support (implemented in the zoned null_blk source). */
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond);
#else
/* Zoned support compiled out: creating a zoned device must fail loudly. */
static inline int null_init_zoned_dev(struct nullb_device *dev,
				      struct request_queue *q)
{
	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
	return -EINVAL;
}
/* No zoned device can exist to register when zoned support is compiled out. */
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
null_free_zoned_dev(struct nullb_device * dev)167 static inline void null_free_zoned_dev(struct nullb_device *dev) {}
/* Zone commands are unsupported when zoned support is compiled out. */
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
			enum req_op op, sector_t sector, sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
/* Without zones there is no write pointer; the whole range is readable. */
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
/* Zone conditions cannot be changed when zoned support is compiled out. */
static inline ssize_t zone_cond_store(struct nullb_device *dev,
				      const char *page, size_t count,
				      enum blk_zone_cond cond)
{
	return -EOPNOTSUPP;
}
#define null_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */