/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

enum btrfs_block_group_size_class {
	/* Unset */
	BTRFS_BG_SZ_NONE,
	/* 0 < size <= 128K */
	BTRFS_BG_SZ_SMALL,
	/* 128K < size <= 8M */
	BTRFS_BG_SZ_MEDIUM,
	/* 8M < size < BG_LENGTH */
	BTRFS_BG_SZ_LARGE,
};
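
/*
 * A minimal sketch (hypothetical helper, not part of this header's API)
 * mirroring the thresholds documented above. The real classifier,
 * btrfs_calc_block_group_size_class(), is declared at the bottom of this
 * header and implemented in block-group.c.
 */
static inline enum btrfs_block_group_size_class
btrfs_bg_size_class_sketch(u64 size)
{
	if (size == 0)
		return BTRFS_BG_SZ_NONE;	/* "Unset" */
	if (size <= SZ_128K)
		return BTRFS_BG_SZ_SMALL;
	if (size <= SZ_8M)
		return BTRFS_BG_SZ_MEDIUM;
	return BTRFS_BG_SZ_LARGE;
}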

/*
 * This describes the state of the block_group for async discard. This is
 * due to its two-pass nature, where extent discarding is prioritized over
 * bitmap discarding. BTRFS_DISCARD_RESET_CURSOR is set when we are
 * resetting between lists to prevent contention for discard state
 * variables (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};
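
/*
 * A minimal sketch (hypothetical helper, not a kernel API) of how the two
 * passes advance, assuming the flow described above: extents are discarded
 * first, then the same range is re-walked for bitmaps. The authoritative
 * state machine lives in discard.c.
 */
static inline enum btrfs_discard_state
btrfs_discard_advance_pass_sketch(enum btrfs_discard_state state)
{
	switch (state) {
	case BTRFS_DISCARD_EXTENTS:
		/* Extent pass done: re-walk the block group for bitmaps. */
		return BTRFS_DISCARD_BITMAPS;
	case BTRFS_DISCARD_BITMAPS:
	case BTRFS_DISCARD_RESET_CURSOR:
	default:
		/* Both passes done (or relisting): start over with extents. */
		return BTRFS_DISCARD_EXTENTS;
	}
}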

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one if we have very few
 * chunks already allocated. This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but is called from
 * find_free_extent(), which also activates the zone.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};
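
/*
 * Illustrative use (a sketch, assuming a transaction handle @trans and an
 * @fs_info; btrfs_chunk_alloc() and btrfs_data_alloc_profile() are declared
 * further down in this header):
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_data_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0)
 *		return ret;
 */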

/* Block group flags set at runtime */
enum btrfs_block_group_flags {
	BLOCK_GROUP_FLAG_IREF,
	BLOCK_GROUP_FLAG_REMOVED,
	BLOCK_GROUP_FLAG_TO_COPY,
	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
	/* Does the block group need to be added to the free space tree? */
	BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
	/* Indicate that the block group is placed on a sequential zone */
	BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
};
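
/*
 * These are bit numbers, not masks. Illustratively (a sketch of how the
 * flags are consumed elsewhere in btrfs, not an API defined here), a
 * runtime flag is set and queried with atomic bitops on
 * struct btrfs_block_group::runtime_flags:
 *
 *	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
 *	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags))
 *		...
 */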

enum btrfs_caching_type {
	BTRFS_CACHE_NO,
	BTRFS_CACHE_STARTED,
	BTRFS_CACHE_FINISHED,
	BTRFS_CACHE_ERROR,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M
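
/*
 * Illustrative use (a sketch): a caller that needs the free space of a
 * block group can start/wait for caching and take a reference on the
 * caching control (all three functions are declared below):
 *
 *	ret = btrfs_cache_block_group(block_group, true);
 *	if (ret)
 *		return ret;
 *	caching_ctl = btrfs_get_caching_control(block_group);
 *	...
 *	btrfs_put_caching_control(caching_ctl);
 */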

/*
 * Tree to record all locked full stripes of a RAID5/6 block group
 */
struct btrfs_full_stripe_locks_tree {
	struct rb_root root;
	struct mutex lock;
};

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u64 global_root_id;

	/*
	 * The last committed used bytes of this block group. If @used is
	 * still the same as @commit_used, we don't need to update the block
	 * group item of this block group.
	 */
	u64 commit_used;
	/*
	 * If the free space extent count exceeds this number, convert the
	 * block group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Only used for the delayed data space allocation, because only the
	 * data space allocation and the related metadata update can be done
	 * across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;
	unsigned long runtime_flags;

	unsigned int ro;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation. This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct map_lookup *physical_map;
	struct list_head active_bg_list;
	struct work_struct zone_finish_work;
	struct extent_buffer *last_eb;
	enum btrfs_block_group_size_class size_class;
};
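
/*
 * A minimal sketch (hypothetical helper, not part of this header's API):
 * take a consistent snapshot of the space counters above, which are
 * protected by the per-block-group spinlock. Whether zone_unusable and
 * bytes_super should be subtracted depends on the caller; this is one
 * plausible accounting, not the kernel's.
 */
static inline u64 btrfs_block_group_unused_sketch(struct btrfs_block_group *bg)
{
	u64 unused;

	spin_lock(&bg->lock);
	unused = bg->length - bg->used - bg->reserved - bg->pinned -
		 bg->bytes_super - bg->zone_unusable;
	spin_unlock(&bg->lock);

	return unused;
}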

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}
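
/*
 * A minimal sketch (hypothetical helper, not part of this header's API):
 * check whether a logical byte address falls inside a block group's range,
 * using the accessor above.
 */
static inline bool btrfs_block_group_contains_sketch(
		struct btrfs_block_group *block_group, u64 bytenr)
{
	return bytenr >= block_group->start &&
	       bytenr < btrfs_block_group_end(block_group);
}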

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group);
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
u64 add_new_free_space(struct btrfs_block_group *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 bytes_used, u64 type,
						 u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc,
			     bool force_wrong_size_class);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

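/*
 * Note: the barrier below orders this read of ->cached against the
 * caller's earlier accesses; its counterpart is in the caching code that
 * updates ->cached (a hedged summary, see block-group.c).
 */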
static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);

enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size);
int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
				     enum btrfs_block_group_size_class size_class,
				     bool force_wrong_size_class);
bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg);
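
/*
 * Illustrative flow (a sketch of a hypothetical caller, not the kernel's
 * allocator): classify an allocation by size and try to bind that class
 * to a block group, skipping the group on a mismatch:
 *
 *	enum btrfs_block_group_size_class size_class;
 *
 *	size_class = btrfs_calc_block_group_size_class(num_bytes);
 *	if (btrfs_block_group_should_use_size_class(block_group) &&
 *	    btrfs_use_block_group_size_class(block_group, size_class, false))
 *		... skip this block group ...
 */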

#endif /* BTRFS_BLOCK_GROUP_H */