/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */
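/*
 * With the default 2MB segment (512 blocks of 4KB), 4096 prefree segments
 * amount to 4096 * 2MB = 8GB, which is the "8GB in maximum" noted above.
 */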

#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
#define F2FS_MIN_META_SEGMENTS	8 /* SB + 2 (CP + SIT + NAT) + SSA */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)
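/*
 * For example, if the main area starts at segment #512
 * (free_i->start_segno == 512), logical segment #520 is relative segment #8
 * and vice versa:
 *
 *	GET_L2R_SEGNO(free_i, 520) == 520 - 512 == 8
 *	GET_R2L_SEGNO(free_i, 8)   == 8 + 512   == 520
 */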

#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)

static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
						unsigned short seg_type)
{
	f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
}

#define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
#define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
#define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)

#define IS_CURSEG(sbi, seg)						\
	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))

#define IS_CURSEC(sbi, secno)						\
	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno /	\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno /	\
	  (sbi)->segs_per_sec))

#define MAIN_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	((sbi)->total_sections)

#define TOTAL_SEGS(sbi)							\
	(SM_I(sbi) ? SM_I(sbi)->segment_count :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
					(sbi)->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	((!__is_valid_data_blkaddr(blk_addr)) ?				\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi)						\
	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define GET_SEC_FROM_SEG(sbi, segno)					\
	(((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno)					\
	((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno)					\
	(((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno)					\
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
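/*
 * A worked example with hypothetical geometry: SEG0_BLKADDR(sbi) == 1024,
 * 512 blocks per segment (log_blocks_per_seg == 9) and segs_per_sec == 4.
 * For blk_addr == 5000:
 *
 *	GET_SEGOFF_FROM_SEG0()	== 5000 - 1024		== 3976
 *	GET_SEGNO_FROM_SEG0()	== 3976 >> 9		== 7
 *	GET_BLKOFF_FROM_SEG0()	== 3976 & (512 - 1)	== 392
 *	GET_SEC_FROM_SEG(sbi, 7) == 7 / 4		== 1
 *
 * GET_SEGNO() additionally maps the result to a main-area-relative segment
 * number via GET_L2R_SEGNO(), or returns NULL_SEGNO for an invalid data
 * block address.
 */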

#define GET_SUM_BLOCK(sbi, segno)				\
	((sbi)->sm_info->ssa_blkaddr + (segno))

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

#define SIT_ENTRY_OFFSET(sit_i, segno)				\
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)					\
	((segno) / SIT_ENTRY_PER_BLOCK)
#define	START_SEGNO(segno)					\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)					\
	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)					\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)				\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)				\
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
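/*
 * With 4KB blocks and 512-byte sectors, F2FS_LOG_SECTORS_PER_BLOCK is 3,
 * so for example SECTOR_FROM_BLOCK(10) == 80 and SECTOR_TO_BLOCK(80) == 10.
 */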

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 * AT_SSR (Age Threshold based Slack Space Recycle) merges fragmented data
 * into a fragmented segment whose age (modification time) is similar.
 */
enum {
	LFS = 0,
	SSR,
	AT_SSR,
};

/*
 * In the victim_sel_policy->gc_mode, there are three GC, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 * GC_AT is based on the age-threshold algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY,
	GC_AT,
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum {
	BG_GC = 0,
	FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS, SSR or AT_SSR */
	int gc_mode;			/* GC_CB, GC_GREEDY or GC_AT */
	unsigned long *dirty_bitmap;	/* dirty segment/section bitmap */
	unsigned int max_search;	/*
					 * maximum # of segments/sections
					 * to search
					 */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned long long oldest_age;	/* oldest age of segments having the same min cost */
	unsigned int min_segno;		/* segment # having min. cost */
	unsigned long long age;		/* mtime of GCed section */
	unsigned long long age_threshold;	/* age threshold */
};

struct seg_entry {
	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10;	/* # of valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
	unsigned int padding:6;		/* padding */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
#endif
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime;	/* modification time of the segment */
};
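
/*
 * The four bit-fields above pack into one 32-bit word:
 * 6 (type) + 10 (valid_blocks) + 10 (ckpt_valid_blocks) + 6 (padding) = 32.
 * Ten bits are sufficient because a 2MB segment holds at most 512 blocks,
 * so a per-segment valid block count never exceeds 512.
 */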

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

#define MAX_SKIP_GC_COUNT			16

struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when commit fails */
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *bitmap;			/* all bitmaps pointer */
	char *sit_bitmap;		/* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir;		/* SIT bitmap mirror */

	/* bitmap of segments to be ignored by GC in case of errors */
	unsigned long *invalid_segmap;
#endif
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
	unsigned long long dirty_min_mtime;	/* min. mtime of GC_AT candidates */
	unsigned long long dirty_max_mtime;	/* max. mtime of GC_AT candidates */

	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	unsigned long *dirty_secmap;
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
					int, int, char, unsigned long long);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned short seg_type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
	int fragment_remained_chunk;		/* remaining blocks in the current chunk for block fragmentation mode */
	bool inited;				/* indicate inmem log is inited */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (use_section && __is_large_section(sbi))
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	if (use_section && __is_large_section(sbi)) {
		/* sum over all segments in the section containing @segno */
		unsigned int start_segno = GET_SEG_FROM_SEC(sbi,
						GET_SEC_FROM_SEG(sbi, segno));
		unsigned int blocks = 0;
		int i;

		for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
			struct seg_entry *se = get_seg_entry(sbi, start_segno);

			blocks += se->ckpt_valid_blocks;
		}
		return blocks;
	}
	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void __seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	rs->mtime = cpu_to_le64(se->mtime);
}
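
/*
 * On disk, the vblocks field packs the segment type and the valid block
 * count into one 16-bit value: the type sits above SIT_VBLOCKS_SHIFT (10)
 * and the count occupies the low 10 bits.  For example, a warm data
 * segment (CURSEG_WARM_DATA == 1) with 300 valid blocks is stored as
 * (1 << 10) | 300 == 1324; GET_SIT_VBLOCKS() and GET_SIT_TYPE() undo the
 * split in seg_info_from_raw_sit() above.
 */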

static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
				struct page *page, unsigned int start)
{
	struct f2fs_sit_block *raw_sit;
	struct seg_entry *se;
	struct f2fs_sit_entry *rs;
	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
					(unsigned long)MAIN_SEGS(sbi));
	int i;

	raw_sit = (struct f2fs_sit_block *)page_address(page);
	memset(raw_sit, 0, PAGE_SIZE);
	for (i = 0; i < end - start; i++) {
		rs = &raw_sit->entries[i];
		se = get_seg_entry(sbi, start + i);
		__seg_info_to_raw_sit(se, rs);
	}
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	__seg_info_to_raw_sit(se, rs);

	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;
	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + usable_segs) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno, bool inmem)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;
	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		if (!inmem && IS_CURSEC(sbi, secno))
			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + usable_segs) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
skip_free:
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
						sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
}

static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
{
	unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int segno, left_blocks;
	int i;

	/* check current node segment */
	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
		segno = CURSEG_I(sbi, i)->segno;
		left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;

		if (node_blocks > left_blocks)
			return false;
	}

	/* check current data segment */
	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
	left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
	if (dent_blocks > left_blocks)
		return false;
	return true;
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
			has_curseg_enough_space(sbi))
		return false;
	return (free_sections(sbi) + freed) <=
		(node_secs + 2 * dent_secs + imeta_secs +
		reserved_sections(sbi) + needed);
}

static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{
	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;
	if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
		return true;
	return false;
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}
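
/*
 * A worked example: with 1,000,000 user blocks of which 823,000 are valid,
 * utilization() returns 82.  This is the percentage that the
 * F2FS_IPU_UTIL/F2FS_IPU_SSR_UTIL policies below compare against
 * min_ipu_util (DEF_MIN_IPU_UTIL by default).
 */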

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy
 * and do in-place updates (IPU) instead.  Users can control the policy
 * through sysfs entries.  There are several policies with the following
 * triggering conditions.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                  storages. IPU will be triggered only if the # of dirty
 *                  pages is over min_fsync_blocks. (=default option)
 * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
 * F2FS_IPU_NOCACHE - disable IPU bio cache.
 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8
#define DEF_MIN_HOT_BLOCKS	16

#define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
	F2FS_IPU_NOCACHE,
};
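
/*
 * These values are bit positions: the ipu_policy knob (typically exposed
 * as a sysfs entry, see the comment above) holds a bitmask built from
 * them, so e.g. a mask of (1 << F2FS_IPU_FSYNC) == 16 selects the default
 * fsync-only policy, while a mask of 0 disables in-place updates.
 */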

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC);
	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC_ENHANCE);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;
	unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					usable_blks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < usable_blks_per_seg);

	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}

	if (usable_blks_per_seg < sbi->blocks_per_seg)
		f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
				sbi->blocks_per_seg,
				usable_blks_per_seg) != sbi->blocks_per_seg);

	/* check segment usage, and check boundary of a given segment number */
	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1)) {
		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
			 GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}
	return 0;
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

#ifdef CONFIG_F2FS_CHECK_FS
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
						bool base_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	time64_t diff, now = ktime_get_boottime_seconds();

	if (now >= sit_i->mounted_time)
		return sit_i->elapsed_time + now - sit_i->mounted_time;

	/* system time is set to the past */
	if (!base_time) {
		diff = sit_i->mounted_time - now;
		if (sit_i->elapsed_time >= diff)
			return sit_i->elapsed_time - diff;
		return 0;
	}
	return sit_i->elapsed_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 8 * sbi->blocks_per_seg;
	else if (type == META)
		return 8 * BIO_MAX_VECS;
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;
	desired = BIO_MAX_VECS;
	if (type == NODE)
		desired <<= 1;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}

static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	bool wakeup = false;
	int i;

	if (force)
		goto wake_up;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dcc->discard_granularity)
			break;
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
		return;
wake_up:
	dcc->discard_wake = 1;
	wake_up_interruptible_all(&dcc->discard_wait_queue);
}