Lines Matching refs:sbi

33 struct f2fs_sb_info *sbi = data; in gc_thread_func() local
34 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func()
35 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func()
36 wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq; in gc_thread_func()
55 if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) in gc_thread_func()
63 stat_other_skip_bggc_count(sbi); in gc_thread_func()
69 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func()
71 stat_other_skip_bggc_count(sbi); in gc_thread_func()
75 if (time_to_inject(sbi, FAULT_CHECKPOINT)) in gc_thread_func()
76 f2fs_stop_checkpoint(sbi, false, in gc_thread_func()
79 if (!sb_start_write_trylock(sbi->sb)) { in gc_thread_func()
80 stat_other_skip_bggc_count(sbi); in gc_thread_func()
97 if (sbi->gc_mode == GC_URGENT_HIGH || in gc_thread_func()
98 sbi->gc_mode == GC_URGENT_MID) { in gc_thread_func()
100 f2fs_down_write(&sbi->gc_lock); in gc_thread_func()
105 f2fs_down_write(&sbi->gc_lock); in gc_thread_func()
107 } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) { in gc_thread_func()
108 stat_other_skip_bggc_count(sbi); in gc_thread_func()
112 if (!is_idle(sbi, GC_TIME)) { in gc_thread_func()
114 f2fs_up_write(&sbi->gc_lock); in gc_thread_func()
115 stat_io_skip_bggc_count(sbi); in gc_thread_func()
119 if (has_enough_invalid_blocks(sbi)) in gc_thread_func()
125 stat_inc_bggc_count(sbi->stat_info); in gc_thread_func()
127 sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC; in gc_thread_func()
138 if (f2fs_gc(sbi, &gc_control)) { in gc_thread_func()
151 trace_f2fs_background_gc(sbi->sb, wait_ms, in gc_thread_func()
152 prefree_segments(sbi), free_segments(sbi)); in gc_thread_func()
155 f2fs_balance_fs_bg(sbi, true); in gc_thread_func()
157 if (sbi->gc_mode != GC_NORMAL) { in gc_thread_func()
158 spin_lock(&sbi->gc_remaining_trials_lock); in gc_thread_func()
159 if (sbi->gc_remaining_trials) { in gc_thread_func()
160 sbi->gc_remaining_trials--; in gc_thread_func()
161 if (!sbi->gc_remaining_trials) in gc_thread_func()
162 sbi->gc_mode = GC_NORMAL; in gc_thread_func()
164 spin_unlock(&sbi->gc_remaining_trials_lock); in gc_thread_func()
166 sb_end_write(sbi->sb); in gc_thread_func()
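
The block above (lines 33-166) is the body of the background GC kthread: it waits on gc_wait_queue_head, skips a pass while the filesystem is frozen or busy, takes sbi->gc_lock, and then runs f2fs_gc() in background or sync mode (lines 127, 138). A minimal userspace sketch of that wait-and-collect loop, with struct fs_info, fs_is_idle() and run_gc() as hypothetical stand-ins for the f2fs primitives above, could look like:

/* Simplified background-GC loop (userspace pthreads, not the kernel kthread
 * API); the helpers and the interval arithmetic are illustrative only. */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct fs_info { pthread_mutex_t gc_lock; bool stop; };

static bool fs_is_idle(struct fs_info *fs) { (void)fs; return true; }
static bool run_gc(struct fs_info *fs)     { (void)fs; return true; }

static void *gc_thread(void *data)
{
    struct fs_info *fs = data;
    unsigned int wait_ms = 30000;          /* a typical background GC interval */

    while (!fs->stop) {
        sleep(wait_ms / 1000);             /* the kernel waits on gc_wait_queue_head */

        if (!fs_is_idle(fs))               /* skip this pass if there is other I/O */
            continue;
        if (pthread_mutex_trylock(&fs->gc_lock))
            continue;                      /* someone else is already collecting */

        if (run_gc(fs))                    /* freed something: collect again sooner */
            wait_ms = wait_ms > 1000 ? wait_ms / 2 : wait_ms;
        else
            wait_ms *= 2;                  /* nothing reclaimable: back off */

        pthread_mutex_unlock(&fs->gc_lock);
    }
    return NULL;
}
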
172 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi) in f2fs_start_gc_thread() argument
175 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_start_gc_thread()
177 gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL); in f2fs_start_gc_thread()
188 sbi->gc_thread = gc_th; in f2fs_start_gc_thread()
189 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); in f2fs_start_gc_thread()
190 init_waitqueue_head(&sbi->gc_thread->fggc_wq); in f2fs_start_gc_thread()
191 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, in f2fs_start_gc_thread()
197 sbi->gc_thread = NULL; in f2fs_start_gc_thread()
204 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi) in f2fs_stop_gc_thread() argument
206 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in f2fs_stop_gc_thread()
213 sbi->gc_thread = NULL; in f2fs_stop_gc_thread()
216 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type) in select_gc_type() argument
221 if (sbi->am.atgc_enabled) in select_gc_type()
229 switch (sbi->gc_mode) { in select_gc_type()
245 static void select_policy(struct f2fs_sb_info *sbi, int gc_type, in select_policy() argument
248 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in select_policy()
261 p->gc_mode = select_gc_type(sbi, gc_type); in select_policy()
262 p->ofs_unit = sbi->segs_per_sec; in select_policy()
263 if (__is_large_section(sbi)) { in select_policy()
266 0, MAIN_SECS(sbi)); in select_policy()
278 (sbi->gc_mode != GC_URGENT_HIGH) && in select_policy()
280 p->max_search > sbi->max_victim_search) in select_policy()
281 p->max_search = sbi->max_victim_search; in select_policy()
284 if (f2fs_need_rand_seg(sbi)) in select_policy()
285 p->offset = get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec); in select_policy()
286 else if (test_opt(sbi, NOHEAP) && in select_policy()
290 p->offset = SIT_I(sbi)->last_victim[p->gc_mode]; in select_policy()
293 static unsigned int get_max_cost(struct f2fs_sb_info *sbi, in get_max_cost() argument
298 return sbi->blocks_per_seg; in get_max_cost()
304 return 2 * sbi->blocks_per_seg * p->ofs_unit; in get_max_cost()
313 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) in check_bg_victims() argument
315 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in check_bg_victims()
323 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { in check_bg_victims()
324 if (sec_usage_check(sbi, secno)) in check_bg_victims()
327 return GET_SEG_FROM_SEC(sbi, secno); in check_bg_victims()
332 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) in get_cb_cost() argument
334 struct sit_info *sit_i = SIT_I(sbi); in get_cb_cost()
335 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in get_cb_cost()
336 unsigned int start = GET_SEG_FROM_SEC(sbi, secno); in get_cb_cost()
342 unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno); in get_cb_cost()
345 mtime += get_seg_entry(sbi, start + i)->mtime; in get_cb_cost()
346 vblocks = get_valid_blocks(sbi, segno, true); in get_cb_cost()
351 u = (vblocks * 100) >> sbi->log_blocks_per_seg; in get_cb_cost()
365 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, in get_gc_cost() argument
369 return get_seg_entry(sbi, segno)->ckpt_valid_blocks; in get_gc_cost()
373 return get_valid_blocks(sbi, segno, true); in get_gc_cost()
375 return get_cb_cost(sbi, segno); in get_gc_cost()
377 f2fs_bug_on(sbi, 1); in get_gc_cost()
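
get_gc_cost() above chooses between two victim scores: greedy mode returns the raw valid-block count (line 373), while cost-benefit mode calls get_cb_cost(), which works from the valid-block percentage u (line 351) and the accumulated segment mtime. The sketch below shows the shape of those two policies (lower score = better victim); it is modeled on the classic greedy and cost-benefit heuristics, not copied from the kernel arithmetic, and the 512-blocks-per-segment geometry is assumed for illustration:

/* Illustrative victim scores; lower return value = better victim. */
#include <limits.h>

#define BLOCKS_PER_SEG 512u                /* assumed geometry, not read from disk */

/* Greedy: cost is simply the number of valid blocks that must be copied. */
static unsigned int greedy_cost(unsigned int valid_blocks)
{
    return valid_blocks;
}

/* Cost-benefit: prefer old segments with low utilization u (in percent),
 * benefit/cost ~ age * (100 - u) / (100 + u), inverted so that a smaller
 * value still wins, as get_gc_cost() expects. */
static unsigned int cb_cost(unsigned int valid_blocks, unsigned int age)
{
    unsigned int u = valid_blocks * 100 / BLOCKS_PER_SEG;   /* cf. line 351 */

    return UINT_MAX - (age * (100 - u)) / (100 + u);
}
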
393 static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi, in attach_victim_entry() argument
398 struct atgc_management *am = &sbi->am; in attach_victim_entry()
417 static void insert_victim_entry(struct f2fs_sb_info *sbi, in insert_victim_entry() argument
420 struct atgc_management *am = &sbi->am; in insert_victim_entry()
425 p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most); in insert_victim_entry()
426 attach_victim_entry(sbi, mtime, segno, parent, p, left_most); in insert_victim_entry()
429 static void add_victim_entry(struct f2fs_sb_info *sbi, in add_victim_entry() argument
432 struct sit_info *sit_i = SIT_I(sbi); in add_victim_entry()
433 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in add_victim_entry()
434 unsigned int start = GET_SEG_FROM_SEC(sbi, secno); in add_victim_entry()
438 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in add_victim_entry()
440 get_valid_blocks(sbi, segno, true) == 0) in add_victim_entry()
444 for (i = 0; i < sbi->segs_per_sec; i++) in add_victim_entry()
445 mtime += get_seg_entry(sbi, start + i)->mtime; in add_victim_entry()
446 mtime = div_u64(mtime, sbi->segs_per_sec); in add_victim_entry()
462 insert_victim_entry(sbi, mtime, segno); in add_victim_entry()
465 static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi, in lookup_central_victim() argument
468 struct atgc_management *am = &sbi->am; in lookup_central_victim()
472 f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most); in lookup_central_victim()
477 static void atgc_lookup_victim(struct f2fs_sb_info *sbi, in atgc_lookup_victim() argument
480 struct sit_info *sit_i = SIT_I(sbi); in atgc_lookup_victim()
481 struct atgc_management *am = &sbi->am; in atgc_lookup_victim()
490 unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi); in atgc_lookup_victim()
524 vblocks = get_valid_blocks(sbi, ve->segno, true); in atgc_lookup_victim()
525 f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks); in atgc_lookup_victim()
531 f2fs_bug_on(sbi, age + u >= UINT_MAX); in atgc_lookup_victim()
553 static void atssr_lookup_victim(struct f2fs_sb_info *sbi, in atssr_lookup_victim() argument
556 struct sit_info *sit_i = SIT_I(sbi); in atssr_lookup_victim()
557 struct atgc_management *am = &sbi->am; in atssr_lookup_victim()
564 unsigned int seg_blocks = sbi->blocks_per_seg; in atssr_lookup_victim()
577 node = lookup_central_victim(sbi, p); in atssr_lookup_victim()
593 vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks; in atssr_lookup_victim()
594 f2fs_bug_on(sbi, !vblocks); in atssr_lookup_victim()
626 static void lookup_victim_by_age(struct f2fs_sb_info *sbi, in lookup_victim_by_age() argument
629 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, in lookup_victim_by_age()
630 &sbi->am.root, true)); in lookup_victim_by_age()
633 atgc_lookup_victim(sbi, p); in lookup_victim_by_age()
635 atssr_lookup_victim(sbi, p); in lookup_victim_by_age()
637 f2fs_bug_on(sbi, 1); in lookup_victim_by_age()
640 static void release_victim_entry(struct f2fs_sb_info *sbi) in release_victim_entry() argument
642 struct atgc_management *am = &sbi->am; in release_victim_entry()
653 f2fs_bug_on(sbi, am->victim_count); in release_victim_entry()
654 f2fs_bug_on(sbi, !list_empty(&am->victim_list)); in release_victim_entry()
657 static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno) in f2fs_pin_section() argument
659 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in f2fs_pin_section()
660 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in f2fs_pin_section()
682 static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable) in f2fs_unpin_all_sections() argument
684 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); in f2fs_unpin_all_sections()
686 if (f2fs_pinned_section_exists(DIRTY_I(sbi))) { in f2fs_unpin_all_sections()
687 memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size); in f2fs_unpin_all_sections()
688 DIRTY_I(sbi)->pinned_secmap_cnt = 0; in f2fs_unpin_all_sections()
690 DIRTY_I(sbi)->enable_pin_section = enable; in f2fs_unpin_all_sections()
713 static int get_victim_by_default(struct f2fs_sb_info *sbi, in get_victim_by_default() argument
717 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in get_victim_by_default()
718 struct sit_info *sm = SIT_I(sbi); in get_victim_by_default()
727 last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec; in get_victim_by_default()
731 p.age_threshold = sbi->am.age_threshold; in get_victim_by_default()
734 select_policy(sbi, gc_type, type, &p); in get_victim_by_default()
737 p.min_cost = get_max_cost(sbi, &p); in get_victim_by_default()
743 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX; in get_victim_by_default()
746 if (!get_valid_blocks(sbi, *result, false)) { in get_victim_by_default()
751 if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) in get_victim_by_default()
762 if (__is_large_section(sbi) && p.alloc_mode == LFS) { in get_victim_by_default()
763 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) { in get_victim_by_default()
764 p.min_segno = sbi->next_victim_seg[BG_GC]; in get_victim_by_default()
766 sbi->next_victim_seg[BG_GC] = NULL_SEGNO; in get_victim_by_default()
770 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) { in get_victim_by_default()
771 p.min_segno = sbi->next_victim_seg[FG_GC]; in get_victim_by_default()
773 sbi->next_victim_seg[FG_GC] = NULL_SEGNO; in get_victim_by_default()
780 p.min_segno = check_bg_victims(sbi); in get_victim_by_default()
818 secno = GET_SEC_FROM_SEG(sbi, segno); in get_victim_by_default()
820 if (sec_usage_check(sbi, secno)) in get_victim_by_default()
824 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in get_victim_by_default()
830 if (get_ckpt_valid_blocks(sbi, segno, true)) in get_victim_by_default()
838 if (!f2fs_segment_has_free_slot(sbi, segno)) in get_victim_by_default()
850 add_victim_entry(sbi, &p, segno); in get_victim_by_default()
854 cost = get_gc_cost(sbi, segno, &p); in get_victim_by_default()
868 (MAIN_SECS(sbi) * sbi->segs_per_sec); in get_victim_by_default()
875 lookup_victim_by_age(sbi, &p); in get_victim_by_default()
876 release_victim_entry(sbi); in get_victim_by_default()
890 secno = GET_SEC_FROM_SEG(sbi, p.min_segno); in get_victim_by_default()
892 sbi->cur_victim_sec = secno; in get_victim_by_default()
901 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, in get_victim_by_default()
902 sbi->cur_victim_sec, in get_victim_by_default()
903 prefree_segments(sbi), free_segments(sbi)); in get_victim_by_default()
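
get_victim_by_default() (lines 713-903) scans the dirty segments, skips sections that are current or already chosen (sec_usage_check, line 820), scores each remaining candidate with get_gc_cost() (line 854), and keeps the minimum; ATGC candidates are instead queued via add_victim_entry() (line 850) and resolved by lookup_victim_by_age() (line 875). Stripped of the bitmap and locking details, the scan reduces to a min-cost search like the sketch below, where dirty[] and cost[] are hypothetical stand-ins for the dirty-segment bitmap and get_gc_cost():

/* Min-cost victim scan, simplified from get_victim_by_default();
 * lower cost means a better victim. */
#include <limits.h>
#include <stdbool.h>

#define NR_SEGS   8u
#define NULL_SEG  UINT_MAX

static unsigned int select_victim(const bool dirty[NR_SEGS],
                                  const unsigned int cost[NR_SEGS])
{
    unsigned int segno, best = NULL_SEG, best_cost = UINT_MAX;

    for (segno = 0; segno < NR_SEGS; segno++) {
        if (!dirty[segno])               /* only dirty segments are candidates */
            continue;
        if (cost[segno] < best_cost) {   /* keep the cheapest segment seen */
            best_cost = cost[segno];
            best = segno;
        }
    }
    return best;                         /* NULL_SEG if nothing qualified */
}
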
951 static int check_valid_map(struct f2fs_sb_info *sbi, in check_valid_map() argument
954 struct sit_info *sit_i = SIT_I(sbi); in check_valid_map()
959 sentry = get_seg_entry(sbi, segno); in check_valid_map()
970 static int gc_node_segment(struct f2fs_sb_info *sbi, in gc_node_segment() argument
979 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); in gc_node_segment()
981 start_addr = START_BLOCK(sbi, segno); in gc_node_segment()
987 atomic_inc(&sbi->wb_sync_req[NODE]); in gc_node_segment()
996 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) in gc_node_segment()
999 if (check_valid_map(sbi, segno, off) == 0) in gc_node_segment()
1003 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, in gc_node_segment()
1009 f2fs_ra_node_page(sbi, nid); in gc_node_segment()
1014 node_page = f2fs_get_node_page(sbi, nid); in gc_node_segment()
1019 if (check_valid_map(sbi, segno, off) == 0) { in gc_node_segment()
1024 if (f2fs_get_node_info(sbi, nid, &ni, false)) { in gc_node_segment()
1037 stat_inc_node_blk_count(sbi, 1, gc_type); in gc_node_segment()
1044 atomic_dec(&sbi->wb_sync_req[NODE]); in gc_node_segment()
1077 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in is_alive() argument
1088 node_page = f2fs_get_node_page(sbi, nid); in is_alive()
1092 if (f2fs_get_node_info(sbi, nid, dni, false)) { in is_alive()
1098 f2fs_warn(sbi, "%s: valid data with mismatched node version.", in is_alive()
1100 set_sbi_flag(sbi, SBI_NEED_FSCK); in is_alive()
1103 if (f2fs_check_nid_range(sbi, dni->ino)) { in is_alive()
1117 f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u", in is_alive()
1129 unsigned int segno = GET_SEGNO(sbi, blkaddr); in is_alive()
1130 unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); in is_alive()
1132 if (unlikely(check_valid_map(sbi, segno, offset))) { in is_alive()
1133 if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) { in is_alive()
1134 f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u", in is_alive()
1136 set_sbi_flag(sbi, SBI_NEED_FSCK); in is_alive()
1147 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in ra_data_block() local
1152 .sbi = sbi, in ra_data_block()
1170 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, in ra_data_block()
1173 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); in ra_data_block()
1189 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, in ra_data_block()
1192 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); in ra_data_block()
1208 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi), in ra_data_block()
1222 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE); in ra_data_block()
1223 f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE); in ra_data_block()
1241 .sbi = F2FS_I_SB(inode), in move_data_block()
1257 bool lfs_mode = f2fs_lfs_mode(fio.sbi); in move_data_block()
1258 int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) && in move_data_block()
1259 (fio.sbi->gc_mode != GC_URGENT_HIGH) ? in move_data_block()
1295 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false); in move_data_block()
1304 f2fs_down_write(&fio.sbi->io_order_lock); in move_data_block()
1306 mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), in move_data_block()
1323 f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO, in move_data_block()
1325 f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO, in move_data_block()
1329 if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || in move_data_block()
1340 f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, in move_data_block()
1343 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), in move_data_block()
1356 invalidate_mapping_pages(META_MAPPING(fio.sbi), in move_data_block()
1358 f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr); in move_data_block()
1362 dec_page_count(fio.sbi, F2FS_DIRTY_META); in move_data_block()
1377 f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE); in move_data_block()
1387 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, in move_data_block()
1391 f2fs_up_write(&fio.sbi->io_order_lock); in move_data_block()
1427 .sbi = F2FS_I_SB(inode), in move_data_page()
1475 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in gc_data_segment() argument
1479 struct super_block *sb = sbi->sb; in gc_data_segment()
1485 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); in gc_data_segment()
1487 start_addr = START_BLOCK(sbi, segno); in gc_data_segment()
1505 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) || in gc_data_segment()
1506 (!force_migrate && get_valid_blocks(sbi, segno, true) == in gc_data_segment()
1507 CAP_BLKS_PER_SEC(sbi))) in gc_data_segment()
1510 if (check_valid_map(sbi, segno, off) == 0) in gc_data_segment()
1514 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, in gc_data_segment()
1520 f2fs_ra_node_page(sbi, nid); in gc_data_segment()
1525 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs)) in gc_data_segment()
1529 f2fs_ra_node_page(sbi, dni.ino); in gc_data_segment()
1552 sbi->skipped_gc_rwsem++; in gc_data_segment()
1593 sbi->skipped_gc_rwsem++; in gc_data_segment()
1598 sbi->skipped_gc_rwsem++; in gc_data_segment()
1626 stat_inc_data_blk_count(sbi, 1, gc_type); in gc_data_segment()
1636 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, in __get_victim() argument
1639 struct sit_info *sit_i = SIT_I(sbi); in __get_victim()
1643 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, in __get_victim()
1649 static int do_garbage_collect(struct f2fs_sb_info *sbi, in do_garbage_collect() argument
1658 unsigned int end_segno = start_segno + sbi->segs_per_sec; in do_garbage_collect()
1660 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ? in do_garbage_collect()
1664 if (__is_large_section(sbi)) in do_garbage_collect()
1665 end_segno = rounddown(end_segno, sbi->segs_per_sec); in do_garbage_collect()
1672 if (f2fs_sb_has_blkzoned(sbi)) in do_garbage_collect()
1673 end_segno -= sbi->segs_per_sec - in do_garbage_collect()
1674 f2fs_usable_segs_in_sec(sbi, segno); in do_garbage_collect()
1676 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type); in do_garbage_collect()
1679 if (__is_large_section(sbi)) in do_garbage_collect()
1680 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), in do_garbage_collect()
1685 sum_page = f2fs_get_sum_page(sbi, segno++); in do_garbage_collect()
1691 sum_page = find_get_page(META_MAPPING(sbi), in do_garbage_collect()
1692 GET_SUM_BLOCK(sbi, segno)); in do_garbage_collect()
1706 sum_page = find_get_page(META_MAPPING(sbi), in do_garbage_collect()
1707 GET_SUM_BLOCK(sbi, segno)); in do_garbage_collect()
1710 if (get_valid_blocks(sbi, segno, false) == 0) in do_garbage_collect()
1712 if (gc_type == BG_GC && __is_large_section(sbi) && in do_garbage_collect()
1713 migrated >= sbi->migration_granularity) in do_garbage_collect()
1715 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi))) in do_garbage_collect()
1720 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT", in do_garbage_collect()
1722 set_sbi_flag(sbi, SBI_NEED_FSCK); in do_garbage_collect()
1723 f2fs_stop_checkpoint(sbi, false, in do_garbage_collect()
1736 submitted += gc_node_segment(sbi, sum->entries, segno, in do_garbage_collect()
1739 submitted += gc_data_segment(sbi, sum->entries, gc_list, in do_garbage_collect()
1743 stat_inc_seg_count(sbi, type, gc_type); in do_garbage_collect()
1744 sbi->gc_reclaimed_segs[sbi->gc_mode]++; in do_garbage_collect()
1749 get_valid_blocks(sbi, segno, false) == 0) in do_garbage_collect()
1752 if (__is_large_section(sbi)) in do_garbage_collect()
1753 sbi->next_victim_seg[gc_type] = in do_garbage_collect()
1760 f2fs_submit_merged_write(sbi, in do_garbage_collect()
1765 stat_inc_call_count(sbi->stat_info); in do_garbage_collect()
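
do_garbage_collect() (lines 1649-1765) processes one victim section: it walks the segments from start_segno up to end_segno (line 1658), reads each segment's summary block, checks the segment type against the SIT (lines 1660, 1720), and dispatches to gc_node_segment() or gc_data_segment() (lines 1736, 1739). A stripped-down sketch of that per-section dispatch, with seg_type(), gc_node_seg() and gc_data_seg() as hypothetical placeholders, is:

/* Per-section GC dispatch sketch; the helpers below are placeholders, and
 * the kernel code additionally re-checks valid blocks, checkpoint errors
 * and migration granularity before touching each segment. */
enum seg_kind { SEG_NODE, SEG_DATA };

static enum seg_kind seg_type(unsigned int segno) { return segno & 1 ? SEG_DATA : SEG_NODE; }
static int gc_node_seg(unsigned int segno)        { (void)segno; return 1; }
static int gc_data_seg(unsigned int segno)        { (void)segno; return 1; }

static int collect_section(unsigned int start_segno, unsigned int segs_per_sec)
{
    unsigned int segno, end_segno = start_segno + segs_per_sec;
    int freed = 0;

    for (segno = start_segno; segno < end_segno; segno++) {
        if (seg_type(segno) == SEG_NODE)
            freed += gc_node_seg(segno);   /* migrate live node blocks */
        else
            freed += gc_data_seg(segno);   /* migrate live data blocks */
    }
    return freed;                          /* segments whose live blocks were moved */
}
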
1770 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control) in f2fs_gc() argument
1783 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc, in f2fs_gc()
1785 get_pages(sbi, F2FS_DIRTY_NODES), in f2fs_gc()
1786 get_pages(sbi, F2FS_DIRTY_DENTS), in f2fs_gc()
1787 get_pages(sbi, F2FS_DIRTY_IMETA), in f2fs_gc()
1788 free_sections(sbi), in f2fs_gc()
1789 free_segments(sbi), in f2fs_gc()
1790 reserved_segments(sbi), in f2fs_gc()
1791 prefree_segments(sbi)); in f2fs_gc()
1793 cpc.reason = __get_cp_reason(sbi); in f2fs_gc()
1794 sbi->skipped_gc_rwsem = 0; in f2fs_gc()
1796 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) { in f2fs_gc()
1800 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_gc()
1805 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) { in f2fs_gc()
1811 if (prefree_segments(sbi)) { in f2fs_gc()
1812 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_gc()
1816 if (has_not_enough_free_secs(sbi, 0, 0)) in f2fs_gc()
1826 ret = __get_victim(sbi, &segno, gc_type); in f2fs_gc()
1830 f2fs_pinned_section_exists(DIRTY_I(sbi))) { in f2fs_gc()
1831 f2fs_unpin_all_sections(sbi, false); in f2fs_gc()
1837 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, in f2fs_gc()
1841 if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) in f2fs_gc()
1845 sbi->cur_victim_sec = NULL_SEGNO; in f2fs_gc()
1848 !has_not_enough_free_secs(sbi, in f2fs_gc()
1857 if (sbi->skipped_gc_rwsem) in f2fs_gc()
1862 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_gc()
1868 if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE && in f2fs_gc()
1869 prefree_segments(sbi)) { in f2fs_gc()
1870 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_gc()
1879 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0; in f2fs_gc()
1880 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno; in f2fs_gc()
1883 f2fs_unpin_all_sections(sbi, true); in f2fs_gc()
1885 trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed, in f2fs_gc()
1886 get_pages(sbi, F2FS_DIRTY_NODES), in f2fs_gc()
1887 get_pages(sbi, F2FS_DIRTY_DENTS), in f2fs_gc()
1888 get_pages(sbi, F2FS_DIRTY_IMETA), in f2fs_gc()
1889 free_sections(sbi), in f2fs_gc()
1890 free_segments(sbi), in f2fs_gc()
1891 reserved_segments(sbi), in f2fs_gc()
1892 prefree_segments(sbi)); in f2fs_gc()
1894 f2fs_up_write(&sbi->gc_lock); in f2fs_gc()
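
f2fs_gc() (lines 1770-1894) is the retry loop driving all of the above: it picks a victim with __get_victim() (line 1826), cleans it with do_garbage_collect() (line 1837), and for foreground GC keeps going until enough free sections exist, writing a checkpoint when prefree segments accumulate or progress stalls; the gc_lock held on entry is dropped at line 1894. Roughly, and with the static helpers below standing in for the real calls, the foreground path looks like:

/* Control-flow sketch of foreground GC; every helper here is a stub that
 * stands in for the f2fs function named in its comment. */
#include <stdbool.h>

static bool has_enough_free(void)             { return false; }              /* free_sections check   */
static bool pick_victim(unsigned int *segno)  { *segno = 0; return false; }  /* __get_victim()        */
static int  clean_section(unsigned int segno) { (void)segno; return 0; }     /* do_garbage_collect()  */
static int  write_checkpoint(void)            { return 0; }                  /* f2fs_write_checkpoint */

static int fg_gc(void)
{
    int err = 0;

    while (!has_enough_free()) {
        unsigned int segno;

        if (!pick_victim(&segno))        /* no victim left: give up           */
            break;
        clean_section(segno);            /* migrate live blocks out of victim */

        err = write_checkpoint();        /* turn prefree segments into free   */
        if (err)
            break;
    }
    return err;
}
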
1915 static void init_atgc_management(struct f2fs_sb_info *sbi) in init_atgc_management() argument
1917 struct atgc_management *am = &sbi->am; in init_atgc_management()
1919 if (test_opt(sbi, ATGC) && in init_atgc_management()
1920 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD) in init_atgc_management()
1933 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) in f2fs_build_gc_manager() argument
1935 DIRTY_I(sbi)->v_ops = &default_v_ops; in f2fs_build_gc_manager()
1937 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; in f2fs_build_gc_manager()
1940 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi)) in f2fs_build_gc_manager()
1941 SIT_I(sbi)->last_victim[ALLOC_NEXT] = in f2fs_build_gc_manager()
1942 GET_SEGNO(sbi, FDEV(0).end_blk) + 1; in f2fs_build_gc_manager()
1944 init_atgc_management(sbi); in f2fs_build_gc_manager()
1947 static int free_segment_range(struct f2fs_sb_info *sbi, in free_segment_range() argument
1957 MAIN_SECS(sbi) -= secs; in free_segment_range()
1958 start = MAIN_SECS(sbi) * sbi->segs_per_sec; in free_segment_range()
1959 end = MAIN_SEGS(sbi) - 1; in free_segment_range()
1961 mutex_lock(&DIRTY_I(sbi)->seglist_lock); in free_segment_range()
1963 if (SIT_I(sbi)->last_victim[gc_mode] >= start) in free_segment_range()
1964 SIT_I(sbi)->last_victim[gc_mode] = 0; in free_segment_range()
1967 if (sbi->next_victim_seg[gc_type] >= start) in free_segment_range()
1968 sbi->next_victim_seg[gc_type] = NULL_SEGNO; in free_segment_range()
1969 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); in free_segment_range()
1973 f2fs_allocate_segment_for_resize(sbi, type, start, end); in free_segment_range()
1976 for (segno = start; segno <= end; segno += sbi->segs_per_sec) { in free_segment_range()
1982 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true); in free_segment_range()
1985 if (!gc_only && get_valid_blocks(sbi, segno, true)) { in free_segment_range()
1997 err = f2fs_write_checkpoint(sbi, &cpc); in free_segment_range()
2001 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); in free_segment_range()
2003 f2fs_err(sbi, "segno %u should be free but still inuse!", in free_segment_range()
2005 f2fs_bug_on(sbi, 1); in free_segment_range()
2008 MAIN_SECS(sbi) += secs; in free_segment_range()
2012 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) in update_sb_metadata() argument
2014 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); in update_sb_metadata()
2019 int segs = secs * sbi->segs_per_sec; in update_sb_metadata()
2021 f2fs_down_write(&sbi->sb_lock); in update_sb_metadata()
2032 (long long)segs * sbi->blocks_per_seg); in update_sb_metadata()
2033 if (f2fs_is_multi_device(sbi)) { in update_sb_metadata()
2034 int last_dev = sbi->s_ndevs - 1; in update_sb_metadata()
2042 f2fs_up_write(&sbi->sb_lock); in update_sb_metadata()
2045 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) in update_fs_metadata() argument
2047 int segs = secs * sbi->segs_per_sec; in update_fs_metadata()
2048 long long blks = (long long)segs * sbi->blocks_per_seg; in update_fs_metadata()
2050 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count); in update_fs_metadata()
2052 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; in update_fs_metadata()
2053 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs; in update_fs_metadata()
2054 MAIN_SECS(sbi) += secs; in update_fs_metadata()
2055 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; in update_fs_metadata()
2056 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; in update_fs_metadata()
2057 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks); in update_fs_metadata()
2059 if (f2fs_is_multi_device(sbi)) { in update_fs_metadata()
2060 int last_dev = sbi->s_ndevs - 1; in update_fs_metadata()
2068 (int)(blks >> sbi->log_blocks_per_blkz); in update_fs_metadata()
2073 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) in f2fs_resize_fs() argument
2081 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count); in f2fs_resize_fs()
2085 if (f2fs_is_multi_device(sbi)) { in f2fs_resize_fs()
2086 int last_dev = sbi->s_ndevs - 1; in f2fs_resize_fs()
2089 if (block_count + last_segs * sbi->blocks_per_seg <= in f2fs_resize_fs()
2095 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem); in f2fs_resize_fs()
2102 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { in f2fs_resize_fs()
2103 f2fs_err(sbi, "Should run fsck to repair first."); in f2fs_resize_fs()
2107 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_resize_fs()
2108 f2fs_err(sbi, "Checkpoint should be enabled."); in f2fs_resize_fs()
2113 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); in f2fs_resize_fs()
2116 if (!f2fs_down_write_trylock(&sbi->gc_lock)) in f2fs_resize_fs()
2120 f2fs_lock_op(sbi); in f2fs_resize_fs()
2122 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2123 if (shrunk_blocks + valid_user_blocks(sbi) + in f2fs_resize_fs()
2124 sbi->current_reserved_blocks + sbi->unusable_block_count + in f2fs_resize_fs()
2125 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) in f2fs_resize_fs()
2127 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2132 err = free_segment_range(sbi, secs, true); in f2fs_resize_fs()
2135 f2fs_unlock_op(sbi); in f2fs_resize_fs()
2136 f2fs_up_write(&sbi->gc_lock); in f2fs_resize_fs()
2140 freeze_super(sbi->sb); in f2fs_resize_fs()
2141 f2fs_down_write(&sbi->gc_lock); in f2fs_resize_fs()
2142 f2fs_down_write(&sbi->cp_global_sem); in f2fs_resize_fs()
2144 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2145 if (shrunk_blocks + valid_user_blocks(sbi) + in f2fs_resize_fs()
2146 sbi->current_reserved_blocks + sbi->unusable_block_count + in f2fs_resize_fs()
2147 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) in f2fs_resize_fs()
2150 sbi->user_block_count -= shrunk_blocks; in f2fs_resize_fs()
2151 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2155 set_sbi_flag(sbi, SBI_IS_RESIZEFS); in f2fs_resize_fs()
2156 err = free_segment_range(sbi, secs, false); in f2fs_resize_fs()
2160 update_sb_metadata(sbi, -secs); in f2fs_resize_fs()
2162 err = f2fs_commit_super(sbi, false); in f2fs_resize_fs()
2164 update_sb_metadata(sbi, secs); in f2fs_resize_fs()
2168 update_fs_metadata(sbi, -secs); in f2fs_resize_fs()
2169 clear_sbi_flag(sbi, SBI_IS_RESIZEFS); in f2fs_resize_fs()
2170 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_resize_fs()
2172 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_resize_fs()
2174 update_fs_metadata(sbi, secs); in f2fs_resize_fs()
2175 update_sb_metadata(sbi, secs); in f2fs_resize_fs()
2176 f2fs_commit_super(sbi, false); in f2fs_resize_fs()
2179 clear_sbi_flag(sbi, SBI_IS_RESIZEFS); in f2fs_resize_fs()
2181 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_resize_fs()
2182 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); in f2fs_resize_fs()
2184 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2185 sbi->user_block_count += shrunk_blocks; in f2fs_resize_fs()
2186 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2189 f2fs_up_write(&sbi->cp_global_sem); in f2fs_resize_fs()
2190 f2fs_up_write(&sbi->gc_lock); in f2fs_resize_fs()
2191 thaw_super(sbi->sb); in f2fs_resize_fs()
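
f2fs_resize_fs() (lines 2073-2191) shrinks the filesystem by whole sections: the requested block_count must be section-aligned (the div_u64_rem check at line 2095), the number of blocks to drop is converted to a section count at line 2113, the tail sections are emptied via free_segment_range(), and the superblock/checkpoint updates are rolled back if the commit fails (lines 2164, 2174-2176). A small worked example of that alignment and section arithmetic, using a made-up geometry of 512 blocks per segment and one segment per section, is:

/* Worked example of the resize arithmetic; the geometry is assumed for
 * illustration, not read from a device. */
#include <stdint.h>
#include <stdio.h>

#define BLKS_PER_SEG  512ULL
#define SEGS_PER_SEC  1ULL
#define BLKS_PER_SEC  (BLKS_PER_SEG * SEGS_PER_SEC)

int main(void)
{
    uint64_t old_block_count = 1048576;              /* current size in blocks */
    uint64_t new_block_count = 786432;               /* requested size         */

    if (new_block_count % BLKS_PER_SEC) {            /* cf. div_u64_rem, 2095  */
        fprintf(stderr, "new size must be section-aligned\n");
        return 1;
    }

    uint64_t shrunk_blocks = old_block_count - new_block_count;
    uint64_t secs = shrunk_blocks / BLKS_PER_SEC;    /* cf. line 2113          */

    /* 262144 blocks / 512 blocks per section = 512 sections to free and drop */
    printf("freeing %llu sections (%llu blocks)\n",
           (unsigned long long)secs, (unsigned long long)shrunk_blocks);
    return 0;
}
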