Lines matching refs: sbi (all references to the f2fs superblock-info pointer in the garbage-collection code, fs/f2fs/gc.c)

31 struct f2fs_sb_info *sbi = data; in gc_thread_func() local
32 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func()
33 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func()
34 wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq; in gc_thread_func()
49 if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) in gc_thread_func()
57 stat_other_skip_bggc_count(sbi); in gc_thread_func()
63 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func()
65 stat_other_skip_bggc_count(sbi); in gc_thread_func()
69 if (time_to_inject(sbi, FAULT_CHECKPOINT)) { in gc_thread_func()
70 f2fs_show_injection_info(sbi, FAULT_CHECKPOINT); in gc_thread_func()
71 f2fs_stop_checkpoint(sbi, false, in gc_thread_func()
75 if (!sb_start_write_trylock(sbi->sb)) { in gc_thread_func()
76 stat_other_skip_bggc_count(sbi); in gc_thread_func()
93 if (sbi->gc_mode == GC_URGENT_HIGH) { in gc_thread_func()
95 f2fs_down_write(&sbi->gc_lock); in gc_thread_func()
100 f2fs_down_write(&sbi->gc_lock); in gc_thread_func()
102 } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) { in gc_thread_func()
103 stat_other_skip_bggc_count(sbi); in gc_thread_func()
107 if (!is_idle(sbi, GC_TIME)) { in gc_thread_func()
109 f2fs_up_write(&sbi->gc_lock); in gc_thread_func()
110 stat_io_skip_bggc_count(sbi); in gc_thread_func()
114 if (has_enough_invalid_blocks(sbi)) in gc_thread_func()
120 stat_inc_bggc_count(sbi->stat_info); in gc_thread_func()
122 sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC; in gc_thread_func()
129 if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO)) in gc_thread_func()
135 trace_f2fs_background_gc(sbi->sb, wait_ms, in gc_thread_func()
136 prefree_segments(sbi), free_segments(sbi)); in gc_thread_func()
139 f2fs_balance_fs_bg(sbi, true); in gc_thread_func()
141 sb_end_write(sbi->sb); in gc_thread_func()
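The block above is gc_thread_func(), the background-GC kthread's main loop: sleep for wait_ms, wake early when foreground GC is requested on fggc_wq, skip the pass if the filesystem is frozen or busy, otherwise take gc_lock and run f2fs_gc(). A rough user-space model of that control flow, with pthreads standing in for the kthread and wait queues; all names and numbers below are illustrative, not f2fs's:

/*
 * Rough model of gc_thread_func(): sleep for wait_ms, wake early when
 * foreground GC is requested, back off when there is nothing urgent.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;	/* gc_wait_queue_head */
static bool fg_gc_requested;				/* fggc_wq, roughly */
static bool stopping;

static void *gc_thread(void *arg)
{
	unsigned int wait_ms = 30000;

	(void)arg;
	pthread_mutex_lock(&lock);
	while (1) {
		struct timespec ts;
		bool foreground;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += wait_ms / 1000;
		pthread_cond_timedwait(&wake, &lock, &ts);
		if (stopping)			/* kthread_should_stop() */
			break;

		foreground = fg_gc_requested;
		fg_gc_requested = false;
		pthread_mutex_unlock(&lock);

		/* here the real thread takes gc_lock and calls f2fs_gc() */
		printf("gc pass, foreground=%d\n", foreground);
		if (!foreground && wait_ms < 60000)
			wait_ms *= 2;		/* nothing urgent: back off */

		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, gc_thread, NULL);

	pthread_mutex_lock(&lock);		/* ask for foreground GC */
	fg_gc_requested = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);

	sleep(1);
	pthread_mutex_lock(&lock);		/* f2fs_stop_gc_thread() */
	stopping = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}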
147 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi) in f2fs_start_gc_thread() argument
150 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_start_gc_thread()
153 gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL); in f2fs_start_gc_thread()
166 sbi->gc_thread = gc_th; in f2fs_start_gc_thread()
167 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); in f2fs_start_gc_thread()
168 init_waitqueue_head(&sbi->gc_thread->fggc_wq); in f2fs_start_gc_thread()
169 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, in f2fs_start_gc_thread()
174 sbi->gc_thread = NULL; in f2fs_start_gc_thread()
180 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi) in f2fs_stop_gc_thread() argument
182 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in f2fs_stop_gc_thread()
189 sbi->gc_thread = NULL; in f2fs_stop_gc_thread()
192 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type) in select_gc_type() argument
197 if (sbi->am.atgc_enabled) in select_gc_type()
205 switch (sbi->gc_mode) { in select_gc_type()
221 static void select_policy(struct f2fs_sb_info *sbi, int gc_type, in select_policy() argument
224 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in select_policy()
237 p->gc_mode = select_gc_type(sbi, gc_type); in select_policy()
238 p->ofs_unit = sbi->segs_per_sec; in select_policy()
239 if (__is_large_section(sbi)) { in select_policy()
242 0, MAIN_SECS(sbi)); in select_policy()
254 (sbi->gc_mode != GC_URGENT_HIGH) && in select_policy()
256 p->max_search > sbi->max_victim_search) in select_policy()
257 p->max_search = sbi->max_victim_search; in select_policy()
260 if (test_opt(sbi, NOHEAP) && in select_policy()
264 p->offset = SIT_I(sbi)->last_victim[p->gc_mode]; in select_policy()
267 static unsigned int get_max_cost(struct f2fs_sb_info *sbi, in get_max_cost() argument
272 return sbi->blocks_per_seg; in get_max_cost()
278 return 2 * sbi->blocks_per_seg * p->ofs_unit; in get_max_cost()
287 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) in check_bg_victims() argument
289 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in check_bg_victims()
297 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { in check_bg_victims()
298 if (sec_usage_check(sbi, secno)) in check_bg_victims()
301 return GET_SEG_FROM_SEC(sbi, secno); in check_bg_victims()
306 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) in get_cb_cost() argument
308 struct sit_info *sit_i = SIT_I(sbi); in get_cb_cost()
309 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in get_cb_cost()
310 unsigned int start = GET_SEG_FROM_SEC(sbi, secno); in get_cb_cost()
316 unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno); in get_cb_cost()
319 mtime += get_seg_entry(sbi, start + i)->mtime; in get_cb_cost()
320 vblocks = get_valid_blocks(sbi, segno, true); in get_cb_cost()
325 u = (vblocks * 100) >> sbi->log_blocks_per_seg; in get_cb_cost()
339 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, in get_gc_cost() argument
343 return get_seg_entry(sbi, segno)->ckpt_valid_blocks; in get_gc_cost()
347 return get_valid_blocks(sbi, segno, true); in get_gc_cost()
349 return get_cb_cost(sbi, segno); in get_gc_cost()
351 f2fs_bug_on(sbi, 1); in get_gc_cost()
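get_cb_cost() above is the classic cost-benefit policy: utilization u is the percentage of still-valid blocks, age is normalized against the SIT's min/max mtime, and the benefit (100 * (100 - u) * age) / (100 + u) is subtracted from UINT_MAX so that the minimum-cost scan can keep comparing with "<". A standalone model of the same arithmetic; the segment geometry and mtimes below are made up:

/*
 * Standalone model of get_cb_cost(): benefit = reclaimable space * age,
 * cost of cleaning grows with utilization u. f2fs returns UINT_MAX
 * minus the benefit term so that the smallest value wins.
 */
#include <limits.h>
#include <stdio.h>

static unsigned int cb_cost(unsigned long long mtime,
			    unsigned long long min_mtime,
			    unsigned long long max_mtime,
			    unsigned int valid_blocks,
			    unsigned int blocks_per_seg)
{
	unsigned int u = valid_blocks * 100 / blocks_per_seg;
	unsigned int age = 100;

	if (max_mtime != min_mtime)
		age = 100 - (unsigned int)(100 * (mtime - min_mtime) /
					   (max_mtime - min_mtime));

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

int main(void)
{
	/* an old, 10%-utilized segment gets a lower cost than a
	 * recently written, 90%-utilized one */
	printf("%u\n", cb_cost(10, 0, 100, 51, 512));
	printf("%u\n", cb_cost(90, 0, 100, 460, 512));
	return 0;
}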
367 static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi, in attach_victim_entry() argument
372 struct atgc_management *am = &sbi->am; in attach_victim_entry()
390 static void insert_victim_entry(struct f2fs_sb_info *sbi, in insert_victim_entry() argument
393 struct atgc_management *am = &sbi->am; in insert_victim_entry()
398 p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most); in insert_victim_entry()
399 attach_victim_entry(sbi, mtime, segno, parent, p, left_most); in insert_victim_entry()
402 static void add_victim_entry(struct f2fs_sb_info *sbi, in add_victim_entry() argument
405 struct sit_info *sit_i = SIT_I(sbi); in add_victim_entry()
406 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in add_victim_entry()
407 unsigned int start = GET_SEG_FROM_SEC(sbi, secno); in add_victim_entry()
411 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in add_victim_entry()
413 get_valid_blocks(sbi, segno, true) == 0) in add_victim_entry()
417 for (i = 0; i < sbi->segs_per_sec; i++) in add_victim_entry()
418 mtime += get_seg_entry(sbi, start + i)->mtime; in add_victim_entry()
419 mtime = div_u64(mtime, sbi->segs_per_sec); in add_victim_entry()
435 insert_victim_entry(sbi, mtime, segno); in add_victim_entry()
438 static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi, in lookup_central_victim() argument
441 struct atgc_management *am = &sbi->am; in lookup_central_victim()
445 f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most); in lookup_central_victim()
450 static void atgc_lookup_victim(struct f2fs_sb_info *sbi, in atgc_lookup_victim() argument
453 struct sit_info *sit_i = SIT_I(sbi); in atgc_lookup_victim()
454 struct atgc_management *am = &sbi->am; in atgc_lookup_victim()
463 unsigned int sec_blocks = BLKS_PER_SEC(sbi); in atgc_lookup_victim()
497 vblocks = get_valid_blocks(sbi, ve->segno, true); in atgc_lookup_victim()
498 f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks); in atgc_lookup_victim()
504 f2fs_bug_on(sbi, age + u >= UINT_MAX); in atgc_lookup_victim()
526 static void atssr_lookup_victim(struct f2fs_sb_info *sbi, in atssr_lookup_victim() argument
529 struct sit_info *sit_i = SIT_I(sbi); in atssr_lookup_victim()
530 struct atgc_management *am = &sbi->am; in atssr_lookup_victim()
537 unsigned int seg_blocks = sbi->blocks_per_seg; in atssr_lookup_victim()
550 node = lookup_central_victim(sbi, p); in atssr_lookup_victim()
566 vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks; in atssr_lookup_victim()
567 f2fs_bug_on(sbi, !vblocks); in atssr_lookup_victim()
599 static void lookup_victim_by_age(struct f2fs_sb_info *sbi, in lookup_victim_by_age() argument
602 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, in lookup_victim_by_age()
603 &sbi->am.root, true)); in lookup_victim_by_age()
606 atgc_lookup_victim(sbi, p); in lookup_victim_by_age()
608 atssr_lookup_victim(sbi, p); in lookup_victim_by_age()
610 f2fs_bug_on(sbi, 1); in lookup_victim_by_age()
613 static void release_victim_entry(struct f2fs_sb_info *sbi) in release_victim_entry() argument
615 struct atgc_management *am = &sbi->am; in release_victim_entry()
626 f2fs_bug_on(sbi, am->victim_count); in release_victim_entry()
627 f2fs_bug_on(sbi, !list_empty(&am->victim_list)); in release_victim_entry()
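add_victim_entry() above keys each ATGC candidate section by the mean mtime of its segments before inserting it into the rb-tree. A minimal sketch of that averaging; the struct is a mock-up, and the kernel divides with div_u64():

/* Sketch of add_victim_entry()'s key computation: a section's age key
 * is the mean mtime of its segments. */
#include <stdio.h>

struct seg_entry_mock { unsigned long long mtime; };

static unsigned long long section_mtime(const struct seg_entry_mock *segs,
					unsigned int segs_per_sec)
{
	unsigned long long mtime = 0;
	unsigned int i;

	for (i = 0; i < segs_per_sec; i++)
		mtime += segs[i].mtime;
	return mtime / segs_per_sec;
}

int main(void)
{
	struct seg_entry_mock sec[4] = { {100}, {140}, {120}, {160} };

	printf("section mtime key = %llu\n", section_mtime(sec, 4));
	return 0;
}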
638 static int get_victim_by_default(struct f2fs_sb_info *sbi, in get_victim_by_default() argument
642 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in get_victim_by_default()
643 struct sit_info *sm = SIT_I(sbi); in get_victim_by_default()
652 last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec; in get_victim_by_default()
656 p.age_threshold = sbi->am.age_threshold; in get_victim_by_default()
659 select_policy(sbi, gc_type, type, &p); in get_victim_by_default()
662 p.min_cost = get_max_cost(sbi, &p); in get_victim_by_default()
668 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX; in get_victim_by_default()
671 if (!get_valid_blocks(sbi, *result, false)) { in get_victim_by_default()
676 if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) in get_victim_by_default()
687 if (__is_large_section(sbi) && p.alloc_mode == LFS) { in get_victim_by_default()
688 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) { in get_victim_by_default()
689 p.min_segno = sbi->next_victim_seg[BG_GC]; in get_victim_by_default()
691 sbi->next_victim_seg[BG_GC] = NULL_SEGNO; in get_victim_by_default()
695 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) { in get_victim_by_default()
696 p.min_segno = sbi->next_victim_seg[FG_GC]; in get_victim_by_default()
698 sbi->next_victim_seg[FG_GC] = NULL_SEGNO; in get_victim_by_default()
705 p.min_segno = check_bg_victims(sbi); in get_victim_by_default()
743 secno = GET_SEC_FROM_SEG(sbi, segno); in get_victim_by_default()
745 if (sec_usage_check(sbi, secno)) in get_victim_by_default()
749 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in get_victim_by_default()
755 if (get_ckpt_valid_blocks(sbi, segno, true)) in get_victim_by_default()
763 if (!f2fs_segment_has_free_slot(sbi, segno)) in get_victim_by_default()
772 add_victim_entry(sbi, &p, segno); in get_victim_by_default()
776 cost = get_gc_cost(sbi, segno, &p); in get_victim_by_default()
790 (MAIN_SECS(sbi) * sbi->segs_per_sec); in get_victim_by_default()
797 lookup_victim_by_age(sbi, &p); in get_victim_by_default()
798 release_victim_entry(sbi); in get_victim_by_default()
812 secno = GET_SEC_FROM_SEG(sbi, p.min_segno); in get_victim_by_default()
814 sbi->cur_victim_sec = secno; in get_victim_by_default()
823 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, in get_victim_by_default()
824 sbi->cur_victim_sec, in get_victim_by_default()
825 prefree_segments(sbi), free_segments(sbi)); in get_victim_by_default()
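get_victim_by_default() above is the generic victim scan: walk the dirty segments, skip sections already in use, keep the minimum-cost candidate, and bail out after max_search probes. A condensed user-space model with a plain array instead of dirty_segmap and greedy (valid-block-count) cost; all structures are mock-ups:

/*
 * Condensed model of the scan in get_victim_by_default(): keep the
 * minimum-cost dirty segment, stop after max_search probes.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_SEGS		64
#define NULL_SEGNO	(~0u)

static bool dirty[NR_SEGS];			/* dirty_segmap, roughly */
static unsigned int valid_blocks[NR_SEGS];

static unsigned int pick_victim(unsigned int max_search)
{
	unsigned int segno, searched = 0;
	unsigned int min_cost = ~0u, min_segno = NULL_SEGNO;

	for (segno = 0; segno < NR_SEGS && searched < max_search; segno++) {
		if (!dirty[segno])
			continue;
		searched++;			/* one probe spent */
		if (valid_blocks[segno] < min_cost) {
			min_cost = valid_blocks[segno];
			min_segno = segno;
		}
	}
	return min_segno;
}

int main(void)
{
	dirty[3] = true;  valid_blocks[3] = 400;
	dirty[17] = true; valid_blocks[17] = 25;
	printf("victim = %u\n", pick_victim(16));  /* 17: fewest valid blocks */
	return 0;
}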
872 static int check_valid_map(struct f2fs_sb_info *sbi, in check_valid_map() argument
875 struct sit_info *sit_i = SIT_I(sbi); in check_valid_map()
880 sentry = get_seg_entry(sbi, segno); in check_valid_map()
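check_valid_map() above reduces to a bitmap test on the segment's cur_valid_map under the SIT lock. A minimal model without locking; note that f2fs's own f2fs_test_bit() uses MSB-first order within each byte, while this sketch uses plain LSB-first order:

/* Minimal model of the validity-bitmap test in check_valid_map(). */
#include <stdio.h>

static int test_bit_lsb(unsigned int nr, const unsigned char *addr)
{
	return (addr[nr >> 3] >> (nr & 7)) & 1;
}

int main(void)
{
	unsigned char cur_valid_map[64] = { 0 };

	cur_valid_map[1] = 0x02;	/* mark block 9 of the segment valid */
	printf("%d %d\n", test_bit_lsb(9, cur_valid_map),
			  test_bit_lsb(10, cur_valid_map));	/* 1 0 */
	return 0;
}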
891 static int gc_node_segment(struct f2fs_sb_info *sbi, in gc_node_segment() argument
900 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); in gc_node_segment()
902 start_addr = START_BLOCK(sbi, segno); in gc_node_segment()
908 atomic_inc(&sbi->wb_sync_req[NODE]); in gc_node_segment()
917 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) in gc_node_segment()
920 if (check_valid_map(sbi, segno, off) == 0) in gc_node_segment()
924 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, in gc_node_segment()
930 f2fs_ra_node_page(sbi, nid); in gc_node_segment()
935 node_page = f2fs_get_node_page(sbi, nid); in gc_node_segment()
940 if (check_valid_map(sbi, segno, off) == 0) { in gc_node_segment()
945 if (f2fs_get_node_info(sbi, nid, &ni, false)) { in gc_node_segment()
958 stat_inc_node_blk_count(sbi, 1, gc_type); in gc_node_segment()
965 atomic_dec(&sbi->wb_sync_req[NODE]); in gc_node_segment()
998 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in is_alive() argument
1009 node_page = f2fs_get_node_page(sbi, nid); in is_alive()
1013 if (f2fs_get_node_info(sbi, nid, dni, false)) { in is_alive()
1019 f2fs_warn(sbi, "%s: valid data with mismatched node version.", in is_alive()
1021 set_sbi_flag(sbi, SBI_NEED_FSCK); in is_alive()
1024 if (f2fs_check_nid_range(sbi, dni->ino)) { in is_alive()
1032 f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u", in is_alive()
1043 unsigned int segno = GET_SEGNO(sbi, blkaddr); in is_alive()
1044 unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); in is_alive()
1046 if (unlikely(check_valid_map(sbi, segno, offset))) { in is_alive()
1047 if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) { in is_alive()
1048 f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u", in is_alive()
1050 f2fs_bug_on(sbi, 1); in is_alive()
1061 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in ra_data_block() local
1067 .sbi = sbi, in ra_data_block()
1085 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, in ra_data_block()
1103 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, in ra_data_block()
1121 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi), in ra_data_block()
1135 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); in ra_data_block()
1136 f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); in ra_data_block()
1154 .sbi = F2FS_I_SB(inode), in move_data_block()
1170 bool lfs_mode = f2fs_lfs_mode(fio.sbi); in move_data_block()
1171 int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) && in move_data_block()
1172 (fio.sbi->gc_mode != GC_URGENT_HIGH) ? in move_data_block()
1218 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false); in move_data_block()
1227 f2fs_down_write(&fio.sbi->io_order_lock); in move_data_block()
1229 mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), in move_data_block()
1246 f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); in move_data_block()
1247 f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); in move_data_block()
1250 if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || in move_data_block()
1261 f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, in move_data_block()
1264 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), in move_data_block()
1277 invalidate_mapping_pages(META_MAPPING(fio.sbi), in move_data_block()
1279 f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr); in move_data_block()
1283 dec_page_count(fio.sbi, F2FS_DIRTY_META); in move_data_block()
1299 f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE); in move_data_block()
1309 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, in move_data_block()
1313 f2fs_up_write(&fio.sbi->io_order_lock); in move_data_block()
1358 .sbi = F2FS_I_SB(inode), in move_data_page()
1407 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in gc_data_segment() argument
1411 struct super_block *sb = sbi->sb; in gc_data_segment()
1417 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); in gc_data_segment()
1419 start_addr = START_BLOCK(sbi, segno); in gc_data_segment()
1437 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) || in gc_data_segment()
1438 (!force_migrate && get_valid_blocks(sbi, segno, true) == in gc_data_segment()
1439 BLKS_PER_SEC(sbi))) in gc_data_segment()
1442 if (check_valid_map(sbi, segno, off) == 0) in gc_data_segment()
1446 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, in gc_data_segment()
1452 f2fs_ra_node_page(sbi, nid); in gc_data_segment()
1457 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs)) in gc_data_segment()
1461 f2fs_ra_node_page(sbi, dni.ino); in gc_data_segment()
1475 sbi->skipped_gc_rwsem++; in gc_data_segment()
1516 sbi->skipped_gc_rwsem++; in gc_data_segment()
1521 sbi->skipped_gc_rwsem++; in gc_data_segment()
1549 stat_inc_data_blk_count(sbi, 1, gc_type); in gc_data_segment()
1559 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, in __get_victim() argument
1562 struct sit_info *sit_i = SIT_I(sbi); in __get_victim()
1566 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, in __get_victim()
1572 static int do_garbage_collect(struct f2fs_sb_info *sbi, in do_garbage_collect() argument
1581 unsigned int end_segno = start_segno + sbi->segs_per_sec; in do_garbage_collect()
1583 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ? in do_garbage_collect()
1587 if (__is_large_section(sbi)) in do_garbage_collect()
1588 end_segno = rounddown(end_segno, sbi->segs_per_sec); in do_garbage_collect()
1595 if (f2fs_sb_has_blkzoned(sbi)) in do_garbage_collect()
1596 end_segno -= sbi->segs_per_sec - in do_garbage_collect()
1597 f2fs_usable_segs_in_sec(sbi, segno); in do_garbage_collect()
1599 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type); in do_garbage_collect()
1602 if (__is_large_section(sbi)) in do_garbage_collect()
1603 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), in do_garbage_collect()
1608 sum_page = f2fs_get_sum_page(sbi, segno++); in do_garbage_collect()
1614 sum_page = find_get_page(META_MAPPING(sbi), in do_garbage_collect()
1615 GET_SUM_BLOCK(sbi, segno)); in do_garbage_collect()
1629 sum_page = find_get_page(META_MAPPING(sbi), in do_garbage_collect()
1630 GET_SUM_BLOCK(sbi, segno)); in do_garbage_collect()
1633 if (get_valid_blocks(sbi, segno, false) == 0) in do_garbage_collect()
1635 if (gc_type == BG_GC && __is_large_section(sbi) && in do_garbage_collect()
1636 migrated >= sbi->migration_granularity) in do_garbage_collect()
1638 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi))) in do_garbage_collect()
1643 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT", in do_garbage_collect()
1645 set_sbi_flag(sbi, SBI_NEED_FSCK); in do_garbage_collect()
1646 f2fs_stop_checkpoint(sbi, false, in do_garbage_collect()
1659 submitted += gc_node_segment(sbi, sum->entries, segno, in do_garbage_collect()
1662 submitted += gc_data_segment(sbi, sum->entries, gc_list, in do_garbage_collect()
1666 stat_inc_seg_count(sbi, type, gc_type); in do_garbage_collect()
1667 sbi->gc_reclaimed_segs[sbi->gc_mode]++; in do_garbage_collect()
1672 get_valid_blocks(sbi, segno, false) == 0) in do_garbage_collect()
1675 if (__is_large_section(sbi) && segno + 1 < end_segno) in do_garbage_collect()
1676 sbi->next_victim_seg[gc_type] = segno + 1; in do_garbage_collect()
1682 f2fs_submit_merged_write(sbi, in do_garbage_collect()
1687 stat_inc_call_count(sbi->stat_info); in do_garbage_collect()
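do_garbage_collect() above walks every segment of the victim section, reads its summary block, and dispatches to gc_node_segment() or gc_data_segment(); a segment only counts as freed once no valid blocks remain. A skeleton of that per-section loop, with migration mocked out:

/*
 * Skeleton of do_garbage_collect()'s per-section loop: each segment's
 * summary type picks node vs data GC; count a segment as freed only
 * when it ends up with zero valid blocks.
 */
#include <stdio.h>

enum seg_type { SEG_NODE, SEG_DATA };

struct segment {
	enum seg_type type;
	unsigned int valid_blocks;
};

static void migrate(struct segment *seg)
{
	/* stands in for gc_node_segment() / gc_data_segment() */
	printf("migrating %s segment\n",
	       seg->type == SEG_NODE ? "node" : "data");
	seg->valid_blocks = 0;		/* pretend every block moved */
}

static int collect_section(struct segment *segs, unsigned int segs_per_sec)
{
	unsigned int i, seg_freed = 0;

	for (i = 0; i < segs_per_sec; i++) {
		if (segs[i].valid_blocks)
			migrate(&segs[i]);
		if (!segs[i].valid_blocks)
			seg_freed++;
	}
	return seg_freed;
}

int main(void)
{
	struct segment sec[2] = { { SEG_DATA, 12 }, { SEG_NODE, 0 } };

	printf("freed %d segments\n", collect_section(sec, 2));
	return 0;
}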
1692 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, in f2fs_gc() argument
1704 unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC]; in f2fs_gc()
1708 trace_f2fs_gc_begin(sbi->sb, sync, background, in f2fs_gc()
1709 get_pages(sbi, F2FS_DIRTY_NODES), in f2fs_gc()
1710 get_pages(sbi, F2FS_DIRTY_DENTS), in f2fs_gc()
1711 get_pages(sbi, F2FS_DIRTY_IMETA), in f2fs_gc()
1712 free_sections(sbi), in f2fs_gc()
1713 free_segments(sbi), in f2fs_gc()
1714 reserved_segments(sbi), in f2fs_gc()
1715 prefree_segments(sbi)); in f2fs_gc()
1717 cpc.reason = __get_cp_reason(sbi); in f2fs_gc()
1718 sbi->skipped_gc_rwsem = 0; in f2fs_gc()
1721 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) { in f2fs_gc()
1725 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_gc()
1730 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) { in f2fs_gc()
1736 if (prefree_segments(sbi) && in f2fs_gc()
1737 !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { in f2fs_gc()
1738 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_gc()
1742 if (has_not_enough_free_secs(sbi, 0, 0)) in f2fs_gc()
1751 ret = __get_victim(sbi, &segno, gc_type); in f2fs_gc()
1755 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force); in f2fs_gc()
1757 seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) in f2fs_gc()
1762 if (sbi->skipped_atomic_files[FG_GC] > last_skipped || in f2fs_gc()
1763 sbi->skipped_gc_rwsem) in f2fs_gc()
1765 last_skipped = sbi->skipped_atomic_files[FG_GC]; in f2fs_gc()
1770 sbi->cur_victim_sec = NULL_SEGNO; in f2fs_gc()
1775 if (!has_not_enough_free_secs(sbi, sec_freed, 0)) in f2fs_gc()
1781 if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE && in f2fs_gc()
1782 prefree_segments(sbi) && in f2fs_gc()
1783 !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { in f2fs_gc()
1784 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_gc()
1793 sbi->skipped_gc_rwsem) { in f2fs_gc()
1794 f2fs_drop_inmem_pages_all(sbi, true); in f2fs_gc()
1798 if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) in f2fs_gc()
1799 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_gc()
1801 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0; in f2fs_gc()
1802 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno; in f2fs_gc()
1804 trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed, in f2fs_gc()
1805 get_pages(sbi, F2FS_DIRTY_NODES), in f2fs_gc()
1806 get_pages(sbi, F2FS_DIRTY_DENTS), in f2fs_gc()
1807 get_pages(sbi, F2FS_DIRTY_IMETA), in f2fs_gc()
1808 free_sections(sbi), in f2fs_gc()
1809 free_segments(sbi), in f2fs_gc()
1810 reserved_segments(sbi), in f2fs_gc()
1811 prefree_segments(sbi)); in f2fs_gc()
1813 f2fs_up_write(&sbi->gc_lock); in f2fs_gc()
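f2fs_gc() above is the entry point: under gc_lock it repeatedly picks a victim and collects it (the gc_more retry), continuing in foreground mode until enough sections are free, then writes a checkpoint. A condensed model of that retry shape; every helper below is a mock:

/*
 * Condensed model of f2fs_gc()'s gc_more loop: foreground GC keeps
 * picking victims until enough sections are free, then checkpoints;
 * background GC does a single pass.
 */
#include <stdbool.h>
#include <stdio.h>

static int free_secs = 1;
static const int needed_secs = 3;

static bool get_victim(unsigned int *segno)
{
	static unsigned int next;

	*segno = next++;
	return true;			/* __get_victim(): always succeeds */
}

static int do_collect(unsigned int segno)
{
	(void)segno;
	free_secs++;			/* pretend the section came free */
	return 1;			/* segments freed */
}

static int run_gc(bool foreground)
{
	unsigned int segno;
	int total_freed = 0;

gc_more:
	if (!get_victim(&segno))
		goto stop;
	total_freed += do_collect(segno);

	if (foreground && free_secs < needed_secs)
		goto gc_more;		/* still not enough free space */
stop:
	if (foreground)
		printf("checkpoint after freeing %d segments\n", total_freed);
	return total_freed;
}

int main(void)
{
	run_gc(true);
	return 0;
}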
1836 static void init_atgc_management(struct f2fs_sb_info *sbi) in init_atgc_management() argument
1838 struct atgc_management *am = &sbi->am; in init_atgc_management()
1840 if (test_opt(sbi, ATGC) && in init_atgc_management()
1841 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD) in init_atgc_management()
1854 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) in f2fs_build_gc_manager() argument
1856 DIRTY_I(sbi)->v_ops = &default_v_ops; in f2fs_build_gc_manager()
1858 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; in f2fs_build_gc_manager()
1861 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi)) in f2fs_build_gc_manager()
1862 SIT_I(sbi)->last_victim[ALLOC_NEXT] = in f2fs_build_gc_manager()
1863 GET_SEGNO(sbi, FDEV(0).end_blk) + 1; in f2fs_build_gc_manager()
1865 init_atgc_management(sbi); in f2fs_build_gc_manager()
1868 static int free_segment_range(struct f2fs_sb_info *sbi, in free_segment_range() argument
1878 MAIN_SECS(sbi) -= secs; in free_segment_range()
1879 start = MAIN_SECS(sbi) * sbi->segs_per_sec; in free_segment_range()
1880 end = MAIN_SEGS(sbi) - 1; in free_segment_range()
1882 mutex_lock(&DIRTY_I(sbi)->seglist_lock); in free_segment_range()
1884 if (SIT_I(sbi)->last_victim[gc_mode] >= start) in free_segment_range()
1885 SIT_I(sbi)->last_victim[gc_mode] = 0; in free_segment_range()
1888 if (sbi->next_victim_seg[gc_type] >= start) in free_segment_range()
1889 sbi->next_victim_seg[gc_type] = NULL_SEGNO; in free_segment_range()
1890 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); in free_segment_range()
1894 f2fs_allocate_segment_for_resize(sbi, type, start, end); in free_segment_range()
1897 for (segno = start; segno <= end; segno += sbi->segs_per_sec) { in free_segment_range()
1903 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true); in free_segment_range()
1906 if (!gc_only && get_valid_blocks(sbi, segno, true)) { in free_segment_range()
1918 err = f2fs_write_checkpoint(sbi, &cpc); in free_segment_range()
1922 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); in free_segment_range()
1924 f2fs_err(sbi, "segno %u should be free but still inuse!", in free_segment_range()
1926 f2fs_bug_on(sbi, 1); in free_segment_range()
1929 MAIN_SECS(sbi) += secs; in free_segment_range()
1933 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) in update_sb_metadata() argument
1935 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); in update_sb_metadata()
1940 int segs = secs * sbi->segs_per_sec; in update_sb_metadata()
1942 f2fs_down_write(&sbi->sb_lock); in update_sb_metadata()
1953 (long long)segs * sbi->blocks_per_seg); in update_sb_metadata()
1954 if (f2fs_is_multi_device(sbi)) { in update_sb_metadata()
1955 int last_dev = sbi->s_ndevs - 1; in update_sb_metadata()
1963 f2fs_up_write(&sbi->sb_lock); in update_sb_metadata()
1966 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) in update_fs_metadata() argument
1968 int segs = secs * sbi->segs_per_sec; in update_fs_metadata()
1969 long long blks = (long long)segs * sbi->blocks_per_seg; in update_fs_metadata()
1971 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count); in update_fs_metadata()
1973 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; in update_fs_metadata()
1974 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs; in update_fs_metadata()
1975 MAIN_SECS(sbi) += secs; in update_fs_metadata()
1976 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; in update_fs_metadata()
1977 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; in update_fs_metadata()
1978 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks); in update_fs_metadata()
1980 if (f2fs_is_multi_device(sbi)) { in update_fs_metadata()
1981 int last_dev = sbi->s_ndevs - 1; in update_fs_metadata()
1989 (int)(blks >> sbi->log_blocks_per_blkz); in update_fs_metadata()
1994 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) in f2fs_resize_fs() argument
2002 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count); in f2fs_resize_fs()
2006 if (f2fs_is_multi_device(sbi)) { in f2fs_resize_fs()
2007 int last_dev = sbi->s_ndevs - 1; in f2fs_resize_fs()
2010 if (block_count + last_segs * sbi->blocks_per_seg <= in f2fs_resize_fs()
2016 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem); in f2fs_resize_fs()
2023 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { in f2fs_resize_fs()
2024 f2fs_err(sbi, "Should run fsck to repair first."); in f2fs_resize_fs()
2028 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_resize_fs()
2029 f2fs_err(sbi, "Checkpoint should be enabled."); in f2fs_resize_fs()
2034 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); in f2fs_resize_fs()
2037 if (!f2fs_down_write_trylock(&sbi->gc_lock)) in f2fs_resize_fs()
2041 f2fs_lock_op(sbi); in f2fs_resize_fs()
2043 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2044 if (shrunk_blocks + valid_user_blocks(sbi) + in f2fs_resize_fs()
2045 sbi->current_reserved_blocks + sbi->unusable_block_count + in f2fs_resize_fs()
2046 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) in f2fs_resize_fs()
2048 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2053 err = free_segment_range(sbi, secs, true); in f2fs_resize_fs()
2056 f2fs_unlock_op(sbi); in f2fs_resize_fs()
2057 f2fs_up_write(&sbi->gc_lock); in f2fs_resize_fs()
2061 set_sbi_flag(sbi, SBI_IS_RESIZEFS); in f2fs_resize_fs()
2063 freeze_super(sbi->sb); in f2fs_resize_fs()
2064 f2fs_down_write(&sbi->gc_lock); in f2fs_resize_fs()
2065 f2fs_down_write(&sbi->cp_global_sem); in f2fs_resize_fs()
2067 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2068 if (shrunk_blocks + valid_user_blocks(sbi) + in f2fs_resize_fs()
2069 sbi->current_reserved_blocks + sbi->unusable_block_count + in f2fs_resize_fs()
2070 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) in f2fs_resize_fs()
2073 sbi->user_block_count -= shrunk_blocks; in f2fs_resize_fs()
2074 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2078 err = free_segment_range(sbi, secs, false); in f2fs_resize_fs()
2082 update_sb_metadata(sbi, -secs); in f2fs_resize_fs()
2084 err = f2fs_commit_super(sbi, false); in f2fs_resize_fs()
2086 update_sb_metadata(sbi, secs); in f2fs_resize_fs()
2090 update_fs_metadata(sbi, -secs); in f2fs_resize_fs()
2091 clear_sbi_flag(sbi, SBI_IS_RESIZEFS); in f2fs_resize_fs()
2092 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_resize_fs()
2094 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_resize_fs()
2096 update_fs_metadata(sbi, secs); in f2fs_resize_fs()
2097 update_sb_metadata(sbi, secs); in f2fs_resize_fs()
2098 f2fs_commit_super(sbi, false); in f2fs_resize_fs()
2102 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_resize_fs()
2103 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); in f2fs_resize_fs()
2105 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2106 sbi->user_block_count += shrunk_blocks; in f2fs_resize_fs()
2107 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2110 f2fs_up_write(&sbi->cp_global_sem); in f2fs_resize_fs()
2111 f2fs_up_write(&sbi->gc_lock); in f2fs_resize_fs()
2112 thaw_super(sbi->sb); in f2fs_resize_fs()
2113 clear_sbi_flag(sbi, SBI_IS_RESIZEFS); in f2fs_resize_fs()
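f2fs_resize_fs() above shrinks the filesystem by secs sections: it checks (twice, under stat_lock) that the blocks being removed plus everything in use or reserved still fit, migrates data out of the doomed range with free_segment_range(), then rewrites the superblock and checkpoint, rolling back on failure. The space check itself, as a standalone model:

/*
 * Standalone model of the ENOSPC check in f2fs_resize_fs(): the blocks
 * being removed plus everything in use or reserved must still fit
 * within the current user block count.
 */
#include <errno.h>
#include <stdio.h>

struct fs_state {
	unsigned long long user_block_count;
	unsigned long long valid_user_blocks;
	unsigned long long current_reserved_blocks;
	unsigned long long unusable_block_count;
	unsigned long long root_reserved_blocks;
};

static int can_shrink(const struct fs_state *fs, unsigned long long shrunk)
{
	if (shrunk + fs->valid_user_blocks + fs->current_reserved_blocks +
	    fs->unusable_block_count + fs->root_reserved_blocks >
	    fs->user_block_count)
		return -ENOSPC;
	return 0;
}

int main(void)
{
	struct fs_state fs = { 1000000, 600000, 10000, 0, 5000 };

	printf("%d\n", can_shrink(&fs, 300000));	/* 0: shrink fits */
	printf("%d\n", can_shrink(&fs, 500000));	/* -ENOSPC */
	return 0;
}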