/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#define GC_THREAD_MIN_WB_PAGES		1	/*
						 * a threshold to determine
						 * whether IO subsystem is idle
						 * or not
						 */
#define DEF_GC_THREAD_URGENT_SLEEP_TIME	500	/* 500 ms */
#define DEF_GC_THREAD_MIN_SLEEP_TIME	30000	/* milliseconds */
#define DEF_GC_THREAD_MAX_SLEEP_TIME	60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME	300000	/* wait 5 min */
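
/*
 * Rough behaviour of the background GC thread (gc.c): it wakes every
 * *_SLEEP_TIME milliseconds, using the urgent interval when GC has been
 * requested explicitly, ramping between the min and max intervals otherwise,
 * and backing off to the no-GC interval when there is nothing worth
 * cleaning.  In mainline f2fs all four values are tunable at runtime via
 * sysfs.
 */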

/* choose candidates from sections which have an age of more than 7 days */
#define DEF_GC_THREAD_AGE_THRESHOLD		(60 * 60 * 24 * 7)
#define DEF_GC_THREAD_CANDIDATE_RATIO		20	/* select 20% oldest sections as candidates */
#define DEF_GC_THREAD_MAX_CANDIDATE_COUNT	10	/* select at most 10 sections as candidates */
#define DEF_GC_THREAD_AGE_WEIGHT		60	/* age weight */
#define DEFAULT_ACCURACY_CLASS			10000	/* fixed-point accuracy scale */
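
/*
 * Roughly how the age-threshold GC (ATGC) policy in gc.c uses the values
 * above: each candidate section is scored by a weighted sum of its age
 * (DEF_GC_THREAD_AGE_WEIGHT percent) and its share of reclaimable blocks
 * (the remaining percent), with DEFAULT_ACCURACY_CLASS acting as the
 * fixed-point scale for that arithmetic.
 */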

#define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */

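/*
 * Default for the per-filesystem gc_pin_file_threshold: roughly, once GC has
 * failed on a pinned file this many times, f2fs gives up honouring the pin
 * for that file (see the pinned-file handling in gc.c).
 */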
#define DEF_GC_FAILED_PINNED_FILES	2048

/* Search max. number of dirty segments to select a victim segment */
#define DEF_MAX_VICTIM_SEARCH	4096 /* covers 8GB: 4096 segments * 2MB */

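/*
 * Per-filesystem state of the background GC kthread.  The four sleep-time
 * fields start from the DEF_GC_THREAD_*_SLEEP_TIME defaults above and, in
 * mainline f2fs, can be changed at runtime through the per-device sysfs
 * knobs (gc_urgent_sleep_time, gc_min_sleep_time, gc_max_sleep_time,
 * gc_no_gc_sleep_time).
 */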
struct f2fs_gc_kthread {
	struct task_struct *f2fs_gc_task;
	wait_queue_head_t gc_wait_queue_head;

	/* for gc sleep time */
	unsigned int urgent_sleep_time;
	unsigned int min_sleep_time;
	unsigned int max_sleep_time;
	unsigned int no_gc_sleep_time;

	/* for changing gc mode */
	unsigned int gc_wake;

	/* for GC_MERGE mount option */
	wait_queue_head_t fggc_wq;		/*
						 * caller of f2fs_balance_fs()
						 * will wait on this wait queue.
						 */
};

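/*
 * Inodes gathered while migrating data blocks: ilist keeps them so they can
 * be released at the end of the GC pass, and iroot (indexed by inode number)
 * lets gc.c detect inodes that have already been added.
 */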
struct gc_inode_list {
	struct list_head ilist;
	struct radix_tree_root iroot;
};

struct victim_info {
	unsigned long long mtime;	/* mtime of section */
	unsigned int segno;		/* section No. */
};

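/*
 * The anonymous struct below mirrors the layout of struct victim_info, so a
 * victim_entry's mtime/segno can be accessed either directly or through vi.
 */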
struct victim_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned long long mtime;	/* mtime of section */
			unsigned int segno;		/* segment No. */
		};
		struct victim_info vi;	/* victim info */
	};
	struct list_head list;
};

/*
 * inline functions
 */

/*
 * On a zoned device, zone-capacity can be less than zone-size.  If
 * zone-capacity is not aligned to the f2fs segment size (2MB), the segment
 * that starts just before zone-capacity spans across it, and the blocks
 * beyond zone-capacity are not usable.
 * Such spanning segments can be in the free list, so calculate the sum of
 * usable blocks in the currently free segments, including both normal and
 * spanning segments.
 */
static inline block_t free_segs_blk_count_zoned(struct f2fs_sb_info *sbi)
{
	block_t free_seg_blks = 0;
	struct free_segmap_info *free_i = FREE_I(sbi);
	int j;

	spin_lock(&free_i->segmap_lock);
	for (j = 0; j < MAIN_SEGS(sbi); j++)
		if (!test_bit(j, free_i->free_segmap))
			free_seg_blks += f2fs_usable_blks_in_seg(sbi, j);
	spin_unlock(&free_i->segmap_lock);

	return free_seg_blks;
}

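/*
 * Total number of blocks in free segments.  On non-zoned devices every free
 * segment contributes a full segment's worth of blocks; on zoned devices the
 * walk above accounts for segments truncated by zone-capacity.
 */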
static inline block_t free_segs_blk_count(struct f2fs_sb_info *sbi)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return free_segs_blk_count_zoned(sbi);

	return free_segments(sbi) << sbi->log_blocks_per_seg;
}

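/*
 * Free blocks actually available to user data: the free-segment block count
 * minus the overprovisioned reserve, clamped at zero.
 */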
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t free_blks, ovp_blks;

	free_blks = free_segs_blk_count(sbi);
	ovp_blks = overprovision_segments(sbi) << sbi->log_blocks_per_seg;

	if (free_blks < ovp_blks)
		return 0;

	return free_blks - ovp_blks;
}

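/*
 * Thresholds consumed by has_enough_invalid_blocks() below:
 * LIMIT_INVALID_BLOCK percent of the total user block count, and
 * LIMIT_FREE_BLOCK percent of the blocks that are currently reclaimable
 * (invalid + free).
 */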
static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
{
	return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
}

static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t reclaimable_user_blocks = sbi->user_block_count -
						written_block_count(sbi);

	return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}

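/*
 * Lengthen the GC polling interval by one min_sleep_time step, saturating at
 * max_sleep_time.  A thread already parked at no_gc_sleep_time is left alone.
 */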
static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th,
							unsigned int *wait)
{
	unsigned int min_time = gc_th->min_sleep_time;
	unsigned int max_time = gc_th->max_sleep_time;

	if (*wait == gc_th->no_gc_sleep_time)
		return;

	if ((long long)*wait + (long long)min_time > (long long)max_time)
		*wait = max_time;
	else
		*wait += min_time;
}

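/*
 * Shorten the GC polling interval by one min_sleep_time step, never going
 * below min_sleep_time.  A thread waking from no_gc_sleep_time restarts from
 * max_sleep_time first.
 */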
static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
							unsigned int *wait)
{
	unsigned int min_time = gc_th->min_sleep_time;

	if (*wait == gc_th->no_gc_sleep_time)
		*wait = gc_th->max_sleep_time;

	if ((long long)*wait - (long long)min_time < (long long)min_time)
		*wait = min_time;
	else
		*wait -= min_time;
}

static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
	block_t invalid_user_blocks = sbi->user_block_count -
					written_block_count(sbi);
	/*
	 * Background GC is triggered under the following conditions:
	 * 1. There are enough invalid blocks.
	 * 2. There is not enough free space.
	 */
	if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
			free_user_blocks(sbi) < limit_free_user_blocks(sbi))
		return true;
	return false;
}