xref: /OK3568_Linux_fs/kernel/drivers/lightnvm/pblk-rl.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-rl.c - pblk's rate limiter for user I/O
 *
 */

#include "pblk.h"

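/* The rate limiter splits a fixed write-buffer budget (rb_budget) between
 * user I/O (rb_user_max) and garbage collection (rb_gc_max). The split is
 * recomputed as the number of free blocks changes: while plenty of blocks
 * are free the whole budget goes to user writes, and as free space shrinks
 * an increasing share is handed to GC so that it can reclaim lines.
 */
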
static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
{
	mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
}

int pblk_rl_is_limit(struct pblk_rl *rl)
{
	int rb_space;

	rb_space = atomic_read(&rl->rb_space);

	return (rb_space == 0);
}

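/* Admission check for user writes: NVM_IO_ERR if a non-negative rb_space
 * cap is set and the request no longer fits, NVM_IO_REQUEUE if the current
 * user budget (rb_user_max) is exhausted, NVM_IO_OK otherwise.
 *
 * A sketch of the expected caller sequence (the actual call sites live in
 * the write-buffer path and are an assumption here, not verified in this
 * file):
 *
 *	if (pblk_rl_user_may_insert(rl, nr_entries) != NVM_IO_OK)
 *		back off or requeue the request;
 *	pblk_rl_user_in(rl, nr_entries);	-- account the entries
 *	pblk_rl_inserted(rl, nr_entries);	-- consume rb_space, if capped
 */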
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
	int rb_space = atomic_read(&rl->rb_space);

	if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
		return NVM_IO_ERR;

	if (rb_user_cnt >= rl->rb_user_max)
		return NVM_IO_REQUEUE;

	return NVM_IO_OK;
}

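/* rb_space is initialised to -1, meaning "no cap". It is assumed to be set
 * to a non-negative count of remaining entries elsewhere (e.g. when the
 * device is running out of space), at which point every insertion consumes
 * part of that remaining allowance.
 */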
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
{
	int rb_space = atomic_read(&rl->rb_space);

	if (unlikely(rb_space >= 0))
		atomic_sub(nr_entries, &rl->rb_space);
}

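/* GC writes are only throttled while user I/O is active: if the user path
 * has gone idle (rb_user_active == 0), GC may take the whole write buffer
 * regardless of rb_gc_max.
 */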
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
	int rb_user_active;

	/* If there is no user I/O let GC take over space on the write buffer */
	rb_user_active = READ_ONCE(rl->rb_user_active);
	return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
}

void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_user_cnt);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 1);
	pblk_rl_kick_u_timer(rl);
}

void pblk_rl_werr_line_in(struct pblk_rl *rl)
{
	atomic_inc(&rl->werr_lines);
}

void pblk_rl_werr_line_out(struct pblk_rl *rl)
{
	atomic_dec(&rl->werr_lines);
}

void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_gc_cnt);
}

void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
{
	atomic_sub(nr_user, &rl->rb_user_cnt);
	atomic_sub(nr_gc, &rl->rb_gc_cnt);
}

unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_blocks);
}

unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_user_blocks);
}

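/* Recompute the user/GC split for a given number of free user blocks.
 *
 * At or above the high watermark the user path gets the full budget (minus
 * a small GC allowance if lines with write errors need recovery). Below it,
 * the user budget scales with the remaining free blocks in windows of
 * NVM_MAX_VLBA entries:
 *
 *	user_windows = free_blocks >> (high_pw - rb_windows_pw)
 *	rb_user_max  = user_windows * NVM_MAX_VLBA
 *
 * Illustrative numbers (assumed, not from a real device): with high_pw = 10
 * (high watermark around 1024 blocks) and rb_windows_pw = 3 (8 windows), one
 * user window is granted per 128 free blocks, so at 512 free blocks user I/O
 * gets half the windows and GC gets the rest of the budget. Once free_blocks
 * drops to rsv_blocks, user I/O is cut off entirely.
 */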
static void __pblk_rl_update_rates(struct pblk_rl *rl,
				   unsigned long free_blocks)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	int max = rl->rb_budget;
	int werr_gc_needed = atomic_read(&rl->werr_lines);

	if (free_blocks >= rl->high) {
		if (werr_gc_needed) {
			/* Allocate a small budget for recovering
			 * lines with write errors
			 */
			rl->rb_gc_max = 1 << rl->rb_windows_pw;
			rl->rb_user_max = max - rl->rb_gc_max;
			rl->rb_state = PBLK_RL_WERR;
		} else {
			rl->rb_user_max = max;
			rl->rb_gc_max = 0;
			rl->rb_state = PBLK_RL_OFF;
		}
	} else if (free_blocks < rl->high) {
		int shift = rl->high_pw - rl->rb_windows_pw;
		int user_windows = free_blocks >> shift;
		int user_max = user_windows << ilog2(NVM_MAX_VLBA);

		rl->rb_user_max = user_max;
		rl->rb_gc_max = max - user_max;

		if (free_blocks <= rl->rsv_blocks) {
			rl->rb_user_max = 0;
			rl->rb_gc_max = max;
		}

		/* In the worst case, we will need to GC lines in the low list
		 * (high valid sector count). If there are lines to GC on high
		 * or mid lists, these will be prioritized
		 */
		rl->rb_state = PBLK_RL_LOW;
	}

	if (rl->rb_state != PBLK_RL_OFF)
		pblk_gc_should_start(pblk);
	else
		pblk_gc_should_stop(pblk);
}

void pblk_rl_update_rates(struct pblk_rl *rl)
{
	__pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
}

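/* Free-line accounting: _inc is expected to run when a line's blocks return
 * to the free pool and _dec when a line is taken for writing; @used false in
 * _dec presumably marks lines consumed for something other than user data
 * (caller details are an assumption, they live in the line-management code).
 * Both feed the updated count straight back into the rate calculation above.
 */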
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_add(blk_in_line, &rl->free_blocks);
	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}

void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
			    bool used)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_sub(blk_in_line, &rl->free_blocks);

	if (used)
		free_blocks = atomic_sub_return(blk_in_line,
							&rl->free_user_blocks);
	else
		free_blocks = atomic_read(&rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}

int pblk_rl_high_thrs(struct pblk_rl *rl)
{
	return rl->high;
}

int pblk_rl_max_io(struct pblk_rl *rl)
{
	return rl->rb_max_io;
}

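/* User-activity timeout: the timer is re-armed for 5 seconds on every user
 * write (pblk_rl_kick_u_timer). If no further user I/O arrives before it
 * fires, rb_user_active is cleared so that GC is no longer throttled.
 */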
static void pblk_rl_u_timer(struct timer_list *t)
{
	struct pblk_rl *rl = from_timer(rl, t, u_timer);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 0);
}

void pblk_rl_free(struct pblk_rl *rl)
{
	del_timer(&rl->u_timer);
}

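/* Set up the limiter for a write buffer of @budget entries and a flush
 * @threshold. The high watermark is derived from the over-provisioned block
 * count, less the blocks expected to be consumed by line metadata and one
 * line's worth of slack; rb_max_io caps a single I/O so that the minimal
 * write unit plus the back-pressure threshold (or at least one entry) still
 * fit in the buffer.
 */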
void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int sec_meta, blk_meta;
	unsigned int rb_windows;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
	rl->high_pw = get_count_order(rl->high);

	rl->rsv_blocks = pblk_get_min_chks(pblk);

	/* This will always be a power-of-2 */
	rb_windows = budget / NVM_MAX_VLBA;
	rl->rb_windows_pw = get_count_order(rb_windows);

	/* To start with, the entire buffer is available to user I/O writers */
	rl->rb_budget = budget;
	rl->rb_user_max = budget;
	rl->rb_gc_max = 0;
	rl->rb_state = PBLK_RL_HIGH;

	/* Maximize I/O size and ensure that the back threshold is respected */
	if (threshold)
		rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
	else
		rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;

	atomic_set(&rl->rb_user_cnt, 0);
	atomic_set(&rl->rb_gc_cnt, 0);
	atomic_set(&rl->rb_space, -1);
	atomic_set(&rl->werr_lines, 0);

	timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);

	rl->rb_user_active = 0;
	rl->rb_gc_active = 0;
}