// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-map.c - pblk's lba-ppa mapping strategy
 *
 */

#include "pblk.h"

static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
			      struct ppa_addr *ppa_list,
			      unsigned long *lun_bitmap,
			      void *meta_list,
			      unsigned int valid_secs)
{
	struct pblk_line *line = pblk_line_get_data(pblk);
	struct pblk_emeta *emeta;
	struct pblk_w_ctx *w_ctx;
	__le64 *lba_list;
	u64 paddr;
	int nr_secs = pblk->min_write_pgs;
	int i;

	if (!line)
		return -ENOSPC;

	if (pblk_line_is_full(line)) {
		struct pblk_line *prev_line = line;

		/* If we cannot allocate a new line, make sure to store metadata
		 * on current line and then fail
		 */
		line = pblk_line_replace_data(pblk);
		pblk_line_close_meta(pblk, prev_line);

		if (!line) {
			pblk_pipeline_stop(pblk);
			return -ENOSPC;
		}
	}

	emeta = line->emeta;
	lba_list = emeta_to_lbas(pblk, emeta->buf);

	paddr = pblk_alloc_page(pblk, line, nr_secs);

	for (i = 0; i < nr_secs; i++, paddr++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		/* ppa to be sent to the device */
		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		/* Write context for target bio completion on write buffer. Note
		 * that the write buffer is protected by the sync backpointer,
		 * and a single writer thread has access to each specific entry
		 * at a time. Thus, it is safe to modify the context for the
		 * entry we are setting up for submission without taking any
		 * lock or memory barrier.
		 */
		if (i < valid_secs) {
			kref_get(&line->ref);
			atomic_inc(&line->sec_to_update);
			w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
			w_ctx->ppa = ppa_list[i];
			meta->lba = cpu_to_le64(w_ctx->lba);
			lba_list[paddr] = cpu_to_le64(w_ctx->lba);
			if (lba_list[paddr] != addr_empty)
				line->nr_valid_lbas++;
			else
				atomic64_inc(&pblk->pad_wa);
		} else {
			lba_list[paddr] = addr_empty;
			meta->lba = addr_empty;
			__pblk_map_invalidate(pblk, line, paddr);
		}
	}

	pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
	return 0;
}
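
#if 0
/*
 * Illustrative sketch only (not part of the driver): after
 * pblk_map_page_data() has filled @meta_list for one flash page, the
 * first @valid_secs entries carry LBAs taken from the write buffer and
 * the remaining (min_write_pgs - valid_secs) entries are padded with
 * ADDR_EMPTY. The hypothetical helper below just counts the padded
 * entries, using only symbols already used in this file
 * (pblk_get_meta(), struct pblk_sec_meta, ADDR_EMPTY).
 */
static unsigned int pblk_count_padded_secs(struct pblk *pblk,
					   void *meta_list,
					   unsigned int nr_secs)
{
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
	unsigned int i, padded = 0;

	for (i = 0; i < nr_secs; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);

		if (meta->lba == addr_empty)
			padded++;
	}

	return padded;
}
#endif
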
int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off)
{
	void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
	void *meta_buffer;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i;
	int ret;

	for (i = off; i < rqd->nr_ppas; i += min) {
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		meta_buffer = pblk_get_meta(pblk, meta_list, i);

		ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
					lun_bitmap, meta_buffer, map_secs);
		if (ret)
			return ret;
	}

	return 0;
}
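
#if 0
/*
 * Illustrative sketch only: how pblk_map_rq() above splits a request
 * into flash-page sized groups. With min_write_pgs = 4, nr_ppas = 8
 * and valid_secs = 6, two groups are mapped: the first with 4 valid
 * sectors, the second with valid_secs % min = 2 valid sectors plus
 * 2 padded sectors. The hypothetical helper below only reproduces
 * that group-size arithmetic; it is not driver code.
 */
static void pblk_show_map_groups(unsigned int nr_ppas,
				 unsigned int valid_secs,
				 unsigned int min)
{
	unsigned int i;

	for (i = 0; i < nr_ppas; i += min) {
		unsigned int map_secs = (i + min > valid_secs) ?
					(valid_secs % min) : min;

		pr_debug("group at sector %u: %u valid, %u padded\n",
			 i, map_secs, min - map_secs);
	}
}
#endif
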
/* only if erase_ppa is set, acquire erase semaphore */
int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
	void *meta_buffer;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	struct pblk_line *e_line, *d_line;
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i, erase_lun;
	int ret;

	for (i = 0; i < rqd->nr_ppas; i += min) {
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		meta_buffer = pblk_get_meta(pblk, meta_list, i);

		ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
					lun_bitmap, meta_buffer, map_secs);
		if (ret)
			return ret;

		erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);

		/* line can change after page map. We might also be writing the
		 * last line.
		 */
		e_line = pblk_line_get_erase(pblk);
		if (!e_line)
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);

		spin_lock(&e_line->lock);
		if (!test_bit(erase_lun, e_line->erase_bitmap)) {
			set_bit(erase_lun, e_line->erase_bitmap);
			atomic_dec(&e_line->left_eblks);

			*erase_ppa = ppa_list[i];
			erase_ppa->a.blk = e_line->id;
			erase_ppa->a.reserved = 0;

			spin_unlock(&e_line->lock);

			/* Avoid evaluating e_line->left_eblks */
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);
		}
		spin_unlock(&e_line->lock);
	}

	d_line = pblk_line_get_data(pblk);

	/* line can change after page map. We might also be writing the
	 * last line.
	 */
	e_line = pblk_line_get_erase(pblk);
	if (!e_line)
		return -ENOSPC;

	/* Erase blocks that are bad in this line but might not be in next */
	if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
			bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
		int bit = -1;

retry:
		bit = find_next_bit(d_line->blk_bitmap,
						lm->blk_per_line, bit + 1);
		if (bit >= lm->blk_per_line)
			return 0;

		spin_lock(&e_line->lock);
		if (test_bit(bit, e_line->erase_bitmap)) {
			spin_unlock(&e_line->lock);
			goto retry;
		}
		spin_unlock(&e_line->lock);

		set_bit(bit, e_line->erase_bitmap);
		atomic_dec(&e_line->left_eblks);
		*erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
		erase_ppa->a.blk = e_line->id;
	}

	return 0;
}
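
#if 0
/*
 * Illustrative sketch only: roughly how the writer thread is expected
 * to dispatch between pblk_map_rq() and pblk_map_erase_rq() above,
 * assuming @rqd already has its ppa and metadata lists set up. The
 * pblk_c_ctx fields (sentry, nr_valid, lun_bitmap) and lm.lun_bitmap_len
 * mirror what pblk-write.c does, but they are written here from memory
 * and should be read as assumptions, not as the driver's exact code.
 */
static int pblk_map_dispatch_sketch(struct pblk *pblk, struct nvm_rq *rqd,
				    struct ppa_addr *erase_ppa)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	unsigned long *lun_bitmap;

	lun_bitmap = kzalloc(pblk->lm.lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	/* Take the erase-aware mapper only while the erase line still has
	 * blocks left to erase; otherwise the plain mapper is enough.
	 */
	if (!e_line || !atomic_read(&e_line->left_eblks))
		return pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
				   c_ctx->nr_valid, 0);

	return pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
				 c_ctx->nr_valid, erase_ppa);
}
#endif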