xref: /OK3568_Linux_fs/kernel/drivers/mtd/nand/ecc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Generic Error-Correcting Code (ECC) engine
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2019 Macronix
6*4882a593Smuzhiyun  * Author:
7*4882a593Smuzhiyun  *     Miquèl RAYNAL <miquel.raynal@bootlin.com>
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * This file describes the abstraction of any NAND ECC engine. It has been
11*4882a593Smuzhiyun  * designed to fit most cases, including parallel NANDs and SPI-NANDs.
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * There are three main situations where instantiating this ECC engine makes
14*4882a593Smuzhiyun  * sense:
15*4882a593Smuzhiyun  *   - external: The ECC engine is outside the NAND pipeline, typically this
16*4882a593Smuzhiyun  *               is a software ECC engine, or an hardware engine that is
17*4882a593Smuzhiyun  *               outside the NAND controller pipeline.
18*4882a593Smuzhiyun  *   - pipelined: The ECC engine is inside the NAND pipeline, ie. on the
19*4882a593Smuzhiyun  *                controller's side. This is the case of most of the raw NAND
20*4882a593Smuzhiyun  *                controllers. In the pipeline case, the ECC bytes are
21*4882a593Smuzhiyun  *                generated/data corrected on the fly when a page is
22*4882a593Smuzhiyun  *                written/read.
23*4882a593Smuzhiyun  *   - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
24*4882a593Smuzhiyun  *            Some NAND chips can correct themselves the data.
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  * Besides the initial setup and final cleanups, the interfaces are rather
27*4882a593Smuzhiyun  * simple:
28*4882a593Smuzhiyun  *   - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
29*4882a593Smuzhiyun  *              the I/O request type. In case of software correction or external
30*4882a593Smuzhiyun  *              engine, this step may involve to derive the ECC bytes and place
31*4882a593Smuzhiyun  *              them in the OOB area before a write.
32*4882a593Smuzhiyun  *   - finish: Finish an I/O request. Correct the data in case of a read
33*4882a593Smuzhiyun  *             request and report the number of corrected bits/uncorrectable
34*4882a593Smuzhiyun  *             errors. Most likely empty for write operations, unless you have
35*4882a593Smuzhiyun  *             hardware specific stuff to do, like shutting down the engine to
36*4882a593Smuzhiyun  *             save power.
37*4882a593Smuzhiyun  *
38*4882a593Smuzhiyun  * The I/O request should be enclosed in a prepare()/finish() pair of calls
39*4882a593Smuzhiyun  * and will behave differently depending on the requested I/O type:
40*4882a593Smuzhiyun  *   - raw: Correction disabled
41*4882a593Smuzhiyun  *   - ecc: Correction enabled
42*4882a593Smuzhiyun  *
43*4882a593Smuzhiyun  * The request direction is impacting the logic as well:
44*4882a593Smuzhiyun  *   - read: Load data from the NAND chip
45*4882a593Smuzhiyun  *   - write: Store data in the NAND chip
46*4882a593Smuzhiyun  *
47*4882a593Smuzhiyun  * Mixing all this combinations together gives the following behavior.
48*4882a593Smuzhiyun  * Those are just examples, drivers are free to add custom steps in their
49*4882a593Smuzhiyun  * prepare/finish hook.
50*4882a593Smuzhiyun  *
51*4882a593Smuzhiyun  * [external ECC engine]
52*4882a593Smuzhiyun  *   - external + prepare + raw + read: do nothing
53*4882a593Smuzhiyun  *   - external + finish  + raw + read: do nothing
54*4882a593Smuzhiyun  *   - external + prepare + raw + write: do nothing
55*4882a593Smuzhiyun  *   - external + finish  + raw + write: do nothing
56*4882a593Smuzhiyun  *   - external + prepare + ecc + read: do nothing
57*4882a593Smuzhiyun  *   - external + finish  + ecc + read: calculate expected ECC bytes, extract
58*4882a593Smuzhiyun  *                                      ECC bytes from OOB buffer, correct
59*4882a593Smuzhiyun  *                                      and report any bitflip/error
60*4882a593Smuzhiyun  *   - external + prepare + ecc + write: calculate ECC bytes and store them at
61*4882a593Smuzhiyun  *                                       the right place in the OOB buffer based
62*4882a593Smuzhiyun  *                                       on the OOB layout
63*4882a593Smuzhiyun  *   - external + finish  + ecc + write: do nothing
64*4882a593Smuzhiyun  *
65*4882a593Smuzhiyun  * [pipelined ECC engine]
66*4882a593Smuzhiyun  *   - pipelined + prepare + raw + read: disable the controller's ECC engine if
67*4882a593Smuzhiyun  *                                       activated
68*4882a593Smuzhiyun  *   - pipelined + finish  + raw + read: do nothing
69*4882a593Smuzhiyun  *   - pipelined + prepare + raw + write: disable the controller's ECC engine if
70*4882a593Smuzhiyun  *                                        activated
71*4882a593Smuzhiyun  *   - pipelined + finish  + raw + write: do nothing
72*4882a593Smuzhiyun  *   - pipelined + prepare + ecc + read: enable the controller's ECC engine if
73*4882a593Smuzhiyun  *                                       deactivated
74*4882a593Smuzhiyun  *   - pipelined + finish  + ecc + read: check the status, report any
75*4882a593Smuzhiyun  *                                       error/bitflip
76*4882a593Smuzhiyun  *   - pipelined + prepare + ecc + write: enable the controller's ECC engine if
77*4882a593Smuzhiyun  *                                        deactivated
78*4882a593Smuzhiyun  *   - pipelined + finish  + ecc + write: do nothing
79*4882a593Smuzhiyun  *
80*4882a593Smuzhiyun  * [ondie ECC engine]
81*4882a593Smuzhiyun  *   - ondie + prepare + raw + read: send commands to disable the on-chip ECC
82*4882a593Smuzhiyun  *                                   engine if activated
83*4882a593Smuzhiyun  *   - ondie + finish  + raw + read: do nothing
84*4882a593Smuzhiyun  *   - ondie + prepare + raw + write: send commands to disable the on-chip ECC
85*4882a593Smuzhiyun  *                                    engine if activated
86*4882a593Smuzhiyun  *   - ondie + finish  + raw + write: do nothing
87*4882a593Smuzhiyun  *   - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
88*4882a593Smuzhiyun  *                                   engine if deactivated
89*4882a593Smuzhiyun  *   - ondie + finish  + ecc + read: send commands to check the status, report
90*4882a593Smuzhiyun  *                                   any error/bitflip
91*4882a593Smuzhiyun  *   - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
92*4882a593Smuzhiyun  *                                    engine if deactivated
93*4882a593Smuzhiyun  *   - ondie + finish  + ecc + write: do nothing
94*4882a593Smuzhiyun  */
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun #include <linux/module.h>
97*4882a593Smuzhiyun #include <linux/mtd/nand.h>
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun /**
100*4882a593Smuzhiyun  * nand_ecc_init_ctx - Init the ECC engine context
101*4882a593Smuzhiyun  * @nand: the NAND device
102*4882a593Smuzhiyun  *
103*4882a593Smuzhiyun  * On success, the caller is responsible of calling @nand_ecc_cleanup_ctx().
104*4882a593Smuzhiyun  */
nand_ecc_init_ctx(struct nand_device * nand)105*4882a593Smuzhiyun int nand_ecc_init_ctx(struct nand_device *nand)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun 	if (!nand->ecc.engine->ops->init_ctx)
108*4882a593Smuzhiyun 		return 0;
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	return nand->ecc.engine->ops->init_ctx(nand);
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun EXPORT_SYMBOL(nand_ecc_init_ctx);
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun /**
115*4882a593Smuzhiyun  * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
116*4882a593Smuzhiyun  * @nand: the NAND device
117*4882a593Smuzhiyun  */
nand_ecc_cleanup_ctx(struct nand_device * nand)118*4882a593Smuzhiyun void nand_ecc_cleanup_ctx(struct nand_device *nand)
119*4882a593Smuzhiyun {
120*4882a593Smuzhiyun 	if (nand->ecc.engine->ops->cleanup_ctx)
121*4882a593Smuzhiyun 		nand->ecc.engine->ops->cleanup_ctx(nand);
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun /**
126*4882a593Smuzhiyun  * nand_ecc_prepare_io_req - Prepare an I/O request
127*4882a593Smuzhiyun  * @nand: the NAND device
128*4882a593Smuzhiyun  * @req: the I/O request
129*4882a593Smuzhiyun  */
nand_ecc_prepare_io_req(struct nand_device * nand,struct nand_page_io_req * req)130*4882a593Smuzhiyun int nand_ecc_prepare_io_req(struct nand_device *nand,
131*4882a593Smuzhiyun 			    struct nand_page_io_req *req)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun 	if (!nand->ecc.engine->ops->prepare_io_req)
134*4882a593Smuzhiyun 		return 0;
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	return nand->ecc.engine->ops->prepare_io_req(nand, req);
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun EXPORT_SYMBOL(nand_ecc_prepare_io_req);
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun /**
141*4882a593Smuzhiyun  * nand_ecc_finish_io_req - Finish an I/O request
142*4882a593Smuzhiyun  * @nand: the NAND device
143*4882a593Smuzhiyun  * @req: the I/O request
144*4882a593Smuzhiyun  */
nand_ecc_finish_io_req(struct nand_device * nand,struct nand_page_io_req * req)145*4882a593Smuzhiyun int nand_ecc_finish_io_req(struct nand_device *nand,
146*4882a593Smuzhiyun 			   struct nand_page_io_req *req)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun 	if (!nand->ecc.engine->ops->finish_io_req)
149*4882a593Smuzhiyun 		return 0;
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	return nand->ecc.engine->ops->finish_io_req(nand, req);
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun EXPORT_SYMBOL(nand_ecc_finish_io_req);
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun /* Define default OOB placement schemes for large and small page devices */
nand_ooblayout_ecc_sp(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)156*4882a593Smuzhiyun static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
157*4882a593Smuzhiyun 				 struct mtd_oob_region *oobregion)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	struct nand_device *nand = mtd_to_nanddev(mtd);
160*4882a593Smuzhiyun 	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 	if (section > 1)
163*4882a593Smuzhiyun 		return -ERANGE;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	if (!section) {
166*4882a593Smuzhiyun 		oobregion->offset = 0;
167*4882a593Smuzhiyun 		if (mtd->oobsize == 16)
168*4882a593Smuzhiyun 			oobregion->length = 4;
169*4882a593Smuzhiyun 		else
170*4882a593Smuzhiyun 			oobregion->length = 3;
171*4882a593Smuzhiyun 	} else {
172*4882a593Smuzhiyun 		if (mtd->oobsize == 8)
173*4882a593Smuzhiyun 			return -ERANGE;
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 		oobregion->offset = 6;
176*4882a593Smuzhiyun 		oobregion->length = total_ecc_bytes - 4;
177*4882a593Smuzhiyun 	}
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	return 0;
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun 
nand_ooblayout_free_sp(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)182*4882a593Smuzhiyun static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
183*4882a593Smuzhiyun 				  struct mtd_oob_region *oobregion)
184*4882a593Smuzhiyun {
185*4882a593Smuzhiyun 	if (section > 1)
186*4882a593Smuzhiyun 		return -ERANGE;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	if (mtd->oobsize == 16) {
189*4882a593Smuzhiyun 		if (section)
190*4882a593Smuzhiyun 			return -ERANGE;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 		oobregion->length = 8;
193*4882a593Smuzhiyun 		oobregion->offset = 8;
194*4882a593Smuzhiyun 	} else {
195*4882a593Smuzhiyun 		oobregion->length = 2;
196*4882a593Smuzhiyun 		if (!section)
197*4882a593Smuzhiyun 			oobregion->offset = 3;
198*4882a593Smuzhiyun 		else
199*4882a593Smuzhiyun 			oobregion->offset = 6;
200*4882a593Smuzhiyun 	}
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	return 0;
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun 
/* Default OOB placement scheme for small page devices. */
static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
209*4882a593Smuzhiyun 
/**
 * nand_get_small_page_ooblayout - Expose the default small page OOB layout
 *
 * Returns the static OOB layout operations used for small page devices.
 */
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
{
	return &nand_ooblayout_sp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);
215*4882a593Smuzhiyun 
nand_ooblayout_ecc_lp(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)216*4882a593Smuzhiyun static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
217*4882a593Smuzhiyun 				 struct mtd_oob_region *oobregion)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun 	struct nand_device *nand = mtd_to_nanddev(mtd);
220*4882a593Smuzhiyun 	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 	if (section || !total_ecc_bytes)
223*4882a593Smuzhiyun 		return -ERANGE;
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 	oobregion->length = total_ecc_bytes;
226*4882a593Smuzhiyun 	oobregion->offset = mtd->oobsize - oobregion->length;
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun 	return 0;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun 
nand_ooblayout_free_lp(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)231*4882a593Smuzhiyun static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
232*4882a593Smuzhiyun 				  struct mtd_oob_region *oobregion)
233*4882a593Smuzhiyun {
234*4882a593Smuzhiyun 	struct nand_device *nand = mtd_to_nanddev(mtd);
235*4882a593Smuzhiyun 	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 	if (section)
238*4882a593Smuzhiyun 		return -ERANGE;
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 	oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
241*4882a593Smuzhiyun 	oobregion->offset = 2;
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 	return 0;
244*4882a593Smuzhiyun }
245*4882a593Smuzhiyun 
/* Default OOB placement scheme for large page devices. */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
250*4882a593Smuzhiyun 
/**
 * nand_get_large_page_ooblayout - Expose the default large page OOB layout
 *
 * Returns the static OOB layout operations used for large page devices.
 */
const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
{
	return &nand_ooblayout_lp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun /*
258*4882a593Smuzhiyun  * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
259*4882a593Smuzhiyun  * are placed at a fixed offset.
260*4882a593Smuzhiyun  */
nand_ooblayout_ecc_lp_hamming(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)261*4882a593Smuzhiyun static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
262*4882a593Smuzhiyun 					 struct mtd_oob_region *oobregion)
263*4882a593Smuzhiyun {
264*4882a593Smuzhiyun 	struct nand_device *nand = mtd_to_nanddev(mtd);
265*4882a593Smuzhiyun 	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	if (section)
268*4882a593Smuzhiyun 		return -ERANGE;
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 	switch (mtd->oobsize) {
271*4882a593Smuzhiyun 	case 64:
272*4882a593Smuzhiyun 		oobregion->offset = 40;
273*4882a593Smuzhiyun 		break;
274*4882a593Smuzhiyun 	case 128:
275*4882a593Smuzhiyun 		oobregion->offset = 80;
276*4882a593Smuzhiyun 		break;
277*4882a593Smuzhiyun 	default:
278*4882a593Smuzhiyun 		return -EINVAL;
279*4882a593Smuzhiyun 	}
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun 	oobregion->length = total_ecc_bytes;
282*4882a593Smuzhiyun 	if (oobregion->offset + oobregion->length > mtd->oobsize)
283*4882a593Smuzhiyun 		return -ERANGE;
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun 	return 0;
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun 
nand_ooblayout_free_lp_hamming(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)288*4882a593Smuzhiyun static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
289*4882a593Smuzhiyun 					  struct mtd_oob_region *oobregion)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun 	struct nand_device *nand = mtd_to_nanddev(mtd);
292*4882a593Smuzhiyun 	unsigned int total_ecc_bytes = nand->ecc.ctx.total;
293*4882a593Smuzhiyun 	int ecc_offset = 0;
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	if (section < 0 || section > 1)
296*4882a593Smuzhiyun 		return -ERANGE;
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	switch (mtd->oobsize) {
299*4882a593Smuzhiyun 	case 64:
300*4882a593Smuzhiyun 		ecc_offset = 40;
301*4882a593Smuzhiyun 		break;
302*4882a593Smuzhiyun 	case 128:
303*4882a593Smuzhiyun 		ecc_offset = 80;
304*4882a593Smuzhiyun 		break;
305*4882a593Smuzhiyun 	default:
306*4882a593Smuzhiyun 		return -EINVAL;
307*4882a593Smuzhiyun 	}
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	if (section == 0) {
310*4882a593Smuzhiyun 		oobregion->offset = 2;
311*4882a593Smuzhiyun 		oobregion->length = ecc_offset - 2;
312*4882a593Smuzhiyun 	} else {
313*4882a593Smuzhiyun 		oobregion->offset = ecc_offset + total_ecc_bytes;
314*4882a593Smuzhiyun 		oobregion->length = mtd->oobsize - oobregion->offset;
315*4882a593Smuzhiyun 	}
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	return 0;
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun 
/* Legacy large page OOB placement scheme for 1-bit Hamming ECC. */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};
324*4882a593Smuzhiyun 
/**
 * nand_get_large_page_hamming_ooblayout - Expose the legacy Hamming OOB layout
 *
 * Returns the static OOB layout operations for the old large page layout
 * used with 1-bit Hamming ECC.
 */
const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
{
	return &nand_ooblayout_lp_hamming_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun static enum nand_ecc_engine_type
of_get_nand_ecc_engine_type(struct device_node * np)332*4882a593Smuzhiyun of_get_nand_ecc_engine_type(struct device_node *np)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun 	struct device_node *eng_np;
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	if (of_property_read_bool(np, "nand-no-ecc-engine"))
337*4882a593Smuzhiyun 		return NAND_ECC_ENGINE_TYPE_NONE;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
340*4882a593Smuzhiyun 		return NAND_ECC_ENGINE_TYPE_SOFT;
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
343*4882a593Smuzhiyun 	of_node_put(eng_np);
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	if (eng_np) {
346*4882a593Smuzhiyun 		if (eng_np == np)
347*4882a593Smuzhiyun 			return NAND_ECC_ENGINE_TYPE_ON_DIE;
348*4882a593Smuzhiyun 		else
349*4882a593Smuzhiyun 			return NAND_ECC_ENGINE_TYPE_ON_HOST;
350*4882a593Smuzhiyun 	}
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 	return NAND_ECC_ENGINE_TYPE_INVALID;
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun 
/* String values accepted by the "nand-ecc-placement" DT property. */
static const char * const nand_ecc_placement[] = {
	[NAND_ECC_PLACEMENT_OOB] = "oob",
	[NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
};
359*4882a593Smuzhiyun 
of_get_nand_ecc_placement(struct device_node * np)360*4882a593Smuzhiyun static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
361*4882a593Smuzhiyun {
362*4882a593Smuzhiyun 	enum nand_ecc_placement placement;
363*4882a593Smuzhiyun 	const char *pm;
364*4882a593Smuzhiyun 	int err;
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	err = of_property_read_string(np, "nand-ecc-placement", &pm);
367*4882a593Smuzhiyun 	if (!err) {
368*4882a593Smuzhiyun 		for (placement = NAND_ECC_PLACEMENT_OOB;
369*4882a593Smuzhiyun 		     placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
370*4882a593Smuzhiyun 			if (!strcasecmp(pm, nand_ecc_placement[placement]))
371*4882a593Smuzhiyun 				return placement;
372*4882a593Smuzhiyun 		}
373*4882a593Smuzhiyun 	}
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun 	return NAND_ECC_PLACEMENT_UNKNOWN;
376*4882a593Smuzhiyun }
377*4882a593Smuzhiyun 
/* String values accepted by the "nand-ecc-algo" DT property. */
static const char * const nand_ecc_algos[] = {
	[NAND_ECC_ALGO_HAMMING] = "hamming",
	[NAND_ECC_ALGO_BCH] = "bch",
	[NAND_ECC_ALGO_RS] = "rs",
};
383*4882a593Smuzhiyun 
of_get_nand_ecc_algo(struct device_node * np)384*4882a593Smuzhiyun static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
385*4882a593Smuzhiyun {
386*4882a593Smuzhiyun 	enum nand_ecc_algo ecc_algo;
387*4882a593Smuzhiyun 	const char *pm;
388*4882a593Smuzhiyun 	int err;
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 	err = of_property_read_string(np, "nand-ecc-algo", &pm);
391*4882a593Smuzhiyun 	if (!err) {
392*4882a593Smuzhiyun 		for (ecc_algo = NAND_ECC_ALGO_HAMMING;
393*4882a593Smuzhiyun 		     ecc_algo < ARRAY_SIZE(nand_ecc_algos);
394*4882a593Smuzhiyun 		     ecc_algo++) {
395*4882a593Smuzhiyun 			if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
396*4882a593Smuzhiyun 				return ecc_algo;
397*4882a593Smuzhiyun 		}
398*4882a593Smuzhiyun 	}
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	return NAND_ECC_ALGO_UNKNOWN;
401*4882a593Smuzhiyun }
402*4882a593Smuzhiyun 
of_get_nand_ecc_step_size(struct device_node * np)403*4882a593Smuzhiyun static int of_get_nand_ecc_step_size(struct device_node *np)
404*4882a593Smuzhiyun {
405*4882a593Smuzhiyun 	int ret;
406*4882a593Smuzhiyun 	u32 val;
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
409*4882a593Smuzhiyun 	return ret ? ret : val;
410*4882a593Smuzhiyun }
411*4882a593Smuzhiyun 
of_get_nand_ecc_strength(struct device_node * np)412*4882a593Smuzhiyun static int of_get_nand_ecc_strength(struct device_node *np)
413*4882a593Smuzhiyun {
414*4882a593Smuzhiyun 	int ret;
415*4882a593Smuzhiyun 	u32 val;
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
418*4882a593Smuzhiyun 	return ret ? ret : val;
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun 
of_get_nand_ecc_user_config(struct nand_device * nand)421*4882a593Smuzhiyun void of_get_nand_ecc_user_config(struct nand_device *nand)
422*4882a593Smuzhiyun {
423*4882a593Smuzhiyun 	struct device_node *dn = nanddev_get_of_node(nand);
424*4882a593Smuzhiyun 	int strength, size;
425*4882a593Smuzhiyun 
426*4882a593Smuzhiyun 	nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
427*4882a593Smuzhiyun 	nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
428*4882a593Smuzhiyun 	nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	strength = of_get_nand_ecc_strength(dn);
431*4882a593Smuzhiyun 	if (strength >= 0)
432*4882a593Smuzhiyun 		nand->ecc.user_conf.strength = strength;
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun 	size = of_get_nand_ecc_step_size(dn);
435*4882a593Smuzhiyun 	if (size >= 0)
436*4882a593Smuzhiyun 		nand->ecc.user_conf.step_size = size;
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun 	if (of_property_read_bool(dn, "nand-ecc-maximize"))
439*4882a593Smuzhiyun 		nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
440*4882a593Smuzhiyun }
441*4882a593Smuzhiyun EXPORT_SYMBOL(of_get_nand_ecc_user_config);
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun /**
444*4882a593Smuzhiyun  * nand_ecc_is_strong_enough - Check if the chip configuration meets the
445*4882a593Smuzhiyun  *                             datasheet requirements.
446*4882a593Smuzhiyun  *
447*4882a593Smuzhiyun  * @nand: Device to check
448*4882a593Smuzhiyun  *
449*4882a593Smuzhiyun  * If our configuration corrects A bits per B bytes and the minimum
450*4882a593Smuzhiyun  * required correction level is X bits per Y bytes, then we must ensure
451*4882a593Smuzhiyun  * both of the following are true:
452*4882a593Smuzhiyun  *
453*4882a593Smuzhiyun  * (1) A / B >= X / Y
454*4882a593Smuzhiyun  * (2) A >= X
455*4882a593Smuzhiyun  *
456*4882a593Smuzhiyun  * Requirement (1) ensures we can correct for the required bitflip density.
457*4882a593Smuzhiyun  * Requirement (2) ensures we can correct even when all bitflips are clumped
458*4882a593Smuzhiyun  * in the same sector.
459*4882a593Smuzhiyun  */
nand_ecc_is_strong_enough(struct nand_device * nand)460*4882a593Smuzhiyun bool nand_ecc_is_strong_enough(struct nand_device *nand)
461*4882a593Smuzhiyun {
462*4882a593Smuzhiyun 	const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
463*4882a593Smuzhiyun 	const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
464*4882a593Smuzhiyun 	struct mtd_info *mtd = nanddev_to_mtd(nand);
465*4882a593Smuzhiyun 	int corr, ds_corr;
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	if (conf->step_size == 0 || reqs->step_size == 0)
468*4882a593Smuzhiyun 		/* Not enough information */
469*4882a593Smuzhiyun 		return true;
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	/*
472*4882a593Smuzhiyun 	 * We get the number of corrected bits per page to compare
473*4882a593Smuzhiyun 	 * the correction density.
474*4882a593Smuzhiyun 	 */
475*4882a593Smuzhiyun 	corr = (mtd->writesize * conf->strength) / conf->step_size;
476*4882a593Smuzhiyun 	ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 	return corr >= ds_corr && conf->strength >= reqs->strength;
479*4882a593Smuzhiyun }
480*4882a593Smuzhiyun EXPORT_SYMBOL(nand_ecc_is_strong_enough);
481*4882a593Smuzhiyun 
/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Generic ECC engine");
485