/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright 2017 - Free Electrons
 *
 *  Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H

#include <linux/mtd/mtd.h>

struct nand_device;

/**
 * struct nand_memory_organization - Memory organization structure
 * @bits_per_cell: number of bits per NAND cell
 * @pagesize: page size
 * @oobsize: OOB area size
 * @pages_per_eraseblock: number of pages per eraseblock
 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
 * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
 * @planes_per_lun: number of planes per LUN
 * @luns_per_target: number of LUNs per target (target is a synonym for die)
 * @ntargets: total number of targets exposed by the NAND device
 */
struct nand_memory_organization {
	unsigned int bits_per_cell;
	unsigned int pagesize;
	unsigned int oobsize;
	unsigned int pages_per_eraseblock;
	unsigned int eraseblocks_per_lun;
	unsigned int max_bad_eraseblocks_per_lun;
	unsigned int planes_per_lun;
	unsigned int luns_per_target;
	unsigned int ntargets;
};

#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt)	\
	{							\
		.bits_per_cell = (bpc),				\
		.pagesize = (ps),				\
		.oobsize = (os),				\
		.pages_per_eraseblock = (ppe),			\
		.eraseblocks_per_lun = (epl),			\
		.max_bad_eraseblocks_per_lun = (mbb),		\
		.planes_per_lun = (ppl),			\
		.luns_per_target = (lpt),			\
		.ntargets = (nt),				\
	}

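/*
 * Example (illustrative only, the geometry is hypothetical): describing an
 * SLC die with 4096-byte pages, 256 OOB bytes, 64 pages per eraseblock,
 * 2048 eraseblocks per LUN, at most 40 bad eraseblocks per LUN, 2 planes,
 * 1 LUN per target and a single target:
 *
 *	static const struct nand_memory_organization example_memorg =
 *		NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1);
 */
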
/**
 * struct nand_row_converter - Information needed to convert an absolute offset
 *			       into a row address
 * @lun_addr_shift: position of the LUN identifier in the row address
 * @eraseblock_addr_shift: position of the eraseblock identifier in the row
 *			   address
 */
struct nand_row_converter {
	unsigned int lun_addr_shift;
	unsigned int eraseblock_addr_shift;
};

/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the LUN
 *
 * This information is usually used by specific sub-layers to select the
 * appropriate target/die and generate a row address to pass to the device.
 */
struct nand_pos {
	unsigned int target;
	unsigned int lun;
	unsigned int plane;
	unsigned int eraseblock;
	unsigned int page;
};

/**
 * enum nand_page_io_req_type - Direction of an I/O request
 * @NAND_PAGE_READ: from the chip, to the controller
 * @NAND_PAGE_WRITE: from the controller, to the chip
 */
enum nand_page_io_req_type {
	NAND_PAGE_READ = 0,
	NAND_PAGE_WRITE,
};

/**
 * struct nand_page_io_req - NAND I/O request object
 * @type: the type of page I/O: read or write
 * @pos: the position this I/O request is targeting
 * @dataoffs: the offset within the page
 * @datalen: number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from
 * @mode: one of the %MTD_OPS_XXX modes
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all useful information is already formatted in a useful way and
 * specific NAND layers can focus on translating this information into
 * specific commands/operations.
 */
struct nand_page_io_req {
	enum nand_page_io_req_type type;
	struct nand_pos pos;
	unsigned int dataoffs;
	unsigned int datalen;
	union {
		const void *out;
		void *in;
	} databuf;
	unsigned int ooboffs;
	unsigned int ooblen;
	union {
		const void *out;
		void *in;
	} oobbuf;
	int mode;
};

const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);

/**
 * enum nand_ecc_engine_type - NAND ECC engine type
 * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
 * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
 * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
 */
enum nand_ecc_engine_type {
	NAND_ECC_ENGINE_TYPE_INVALID,
	NAND_ECC_ENGINE_TYPE_NONE,
	NAND_ECC_ENGINE_TYPE_SOFT,
	NAND_ECC_ENGINE_TYPE_ON_HOST,
	NAND_ECC_ENGINE_TYPE_ON_DIE,
};

/**
 * enum nand_ecc_placement - NAND ECC bytes placement
 * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
 * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
 * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
 *                                  interleaved with regular data in the main
 *                                  area
 */
enum nand_ecc_placement {
	NAND_ECC_PLACEMENT_UNKNOWN,
	NAND_ECC_PLACEMENT_OOB,
	NAND_ECC_PLACEMENT_INTERLEAVED,
};

/**
 * enum nand_ecc_algo - NAND ECC algorithm
 * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
 * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
 * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
 * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
 */
enum nand_ecc_algo {
	NAND_ECC_ALGO_UNKNOWN,
	NAND_ECC_ALGO_HAMMING,
	NAND_ECC_ALGO_BCH,
	NAND_ECC_ALGO_RS,
};

/**
 * struct nand_ecc_props - NAND ECC properties
 * @engine_type: ECC engine type
 * @placement: OOB placement (if relevant)
 * @algo: ECC algorithm (if relevant)
 * @strength: ECC strength
 * @step_size: Number of bytes per step
 * @flags: Misc properties
 */
struct nand_ecc_props {
	enum nand_ecc_engine_type engine_type;
	enum nand_ecc_placement placement;
	enum nand_ecc_algo algo;
	unsigned int strength;
	unsigned int step_size;
	unsigned int flags;
};

#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }

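/*
 * Example (illustrative only): a device table entry for a chip requiring
 * 8-bit correction per 512-byte codeword would typically advertise its
 * requirements as:
 *
 *	NAND_ECCREQ(8, 512)
 */
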
/* NAND ECC misc flags */
#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)

/* nand_bbt option */
#define NANDDEV_BBT_SCANNED		BIT(0)

/* The maximum number of blocks to scan for a BBT */
#define NANDDEV_BBT_SCAN_MAXBLOCKS	4

/**
 * struct nand_bbt - bad block table object
 * @cache: in memory BBT cache
 * @option: BBT option flags (%NANDDEV_BBT_SCANNED)
 * @version: current memory BBT cache version
 */
struct nand_bbt {
	unsigned long *cache;
#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	unsigned int option;
	unsigned int version;
#endif
};

/**
 * struct nand_ops - NAND operations
 * @erase: erase a specific block. No need to check if the block is bad before
 *	   erasing, this has been taken care of by the generic NAND layer
 * @markbad: mark a specific block bad. No need to check if the block is
 *	     already marked bad, this has been taken care of by the generic
 *	     NAND layer. This method should just write the BBM (Bad Block
 *	     Marker) so that future calls to struct nand_ops->isbad() return
 *	     true
 * @isbad: check whether a block is bad or not. This method should just read
 *	   the BBM and return whether the block is bad or not based on what it
 *	   reads
 *
 * These are all low level operations that should be implemented by specialized
 * NAND layers (SPI NAND, raw NAND, ...).
 */
struct nand_ops {
	int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
	int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
	bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};

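/*
 * Sketch (hypothetical, not a kernel API): the shape of a nand_ops
 * implementation a specialized layer might provide. my_send_erase_cmd(),
 * my_markbad() and my_isbad() are placeholders for bus-specific command
 * sequences.
 *
 *	static int my_erase(struct nand_device *nand,
 *			    const struct nand_pos *pos)
 *	{
 *		unsigned int row = nanddev_pos_to_row(nand, pos);
 *
 *		return my_send_erase_cmd(nand, row);
 *	}
 *
 *	static const struct nand_ops my_nand_ops = {
 *		.erase = my_erase,
 *		.markbad = my_markbad,
 *		.isbad = my_isbad,
 *	};
 */
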
/**
 * struct nand_ecc_context - Context for the ECC engine
 * @conf: basic ECC engine parameters
 * @total: total number of bytes used for storing ECC codes, this is used by
 *         generic OOB layouts
 * @priv: ECC engine driver private data
 */
struct nand_ecc_context {
	struct nand_ecc_props conf;
	unsigned int total;
	void *priv;
};

/**
 * struct nand_ecc_engine_ops - ECC engine operations
 * @init_ctx: given a desired user configuration for the pointed NAND device,
 *            requests the ECC engine driver to setup a configuration with
 *            values it supports.
 * @cleanup_ctx: clean the context initialized by @init_ctx.
 * @prepare_io_req: is called before reading/writing a page to prepare the I/O
 *                  request to be performed with ECC correction.
 * @finish_io_req: is called after reading/writing a page to terminate the I/O
 *                 request and ensure proper ECC correction.
 */
struct nand_ecc_engine_ops {
	int (*init_ctx)(struct nand_device *nand);
	void (*cleanup_ctx)(struct nand_device *nand);
	int (*prepare_io_req)(struct nand_device *nand,
			      struct nand_page_io_req *req);
	int (*finish_io_req)(struct nand_device *nand,
			     struct nand_page_io_req *req);
};

/**
 * struct nand_ecc_engine - ECC engine abstraction for NAND devices
 * @ops: ECC engine operations
 */
struct nand_ecc_engine {
	struct nand_ecc_engine_ops *ops;
};

void of_get_nand_ecc_user_config(struct nand_device *nand);
int nand_ecc_init_ctx(struct nand_device *nand);
void nand_ecc_cleanup_ctx(struct nand_device *nand);
int nand_ecc_prepare_io_req(struct nand_device *nand,
			    struct nand_page_io_req *req);
int nand_ecc_finish_io_req(struct nand_device *nand,
			   struct nand_page_io_req *req);
bool nand_ecc_is_strong_enough(struct nand_device *nand);

/**
 * struct nand_ecc - Information relative to the ECC
 * @defaults: Default values, depend on the underlying subsystem
 * @requirements: ECC requirements from the NAND chip perspective
 * @user_conf: User desires in terms of ECC parameters
 * @ctx: ECC context for the ECC engine, derived from the device @requirements,
 *       the @user_conf and the @defaults
 * @ondie_engine: On-die ECC engine reference, if any
 * @engine: ECC engine actually bound
 */
struct nand_ecc {
	struct nand_ecc_props defaults;
	struct nand_ecc_props requirements;
	struct nand_ecc_props user_conf;
	struct nand_ecc_context ctx;
	struct nand_ecc_engine *ondie_engine;
	struct nand_ecc_engine *engine;
};

/**
 * struct nand_device - NAND device
 * @mtd: MTD instance attached to the NAND device
 * @memorg: memory layout
 * @ecc: NAND ECC object attached to the NAND device
 * @rowconv: position to row address converter
 * @bbt: bad block table info
 * @ops: NAND operations attached to the NAND device
 *
 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
 * should declare their own NAND object embedding a nand_device struct (that's
 * how inheritance is done).
 * struct nand_device->memorg and struct nand_device->ecc.requirements should
 * be filled at device detection time to reflect the NAND device
 * capabilities/requirements. Once this is done nanddev_init() can be called.
 * It will take care of converting NAND information into MTD ones, which means
 * the specialized NAND layers should never manually tweak
 * struct nand_device->mtd except for the ->_read/write() hooks.
 */
struct nand_device {
	struct mtd_info mtd;
	struct nand_memory_organization memorg;
	struct nand_ecc ecc;
	struct nand_row_converter rowconv;
	struct nand_bbt bbt;
	const struct nand_ops *ops;
};

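/*
 * Sketch (hypothetical names): how a specialized layer typically embeds
 * struct nand_device and converts back, following the container_of()
 * inheritance pattern described above.
 *
 *	struct my_nand_chip {
 *		struct nand_device base;
 *		void *controller_priv;
 *	};
 *
 *	static inline struct my_nand_chip *to_my_chip(struct nand_device *nand)
 *	{
 *		return container_of(nand, struct my_nand_chip, base);
 *	}
 */
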
/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current I/O request
 * @oobbytes_per_page: maximum number of OOB bytes per page
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Can be used by specialized NAND layers to iterate over all pages covered
 * by an MTD I/O request, which should greatly simplify the boilerplate code
 * needed to read/write data from/to a NAND device.
 */
struct nand_io_iter {
	struct nand_page_io_req req;
	unsigned int oobbytes_per_page;
	unsigned int dataleft;
	unsigned int oobleft;
};

/**
 * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
 * @mtd: MTD instance
 *
 * Return: the NAND device embedding @mtd.
 */
static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
{
	return container_of(mtd, struct nand_device, mtd);
}

/**
 * nanddev_to_mtd() - Get the MTD device attached to a NAND device
 * @nand: NAND device
 *
 * Return: the MTD device embedded in @nand.
 */
static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
{
	return &nand->mtd;
}

/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell.
 */
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
	return nand->memorg.bits_per_cell;
}

/**
 * nanddev_page_size() - Get NAND page size
 * @nand: NAND device
 *
 * Return: the page size.
 */
static inline size_t nanddev_page_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize;
}

/**
 * nanddev_per_page_oobsize() - Get NAND OOB size
 * @nand: NAND device
 *
 * Return: the OOB size.
 */
static inline unsigned int
nanddev_per_page_oobsize(const struct nand_device *nand)
{
	return nand->memorg.oobsize;
}

/**
 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
 * @nand: NAND device
 *
 * Return: the number of pages per eraseblock.
 */
static inline unsigned int
nanddev_pages_per_eraseblock(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_pages_per_target() - Get the number of pages per target
 * @nand: NAND device
 *
 * Return: the number of pages per target.
 */
static inline unsigned int
nanddev_pages_per_target(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.luns_per_target;
}

/**
 * nanddev_eraseblock_size() - Get NAND eraseblock size
 * @nand: NAND device
 *
 * Return: the eraseblock size.
 */
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per LUN.
 */
static inline unsigned int
nanddev_eraseblocks_per_lun(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun;
}

/**
 * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per target.
 */
static inline unsigned int
nanddev_eraseblocks_per_target(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
}

/**
 * nanddev_target_size() - Get the total size provided by a single target/die
 * @nand: NAND device
 *
 * Return: the total size exposed by a single target/die in bytes.
 */
static inline u64 nanddev_target_size(const struct nand_device *nand)
{
	return (u64)nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.pages_per_eraseblock *
	       nand->memorg.pagesize;
}

/**
 * nanddev_ntargets() - Get the total number of targets
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
	return nand->memorg.ntargets;
}

501*4882a593Smuzhiyun  * nanddev_neraseblocks() - Get the total number of eraseblocks
502*4882a593Smuzhiyun  * @nand: NAND device
503*4882a593Smuzhiyun  *
504*4882a593Smuzhiyun  * Return: the total number of eraseblocks exposed by @nand.
505*4882a593Smuzhiyun  */
nanddev_neraseblocks(const struct nand_device * nand)506*4882a593Smuzhiyun static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
507*4882a593Smuzhiyun {
508*4882a593Smuzhiyun 	return nand->memorg.ntargets * nand->memorg.luns_per_target *
509*4882a593Smuzhiyun 	       nand->memorg.eraseblocks_per_lun;
510*4882a593Smuzhiyun }
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun /**
513*4882a593Smuzhiyun  * nanddev_size() - Get NAND size
514*4882a593Smuzhiyun  * @nand: NAND device
515*4882a593Smuzhiyun  *
516*4882a593Smuzhiyun  * Return: the total size (in bytes) exposed by @nand.
517*4882a593Smuzhiyun  */
nanddev_size(const struct nand_device * nand)518*4882a593Smuzhiyun static inline u64 nanddev_size(const struct nand_device *nand)
519*4882a593Smuzhiyun {
520*4882a593Smuzhiyun 	return nanddev_target_size(nand) * nanddev_ntargets(nand);
521*4882a593Smuzhiyun }
522*4882a593Smuzhiyun 
523*4882a593Smuzhiyun /**
524*4882a593Smuzhiyun  * nanddev_get_memorg() - Extract memory organization info from a NAND device
525*4882a593Smuzhiyun  * @nand: NAND device
526*4882a593Smuzhiyun  *
527*4882a593Smuzhiyun  * This can be used by the upper layer to fill the memorg info before calling
528*4882a593Smuzhiyun  * nanddev_init().
529*4882a593Smuzhiyun  *
530*4882a593Smuzhiyun  * Return: the memorg object embedded in the NAND device.
531*4882a593Smuzhiyun  */
532*4882a593Smuzhiyun static inline struct nand_memory_organization *
nanddev_get_memorg(struct nand_device * nand)533*4882a593Smuzhiyun nanddev_get_memorg(struct nand_device *nand)
534*4882a593Smuzhiyun {
535*4882a593Smuzhiyun 	return &nand->memorg;
536*4882a593Smuzhiyun }
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun /**
539*4882a593Smuzhiyun  * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
540*4882a593Smuzhiyun  * @nand: NAND device
541*4882a593Smuzhiyun  */
542*4882a593Smuzhiyun static inline const struct nand_ecc_props *
nanddev_get_ecc_conf(struct nand_device * nand)543*4882a593Smuzhiyun nanddev_get_ecc_conf(struct nand_device *nand)
544*4882a593Smuzhiyun {
545*4882a593Smuzhiyun 	return &nand->ecc.ctx.conf;
546*4882a593Smuzhiyun }
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun /**
549*4882a593Smuzhiyun  * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
550*4882a593Smuzhiyun  *                                  device
551*4882a593Smuzhiyun  * @nand: NAND device
552*4882a593Smuzhiyun  */
553*4882a593Smuzhiyun static inline const struct nand_ecc_props *
nanddev_get_ecc_requirements(struct nand_device * nand)554*4882a593Smuzhiyun nanddev_get_ecc_requirements(struct nand_device *nand)
555*4882a593Smuzhiyun {
556*4882a593Smuzhiyun 	return &nand->ecc.requirements;
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun /**
560*4882a593Smuzhiyun  * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
561*4882a593Smuzhiyun  *                                  device
562*4882a593Smuzhiyun  * @nand: NAND device
563*4882a593Smuzhiyun  * @reqs: Requirements
564*4882a593Smuzhiyun  */
565*4882a593Smuzhiyun static inline void
nanddev_set_ecc_requirements(struct nand_device * nand,const struct nand_ecc_props * reqs)566*4882a593Smuzhiyun nanddev_set_ecc_requirements(struct nand_device *nand,
567*4882a593Smuzhiyun 			     const struct nand_ecc_props *reqs)
568*4882a593Smuzhiyun {
569*4882a593Smuzhiyun 	nand->ecc.requirements = *reqs;
570*4882a593Smuzhiyun }
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
573*4882a593Smuzhiyun 		 struct module *owner);
574*4882a593Smuzhiyun void nanddev_cleanup(struct nand_device *nand);
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun /**
577*4882a593Smuzhiyun  * nanddev_register() - Register a NAND device
578*4882a593Smuzhiyun  * @nand: NAND device
579*4882a593Smuzhiyun  *
580*4882a593Smuzhiyun  * Register a NAND device.
581*4882a593Smuzhiyun  * This function is just a wrapper around mtd_device_register()
582*4882a593Smuzhiyun  * registering the MTD device embedded in @nand.
583*4882a593Smuzhiyun  *
584*4882a593Smuzhiyun  * Return: 0 in case of success, a negative error code otherwise.
585*4882a593Smuzhiyun  */
nanddev_register(struct nand_device * nand)586*4882a593Smuzhiyun static inline int nanddev_register(struct nand_device *nand)
587*4882a593Smuzhiyun {
588*4882a593Smuzhiyun 	return mtd_device_register(&nand->mtd, NULL, 0);
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun /**
592*4882a593Smuzhiyun  * nanddev_unregister() - Unregister a NAND device
593*4882a593Smuzhiyun  * @nand: NAND device
594*4882a593Smuzhiyun  *
595*4882a593Smuzhiyun  * Unregister a NAND device.
596*4882a593Smuzhiyun  * This function is just a wrapper around mtd_device_unregister()
597*4882a593Smuzhiyun  * unregistering the MTD device embedded in @nand.
598*4882a593Smuzhiyun  *
599*4882a593Smuzhiyun  * Return: 0 in case of success, a negative error code otherwise.
600*4882a593Smuzhiyun  */
nanddev_unregister(struct nand_device * nand)601*4882a593Smuzhiyun static inline int nanddev_unregister(struct nand_device *nand)
602*4882a593Smuzhiyun {
603*4882a593Smuzhiyun 	return mtd_device_unregister(&nand->mtd);
604*4882a593Smuzhiyun }
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun /**
607*4882a593Smuzhiyun  * nanddev_set_of_node() - Attach a DT node to a NAND device
608*4882a593Smuzhiyun  * @nand: NAND device
609*4882a593Smuzhiyun  * @np: DT node
610*4882a593Smuzhiyun  *
611*4882a593Smuzhiyun  * Attach a DT node to a NAND device.
612*4882a593Smuzhiyun  */
nanddev_set_of_node(struct nand_device * nand,struct device_node * np)613*4882a593Smuzhiyun static inline void nanddev_set_of_node(struct nand_device *nand,
614*4882a593Smuzhiyun 				       struct device_node *np)
615*4882a593Smuzhiyun {
616*4882a593Smuzhiyun 	mtd_set_of_node(&nand->mtd, np);
617*4882a593Smuzhiyun }
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun /**
620*4882a593Smuzhiyun  * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
621*4882a593Smuzhiyun  * @nand: NAND device
622*4882a593Smuzhiyun  *
623*4882a593Smuzhiyun  * Return: the DT node attached to @nand.
624*4882a593Smuzhiyun  */
nanddev_get_of_node(struct nand_device * nand)625*4882a593Smuzhiyun static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
626*4882a593Smuzhiyun {
627*4882a593Smuzhiyun 	return mtd_get_of_node(&nand->mtd);
628*4882a593Smuzhiyun }
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun /**
631*4882a593Smuzhiyun  * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
632*4882a593Smuzhiyun  * @nand: NAND device
633*4882a593Smuzhiyun  * @offs: absolute NAND offset (usually passed by the MTD layer)
634*4882a593Smuzhiyun  * @pos: a NAND position object to fill in
635*4882a593Smuzhiyun  *
636*4882a593Smuzhiyun  * Converts @offs into a nand_pos representation.
637*4882a593Smuzhiyun  *
638*4882a593Smuzhiyun  * Return: the offset within the NAND page pointed by @pos.
639*4882a593Smuzhiyun  */
nanddev_offs_to_pos(struct nand_device * nand,loff_t offs,struct nand_pos * pos)640*4882a593Smuzhiyun static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
641*4882a593Smuzhiyun 					       loff_t offs,
642*4882a593Smuzhiyun 					       struct nand_pos *pos)
643*4882a593Smuzhiyun {
644*4882a593Smuzhiyun 	unsigned int pageoffs;
645*4882a593Smuzhiyun 	u64 tmp = offs;
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 	pageoffs = do_div(tmp, nand->memorg.pagesize);
648*4882a593Smuzhiyun 	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
649*4882a593Smuzhiyun 	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
650*4882a593Smuzhiyun 	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
651*4882a593Smuzhiyun 	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
652*4882a593Smuzhiyun 	pos->target = tmp;
653*4882a593Smuzhiyun 
654*4882a593Smuzhiyun 	return pageoffs;
655*4882a593Smuzhiyun }
656*4882a593Smuzhiyun 
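/*
 * Worked example (hypothetical geometry): with a 4096-byte page, 64 pages
 * per eraseblock, 1024 eraseblocks per LUN, 2 planes per LUN and 1 LUN per
 * target, an offset of 0x804200 (8405504) decomposes as (each do_div()
 * returns the remainder and leaves the quotient in tmp):
 *
 *	pageoffs        = 8405504 % 4096 = 512	(tmp becomes 2052 pages)
 *	pos->page       = 2052 % 64      = 4	(tmp becomes 32 eraseblocks)
 *	pos->eraseblock = 32 % 1024      = 32	(tmp becomes 0 LUNs)
 *	pos->plane      = 32 % 2         = 0
 *	pos->lun        = 0, pos->target = 0
 */
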
/**
 * nanddev_pos_cmp() - Compare two NAND positions
 * @a: First NAND position
 * @b: Second NAND position
 *
 * Compares two NAND positions.
 *
 * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
 */
static inline int nanddev_pos_cmp(const struct nand_pos *a,
				  const struct nand_pos *b)
{
	if (a->target != b->target)
		return a->target < b->target ? -1 : 1;

	if (a->lun != b->lun)
		return a->lun < b->lun ? -1 : 1;

	if (a->eraseblock != b->eraseblock)
		return a->eraseblock < b->eraseblock ? -1 : 1;

	if (a->page != b->page)
		return a->page < b->page ? -1 : 1;

	return 0;
}

/**
 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
 * @nand: NAND device
 * @pos: the NAND position to convert
 *
 * Converts the @pos NAND position into an absolute offset.
 *
 * Return: the absolute offset. Note that @pos points to the beginning of a
 *	   page, if one wants to point to a specific offset within this page
 *	   the returned offset has to be adjusted manually.
 */
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
					 const struct nand_pos *pos)
{
	unsigned int npages;

	npages = pos->page +
		 ((pos->eraseblock +
		   (pos->lun +
		    (pos->target * nand->memorg.luns_per_target)) *
		   nand->memorg.eraseblocks_per_lun) *
		  nand->memorg.pages_per_eraseblock);

	return (loff_t)npages * nand->memorg.pagesize;
}

/**
 * nanddev_pos_to_row() - Extract a row address from a NAND position
 * @nand: NAND device
 * @pos: the position to convert
 *
 * Converts a NAND position into a row address that can then be passed to the
 * device.
 *
 * Return: the row address extracted from @pos.
 */
static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
					      const struct nand_pos *pos)
{
	return (pos->lun << nand->rowconv.lun_addr_shift) |
	       (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
	       pos->page;
}

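/*
 * Example (hypothetical geometry): with 64 pages per eraseblock the page
 * index occupies 6 row-address bits, so eraseblock_addr_shift would be 6;
 * with 1024 eraseblocks per LUN the eraseblock index occupies 10 more bits,
 * so lun_addr_shift would be 16. Eraseblock 32, page 4 of LUN 1 then maps
 * to:
 *
 *	row = (1 << 16) | (32 << 6) | 4 = 0x10804
 */
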
/**
 * nanddev_pos_next_target() - Move a position to the next target/die
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next target/die. Useful when you
 * want to iterate over all targets/dies of a NAND device.
 */
static inline void nanddev_pos_next_target(struct nand_device *nand,
					   struct nand_pos *pos)
{
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
	pos->lun = 0;
	pos->target++;
}

/**
 * nanddev_pos_next_lun() - Move a position to the next LUN
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next LUN. Useful when you want to
 * iterate over all LUNs of a NAND device.
 */
static inline void nanddev_pos_next_lun(struct nand_device *nand,
					struct nand_pos *pos)
{
	if (pos->lun >= nand->memorg.luns_per_target - 1)
		return nanddev_pos_next_target(nand, pos);

	pos->lun++;
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
}

/**
 * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next eraseblock. Useful when you
 * want to iterate over all eraseblocks of a NAND device.
 */
static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
					       struct nand_pos *pos)
{
	if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
		return nanddev_pos_next_lun(nand, pos);

	pos->eraseblock++;
	pos->page = 0;
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
}

/**
 * nanddev_pos_next_page() - Move a position to the next page
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next page. Useful when you want to
 * iterate over all pages of a NAND device.
 */
static inline void nanddev_pos_next_page(struct nand_device *nand,
					 struct nand_pos *pos)
{
	if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
		return nanddev_pos_next_eraseblock(nand, pos);

	pos->page++;
}

/**
 * nanddev_io_iter_init - Initialize a NAND I/O iterator
 * @nand: NAND device
 * @reqtype: type of the I/O request (read or write)
 * @offs: absolute offset
 * @req: MTD request
 * @iter: NAND I/O iterator
 *
 * Initializes a NAND iterator based on the information passed by the MTD
 * layer.
 */
static inline void nanddev_io_iter_init(struct nand_device *nand,
					enum nand_page_io_req_type reqtype,
					loff_t offs, struct mtd_oob_ops *req,
					struct nand_io_iter *iter)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	iter->req.type = reqtype;
	iter->req.mode = req->mode;
	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
	iter->req.ooboffs = req->ooboffs;
	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
	iter->dataleft = req->len;
	iter->oobleft = req->ooblen;
	iter->req.databuf.in = req->datbuf;
	iter->req.datalen = min_t(unsigned int,
				  nand->memorg.pagesize - iter->req.dataoffs,
				  iter->dataleft);
	iter->req.oobbuf.in = req->oobbuf;
	iter->req.ooblen = min_t(unsigned int,
				 iter->oobbytes_per_page - iter->req.ooboffs,
				 iter->oobleft);
}

/**
 * nanddev_io_iter_next_page - Move to the next page
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Updates @iter to point to the next page.
 */
static inline void nanddev_io_iter_next_page(struct nand_device *nand,
					     struct nand_io_iter *iter)
{
	nanddev_pos_next_page(nand, &iter->req.pos);
	iter->dataleft -= iter->req.datalen;
	iter->req.databuf.in += iter->req.datalen;
	iter->oobleft -= iter->req.ooblen;
	iter->req.oobbuf.in += iter->req.ooblen;
	iter->req.dataoffs = 0;
	iter->req.ooboffs = 0;
	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
				  iter->dataleft);
	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
				 iter->oobleft);
}

/**
 * nanddev_io_iter_end - Should end iteration or not
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Check whether @iter has reached the end of the NAND portion it was asked to
 * iterate on or not.
 *
 * Return: true if @iter has reached the end of the iteration request, false
 *	   otherwise.
 */
static inline bool nanddev_io_iter_end(struct nand_device *nand,
				       const struct nand_io_iter *iter)
{
	if (iter->dataleft || iter->oobleft)
		return false;

	return true;
}

/**
 * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
 *			      I/O request
 * @nand: NAND device
 * @type: I/O request type (read or write)
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used to iterate over all the pages that are contained in an MTD
 * request.
 */
#define nanddev_io_for_each_page(nand, type, start, req, iter)		\
	for (nanddev_io_iter_init(nand, type, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);				\
	     nanddev_io_iter_next_page(nand, iter))

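/*
 * Sketch (hypothetical helper names): the typical read path of a specialized
 * layer, dispatching one per-page request for each page covered by an MTD
 * operation. my_read_page() stands in for the layer-specific page read.
 *
 *	static int my_mtd_read(struct mtd_info *mtd, loff_t from,
 *			       struct mtd_oob_ops *ops)
 *	{
 *		struct nand_device *nand = mtd_to_nanddev(mtd);
 *		struct nand_io_iter iter;
 *		int ret = 0;
 *
 *		nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops,
 *					 &iter) {
 *			ret = my_read_page(nand, &iter.req);
 *			if (ret)
 *				break;
 *		}
 *
 *		return ret;
 *	}
 */
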
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);

/* BBT related functions */
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};

int nanddev_bbt_init(struct nand_device *nand);
void nanddev_bbt_cleanup(struct nand_device *nand);
int nanddev_bbt_update(struct nand_device *nand);
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry);
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status);
int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);

/**
 * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
 * @nand: NAND device
 * @pos: the NAND position we want to get the BBT entry for
 *
 * Return the BBT entry used to store information about the eraseblock pointed
 * by @pos.
 *
 * Return: the BBT entry storing information about the eraseblock pointed by
 *	   @pos.
 */
static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
						    const struct nand_pos *pos)
{
	return pos->eraseblock +
	       ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
		nand->memorg.eraseblocks_per_lun);
}

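/*
 * Sketch (illustrative only): querying the cached status of the block
 * containing an absolute offset, combining the position and BBT helpers
 * declared above.
 *
 *	struct nand_pos pos;
 *	unsigned int entry;
 *	int status;
 *
 *	nanddev_offs_to_pos(nand, offs, &pos);
 *	entry = nanddev_bbt_pos_to_entry(nand, &pos);
 *	status = nanddev_bbt_get_block_status(nand, entry);
 *	if (status == NAND_BBT_BLOCK_WORN ||
 *	    status == NAND_BBT_BLOCK_FACTORY_BAD)
 *		pr_debug("block %u is bad\n", entry);
 */
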
/**
 * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
 * @nand: NAND device
 *
 * Return: true if the BBT has been initialized, false otherwise.
 */
static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
{
	return !!nand->bbt.cache;
}

/* MTD -> NAND helper functions. */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);

#endif /* __LINUX_MTD_NAND_H */