xref: /OK3568_Linux_fs/kernel/include/linux/mtd/mtd.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #ifndef __MTD_MTD_H__
7*4882a593Smuzhiyun #define __MTD_MTD_H__
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/types.h>
10*4882a593Smuzhiyun #include <linux/uio.h>
11*4882a593Smuzhiyun #include <linux/list.h>
12*4882a593Smuzhiyun #include <linux/notifier.h>
13*4882a593Smuzhiyun #include <linux/device.h>
14*4882a593Smuzhiyun #include <linux/of.h>
15*4882a593Smuzhiyun #include <linux/nvmem-provider.h>
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include <mtd/mtd-abi.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <asm/div64.h>
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #define MTD_FAIL_ADDR_UNKNOWN -1LL
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun struct mtd_info;
24*4882a593Smuzhiyun 
/*
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
	uint64_t addr;		/* start of the erase request, in bytes from device start */
	uint64_t len;		/* number of bytes to erase */
	uint64_t fail_addr;	/* address of failing block, or MTD_FAIL_ADDR_UNKNOWN */
};
35*4882a593Smuzhiyun 
/* Describes one region of a device with non-uniform erase block sizes. */
struct mtd_erase_region_info {
	uint64_t offset;		/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;		/* For this region */
	uint32_t numblocks;		/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;		/* If keeping bitmap of locks */
};
42*4882a593Smuzhiyun 
/**
 * struct mtd_oob_ops - oob operation operands
 * @mode:	operation mode
 *
 * @len:	number of data bytes to write/read
 *
 * @retlen:	number of data bytes written/read
 *
 * @ooblen:	number of oob bytes to write/read
 * @oobretlen:	number of oob bytes written/read
 * @ooboffs:	offset of oob data in the oob area (only relevant when
 *		mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf:	data buffer - if NULL only oob data are read/written
 * @oobbuf:	oob data buffer
 *
 * Note, some MTD drivers do not allow you to write more than one OOB area at
 * one go. If you try to do that on such an MTD device, -EINVAL will be
 * returned. If you want to make your implementation portable on all kind of MTD
 * devices you should split the write request into several sub-requests when the
 * request crosses a page boundary.
 */
struct mtd_oob_ops {
	unsigned int	mode;		/* one of the MTD_OPS_* modes from mtd-abi.h */
	size_t		len;
	size_t		retlen;
	size_t		ooblen;
	size_t		oobretlen;
	uint32_t	ooboffs;
	uint8_t		*datbuf;
	uint8_t		*oobbuf;
};
74*4882a593Smuzhiyun 
/* Upper bounds used by the OOB layout ioctl interface (see mtd-abi.h). */
#define MTD_MAX_OOBFREE_ENTRIES_LARGE	32
#define MTD_MAX_ECCPOS_ENTRIES_LARGE	640
/**
 * struct mtd_oob_region - oob region definition
 * @offset: region offset
 * @length: region length
 *
 * This structure describes a region of the OOB area, and is used
 * to retrieve ECC or free bytes sections.
 * Each section is defined by an offset within the OOB area and a
 * length.
 */
struct mtd_oob_region {
	u32 offset;
	u32 length;
};
91*4882a593Smuzhiyun 
/**
 * struct mtd_ooblayout_ops - NAND OOB layout operations
 * @ecc: function returning an ECC region in the OOB area.
 *	 Should return -ERANGE if %section exceeds the total number of
 *	 ECC sections.
 * @free: function returning a free region in the OOB area.
 *	  Should return -ERANGE if %section exceeds the total number of
 *	  free sections.
 */
struct mtd_ooblayout_ops {
	int (*ecc)(struct mtd_info *mtd, int section,
		   struct mtd_oob_region *oobecc);
	int (*free)(struct mtd_info *mtd, int section,
		    struct mtd_oob_region *oobfree);
};
107*4882a593Smuzhiyun 
/**
 * struct mtd_pairing_info - page pairing information
 *
 * @pair: pair id
 * @group: group id
 *
 * The term "pair" is used here, even though TLC NANDs might group pages by 3
 * (3 bits in a single cell). A pair should regroup all pages that are sharing
 * the same cell. Pairs are then indexed in ascending order.
 *
 * @group is defining the position of a page in a given pair. It can also be
 * seen as the bit position in the cell: page attached to bit 0 belongs to
 * group 0, page attached to bit 1 belongs to group 1, etc.
 *
 * Example:
 * The H27UCG8T2BTR-BC datasheet describes the following pairing scheme:
 *
 *		group-0		group-1
 *
 *  pair-0	page-0		page-4
 *  pair-1	page-1		page-5
 *  pair-2	page-2		page-8
 *  ...
 *  pair-127	page-251	page-255
 *
 *
 * Note that the "group" and "pair" terms were extracted from Samsung and
 * Hynix datasheets, and might be referenced under other names in other
 * datasheets (Micron is describing this concept as "shared pages").
 */
struct mtd_pairing_info {
	int pair;
	int group;
};
142*4882a593Smuzhiyun 
/**
 * struct mtd_pairing_scheme - page pairing scheme description
 *
 * @ngroups: number of groups. Should be related to the number of bits
 *	     per cell.
 * @get_info: converts a write-unit (page number within an erase block) into
 *	      mtd_pairing information (pair + group). This function should
 *	      fill the info parameter based on the wunit index or return
 *	      -EINVAL if the wunit parameter is invalid.
 * @get_wunit: converts pairing information into a write-unit (page) number.
 *	       This function should return the wunit index pointed by the
 *	       pairing information described in the info argument. It should
 *	       return -EINVAL, if there's no wunit corresponding to the
 *	       passed pairing information.
 *
 * See mtd_pairing_info documentation for a detailed explanation of the
 * pair and group concepts.
 *
 * The mtd_pairing_scheme structure provides a generic solution to represent
 * NAND page pairing scheme. Instead of exposing two big tables to do the
 * write-unit <-> (pair + group) conversions, we ask the MTD drivers to
 * implement the ->get_info() and ->get_wunit() functions.
 *
 * MTD users will then be able to query these information by using the
 * mtd_pairing_info_to_wunit() and mtd_wunit_to_pairing_info() helpers.
 *
 * @ngroups is here to help MTD users iterating over all the pages in a
 * given pair. This value can be retrieved by MTD users using the
 * mtd_pairing_groups() helper.
 *
 * Examples are given in the mtd_pairing_info_to_wunit() and
 * mtd_wunit_to_pairing_info() documentation.
 */
struct mtd_pairing_scheme {
	int ngroups;
	int (*get_info)(struct mtd_info *mtd, int wunit,
			struct mtd_pairing_info *info);
	int (*get_wunit)(struct mtd_info *mtd,
			 const struct mtd_pairing_info *info);
};
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun struct module;	/* only needed for owner field in mtd_info */
185*4882a593Smuzhiyun 
/**
 * struct mtd_debug_info - debugging information for an MTD device.
 *
 * @dfs_dir: direntry object of the MTD device debugfs directory
 * @partname: partition name string, exposed through debugfs
 * @partid: partition id string, exposed through debugfs
 */
struct mtd_debug_info {
	struct dentry *dfs_dir;

	const char *partname;
	const char *partid;
};
197*4882a593Smuzhiyun 
/**
 * struct mtd_part - MTD partition specific fields
 *
 * @node: list node used to add an MTD partition to the parent partition list
 * @offset: offset of the partition relatively to the parent offset
 * @size: partition size. Should be equal to mtd->size unless
 *	  MTD_SLC_ON_MLC_EMULATION is set
 * @flags: original flags (before the mtdpart logic decided to tweak them based
 *	   on flash constraints, like eraseblock/pagesize alignment)
 *
 * This struct is embedded in mtd_info and contains partition-specific
 * properties/fields.
 */
struct mtd_part {
	struct list_head node;
	u64 offset;
	u64 size;
	u32 flags;
};
217*4882a593Smuzhiyun 
/**
 * struct mtd_master - MTD master specific fields
 *
 * @partitions_lock: lock protecting accesses to the partition list. Protects
 *		     not only the master partition list, but also all
 *		     sub-partitions.
 * @suspended: set to 1 when the device is suspended, 0 otherwise
 *
 * This struct is embedded in mtd_info and contains master-specific
 * properties/fields. The master is the root MTD device from the MTD partition
 * point of view.
 */
struct mtd_master {
	struct mutex partitions_lock;
	unsigned int suspended : 1;
};
234*4882a593Smuzhiyun 
/*
 * struct mtd_info - central MTD device descriptor. Drivers fill this in and
 * register it; users go through the mtd_*() wrappers, never the _-prefixed
 * function pointers directly.
 */
struct mtd_info {
	u_char type;		/* device type, one of MTD_* from mtd-abi.h */
	uint32_t flags;		/* device capability flags (MTD_WRITEABLE, ...) */
	uint64_t size;	 // Total size of the MTD

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR
	 * it is of ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
	uint32_t oobavail;  // Available OOB bytes per block

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	/*
	 * read ops return -EUCLEAN if max number of bitflips corrected on any
	 * one region comprising an ecc step equals or exceeds this value.
	 * Settable by driver, else defaults to ecc_strength.  User can override
	 * in sysfs.  N.B. The meaning of the -EUCLEAN return code has changed;
	 * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
	 */
	unsigned int bitflip_threshold;

	/* Kernel-only stuff starts here. */
	const char *name;	/* device name, may come from DT "label" */
	int index;		/* mtdN device number */

	/* OOB layout description */
	const struct mtd_ooblayout_ops *ooblayout;

	/* NAND pairing scheme, only provided for MLC/TLC NANDs */
	const struct mtd_pairing_scheme *pairing;

	/* the ecc step size. */
	unsigned int ecc_step_size;

	/* max number of correctible bit errors per ecc step */
	unsigned int ecc_strength;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, void **virt, resource_size_t *phys);
	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, u_char *buf);
	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
		       size_t *retlen, const u_char *buf);
	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			     size_t *retlen, const u_char *buf);
	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
				     size_t len, size_t *retlen, u_char *buf);
	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len);
	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
	void (*_sync) (struct mtd_info *mtd);
	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len);
	int (*_suspend) (struct mtd_info *mtd);
	void (*_resume) (struct mtd_info *mtd);
	void (*_reboot) (struct mtd_info *mtd);
	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The below functions are only for driver.
	 */
	int (*_get_device) (struct mtd_info *mtd);
	void (*_put_device) (struct mtd_info *mtd);

	/*
	 * flag indicates a panic write, low level drivers can take appropriate
	 * action if required to ensure writes go through
	 */
	bool oops_panic_write;

	struct notifier_block reboot_notifier;  /* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;		/* driver-private data */

	struct module *owner;	/* module providing this device */
	struct device dev;	/* embedded device model object */
	int usecount;		/* open/reference count */
	struct mtd_debug_info dbg;
	struct nvmem_device *nvmem;

	/*
	 * Parent device from the MTD partition point of view.
	 *
	 * MTD masters do not have any parent, MTD partitions do. The parent
	 * MTD device can itself be a partition.
	 */
	struct mtd_info *parent;

	/* List of partitions attached to this MTD device */
	struct list_head partitions;

	struct mtd_part part;		/* partition-specific fields */
	struct mtd_master master;	/* master-specific fields */
};
394*4882a593Smuzhiyun 
mtd_get_master(struct mtd_info * mtd)395*4882a593Smuzhiyun static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
396*4882a593Smuzhiyun {
397*4882a593Smuzhiyun 	while (mtd->parent)
398*4882a593Smuzhiyun 		mtd = mtd->parent;
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	return mtd;
401*4882a593Smuzhiyun }
402*4882a593Smuzhiyun 
/*
 * Translate @ofs, relative to @mtd, into an absolute offset on the master
 * device by accumulating every partition offset on the way up.
 */
static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs)
{
	u64 master_ofs = ofs;

	for (; mtd->parent; mtd = mtd->parent)
		master_ofs += mtd->part.offset;

	return master_ofs;
}
412*4882a593Smuzhiyun 
mtd_is_partition(const struct mtd_info * mtd)413*4882a593Smuzhiyun static inline bool mtd_is_partition(const struct mtd_info *mtd)
414*4882a593Smuzhiyun {
415*4882a593Smuzhiyun 	return mtd->parent;
416*4882a593Smuzhiyun }
417*4882a593Smuzhiyun 
mtd_has_partitions(const struct mtd_info * mtd)418*4882a593Smuzhiyun static inline bool mtd_has_partitions(const struct mtd_info *mtd)
419*4882a593Smuzhiyun {
420*4882a593Smuzhiyun 	return !list_empty(&mtd->partitions);
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun 
/*
 * OOB layout accessors: query the per-section ECC/free byte regions exposed
 * by mtd->ooblayout and copy ECC/data bytes between raw OOB buffers and
 * packed buffers. Implemented in drivers/mtd/mtdcore.c.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion);
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree);
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
440*4882a593Smuzhiyun 
mtd_set_ooblayout(struct mtd_info * mtd,const struct mtd_ooblayout_ops * ooblayout)441*4882a593Smuzhiyun static inline void mtd_set_ooblayout(struct mtd_info *mtd,
442*4882a593Smuzhiyun 				     const struct mtd_ooblayout_ops *ooblayout)
443*4882a593Smuzhiyun {
444*4882a593Smuzhiyun 	mtd->ooblayout = ooblayout;
445*4882a593Smuzhiyun }
446*4882a593Smuzhiyun 
mtd_set_pairing_scheme(struct mtd_info * mtd,const struct mtd_pairing_scheme * pairing)447*4882a593Smuzhiyun static inline void mtd_set_pairing_scheme(struct mtd_info *mtd,
448*4882a593Smuzhiyun 				const struct mtd_pairing_scheme *pairing)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun 	mtd->pairing = pairing;
451*4882a593Smuzhiyun }
452*4882a593Smuzhiyun 
mtd_set_of_node(struct mtd_info * mtd,struct device_node * np)453*4882a593Smuzhiyun static inline void mtd_set_of_node(struct mtd_info *mtd,
454*4882a593Smuzhiyun 				   struct device_node *np)
455*4882a593Smuzhiyun {
456*4882a593Smuzhiyun 	mtd->dev.of_node = np;
457*4882a593Smuzhiyun 	if (!mtd->name)
458*4882a593Smuzhiyun 		of_property_read_string(np, "label", &mtd->name);
459*4882a593Smuzhiyun }
460*4882a593Smuzhiyun 
mtd_get_of_node(struct mtd_info * mtd)461*4882a593Smuzhiyun static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
462*4882a593Smuzhiyun {
463*4882a593Smuzhiyun 	return dev_of_node(&mtd->dev);
464*4882a593Smuzhiyun }
465*4882a593Smuzhiyun 
mtd_oobavail(struct mtd_info * mtd,struct mtd_oob_ops * ops)466*4882a593Smuzhiyun static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
467*4882a593Smuzhiyun {
468*4882a593Smuzhiyun 	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
469*4882a593Smuzhiyun }
470*4882a593Smuzhiyun 
/*
 * mtd_max_bad_blocks - worst-case number of bad blocks in [@ofs, @ofs + @len)
 * @mtd: MTD device (may be a partition)
 * @ofs: offset relative to @mtd
 * @len: length of the range, in bytes
 *
 * Returns the driver's estimate, -ENOTSUPP if the master driver does not
 * implement _max_bad_blocks, or -EINVAL if the range lies outside @mtd.
 */
static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
				     loff_t ofs, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_max_bad_blocks)
		return -ENOTSUPP;

	/* Range-check against this partition's size, not the master's. */
	if (mtd->size < (len + ofs) || ofs < 0)
		return -EINVAL;

	/* The driver hook operates on master-absolute offsets. */
	return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
				       len);
}
485*4882a593Smuzhiyun 
/*
 * Core MTD I/O API: pairing-info conversion, erase, point/unpoint,
 * read/write (data, OOB and OTP regions) and vectored writes.
 * Implemented in drivers/mtd/mtdcore.c.
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info);
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info);
int mtd_pairing_groups(struct mtd_info *mtd);
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys);
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);

int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);

int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen);
521*4882a593Smuzhiyun 
mtd_sync(struct mtd_info * mtd)522*4882a593Smuzhiyun static inline void mtd_sync(struct mtd_info *mtd)
523*4882a593Smuzhiyun {
524*4882a593Smuzhiyun 	struct mtd_info *master = mtd_get_master(mtd);
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun 	if (master->_sync)
527*4882a593Smuzhiyun 		master->_sync(master);
528*4882a593Smuzhiyun }
529*4882a593Smuzhiyun 
/*
 * Block locking and bad-block management wrappers.
 * Implemented in drivers/mtd/mtdcore.c.
 */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
536*4882a593Smuzhiyun 
/*
 * mtd_suspend - suspend the (master) MTD device
 * @mtd: MTD device (may be a partition; state is tracked on the master)
 *
 * Idempotent: returns 0 immediately if the master is already suspended.
 * On driver failure, returns the _suspend() error and leaves the device
 * marked active.
 */
static inline int mtd_suspend(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (master->master.suspended)
		return 0;

	/* Drivers without a _suspend hook are treated as trivially suspendable. */
	ret = master->_suspend ? master->_suspend(master) : 0;
	if (ret)
		return ret;

	master->master.suspended = 1;
	return 0;
}
552*4882a593Smuzhiyun 
/*
 * mtd_resume - resume the (master) MTD device
 * @mtd: MTD device (may be a partition; state is tracked on the master)
 *
 * Idempotent: does nothing unless the master is currently suspended.
 */
static inline void mtd_resume(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->master.suspended)
		return;

	if (master->_resume)
		master->_resume(master);

	master->master.suspended = 0;
}
565*4882a593Smuzhiyun 
/*
 * mtd_div_by_eb - divide a 64-bit size/offset by the eraseblock size.
 * Uses a cheap shift when erasesize is a power of two (erasesize_shift
 * is non-zero in that case), otherwise falls back to do_div().
 */
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;
	do_div(sz, mtd->erasesize);	/* do_div() stores the quotient back in sz */
	return sz;
}
573*4882a593Smuzhiyun 
mtd_mod_by_eb(uint64_t sz,struct mtd_info * mtd)574*4882a593Smuzhiyun static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
575*4882a593Smuzhiyun {
576*4882a593Smuzhiyun 	if (mtd->erasesize_shift)
577*4882a593Smuzhiyun 		return sz & mtd->erasesize_mask;
578*4882a593Smuzhiyun 	return do_div(sz, mtd->erasesize);
579*4882a593Smuzhiyun }
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun /**
582*4882a593Smuzhiyun  * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
583*4882a593Smuzhiyun  *			 boundaries.
584*4882a593Smuzhiyun  * @mtd: the MTD device this erase request applies on
585*4882a593Smuzhiyun  * @req: the erase request to adjust
586*4882a593Smuzhiyun  *
587*4882a593Smuzhiyun  * This function will adjust @req->addr and @req->len to align them on
588*4882a593Smuzhiyun  * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
589*4882a593Smuzhiyun  */
mtd_align_erase_req(struct mtd_info * mtd,struct erase_info * req)590*4882a593Smuzhiyun static inline void mtd_align_erase_req(struct mtd_info *mtd,
591*4882a593Smuzhiyun 				       struct erase_info *req)
592*4882a593Smuzhiyun {
593*4882a593Smuzhiyun 	u32 mod;
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 	if (WARN_ON(!mtd->erasesize))
596*4882a593Smuzhiyun 		return;
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun 	mod = mtd_mod_by_eb(req->addr, mtd);
599*4882a593Smuzhiyun 	if (mod) {
600*4882a593Smuzhiyun 		req->addr -= mod;
601*4882a593Smuzhiyun 		req->len += mod;
602*4882a593Smuzhiyun 	}
603*4882a593Smuzhiyun 
604*4882a593Smuzhiyun 	mod = mtd_mod_by_eb(req->addr + req->len, mtd);
605*4882a593Smuzhiyun 	if (mod)
606*4882a593Smuzhiyun 		req->len += mtd->erasesize - mod;
607*4882a593Smuzhiyun }
608*4882a593Smuzhiyun 
mtd_div_by_ws(uint64_t sz,struct mtd_info * mtd)609*4882a593Smuzhiyun static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
610*4882a593Smuzhiyun {
611*4882a593Smuzhiyun 	if (mtd->writesize_shift)
612*4882a593Smuzhiyun 		return sz >> mtd->writesize_shift;
613*4882a593Smuzhiyun 	do_div(sz, mtd->writesize);
614*4882a593Smuzhiyun 	return sz;
615*4882a593Smuzhiyun }
616*4882a593Smuzhiyun 
mtd_mod_by_ws(uint64_t sz,struct mtd_info * mtd)617*4882a593Smuzhiyun static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
618*4882a593Smuzhiyun {
619*4882a593Smuzhiyun 	if (mtd->writesize_shift)
620*4882a593Smuzhiyun 		return sz & mtd->writesize_mask;
621*4882a593Smuzhiyun 	return do_div(sz, mtd->writesize);
622*4882a593Smuzhiyun }
623*4882a593Smuzhiyun 
mtd_wunit_per_eb(struct mtd_info * mtd)624*4882a593Smuzhiyun static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
625*4882a593Smuzhiyun {
626*4882a593Smuzhiyun 	struct mtd_info *master = mtd_get_master(mtd);
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun 	return master->erasesize / mtd->writesize;
629*4882a593Smuzhiyun }
630*4882a593Smuzhiyun 
/* Index of the write unit containing @offs, counted within its eraseblock. */
static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
{
	uint32_t off_in_eb = mtd_mod_by_eb(offs, mtd);

	return mtd_div_by_ws(off_in_eb, mtd);
}
635*4882a593Smuzhiyun 
/* Absolute offset of write unit @wunit relative to the eraseblock at @base. */
static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
					 int wunit)
{
	/* Same unsigned 32-bit arithmetic as (wunit * mtd->writesize). */
	uint32_t rel = wunit * mtd->writesize;

	return base + rel;
}
641*4882a593Smuzhiyun 
642*4882a593Smuzhiyun 
mtd_has_oob(const struct mtd_info * mtd)643*4882a593Smuzhiyun static inline int mtd_has_oob(const struct mtd_info *mtd)
644*4882a593Smuzhiyun {
645*4882a593Smuzhiyun 	struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 	return master->_read_oob && master->_write_oob;
648*4882a593Smuzhiyun }
649*4882a593Smuzhiyun 
mtd_type_is_nand(const struct mtd_info * mtd)650*4882a593Smuzhiyun static inline int mtd_type_is_nand(const struct mtd_info *mtd)
651*4882a593Smuzhiyun {
652*4882a593Smuzhiyun 	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
653*4882a593Smuzhiyun }
654*4882a593Smuzhiyun 
mtd_can_have_bb(const struct mtd_info * mtd)655*4882a593Smuzhiyun static inline int mtd_can_have_bb(const struct mtd_info *mtd)
656*4882a593Smuzhiyun {
657*4882a593Smuzhiyun 	struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	return !!master->_block_isbad;
660*4882a593Smuzhiyun }
661*4882a593Smuzhiyun 
	/* Kernel-side ioctl definitions */

struct mtd_partition;
struct mtd_part_parser_data;

/*
 * Register @mtd, probing for partitions with the given parser types and
 * using @defparts/@defnr_parts as the fallback partition table.
 */
extern int mtd_device_parse_register(struct mtd_info *mtd,
				     const char * const *part_probe_types,
				     struct mtd_part_parser_data *parser_data,
				     const struct mtd_partition *defparts,
				     int defnr_parts);
/* Shorthand: register with an explicit partition table, no parser probing. */
#define mtd_device_register(master, parts, nr_parts)	\
	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
/*
 * Device lookup/reference helpers.  NOTE(review): get/put naming suggests
 * each successful get must be balanced by a put — confirm in mtdcore.
 */
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun 
/*
 * struct mtd_notifier - callbacks for MTD device arrival/removal.
 * @add:    called when an MTD device is registered
 * @remove: called when an MTD device is being removed
 * @list:   linkage on the internal notifier list
 */
struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);
	void (*remove)(struct mtd_info *mtd);
	struct list_head list;
};
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun 
/* Subscribe/unsubscribe a notifier for MTD add/remove events. */
extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
/* NOTE(review): *size appears to be in/out (allocate "up to" *size bytes,
 * writing back the size actually obtained) — confirm against mtdcore. */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
692*4882a593Smuzhiyun 
/* A read returning -EUCLEAN succeeded but had correctable bitflips. */
static inline int mtd_is_bitflip(int err) {
	return (err == -EUCLEAN) ? 1 : 0;
}
696*4882a593Smuzhiyun 
/* A read returning -EBADMSG hit an uncorrectable ECC error. */
static inline int mtd_is_eccerr(int err) {
	return (err == -EBADMSG) ? 1 : 0;
}
700*4882a593Smuzhiyun 
/*
 * True for any ECC-related read result: corrected bitflips (-EUCLEAN) or an
 * uncorrectable error (-EBADMSG).  Checks inlined rather than delegated.
 */
static inline int mtd_is_bitflip_or_eccerr(int err) {
	return err == -EUCLEAN || err == -EBADMSG;
}
704*4882a593Smuzhiyun 
/* mmap capability bits for @mtd; implementation lives outside this header. */
unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
706*4882a593Smuzhiyun 
707*4882a593Smuzhiyun #endif /* __MTD_MTD_H__ */
708