1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
4*4882a593Smuzhiyun * Steven J. Hill <sjhill@realitydiluted.com>
5*4882a593Smuzhiyun * Thomas Gleixner <tglx@linutronix.de>
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Info:
8*4882a593Smuzhiyun * Contains standard defines and IDs for NAND flash devices
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * Changelog:
11*4882a593Smuzhiyun * See git changelog.
12*4882a593Smuzhiyun */
13*4882a593Smuzhiyun #ifndef __LINUX_MTD_RAWNAND_H
14*4882a593Smuzhiyun #define __LINUX_MTD_RAWNAND_H
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #include <linux/mtd/mtd.h>
17*4882a593Smuzhiyun #include <linux/mtd/nand.h>
18*4882a593Smuzhiyun #include <linux/mtd/flashchip.h>
19*4882a593Smuzhiyun #include <linux/mtd/bbm.h>
20*4882a593Smuzhiyun #include <linux/mtd/jedec.h>
21*4882a593Smuzhiyun #include <linux/mtd/nand.h>
22*4882a593Smuzhiyun #include <linux/mtd/onfi.h>
23*4882a593Smuzhiyun #include <linux/mutex.h>
24*4882a593Smuzhiyun #include <linux/of.h>
25*4882a593Smuzhiyun #include <linux/types.h>
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun struct nand_chip;
28*4882a593Smuzhiyun
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8

/*
 * Constants for hardware specific CLE/ALE/NCE function
 *
 * These are bits which can be or'ed to set/clear multiple
 * bits in one go.
 */
/* Select the chip by setting nCE to low */
#define NAND_NCE 0x01
/* Select the command latch by setting CLE to high */
#define NAND_CLE 0x02
/* Select the address latch by setting ALE to high */
#define NAND_ALE 0x04

/* Convenience combinations: chip selected plus command/address latch */
#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE)
#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE)
/* Indicates that the control lines changed since the previous call */
#define NAND_CTRL_CHANGE 0x80
48*4882a593Smuzhiyun
/*
 * Standard NAND flash commands (first-cycle opcodes; names follow the
 * ONFI command set)
 */
#define NAND_CMD_READ0 0		/* read page (first cycle) */
#define NAND_CMD_READ1 1		/* read 2nd half of a small page */
#define NAND_CMD_RNDOUT 5		/* change read column (first cycle) */
#define NAND_CMD_PAGEPROG 0x10		/* program page (confirm cycle) */
#define NAND_CMD_READOOB 0x50		/* read OOB area (small page only) */
#define NAND_CMD_ERASE1 0x60		/* block erase (first cycle) */
#define NAND_CMD_STATUS 0x70		/* read status register */
#define NAND_CMD_SEQIN 0x80		/* program page (data input cycle) */
#define NAND_CMD_RNDIN 0x85		/* change write column */
#define NAND_CMD_READID 0x90		/* read ID bytes */
#define NAND_CMD_ERASE2 0xd0		/* block erase (confirm cycle) */
#define NAND_CMD_PARAM 0xec		/* read parameter page */
#define NAND_CMD_GET_FEATURES 0xee	/* get features */
#define NAND_CMD_SET_FEATURES 0xef	/* set features */
#define NAND_CMD_RESET 0xff		/* reset the device */

/* Extended commands for large page devices */
#define NAND_CMD_READSTART 0x30		/* read page (confirm cycle) */
#define NAND_CMD_RNDOUTSTART 0xE0	/* change read column (confirm cycle) */
#define NAND_CMD_CACHEDPROG 0x15	/* cache program (confirm cycle) */

/* Pseudo command: no command to issue (never sent to the chip) */
#define NAND_CMD_NONE -1
74*4882a593Smuzhiyun
/* Status bits (status register returned by NAND_CMD_STATUS, per ONFI SR) */
#define NAND_STATUS_FAIL 0x01		/* SR[0]: last operation failed */
#define NAND_STATUS_FAIL_N1 0x02	/* SR[1]: previous operation failed */
#define NAND_STATUS_TRUE_READY 0x20	/* SR[5]: array ready (cache ops) */
#define NAND_STATUS_READY 0x40		/* SR[6]: device ready */
#define NAND_STATUS_WP 0x80		/* SR[7]: set when NOT write-protected */

/* Pseudo chip number: only check the data interface, do not apply it */
#define NAND_DATA_IFACE_CHECK_ONLY -1
83*4882a593Smuzhiyun
/*
 * Constants for Hardware ECC
 * (presumably passed as the "mode" argument of ecc->hwctl() — see
 * struct nand_ecc_ctrl)
 */
/* Reset Hardware ECC for read */
#define NAND_ECC_READ 0
/* Reset Hardware ECC for write */
#define NAND_ECC_WRITE 1
/* Enable Hardware ECC before syndrome is read back from flash */
#define NAND_ECC_READSYN 2

/*
 * Enable generic NAND 'page erased' check. This check is only done when
 * ecc.correct() returns -EBADMSG.
 * Set this flag if your implementation does not fix bitflips in erased
 * pages and you want to rely on the default implementation.
 */
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)

/*
 * Option constants for bizarre dysfunctionality and real
 * features.
 */

/* Buswidth is 16 bit */
#define NAND_BUSWIDTH_16 BIT(1)

/*
 * When using software implementation of Hamming, we can specify which byte
 * ordering should be used.
 */
#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)

/* Chip has cache program function */
#define NAND_CACHEPRG BIT(3)
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG

/* Note: bits 4-7 are currently unassigned option bits */

/*
 * Chip requires ready check on read (for auto-incremented sequential read).
 * True only for small page devices; large page devices do not support
 * autoincrement.
 */
#define NAND_NEED_READRDY BIT(8)

/* Chip does not allow subpage writes */
#define NAND_NO_SUBPAGE_WRITE BIT(9)

/* Device is one of 'new' xD cards that expose fake nand command set */
#define NAND_BROKEN_XD BIT(10)

/* Device behaves just like nand, but is readonly */
#define NAND_ROM BIT(11)
136*4882a593Smuzhiyun
/* Device supports subpage reads */
#define NAND_SUBPAGE_READ BIT(12)
/*
 * Test the above option. The argument is parenthesized so the macro
 * remains correct when invoked with a non-trivial expression, e.g.
 * NAND_HAS_SUBPAGE_READ(&chip).
 */
#define NAND_HAS_SUBPAGE_READ(chip) (((chip)->options & NAND_SUBPAGE_READ))
141*4882a593Smuzhiyun
/*
 * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
 * patterns.
 */
#define NAND_NEED_SCRAMBLING BIT(13)

/* Device needs 3rd row address cycle */
#define NAND_ROW_ADDR_3 BIT(14)

/* Note: bit 15 is currently unassigned */

/* Non chip related options */
/* This option skips the bbt scan during initialization. */
#define NAND_SKIP_BBTSCAN BIT(16)
/* Note: bit 17 is currently unassigned */
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV BIT(18)

/*
 * Autodetect nand buswidth with readid/onfi.
 * This supposes the driver will configure the hardware in 8 bits mode
 * when calling nand_scan_ident, and update its configuration
 * before calling nand_scan_tail.
 */
#define NAND_BUSWIDTH_AUTO BIT(19)

/*
 * This option could be defined by controller drivers to protect against
 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
 */
#define NAND_USES_DMA BIT(20)

/*
 * In case your controller is implementing ->legacy.cmd_ctrl() and is relying
 * on the default ->cmdfunc() implementation, you may want to let the core
 * handle the tCCS delay which is required when a column change (RNDIN or
 * RNDOUT) is requested.
 * If your controller already takes care of this delay, you don't need to set
 * this flag.
 */
#define NAND_WAIT_TCCS BIT(21)

/*
 * Whether the NAND chip is a boot medium. Drivers might use this information
 * to select ECC algorithms supported by the boot ROM or similar restrictions.
 */
#define NAND_IS_BOOT_MEDIUM BIT(22)

/*
 * Do not try to tweak the timings at runtime. This is needed when the
 * controller initializes the timings on itself or when it relies on
 * configuration done by the bootloader.
 */
#define NAND_KEEP_TIMINGS BIT(23)

/*
 * There are different places where the manufacturer stores the factory bad
 * block markers.
 *
 * Position within the block: Each of these pages needs to be checked for a
 * bad block marking pattern.
 */
#define NAND_BBM_FIRSTPAGE BIT(24)
#define NAND_BBM_SECONDPAGE BIT(25)
#define NAND_BBM_LASTPAGE BIT(26)

/*
 * Some controllers with pipelined ECC engines override the BBM marker with
 * data or ECC bytes, thus making bad block detection through bad block marker
 * impossible. Let's flag those chips so the core knows it shouldn't check the
 * BBM and consider all blocks good.
 */
#define NAND_NO_BBM_QUIRK BIT(27)

/* Cell info constants (masks/shift over the manufacturer's 3rd ID byte) */
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
#define NAND_CI_CELLTYPE_SHIFT 2

/* Position within the OOB data of the page (byte offset of the BBM) */
#define NAND_BBM_POS_SMALL 5
#define NAND_BBM_POS_LARGE 0
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun /**
223*4882a593Smuzhiyun * struct nand_parameters - NAND generic parameters from the parameter page
224*4882a593Smuzhiyun * @model: Model name
225*4882a593Smuzhiyun * @supports_set_get_features: The NAND chip supports setting/getting features
226*4882a593Smuzhiyun * @set_feature_list: Bitmap of features that can be set
227*4882a593Smuzhiyun * @get_feature_list: Bitmap of features that can be get
228*4882a593Smuzhiyun * @onfi: ONFI specific parameters
229*4882a593Smuzhiyun */
struct nand_parameters {
	/* Generic parameters */
	const char *model;
	bool supports_set_get_features;
	/* One bit per ONFI feature address, 0..ONFI_FEATURE_NUMBER - 1 */
	DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
	DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);

	/* ONFI parameters (presumably NULL for non-ONFI chips — see users) */
	struct onfi_params *onfi;
};
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun /* The maximum expected count of bytes in the NAND ID sequence */
242*4882a593Smuzhiyun #define NAND_MAX_ID_LEN 8
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun /**
245*4882a593Smuzhiyun * struct nand_id - NAND id structure
246*4882a593Smuzhiyun * @data: buffer containing the id bytes.
247*4882a593Smuzhiyun * @len: ID length.
248*4882a593Smuzhiyun */
struct nand_id {
	u8 data[NAND_MAX_ID_LEN];	/* raw ID bytes; only the first @len are valid */
	int len;			/* number of valid bytes in @data */
};
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun /**
255*4882a593Smuzhiyun * struct nand_ecc_step_info - ECC step information of ECC engine
256*4882a593Smuzhiyun * @stepsize: data bytes per ECC step
257*4882a593Smuzhiyun * @strengths: array of supported strengths
258*4882a593Smuzhiyun * @nstrengths: number of supported strengths
259*4882a593Smuzhiyun */
struct nand_ecc_step_info {
	int stepsize;		/* data bytes covered by one ECC step */
	const int *strengths;	/* supported strengths for this step size */
	int nstrengths;		/* number of entries in @strengths */
};
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun /**
267*4882a593Smuzhiyun * struct nand_ecc_caps - capability of ECC engine
268*4882a593Smuzhiyun * @stepinfos: array of ECC step information
269*4882a593Smuzhiyun * @nstepinfos: number of ECC step information
270*4882a593Smuzhiyun * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step
271*4882a593Smuzhiyun */
struct nand_ecc_caps {
	const struct nand_ecc_step_info *stepinfos;	/* array of step infos */
	int nstepinfos;					/* entries in @stepinfos */
	/* Driver hook: ECC bytes needed for a given (step size, strength) */
	int (*calc_ecc_bytes)(int step_size, int strength);
};
277*4882a593Smuzhiyun
/*
 * NAND_ECC_CAPS_SINGLE - shorthand to generate a struct nand_ecc_caps
 * describing a single supported ECC step size.
 * @__name: name of the generated nand_ecc_caps variable (also used to
 *          derive the names of the backing strengths/stepinfo statics)
 * @__calc: calc_ecc_bytes() hook
 * @__step: the one supported step size, in bytes
 * @...:    list of supported strengths for that step size
 */
#define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...) \
static const int __name##_strengths[] = { __VA_ARGS__ }; \
static const struct nand_ecc_step_info __name##_stepinfo = { \
	.stepsize = __step, \
	.strengths = __name##_strengths, \
	.nstrengths = ARRAY_SIZE(__name##_strengths), \
}; \
static const struct nand_ecc_caps __name = { \
	.stepinfos = &__name##_stepinfo, \
	.nstepinfos = 1, \
	.calc_ecc_bytes = __calc, \
}
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun /**
293*4882a593Smuzhiyun * struct nand_ecc_ctrl - Control structure for ECC
294*4882a593Smuzhiyun * @engine_type: ECC engine type
295*4882a593Smuzhiyun * @placement: OOB bytes placement
296*4882a593Smuzhiyun * @algo: ECC algorithm
297*4882a593Smuzhiyun * @steps: number of ECC steps per page
298*4882a593Smuzhiyun * @size: data bytes per ECC step
299*4882a593Smuzhiyun * @bytes: ECC bytes per step
300*4882a593Smuzhiyun * @strength: max number of correctible bits per ECC step
301*4882a593Smuzhiyun * @total: total number of ECC bytes per page
302*4882a593Smuzhiyun * @prepad: padding information for syndrome based ECC generators
303*4882a593Smuzhiyun * @postpad: padding information for syndrome based ECC generators
304*4882a593Smuzhiyun * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
305*4882a593Smuzhiyun * @priv: pointer to private ECC control data
306*4882a593Smuzhiyun * @calc_buf: buffer for calculated ECC, size is oobsize.
307*4882a593Smuzhiyun * @code_buf: buffer for ECC read from flash, size is oobsize.
308*4882a593Smuzhiyun * @hwctl: function to control hardware ECC generator. Must only
309*4882a593Smuzhiyun * be provided if an hardware ECC is available
310*4882a593Smuzhiyun * @calculate: function for ECC calculation or readback from ECC hardware
311*4882a593Smuzhiyun * @correct: function for ECC correction, matching to ECC generator (sw/hw).
312*4882a593Smuzhiyun * Should return a positive number representing the number of
313*4882a593Smuzhiyun * corrected bitflips, -EBADMSG if the number of bitflips exceed
314*4882a593Smuzhiyun * ECC strength, or any other error code if the error is not
315*4882a593Smuzhiyun * directly related to correction.
316*4882a593Smuzhiyun * If -EBADMSG is returned the input buffers should be left
317*4882a593Smuzhiyun * untouched.
318*4882a593Smuzhiyun * @read_page_raw: function to read a raw page without ECC. This function
319*4882a593Smuzhiyun * should hide the specific layout used by the ECC
320*4882a593Smuzhiyun * controller and always return contiguous in-band and
321*4882a593Smuzhiyun * out-of-band data even if they're not stored
322*4882a593Smuzhiyun * contiguously on the NAND chip (e.g.
323*4882a593Smuzhiyun * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
324*4882a593Smuzhiyun * out-of-band data).
325*4882a593Smuzhiyun * @write_page_raw: function to write a raw page without ECC. This function
326*4882a593Smuzhiyun * should hide the specific layout used by the ECC
327*4882a593Smuzhiyun * controller and consider the passed data as contiguous
328*4882a593Smuzhiyun * in-band and out-of-band data. ECC controller is
329*4882a593Smuzhiyun * responsible for doing the appropriate transformations
330*4882a593Smuzhiyun * to adapt to its specific layout (e.g.
331*4882a593Smuzhiyun * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
332*4882a593Smuzhiyun * out-of-band data).
333*4882a593Smuzhiyun * @read_page: function to read a page according to the ECC generator
334*4882a593Smuzhiyun * requirements; returns maximum number of bitflips corrected in
335*4882a593Smuzhiyun * any single ECC step, -EIO hw error
336*4882a593Smuzhiyun * @read_subpage: function to read parts of the page covered by ECC;
337*4882a593Smuzhiyun * returns same as read_page()
338*4882a593Smuzhiyun * @write_subpage: function to write parts of the page covered by ECC.
339*4882a593Smuzhiyun * @write_page: function to write a page according to the ECC generator
340*4882a593Smuzhiyun * requirements.
341*4882a593Smuzhiyun * @write_oob_raw: function to write chip OOB data without ECC
342*4882a593Smuzhiyun * @read_oob_raw: function to read chip OOB data without ECC
343*4882a593Smuzhiyun * @read_oob: function to read chip OOB data
344*4882a593Smuzhiyun * @write_oob: function to write chip OOB data
345*4882a593Smuzhiyun */
struct nand_ecc_ctrl {
	enum nand_ecc_engine_type engine_type;
	enum nand_ecc_placement placement;
	enum nand_ecc_algo algo;
	/* ECC geometry; see the kerneldoc above for exact semantics */
	int steps;
	int size;
	int bytes;
	int total;
	int strength;
	int prepad;
	int postpad;
	unsigned int options;	/* NAND_ECC_* flags defined above */
	void *priv;
	/* Scratch buffers, each oobsize bytes long */
	u8 *calc_buf;
	u8 *code_buf;
	/* Engine control and page/OOB accessors (kerneldoc above) */
	void (*hwctl)(struct nand_chip *chip, int mode);
	int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
			 uint8_t *ecc_code);
	int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
		       uint8_t *calc_ecc);
	int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf,
			     int oob_required, int page);
	int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf,
			      int oob_required, int page);
	int (*read_page)(struct nand_chip *chip, uint8_t *buf,
			 int oob_required, int page);
	int (*read_subpage)(struct nand_chip *chip, uint32_t offs,
			    uint32_t len, uint8_t *buf, int page);
	int (*write_subpage)(struct nand_chip *chip, uint32_t offset,
			     uint32_t data_len, const uint8_t *data_buf,
			     int oob_required, int page);
	int (*write_page)(struct nand_chip *chip, const uint8_t *buf,
			  int oob_required, int page);
	int (*write_oob_raw)(struct nand_chip *chip, int page);
	int (*read_oob_raw)(struct nand_chip *chip, int page);
	int (*read_oob)(struct nand_chip *chip, int page);
	int (*write_oob)(struct nand_chip *chip, int page);
};
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun /**
386*4882a593Smuzhiyun * struct nand_sdr_timings - SDR NAND chip timings
387*4882a593Smuzhiyun *
388*4882a593Smuzhiyun * This struct defines the timing requirements of a SDR NAND chip.
389*4882a593Smuzhiyun * These information can be found in every NAND datasheets and the timings
390*4882a593Smuzhiyun * meaning are described in the ONFI specifications:
391*4882a593Smuzhiyun * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
392*4882a593Smuzhiyun * Parameters)
393*4882a593Smuzhiyun *
394*4882a593Smuzhiyun * All these timings are expressed in picoseconds.
395*4882a593Smuzhiyun *
396*4882a593Smuzhiyun * @tBERS_max: Block erase time
397*4882a593Smuzhiyun * @tCCS_min: Change column setup time
398*4882a593Smuzhiyun * @tPROG_max: Page program time
399*4882a593Smuzhiyun * @tR_max: Page read time
400*4882a593Smuzhiyun * @tALH_min: ALE hold time
401*4882a593Smuzhiyun * @tADL_min: ALE to data loading time
402*4882a593Smuzhiyun * @tALS_min: ALE setup time
403*4882a593Smuzhiyun * @tAR_min: ALE to RE# delay
404*4882a593Smuzhiyun * @tCEA_max: CE# access time
405*4882a593Smuzhiyun * @tCEH_min: CE# high hold time
406*4882a593Smuzhiyun * @tCH_min: CE# hold time
407*4882a593Smuzhiyun * @tCHZ_max: CE# high to output hi-Z
408*4882a593Smuzhiyun * @tCLH_min: CLE hold time
409*4882a593Smuzhiyun * @tCLR_min: CLE to RE# delay
410*4882a593Smuzhiyun * @tCLS_min: CLE setup time
411*4882a593Smuzhiyun * @tCOH_min: CE# high to output hold
412*4882a593Smuzhiyun * @tCS_min: CE# setup time
413*4882a593Smuzhiyun * @tDH_min: Data hold time
414*4882a593Smuzhiyun * @tDS_min: Data setup time
415*4882a593Smuzhiyun * @tFEAT_max: Busy time for Set Features and Get Features
416*4882a593Smuzhiyun * @tIR_min: Output hi-Z to RE# low
417*4882a593Smuzhiyun * @tITC_max: Interface and Timing Mode Change time
418*4882a593Smuzhiyun * @tRC_min: RE# cycle time
419*4882a593Smuzhiyun * @tREA_max: RE# access time
420*4882a593Smuzhiyun * @tREH_min: RE# high hold time
421*4882a593Smuzhiyun * @tRHOH_min: RE# high to output hold
422*4882a593Smuzhiyun * @tRHW_min: RE# high to WE# low
423*4882a593Smuzhiyun * @tRHZ_max: RE# high to output hi-Z
424*4882a593Smuzhiyun * @tRLOH_min: RE# low to output hold
425*4882a593Smuzhiyun * @tRP_min: RE# pulse width
426*4882a593Smuzhiyun * @tRR_min: Ready to RE# low (data only)
427*4882a593Smuzhiyun * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
428*4882a593Smuzhiyun * rising edge of R/B#.
429*4882a593Smuzhiyun * @tWB_max: WE# high to SR[6] low
430*4882a593Smuzhiyun * @tWC_min: WE# cycle time
431*4882a593Smuzhiyun * @tWH_min: WE# high hold time
432*4882a593Smuzhiyun * @tWHR_min: WE# high to RE# low
433*4882a593Smuzhiyun * @tWP_min: WE# pulse width
434*4882a593Smuzhiyun * @tWW_min: WP# transition to WE# low
435*4882a593Smuzhiyun */
struct nand_sdr_timings {
	/* Array operation durations (see the kerneldoc above; all in ps) */
	u64 tBERS_max;
	u32 tCCS_min;
	u64 tPROG_max;
	u64 tR_max;
	/* Bus-level signal timings, in picoseconds */
	u32 tALH_min;
	u32 tADL_min;
	u32 tALS_min;
	u32 tAR_min;
	u32 tCEA_max;
	u32 tCEH_min;
	u32 tCH_min;
	u32 tCHZ_max;
	u32 tCLH_min;
	u32 tCLR_min;
	u32 tCLS_min;
	u32 tCOH_min;
	u32 tCS_min;
	u32 tDH_min;
	u32 tDS_min;
	u32 tFEAT_max;
	u32 tIR_min;
	u32 tITC_max;
	u32 tRC_min;
	u32 tREA_max;
	u32 tREH_min;
	u32 tRHOH_min;
	u32 tRHW_min;
	u32 tRHZ_max;
	u32 tRLOH_min;
	u32 tRP_min;
	u32 tRR_min;
	u64 tRST_max;
	u32 tWB_max;
	u32 tWC_min;
	u32 tWH_min;
	u32 tWHR_min;
	u32 tWP_min;
	u32 tWW_min;
};
476*4882a593Smuzhiyun
477*4882a593Smuzhiyun /**
478*4882a593Smuzhiyun * struct nand_nvddr_timings - NV-DDR NAND chip timings
479*4882a593Smuzhiyun *
480*4882a593Smuzhiyun * This struct defines the timing requirements of a NV-DDR NAND data interface.
481*4882a593Smuzhiyun * These information can be found in every NAND datasheets and the timings
482*4882a593Smuzhiyun * meaning are described in the ONFI specifications:
483*4882a593Smuzhiyun * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
484*4882a593Smuzhiyun * (chapter 4.18.2 NV-DDR)
485*4882a593Smuzhiyun *
486*4882a593Smuzhiyun * All these timings are expressed in picoseconds.
487*4882a593Smuzhiyun *
488*4882a593Smuzhiyun * @tBERS_max: Block erase time
489*4882a593Smuzhiyun * @tCCS_min: Change column setup time
490*4882a593Smuzhiyun * @tPROG_max: Page program time
491*4882a593Smuzhiyun * @tR_max: Page read time
492*4882a593Smuzhiyun * @tAC_min: Access window of DQ[7:0] from CLK
493*4882a593Smuzhiyun * @tAC_max: Access window of DQ[7:0] from CLK
494*4882a593Smuzhiyun * @tADL_min: ALE to data loading time
495*4882a593Smuzhiyun * @tCAD_min: Command, Address, Data delay
496*4882a593Smuzhiyun * @tCAH_min: Command/Address DQ hold time
497*4882a593Smuzhiyun * @tCALH_min: W/R_n, CLE and ALE hold time
498*4882a593Smuzhiyun * @tCALS_min: W/R_n, CLE and ALE setup time
499*4882a593Smuzhiyun * @tCAS_min: Command/address DQ setup time
500*4882a593Smuzhiyun * @tCEH_min: CE# high hold time
501*4882a593Smuzhiyun * @tCH_min: CE# hold time
502*4882a593Smuzhiyun * @tCK_min: Average clock cycle time
503*4882a593Smuzhiyun * @tCS_min: CE# setup time
504*4882a593Smuzhiyun * @tDH_min: Data hold time
505*4882a593Smuzhiyun * @tDQSCK_min: Start of the access window of DQS from CLK
506*4882a593Smuzhiyun * @tDQSCK_max: End of the access window of DQS from CLK
507*4882a593Smuzhiyun * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device
508*4882a593Smuzhiyun * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device
509*4882a593Smuzhiyun * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device
510*4882a593Smuzhiyun * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access
511*4882a593Smuzhiyun * @tDS_min: Data setup time
512*4882a593Smuzhiyun * @tDSC_min: DQS cycle time
513*4882a593Smuzhiyun * @tFEAT_max: Busy time for Set Features and Get Features
514*4882a593Smuzhiyun * @tITC_max: Interface and Timing Mode Change time
515*4882a593Smuzhiyun * @tQHS_max: Data hold skew factor
516*4882a593Smuzhiyun * @tRHW_min: Data output cycle to command, address, or data input cycle
517*4882a593Smuzhiyun * @tRR_min: Ready to RE# low (data only)
518*4882a593Smuzhiyun * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
519*4882a593Smuzhiyun * rising edge of R/B#.
520*4882a593Smuzhiyun * @tWB_max: WE# high to SR[6] low
521*4882a593Smuzhiyun * @tWHR_min: WE# high to RE# low
522*4882a593Smuzhiyun * @tWRCK_min: W/R_n low to data output cycle
523*4882a593Smuzhiyun * @tWW_min: WP# transition to WE# low
524*4882a593Smuzhiyun */
struct nand_nvddr_timings {
	/* Array operation durations (see the kerneldoc above; all in ps) */
	u64 tBERS_max;
	u32 tCCS_min;
	u64 tPROG_max;
	u64 tR_max;
	/* Bus-level signal timings, in picoseconds */
	u32 tAC_min;
	u32 tAC_max;
	u32 tADL_min;
	u32 tCAD_min;
	u32 tCAH_min;
	u32 tCALH_min;
	u32 tCALS_min;
	u32 tCAS_min;
	u32 tCEH_min;
	u32 tCH_min;
	u32 tCK_min;
	u32 tCS_min;
	u32 tDH_min;
	u32 tDQSCK_min;
	u32 tDQSCK_max;
	u32 tDQSD_min;
	u32 tDQSD_max;
	u32 tDQSHZ_max;
	u32 tDQSQ_max;
	u32 tDS_min;
	u32 tDSC_min;
	u32 tFEAT_max;
	u32 tITC_max;
	u32 tQHS_max;
	u32 tRHW_min;
	u32 tRR_min;
	u32 tRST_max;
	u32 tWB_max;
	u32 tWHR_min;
	u32 tWRCK_min;
	u32 tWW_min;
};
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun /**
564*4882a593Smuzhiyun * enum nand_interface_type - NAND interface type
565*4882a593Smuzhiyun * @NAND_SDR_IFACE: Single Data Rate interface
566*4882a593Smuzhiyun * @NAND_NVDDR_IFACE: Double Data Rate interface
567*4882a593Smuzhiyun */
enum nand_interface_type {
	NAND_SDR_IFACE,		/* Single Data Rate */
	NAND_NVDDR_IFACE,	/* NV-DDR (Double Data Rate) */
};
572*4882a593Smuzhiyun
573*4882a593Smuzhiyun /**
574*4882a593Smuzhiyun * struct nand_interface_config - NAND interface timing
575*4882a593Smuzhiyun * @type: type of the timing
576*4882a593Smuzhiyun * @timings: The timing information
577*4882a593Smuzhiyun * @timings.mode: Timing mode as defined in the specification
578*4882a593Smuzhiyun * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
579*4882a593Smuzhiyun * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE.
580*4882a593Smuzhiyun */
struct nand_interface_config {
	enum nand_interface_type type;
	struct nand_timings {
		unsigned int mode;	/* timing mode number from the spec */
		/* Valid union member is selected by @type */
		union {
			struct nand_sdr_timings sdr;
			struct nand_nvddr_timings nvddr;
		};
	} timings;
};
591*4882a593Smuzhiyun
/**
 * nand_interface_is_sdr - check whether the interface type is SDR
 * @conf: The data interface
 */
nand_interface_is_sdr(const struct nand_interface_config * conf)596*4882a593Smuzhiyun static bool nand_interface_is_sdr(const struct nand_interface_config *conf)
597*4882a593Smuzhiyun {
598*4882a593Smuzhiyun return conf->type == NAND_SDR_IFACE;
599*4882a593Smuzhiyun }
600*4882a593Smuzhiyun
/**
 * nand_interface_is_nvddr - check whether the interface type is NV-DDR
 * @conf: The data interface
 */
nand_interface_is_nvddr(const struct nand_interface_config * conf)605*4882a593Smuzhiyun static bool nand_interface_is_nvddr(const struct nand_interface_config *conf)
606*4882a593Smuzhiyun {
607*4882a593Smuzhiyun return conf->type == NAND_NVDDR_IFACE;
608*4882a593Smuzhiyun }
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun /**
611*4882a593Smuzhiyun * nand_get_sdr_timings - get SDR timing from data interface
612*4882a593Smuzhiyun * @conf: The data interface
613*4882a593Smuzhiyun */
614*4882a593Smuzhiyun static inline const struct nand_sdr_timings *
nand_get_sdr_timings(const struct nand_interface_config * conf)615*4882a593Smuzhiyun nand_get_sdr_timings(const struct nand_interface_config *conf)
616*4882a593Smuzhiyun {
617*4882a593Smuzhiyun if (!nand_interface_is_sdr(conf))
618*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun return &conf->timings.sdr;
621*4882a593Smuzhiyun }
622*4882a593Smuzhiyun
623*4882a593Smuzhiyun /**
624*4882a593Smuzhiyun * nand_get_nvddr_timings - get NV-DDR timing from data interface
625*4882a593Smuzhiyun * @conf: The data interface
626*4882a593Smuzhiyun */
627*4882a593Smuzhiyun static inline const struct nand_nvddr_timings *
nand_get_nvddr_timings(const struct nand_interface_config * conf)628*4882a593Smuzhiyun nand_get_nvddr_timings(const struct nand_interface_config *conf)
629*4882a593Smuzhiyun {
630*4882a593Smuzhiyun if (!nand_interface_is_nvddr(conf))
631*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
632*4882a593Smuzhiyun
633*4882a593Smuzhiyun return &conf->timings.nvddr;
634*4882a593Smuzhiyun }
635*4882a593Smuzhiyun
/**
 * struct nand_op_cmd_instr - Definition of a command instruction
 * @opcode: the command opcode to issue on the bus in one cycle
 */
struct nand_op_cmd_instr {
	u8 opcode;
};
643*4882a593Smuzhiyun
/**
 * struct nand_op_addr_instr - Definition of an address instruction
 * @naddrs: length of the @addrs array (number of address cycles)
 * @addrs: array containing the address cycles to issue, one byte per cycle
 */
struct nand_op_addr_instr {
	unsigned int naddrs;
	const u8 *addrs;
};
653*4882a593Smuzhiyun
/**
 * struct nand_op_data_instr - Definition of a data instruction
 * @len: number of data bytes to move
 * @buf: buffer to fill
 * @buf.in: buffer to fill when reading from the NAND chip
 * @buf.out: buffer to read from when writing to the NAND chip
 * @force_8bit: force 8-bit access
 *
 * Please note that "in" and "out" are inverted from the ONFI specification
 * and are from the controller perspective, so an "in" is a read from the NAND
 * chip while an "out" is a write to the NAND chip.
 */
struct nand_op_data_instr {
	unsigned int len;
	union {
		void *in;
		const void *out;
	} buf;
	bool force_8bit;
};
674*4882a593Smuzhiyun
/**
 * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
 * @timeout_ms: maximum delay, in milliseconds, to wait for the
 *		ready/busy pin before giving up
 */
struct nand_op_waitrdy_instr {
	unsigned int timeout_ms;
};
682*4882a593Smuzhiyun
/**
 * enum nand_op_instr_type - Definition of all instruction types
 * @NAND_OP_CMD_INSTR: command instruction
 * @NAND_OP_ADDR_INSTR: address instruction
 * @NAND_OP_DATA_IN_INSTR: data in instruction (read from the NAND chip)
 * @NAND_OP_DATA_OUT_INSTR: data out instruction (write to the NAND chip)
 * @NAND_OP_WAITRDY_INSTR: wait ready instruction
 */
enum nand_op_instr_type {
	NAND_OP_CMD_INSTR,
	NAND_OP_ADDR_INSTR,
	NAND_OP_DATA_IN_INSTR,
	NAND_OP_DATA_OUT_INSTR,
	NAND_OP_WAITRDY_INSTR,
};
698*4882a593Smuzhiyun
/**
 * struct nand_op_instr - Instruction object
 * @type: the instruction type
 * @ctx: extra data associated to the instruction. You'll have to use the
 *	 appropriate element depending on @type
 * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
 * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
 * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
 *	      or %NAND_OP_DATA_OUT_INSTR
 * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
 * @delay_ns: delay, in nanoseconds, the controller should apply after the
 *	      instruction has been issued on the bus. Most modern controllers
 *	      have internal timings control logic, and in this case, the
 *	      controller driver can ignore this field.
 */
struct nand_op_instr {
	enum nand_op_instr_type type;
	union {
		struct nand_op_cmd_instr cmd;
		struct nand_op_addr_instr addr;
		struct nand_op_data_instr data;
		struct nand_op_waitrdy_instr waitrdy;
	} ctx;
	unsigned int delay_ns;
};
724*4882a593Smuzhiyun
/*
 * Special handling must be done for the WAITRDY timeout parameter as it usually
 * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
 * tBERS (during an erase) which all of them are u64 values that cannot be
 * divided by usual kernel macros and must be handled with the special
 * DIV_ROUND_UP_ULL() macro.
 *
 * Cast to type of dividend is needed here to guarantee that the result won't
 * be an unsigned long long when the dividend is an unsigned long (or smaller),
 * which is what the compiler does when it sees ternary operator with 2
 * different return types (picks the largest type to make sure there's no
 * loss).
 */
#define __DIVIDE(dividend, divisor) ({					\
	(__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \
			       DIV_ROUND_UP(dividend, divisor) :	\
			       DIV_ROUND_UP_ULL(dividend, divisor));	\
	})
/* Convert picoseconds to nanoseconds, rounding up */
#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
/* Convert picoseconds to milliseconds, rounding up */
#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
745*4882a593Smuzhiyun
/*
 * Helpers to declare &struct nand_op_instr entries, typically used to build
 * the instruction arrays forming a NAND operation.
 */

/* Command cycle issuing opcode @id, followed by a @ns nanosecond delay */
#define NAND_OP_CMD(id, ns)						\
	{								\
		.type = NAND_OP_CMD_INSTR,				\
		.ctx.cmd.opcode = id,					\
		.delay_ns = ns,						\
	}

/* @ncycles address cycles taken from the @cycles array, then a @ns delay */
#define NAND_OP_ADDR(ncycles, cycles, ns)				\
	{								\
		.type = NAND_OP_ADDR_INSTR,				\
		.ctx.addr = {						\
			.naddrs = ncycles,				\
			.addrs = cycles,				\
		},							\
		.delay_ns = ns,						\
	}

/* Read @l bytes from the chip into @b, then a @ns delay */
#define NAND_OP_DATA_IN(l, b, ns)					\
	{								\
		.type = NAND_OP_DATA_IN_INSTR,				\
		.ctx.data = {						\
			.len = l,					\
			.buf.in = b,					\
			.force_8bit = false,				\
		},							\
		.delay_ns = ns,						\
	}

/* Write @l bytes from @b to the chip, then a @ns delay */
#define NAND_OP_DATA_OUT(l, b, ns)					\
	{								\
		.type = NAND_OP_DATA_OUT_INSTR,				\
		.ctx.data = {						\
			.len = l,					\
			.buf.out = b,					\
			.force_8bit = false,				\
		},							\
		.delay_ns = ns,						\
	}

/* Same as NAND_OP_DATA_IN() but forcing 8-bit accesses */
#define NAND_OP_8BIT_DATA_IN(l, b, ns)					\
	{								\
		.type = NAND_OP_DATA_IN_INSTR,				\
		.ctx.data = {						\
			.len = l,					\
			.buf.in = b,					\
			.force_8bit = true,				\
		},							\
		.delay_ns = ns,						\
	}

/* Same as NAND_OP_DATA_OUT() but forcing 8-bit accesses */
#define NAND_OP_8BIT_DATA_OUT(l, b, ns)					\
	{								\
		.type = NAND_OP_DATA_OUT_INSTR,				\
		.ctx.data = {						\
			.len = l,					\
			.buf.out = b,					\
			.force_8bit = true,				\
		},							\
		.delay_ns = ns,						\
	}

/* Wait at most @tout_ms ms for the ready/busy pin, then a @ns delay */
#define NAND_OP_WAIT_RDY(tout_ms, ns)					\
	{								\
		.type = NAND_OP_WAITRDY_INSTR,				\
		.ctx.waitrdy.timeout_ms = tout_ms,			\
		.delay_ns = ns,						\
	}
813*4882a593Smuzhiyun
/**
 * struct nand_subop - a sub operation
 * @cs: the CS line to select for this NAND sub-operation
 * @instrs: array of instructions
 * @ninstrs: length of the @instrs array
 * @first_instr_start_off: offset to start from for the first instruction
 *			   of the sub-operation
 * @last_instr_end_off: offset to end at (excluded) for the last instruction
 *			of the sub-operation
 *
 * Both @first_instr_start_off and @last_instr_end_off only apply to data or
 * address instructions.
 *
 * When an operation cannot be handled as-is by the NAND controller, it will
 * be split by the parser into sub-operations which will be passed to the
 * controller driver.
 */
struct nand_subop {
	unsigned int cs;
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	unsigned int first_instr_start_off;
	unsigned int last_instr_end_off;
};
838*4882a593Smuzhiyun
/*
 * Helpers for controller drivers: extract the effective start offset and
 * length of the address/data cycles of the sub-operation instruction
 * identified by @op_id (taking the sub-operation boundaries into account).
 */
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
					   unsigned int op_id);
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
					 unsigned int op_id);
unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
					   unsigned int op_id);
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
				     unsigned int op_id);
847*4882a593Smuzhiyun
/**
 * struct nand_op_parser_addr_constraints - Constraints for address instructions
 * @maxcycles: maximum number of address cycles the controller can issue in a
 *	       single step
 */
struct nand_op_parser_addr_constraints {
	unsigned int maxcycles;
};
856*4882a593Smuzhiyun
/**
 * struct nand_op_parser_data_constraints - Constraints for data instructions
 * @maxlen: maximum data length, in bytes, that the controller can handle in a
 *	    single step
 */
struct nand_op_parser_data_constraints {
	unsigned int maxlen;
};
864*4882a593Smuzhiyun
/**
 * struct nand_op_parser_pattern_elem - One element of a pattern
 * @type: the instruction type
 * @optional: whether this element of the pattern is optional or mandatory
 * @ctx: address or data constraint
 * @ctx.addr: address constraint (number of cycles)
 * @ctx.data: data constraint (data length)
 */
struct nand_op_parser_pattern_elem {
	enum nand_op_instr_type type;
	bool optional;
	union {
		struct nand_op_parser_addr_constraints addr;
		struct nand_op_parser_data_constraints data;
	} ctx;
};
881*4882a593Smuzhiyun
/* Pattern element matching a command instruction */
#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt)				\
	{								\
		.type = NAND_OP_CMD_INSTR,				\
		.optional = _opt,					\
	}

/* Pattern element matching an address instruction of at most @_maxcycles */
#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles)			\
	{								\
		.type = NAND_OP_ADDR_INSTR,				\
		.optional = _opt,					\
		.ctx.addr.maxcycles = _maxcycles,			\
	}

/* Pattern element matching a data-in instruction of at most @_maxlen bytes */
#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen)			\
	{								\
		.type = NAND_OP_DATA_IN_INSTR,				\
		.optional = _opt,					\
		.ctx.data.maxlen = _maxlen,				\
	}

/* Pattern element matching a data-out instruction of at most @_maxlen bytes */
#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen)			\
	{								\
		.type = NAND_OP_DATA_OUT_INSTR,				\
		.optional = _opt,					\
		.ctx.data.maxlen = _maxlen,				\
	}

/* Pattern element matching a wait-ready instruction */
#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt)				\
	{								\
		.type = NAND_OP_WAITRDY_INSTR,				\
		.optional = _opt,					\
	}
914*4882a593Smuzhiyun
/**
 * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
 * @elems: array of pattern elements
 * @nelems: number of pattern elements in @elems array
 * @exec: the function that will issue a sub-operation
 *
 * A pattern is a list of elements, each element representing one instruction
 * with its constraints. The pattern itself is used by the core to match NAND
 * chip operation with NAND controller operations.
 * Once a match between a NAND controller operation pattern and a NAND chip
 * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
 * hook is called so that the controller driver can issue the operation on the
 * bus.
 *
 * Controller drivers should declare as many patterns as they support and pass
 * this list of patterns (created with the help of the following macro) to
 * the nand_op_parser_exec_op() helper.
 */
struct nand_op_parser_pattern {
	const struct nand_op_parser_pattern_elem *elems;
	unsigned int nelems;
	int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
};
938*4882a593Smuzhiyun
/*
 * Declare a &struct nand_op_parser_pattern from an ->exec() hook and a list
 * of pattern elements; ->nelems is derived from the element list.
 */
#define NAND_OP_PARSER_PATTERN(_exec, ...)							\
	{											\
		.exec = _exec,									\
		.elems = (const struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ },		\
		.nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) /	\
			  sizeof(struct nand_op_parser_pattern_elem),				\
	}
946*4882a593Smuzhiyun
/**
 * struct nand_op_parser - NAND controller operation parser descriptor
 * @patterns: array of supported patterns
 * @npatterns: length of the @patterns array
 *
 * The parser descriptor is just an array of supported patterns which will be
 * iterated by nand_op_parser_exec_op() every time it tries to execute a
 * NAND operation (or tries to determine if a specific operation is supported).
 *
 * It is worth mentioning that patterns will be tested in their declaration
 * order, and the first match will be taken, so it's important to order patterns
 * appropriately so that simple/inefficient patterns are placed at the end of
 * the list. Usually, this is where you put single instruction patterns.
 */
struct nand_op_parser {
	const struct nand_op_parser_pattern *patterns;
	unsigned int npatterns;
};
965*4882a593Smuzhiyun
/*
 * Declare a &struct nand_op_parser from a list of patterns; ->npatterns is
 * derived from the pattern list.
 */
#define NAND_OP_PARSER(...)								\
	{										\
		.patterns = (const struct nand_op_parser_pattern[]) { __VA_ARGS__ },	\
		.npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
			     sizeof(struct nand_op_parser_pattern),			\
	}
972*4882a593Smuzhiyun
/**
 * struct nand_operation - NAND operation descriptor
 * @cs: the CS line to select for this NAND operation
 * @instrs: array of instructions to execute
 * @ninstrs: length of the @instrs array
 *
 * The actual operation structure that will be passed to chip->exec_op().
 */
struct nand_operation {
	unsigned int cs;
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
};
986*4882a593Smuzhiyun
/*
 * Declare a &struct nand_operation targeting CS @_cs from an instruction
 * array; ->ninstrs is derived from the array (must be a real array, not a
 * pointer, for ARRAY_SIZE() to work).
 */
#define NAND_OPERATION(_cs, _instrs)				\
	{							\
		.cs = _cs,					\
		.instrs = _instrs,				\
		.ninstrs = ARRAY_SIZE(_instrs),			\
	}
993*4882a593Smuzhiyun
/*
 * Match @op against the patterns declared in @parser and, unless @check_only
 * is set, execute the matching sub-operations on @chip.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only);
997*4882a593Smuzhiyun
nand_op_trace(const char * prefix,const struct nand_op_instr * instr)998*4882a593Smuzhiyun static inline void nand_op_trace(const char *prefix,
999*4882a593Smuzhiyun const struct nand_op_instr *instr)
1000*4882a593Smuzhiyun {
1001*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
1002*4882a593Smuzhiyun switch (instr->type) {
1003*4882a593Smuzhiyun case NAND_OP_CMD_INSTR:
1004*4882a593Smuzhiyun pr_debug("%sCMD [0x%02x]\n", prefix,
1005*4882a593Smuzhiyun instr->ctx.cmd.opcode);
1006*4882a593Smuzhiyun break;
1007*4882a593Smuzhiyun case NAND_OP_ADDR_INSTR:
1008*4882a593Smuzhiyun pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
1009*4882a593Smuzhiyun instr->ctx.addr.naddrs,
1010*4882a593Smuzhiyun instr->ctx.addr.naddrs < 64 ?
1011*4882a593Smuzhiyun instr->ctx.addr.naddrs : 64,
1012*4882a593Smuzhiyun instr->ctx.addr.addrs);
1013*4882a593Smuzhiyun break;
1014*4882a593Smuzhiyun case NAND_OP_DATA_IN_INSTR:
1015*4882a593Smuzhiyun pr_debug("%sDATA_IN [%d B%s]\n", prefix,
1016*4882a593Smuzhiyun instr->ctx.data.len,
1017*4882a593Smuzhiyun instr->ctx.data.force_8bit ?
1018*4882a593Smuzhiyun ", force 8-bit" : "");
1019*4882a593Smuzhiyun break;
1020*4882a593Smuzhiyun case NAND_OP_DATA_OUT_INSTR:
1021*4882a593Smuzhiyun pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
1022*4882a593Smuzhiyun instr->ctx.data.len,
1023*4882a593Smuzhiyun instr->ctx.data.force_8bit ?
1024*4882a593Smuzhiyun ", force 8-bit" : "");
1025*4882a593Smuzhiyun break;
1026*4882a593Smuzhiyun case NAND_OP_WAITRDY_INSTR:
1027*4882a593Smuzhiyun pr_debug("%sWAITRDY [max %d ms]\n", prefix,
1028*4882a593Smuzhiyun instr->ctx.waitrdy.timeout_ms);
1029*4882a593Smuzhiyun break;
1030*4882a593Smuzhiyun }
1031*4882a593Smuzhiyun #endif
1032*4882a593Smuzhiyun }
1033*4882a593Smuzhiyun
/**
 * struct nand_controller_ops - Controller operations
 *
 * @attach_chip: this method is called after the NAND detection phase after
 *		 flash ID and MTD fields such as erase size, page size and OOB
 *		 size have been set up. ECC requirements are available if
 *		 provided by the NAND chip or device tree. Typically used to
 *		 choose the appropriate ECC configuration and allocate
 *		 associated resources.
 *		 This hook is optional.
 * @detach_chip: free all resources allocated/claimed in
 *		 nand_controller_ops->attach_chip().
 *		 This hook is optional.
 * @exec_op:	 controller specific method to execute NAND operations.
 *		 This method replaces chip->legacy.cmdfunc(),
 *		 chip->legacy.{read,write}_{buf,byte,word}(),
 *		 chip->legacy.dev_ready() and chip->legacy.waitfunc().
 * @setup_interface: setup the data interface and timing. If chipnr is set to
 *		     %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
 *		     should not be applied but only checked.
 *		     This hook is optional.
 */
struct nand_controller_ops {
	int (*attach_chip)(struct nand_chip *chip);
	void (*detach_chip)(struct nand_chip *chip);
	int (*exec_op)(struct nand_chip *chip,
		       const struct nand_operation *op,
		       bool check_only);
	int (*setup_interface)(struct nand_chip *chip, int chipnr,
			       const struct nand_interface_config *conf);
};
1065*4882a593Smuzhiyun
/**
 * struct nand_controller - Structure used to describe a NAND controller
 *
 * @lock: lock used to serialize accesses to the NAND controller
 * @ops: NAND controller operations.
 */
struct nand_controller {
	struct mutex lock;
	const struct nand_controller_ops *ops;
};
1076*4882a593Smuzhiyun
nand_controller_init(struct nand_controller * nfc)1077*4882a593Smuzhiyun static inline void nand_controller_init(struct nand_controller *nfc)
1078*4882a593Smuzhiyun {
1079*4882a593Smuzhiyun mutex_init(&nfc->lock);
1080*4882a593Smuzhiyun }
1081*4882a593Smuzhiyun
/**
 * struct nand_legacy - NAND chip legacy fields/hooks
 * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
 * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
 * @select_chip: select/deselect a specific target/die
 * @read_byte: read one byte from the chip
 * @write_byte: write a single byte to the chip on the low 8 I/O lines
 * @write_buf: write data from the buffer to the chip
 * @read_buf: read data from the chip into the buffer
 * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used
 *	      to write command and address
 * @cmdfunc: hardware specific function for writing commands to the chip.
 * @dev_ready: hardware specific function for accessing device ready/busy line.
 *	       If set to NULL no access to ready/busy is available and the
 *	       ready/busy information is read from the chip status register.
 * @waitfunc: hardware specific function for waiting until the device is ready.
 * @block_bad: check if a block is bad, using OOB markers
 * @block_markbad: mark a block bad
 * @set_features: set the NAND chip features
 * @get_features: get the NAND chip features
 * @chip_delay: chip dependent delay for transferring data from array to read
 *		regs (tR).
 * @dummy_controller: dummy controller implementation for drivers that can
 *		      only control a single chip
 *
 * If you look at this structure you're already wrong. These fields/hooks are
 * all deprecated.
 */
struct nand_legacy {
	void __iomem *IO_ADDR_R;
	void __iomem *IO_ADDR_W;
	void (*select_chip)(struct nand_chip *chip, int cs);
	u8 (*read_byte)(struct nand_chip *chip);
	void (*write_byte)(struct nand_chip *chip, u8 byte);
	void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
	void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
	void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
	void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
			int page_addr);
	int (*dev_ready)(struct nand_chip *chip);
	int (*waitfunc)(struct nand_chip *chip);
	int (*block_bad)(struct nand_chip *chip, loff_t ofs);
	int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
	int (*set_features)(struct nand_chip *chip, int feature_addr,
			    u8 *subfeature_para);
	int (*get_features)(struct nand_chip *chip, int feature_addr,
			    u8 *subfeature_para);
	int chip_delay;
	struct nand_controller dummy_controller;
};
1132*4882a593Smuzhiyun
/**
 * struct nand_chip_ops - NAND chip operations
 * @suspend: Suspend operation
 * @resume: Resume operation
 * @lock_area: Lock operation
 * @unlock_area: Unlock operation
 * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
 * @choose_interface_config: Choose the best interface configuration
 */
struct nand_chip_ops {
	int (*suspend)(struct nand_chip *chip);
	void (*resume)(struct nand_chip *chip);
	int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
	int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
	int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
	int (*choose_interface_config)(struct nand_chip *chip,
				       struct nand_interface_config *iface);
};
1151*4882a593Smuzhiyun
/**
 * struct nand_manufacturer - NAND manufacturer structure
 * @desc: The manufacturer description
 * @priv: Private information for the manufacturer driver
 */
struct nand_manufacturer {
	const struct nand_manufacturer_desc *desc;
	void *priv;
};
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun /**
1163*4882a593Smuzhiyun * struct nand_chip - NAND Private Flash Chip Data
1164*4882a593Smuzhiyun * @base: Inherit from the generic NAND device
1165*4882a593Smuzhiyun * @id: Holds NAND ID
1166*4882a593Smuzhiyun * @parameters: Holds generic parameters under an easily readable form
1167*4882a593Smuzhiyun * @manufacturer: Manufacturer information
1168*4882a593Smuzhiyun * @ops: NAND chip operations
1169*4882a593Smuzhiyun * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try
1170*4882a593Smuzhiyun * to use any of these fields/hooks, and if you're modifying an
1171*4882a593Smuzhiyun * existing driver that is using those fields/hooks, you should
1172*4882a593Smuzhiyun * consider reworking the driver and avoid using them.
1173*4882a593Smuzhiyun * @options: Various chip options. They can partly be set to inform nand_scan
1174*4882a593Smuzhiyun * about special functionality. See the defines for further
1175*4882a593Smuzhiyun * explanation.
1176*4882a593Smuzhiyun * @current_interface_config: The currently used NAND interface configuration
1177*4882a593Smuzhiyun * @best_interface_config: The best NAND interface configuration which fits both
1178*4882a593Smuzhiyun * the NAND chip and NAND controller constraints. If
1179*4882a593Smuzhiyun * unset, the default reset interface configuration must
1180*4882a593Smuzhiyun * be used.
1181*4882a593Smuzhiyun * @bbt_erase_shift: Number of address bits in a bbt entry
1182*4882a593Smuzhiyun * @bbt_options: Bad block table specific options. All options used here must
1183*4882a593Smuzhiyun * come from bbm.h. By default, these options will be copied to
1184*4882a593Smuzhiyun * the appropriate nand_bbt_descr's.
1185*4882a593Smuzhiyun * @badblockpos: Bad block marker position in the oob area
1186*4882a593Smuzhiyun * @badblockbits: Minimum number of set bits in a good block's bad block marker
1187*4882a593Smuzhiyun * position; i.e., BBM = 11110111b is good when badblockbits = 7
1188*4882a593Smuzhiyun * @bbt_td: Bad block table descriptor for flash lookup
1189*4882a593Smuzhiyun * @bbt_md: Bad block table mirror descriptor
1190*4882a593Smuzhiyun * @badblock_pattern: Bad block scan pattern used for initial bad block scan
1191*4882a593Smuzhiyun * @bbt: Bad block table pointer
1192*4882a593Smuzhiyun * @page_shift: Number of address bits in a page (column address bits)
1193*4882a593Smuzhiyun * @phys_erase_shift: Number of address bits in a physical eraseblock
1194*4882a593Smuzhiyun * @chip_shift: Number of address bits in one chip
1195*4882a593Smuzhiyun * @pagemask: Page number mask = number of (pages / chip) - 1
1196*4882a593Smuzhiyun * @subpagesize: Holds the subpagesize
1197*4882a593Smuzhiyun * @data_buf: Buffer for data, size is (page size + oobsize)
1198*4882a593Smuzhiyun * @oob_poi: pointer on the OOB area covered by data_buf
1199*4882a593Smuzhiyun * @pagecache: Structure containing page cache related fields
1200*4882a593Smuzhiyun * @pagecache.bitflips: Number of bitflips of the cached page
1201*4882a593Smuzhiyun * @pagecache.page: Page number currently in the cache. -1 means no page is
1202*4882a593Smuzhiyun * currently cached
1203*4882a593Smuzhiyun * @buf_align: Minimum buffer alignment required by a platform
1204*4882a593Smuzhiyun * @lock: Lock protecting the suspended field. Also used to serialize accesses
1205*4882a593Smuzhiyun * to the NAND device
1206*4882a593Smuzhiyun * @suspended: Set to 1 when the device is suspended, 0 when it's not
1207*4882a593Smuzhiyun * @resume_wq: wait queue to sleep if rawnand is in suspended state.
1208*4882a593Smuzhiyun * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
1209*4882a593Smuzhiyun * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
1210*4882a593Smuzhiyun * NAND Controller drivers should not modify this value, but they're
1211*4882a593Smuzhiyun * allowed to read it.
1212*4882a593Smuzhiyun * @read_retries: The number of read retry modes supported
1213*4882a593Smuzhiyun * @controller: The hardware controller structure which is shared among multiple
1214*4882a593Smuzhiyun * independent devices
1215*4882a593Smuzhiyun * @ecc: The ECC controller structure
1216*4882a593Smuzhiyun * @priv: Chip private data
1217*4882a593Smuzhiyun */
struct nand_chip {
	/* Core NAND framework object; embeds the mtd_info instance. */
	struct nand_device base;
	struct nand_id id;
	struct nand_parameters parameters;
	struct nand_manufacturer manufacturer;
	struct nand_chip_ops ops;
	struct nand_legacy legacy;
	unsigned int options;

	/* Data interface */
	const struct nand_interface_config *current_interface_config;
	struct nand_interface_config *best_interface_config;

	/* Bad block information */
	unsigned int bbt_erase_shift;
	unsigned int bbt_options;
	unsigned int badblockpos;
	unsigned int badblockbits;
	struct nand_bbt_descr *bbt_td;
	struct nand_bbt_descr *bbt_md;
	struct nand_bbt_descr *badblock_pattern;
	u8 *bbt;

	/* Device internal layout */
	unsigned int page_shift;
	unsigned int phys_erase_shift;
	unsigned int chip_shift;
	unsigned int pagemask;
	unsigned int subpagesize;

	/* Buffers */
	u8 *data_buf;
	u8 *oob_poi;
	struct {
		unsigned int bitflips;
		/* -1 means no page is currently cached (see kernel-doc above) */
		int page;
	} pagecache;
	unsigned long buf_align;

	/* Internals */
	/* Protects @suspended and serializes accesses to the NAND device */
	struct mutex lock;
	unsigned int suspended : 1;
	wait_queue_head_t resume_wq;
	/* -1 means no target selected (see kernel-doc above) */
	int cur_cs;
	int read_retries;

	/* Externals */
	struct nand_controller *controller;
	struct nand_ecc_ctrl ecc;
	void *priv;
};
1269*4882a593Smuzhiyun
mtd_to_nand(struct mtd_info * mtd)1270*4882a593Smuzhiyun static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
1271*4882a593Smuzhiyun {
1272*4882a593Smuzhiyun return container_of(mtd, struct nand_chip, base.mtd);
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun
nand_to_mtd(struct nand_chip * chip)1275*4882a593Smuzhiyun static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
1276*4882a593Smuzhiyun {
1277*4882a593Smuzhiyun return &chip->base.mtd;
1278*4882a593Smuzhiyun }
1279*4882a593Smuzhiyun
nand_get_controller_data(struct nand_chip * chip)1280*4882a593Smuzhiyun static inline void *nand_get_controller_data(struct nand_chip *chip)
1281*4882a593Smuzhiyun {
1282*4882a593Smuzhiyun return chip->priv;
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun
/**
 * nand_set_controller_data() - Attach controller-private data to a NAND chip
 * @chip: NAND chip
 * @priv: controller-private data, retrievable with nand_get_controller_data()
 */
static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
{
	chip->priv = priv;
}
1289*4882a593Smuzhiyun
/**
 * nand_set_manufacturer_data() - Attach manufacturer-private data to a chip
 * @chip: NAND chip
 * @priv: manufacturer-private data, retrievable with
 *        nand_get_manufacturer_data()
 */
static inline void nand_set_manufacturer_data(struct nand_chip *chip,
					      void *priv)
{
	chip->manufacturer.priv = priv;
}
1295*4882a593Smuzhiyun
nand_get_manufacturer_data(struct nand_chip * chip)1296*4882a593Smuzhiyun static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
1297*4882a593Smuzhiyun {
1298*4882a593Smuzhiyun return chip->manufacturer.priv;
1299*4882a593Smuzhiyun }
1300*4882a593Smuzhiyun
/**
 * nand_set_flash_node() - Attach a device-tree node to a NAND chip
 * @chip: NAND chip
 * @np: device-tree node describing the flash
 *
 * The node is stored on the underlying MTD device via mtd_set_of_node().
 */
static inline void nand_set_flash_node(struct nand_chip *chip,
				       struct device_node *np)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	mtd_set_of_node(mtd, np);
}
1306*4882a593Smuzhiyun
/**
 * nand_get_flash_node() - Get the device-tree node attached to a NAND chip
 * @chip: NAND chip
 *
 * Return: the node stored on the underlying MTD device, as set by
 *         nand_set_flash_node().
 */
static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return mtd_get_of_node(mtd);
}
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun /**
1313*4882a593Smuzhiyun * nand_get_interface_config - Retrieve the current interface configuration
1314*4882a593Smuzhiyun * of a NAND chip
1315*4882a593Smuzhiyun * @chip: The NAND chip
1316*4882a593Smuzhiyun */
1317*4882a593Smuzhiyun static inline const struct nand_interface_config *
nand_get_interface_config(struct nand_chip * chip)1318*4882a593Smuzhiyun nand_get_interface_config(struct nand_chip *chip)
1319*4882a593Smuzhiyun {
1320*4882a593Smuzhiyun return chip->current_interface_config;
1321*4882a593Smuzhiyun }
1322*4882a593Smuzhiyun
/*
 * A helper for defining older NAND chips where the second ID byte fully
 * defined the chip, including the geometry (chip size, eraseblock size, page
 * size). All these chips have 512 bytes NAND page size.
 */
#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts)          \
	{ .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
	  .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }

/*
 * A helper for defining newer chips which report their page size and
 * eraseblock size via the extended ID bytes.
 *
 * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
 * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
 * device ID now only represented a particular total chip size (and voltage,
 * buswidth), and the page size, eraseblock size, and OOB size could vary while
 * using the same device ID.
 */
#define EXTENDED_ID_NAND(nm, devid, chipsz, opts)                      \
	{ .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
	  .options = (opts) }

/*
 * Initializer and accessors for the datasheet ECC requirement stored in
 * struct nand_flash_dev.ecc: correctability (strength_ds, in bits per step)
 * and ECC step size (step_ds, in bytes), e.g. NAND_ECC_INFO(4, 512) for
 * "4 bit ECC for each 512 bytes".
 */
#define NAND_ECC_INFO(_strength, _step)	\
	{ .strength_ds = (_strength), .step_ds = (_step) }
#define NAND_ECC_STRENGTH(type)		((type)->ecc.strength_ds)
#define NAND_ECC_STEP(type)		((type)->ecc.step_ds)
1350*4882a593Smuzhiyun
/**
 * struct nand_flash_dev - NAND Flash Device ID Structure
 * @name: a human-readable name of the NAND chip
 * @mfr_id: manufacturer ID part of the full chip ID array (refers the same
 *          memory address as ``id[0]``)
 * @dev_id: device ID part of the full chip ID array (refers the same memory
 *          address as ``id[1]``)
 * @id: full device ID array
 * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
 *            well as the eraseblock size) is determined from the extended NAND
 *            chip ID array)
 * @chipsize: total chip size in MiB
 * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
 * @options: stores various chip bit options
 * @id_len: The valid length of the @id.
 * @oobsize: OOB size
 * @ecc: ECC correctability and step information from the datasheet.
 * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
 *                   @ecc_strength_ds in nand_chip{}.
 * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
 *               @ecc_step_ds in nand_chip{}, also from the datasheet.
 *               For example, the "4bit ECC for each 512Byte" can be set with
 *               NAND_ECC_INFO(4, 512).
 */
struct nand_flash_dev {
	char *name;
	union {
		struct {
			uint8_t mfr_id;
			uint8_t dev_id;
		};
		uint8_t id[NAND_MAX_ID_LEN];
	};
	unsigned int pagesize;
	unsigned int chipsize;
	unsigned int erasesize;
	unsigned int options;
	uint16_t id_len;
	uint16_t oobsize;
	struct {
		uint16_t strength_ds;
		uint16_t step_ds;
	} ecc;
};
1396*4882a593Smuzhiyun
/* Create the bad block table for a NAND chip. */
int nand_create_bbt(struct nand_chip *chip);
1399*4882a593Smuzhiyun /*
1400*4882a593Smuzhiyun * Check if it is a SLC nand.
1401*4882a593Smuzhiyun * The !nand_is_slc() can be used to check the MLC/TLC nand chips.
1402*4882a593Smuzhiyun * We do not distinguish the MLC and TLC now.
1403*4882a593Smuzhiyun */
nand_is_slc(struct nand_chip * chip)1404*4882a593Smuzhiyun static inline bool nand_is_slc(struct nand_chip *chip)
1405*4882a593Smuzhiyun {
1406*4882a593Smuzhiyun WARN(nanddev_bits_per_cell(&chip->base) == 0,
1407*4882a593Smuzhiyun "chip->bits_per_cell is used uninitialized\n");
1408*4882a593Smuzhiyun return nanddev_bits_per_cell(&chip->base) == 1;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun /**
1412*4882a593Smuzhiyun * Check if the opcode's address should be sent only on the lower 8 bits
1413*4882a593Smuzhiyun * @command: opcode to check
1414*4882a593Smuzhiyun */
nand_opcode_8bits(unsigned int command)1415*4882a593Smuzhiyun static inline int nand_opcode_8bits(unsigned int command)
1416*4882a593Smuzhiyun {
1417*4882a593Smuzhiyun switch (command) {
1418*4882a593Smuzhiyun case NAND_CMD_READID:
1419*4882a593Smuzhiyun case NAND_CMD_PARAM:
1420*4882a593Smuzhiyun case NAND_CMD_GET_FEATURES:
1421*4882a593Smuzhiyun case NAND_CMD_SET_FEATURES:
1422*4882a593Smuzhiyun return 1;
1423*4882a593Smuzhiyun default:
1424*4882a593Smuzhiyun break;
1425*4882a593Smuzhiyun }
1426*4882a593Smuzhiyun return 0;
1427*4882a593Smuzhiyun }
1428*4882a593Smuzhiyun
/*
 * Check whether an ECC chunk that failed correction is in fact an erased
 * region; @threshold is the tolerated number of bitflips (see the
 * implementation in nand_base.c for the exact contract).
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int threshold);

/* Choose an ECC configuration from @caps that fits in @oobavail OOB bytes. */
int nand_ecc_choose_conf(struct nand_chip *chip,
			 const struct nand_ecc_caps *caps, int oobavail);

/* Default write_oob implementation */
int nand_write_oob_std(struct nand_chip *chip, int page);

/* Default read_oob implementation */
int nand_read_oob_std(struct nand_chip *chip, int page);

/* Stub used by drivers that do not support GET/SET FEATURES operations */
int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
				  u8 *subfeature_param);

/* read_page_raw implementations */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
		       int page);
int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page);

/* write_page_raw implementations */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page);
int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				   int oob_required, int page);

/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);

/*
 * NAND operation helpers: low-level building blocks (RESET, READID, STATUS,
 * ERASE, page program/read, column changes, raw data in/out) for drivers.
 */
int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len);
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit);
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_page, void *buf, unsigned int len);
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len);
int nand_prog_page_end_op(struct nand_chip *chip);
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len);
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page, const void *buf,
				unsigned int len, bool force_8bit);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit);

/* Scan and identify a NAND device */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
		       struct nand_flash_dev *ids);
1493*4882a593Smuzhiyun
/*
 * Scan for a NAND device: wrapper around nand_scan_with_ids() using only the
 * built-in flash ID tables (no caller-supplied ids).
 */
static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips)
{
	return nand_scan_with_ids(chip, max_chips, NULL);
}
1498*4882a593Smuzhiyun
/* Internal helper for board drivers which need to override command function */
void nand_wait_ready(struct nand_chip *chip);

/*
 * Free resources held by the NAND device, must be called on error after a
 * successful nand_scan().
 */
void nand_cleanup(struct nand_chip *chip);

/*
 * External helper for controller drivers that have to implement the WAITRDY
 * instruction and have no physical pin to check it.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
/* Forward declaration so the GPIO variant below needs no extra include. */
struct gpio_desc;
int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
		      unsigned long timeout_ms);

/* Select/deselect a NAND target. */
void nand_select_target(struct nand_chip *chip, unsigned int cs);
void nand_deselect_target(struct nand_chip *chip);

/* Bitops: copy @nbits bits from @src/@src_off to @dst/@dst_off. */
void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
		       unsigned int src_off, unsigned int nbits);
1524*4882a593Smuzhiyun
1525*4882a593Smuzhiyun /**
1526*4882a593Smuzhiyun * nand_get_data_buf() - Get the internal page buffer
1527*4882a593Smuzhiyun * @chip: NAND chip object
1528*4882a593Smuzhiyun *
1529*4882a593Smuzhiyun * Returns the pre-allocated page buffer after invalidating the cache. This
1530*4882a593Smuzhiyun * function should be used by drivers that do not want to allocate their own
1531*4882a593Smuzhiyun * bounce buffer and still need such a buffer for specific operations (most
1532*4882a593Smuzhiyun * commonly when reading OOB data only).
1533*4882a593Smuzhiyun *
1534*4882a593Smuzhiyun * Be careful to never call this function in the write/write_oob path, because
1535*4882a593Smuzhiyun * the core may have placed the data to be written out in this buffer.
1536*4882a593Smuzhiyun *
1537*4882a593Smuzhiyun * Return: pointer to the page cache buffer
1538*4882a593Smuzhiyun */
nand_get_data_buf(struct nand_chip * chip)1539*4882a593Smuzhiyun static inline void *nand_get_data_buf(struct nand_chip *chip)
1540*4882a593Smuzhiyun {
1541*4882a593Smuzhiyun chip->pagecache.page = -1;
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun return chip->data_buf;
1544*4882a593Smuzhiyun }
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun #endif /* __LINUX_MTD_RAWNAND_H */
1547