// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt) "nand: " fmt

#include <linux/module.h>
#include <linux/mtd/nand.h>

/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_bbt_is_initialized(nand)) {
		unsigned int entry;
		int status;

		entry = nanddev_bbt_pos_to_entry(nand, pos);
		status = nanddev_bbt_get_block_status(nand, entry);
		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if (nand->ops->isbad(nand, pos))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			nanddev_bbt_set_block_status(nand, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);
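
/*
 * Example (illustrative sketch, not part of this module): callers usually
 * start from an MTD byte offset and convert it to a position before querying
 * the block state. "offs" is a hypothetical caller-provided offset.
 *
 *	struct nand_pos pos;
 *
 *	nanddev_offs_to_pos(nand, offs, &pos);
 *	if (nanddev_isbad(nand, &pos))
 *		return -EIO;
 *
 * Thanks to the lazy retrieval above, only the first query on a block pays
 * the cost of nand->ops->isbad(); subsequent queries are served from the BBT.
 */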

/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Marks the block bad. This function updates the BBT if available and calls
 * the low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	if (nanddev_isbad(nand, pos))
		return 0;

	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = nanddev_bbt_update(nand);

out:
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
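
/*
 * Example (illustrative sketch): a layer that retires a block whose erase
 * failed would typically do:
 *
 *	ret = nanddev_erase(nand, &pos);
 *	if (ret)
 *		nanddev_markbad(nand, &pos);
 *
 * Note that nanddev_markbad() still returns 0 when writing the bad block
 * marker fails but the BBT update succeeds, since the block is then tracked
 * as worn in the in-memory table anyway.
 */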

/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed to by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
	unsigned int entry;
	int status;

	if (!nanddev_bbt_is_initialized(nand))
		return false;

	/* Return info from the table */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	status = nanddev_bbt_get_block_status(nand, entry);
	return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);

/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the block if it's neither bad nor reserved.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
		pr_warn("attempt to erase a bad/reserved block @%llx\n",
			nanddev_pos_to_offs(nand, pos));
		return -EIO;
	}

	return nand->ops->erase(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_erase);

/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * This is a simple mtd->_erase() implementation iterating over all blocks
 * concerned by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. NAND specialized layers should instead
 * implement their own wrapper around nanddev_mtd_erase() taking the
 * appropriate lock before calling nanddev_mtd_erase().
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, last;
	int ret;

	nanddev_offs_to_pos(nand, einfo->addr, &pos);
	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		ret = nanddev_erase(nand, &pos);
		if (ret) {
			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);

			return ret;
		}

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
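
/*
 * Example (illustrative sketch with hypothetical names): a specialized layer
 * wrapping this helper with its own lock, as recommended above. "my_dev" and
 * "my_mtd_to_dev()" stand in for the layer's own types and accessors.
 *
 *	static int my_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
 *	{
 *		struct my_dev *dev = my_mtd_to_dev(mtd);
 *		int ret;
 *
 *		mutex_lock(&dev->lock);
 *		ret = nanddev_mtd_erase(mtd, einfo);
 *		mutex_unlock(&dev->lock);
 *
 *		return ret;
 *	}
 *
 * mtd->_erase would then point to my_mtd_erase() instead of this helper.
 */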

/**
 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks
 *				  in a specific region of the NAND device
 * @mtd: MTD device
 * @offs: offset of the NAND region
 * @len: length of the NAND region
 *
 * Default implementation for mtd->_max_bad_blocks(). Only works if
 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
 *
 * Return: a positive number encoding the maximum number of bad eraseblocks
 * in a portion of memory, a negative error code otherwise.
 */
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, end;
	unsigned int max_bb = 0;

	if (!nand->memorg.max_bad_eraseblocks_per_lun)
		return -ENOTSUPP;

	nanddev_offs_to_pos(nand, offs + len, &end);

	for (nanddev_offs_to_pos(nand, offs, &pos);
	     nanddev_pos_cmp(&pos, &end) < 0;
	     nanddev_pos_next_lun(nand, &pos))
		max_bb += nand->memorg.max_bad_eraseblocks_per_lun;

	return max_bb;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
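
/*
 * Worked example: with max_bad_eraseblocks_per_lun = 20, a region touching
 * three LUNs (even partially) yields 3 * 20 = 60. The loop above assumes the
 * worst case, where every LUN overlapping [offs, offs + len) concentrates
 * its full bad-block budget inside that region.
 */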

/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner)
{
	struct mtd_info *mtd;
	struct nand_memory_organization *memorg;

	if (!nand || !ops)
		return -EINVAL;

	if (!ops->erase || !ops->markbad || !ops->isbad)
		return -EINVAL;

	mtd = nanddev_to_mtd(nand);
	memorg = nanddev_get_memorg(nand);

	if (!memorg->bits_per_cell || !memorg->pagesize ||
	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
	    !memorg->planes_per_lun || !memorg->luns_per_target ||
	    !memorg->ntargets)
		return -EINVAL;

	nand->rowconv.eraseblock_addr_shift =
			fls(memorg->pages_per_eraseblock - 1);
	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
				       nand->rowconv.eraseblock_addr_shift;

	nand->ops = ops;

	mtd->type = memorg->bits_per_cell == 1 ?
		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
	mtd->writesize = memorg->pagesize;
	mtd->writebufsize = memorg->pagesize;
	mtd->oobsize = memorg->oobsize;
	mtd->size = nanddev_size(nand);
	mtd->owner = owner;

	return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);
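
/*
 * Example (illustrative sketch with hypothetical names): a driver probe path
 * typically fills @nand->memorg from the detected chip, calls nanddev_init(),
 * and undoes it with nanddev_cleanup() on a later failure.
 *
 *	static const struct nand_ops my_ops = {
 *		.erase = my_erase,
 *		.markbad = my_markbad,
 *		.isbad = my_isbad,
 *	};
 *
 *	ret = nanddev_init(nand, &my_ops, THIS_MODULE);
 *	if (ret)
 *		return ret;
 *
 *	ret = mtd_device_register(nanddev_to_mtd(nand), NULL, 0);
 *	if (ret)
 *		nanddev_cleanup(nand);
 *
 *	return ret;
 */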

/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);

MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");