// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#ifndef __UBOOT__
#include <linux/module.h>
#endif
#include <linux/mtd/nand.h>
17*4882a593Smuzhiyun /**
18*4882a593Smuzhiyun * nanddev_isbad() - Check if a block is bad
19*4882a593Smuzhiyun * @nand: NAND device
20*4882a593Smuzhiyun * @pos: position pointing to the block we want to check
21*4882a593Smuzhiyun *
22*4882a593Smuzhiyun * Return: true if the block is bad, false otherwise.
23*4882a593Smuzhiyun */
nanddev_isbad(struct nand_device * nand,const struct nand_pos * pos)24*4882a593Smuzhiyun bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
25*4882a593Smuzhiyun {
26*4882a593Smuzhiyun if (nanddev_bbt_is_initialized(nand)) {
27*4882a593Smuzhiyun unsigned int entry;
28*4882a593Smuzhiyun int status;
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun entry = nanddev_bbt_pos_to_entry(nand, pos);
31*4882a593Smuzhiyun status = nanddev_bbt_get_block_status(nand, entry);
32*4882a593Smuzhiyun /* Lazy block status retrieval */
33*4882a593Smuzhiyun if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
34*4882a593Smuzhiyun if (nand->ops->isbad(nand, pos))
35*4882a593Smuzhiyun status = NAND_BBT_BLOCK_FACTORY_BAD;
36*4882a593Smuzhiyun else
37*4882a593Smuzhiyun status = NAND_BBT_BLOCK_GOOD;
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun nanddev_bbt_set_block_status(nand, entry, status);
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun if (status == NAND_BBT_BLOCK_WORN ||
43*4882a593Smuzhiyun status == NAND_BBT_BLOCK_FACTORY_BAD)
44*4882a593Smuzhiyun return true;
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun return false;
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun return nand->ops->isbad(nand, pos);
50*4882a593Smuzhiyun }
51*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nanddev_isbad);
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun /**
54*4882a593Smuzhiyun * nanddev_markbad() - Mark a block as bad
55*4882a593Smuzhiyun * @nand: NAND device
56*4882a593Smuzhiyun * @pos: position of the block to mark bad
57*4882a593Smuzhiyun *
58*4882a593Smuzhiyun * Mark a block bad. This function is updating the BBT if available and
59*4882a593Smuzhiyun * calls the low-level markbad hook (nand->ops->markbad()).
60*4882a593Smuzhiyun *
61*4882a593Smuzhiyun * Return: 0 in case of success, a negative error code otherwise.
62*4882a593Smuzhiyun */
nanddev_markbad(struct nand_device * nand,const struct nand_pos * pos)63*4882a593Smuzhiyun int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun struct mtd_info *mtd = nanddev_to_mtd(nand);
66*4882a593Smuzhiyun unsigned int entry;
67*4882a593Smuzhiyun int ret = 0;
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun if (nanddev_isbad(nand, pos))
70*4882a593Smuzhiyun return 0;
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun ret = nand->ops->markbad(nand, pos);
73*4882a593Smuzhiyun if (ret)
74*4882a593Smuzhiyun pr_warn("failed to write BBM to block @%llx (err = %d)\n",
75*4882a593Smuzhiyun nanddev_pos_to_offs(nand, pos), ret);
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun if (!nanddev_bbt_is_initialized(nand))
78*4882a593Smuzhiyun goto out;
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun entry = nanddev_bbt_pos_to_entry(nand, pos);
81*4882a593Smuzhiyun ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
82*4882a593Smuzhiyun if (ret)
83*4882a593Smuzhiyun goto out;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun ret = nanddev_bbt_update(nand);
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun out:
88*4882a593Smuzhiyun if (!ret)
89*4882a593Smuzhiyun mtd->ecc_stats.badblocks++;
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun return ret;
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nanddev_markbad);
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun /**
96*4882a593Smuzhiyun * nanddev_isreserved() - Check whether an eraseblock is reserved or not
97*4882a593Smuzhiyun * @nand: NAND device
98*4882a593Smuzhiyun * @pos: NAND position to test
99*4882a593Smuzhiyun *
100*4882a593Smuzhiyun * Checks whether the eraseblock pointed by @pos is reserved or not.
101*4882a593Smuzhiyun *
102*4882a593Smuzhiyun * Return: true if the eraseblock is reserved, false otherwise.
103*4882a593Smuzhiyun */
nanddev_isreserved(struct nand_device * nand,const struct nand_pos * pos)104*4882a593Smuzhiyun bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun unsigned int entry;
107*4882a593Smuzhiyun int status;
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun if (!nanddev_bbt_is_initialized(nand))
110*4882a593Smuzhiyun return false;
111*4882a593Smuzhiyun
112*4882a593Smuzhiyun /* Return info from the table */
113*4882a593Smuzhiyun entry = nanddev_bbt_pos_to_entry(nand, pos);
114*4882a593Smuzhiyun status = nanddev_bbt_get_block_status(nand, entry);
115*4882a593Smuzhiyun return status == NAND_BBT_BLOCK_RESERVED;
116*4882a593Smuzhiyun }
117*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nanddev_isreserved);
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun /**
120*4882a593Smuzhiyun * nanddev_erase() - Erase a NAND portion
121*4882a593Smuzhiyun * @nand: NAND device
122*4882a593Smuzhiyun * @pos: position of the block to erase
123*4882a593Smuzhiyun *
124*4882a593Smuzhiyun * Erases the block if it's not bad.
125*4882a593Smuzhiyun *
126*4882a593Smuzhiyun * Return: 0 in case of success, a negative error code otherwise.
127*4882a593Smuzhiyun */
nanddev_erase(struct nand_device * nand,const struct nand_pos * pos)128*4882a593Smuzhiyun int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
131*4882a593Smuzhiyun pr_warn("attempt to erase a bad/reserved block @%llx\n",
132*4882a593Smuzhiyun nanddev_pos_to_offs(nand, pos));
133*4882a593Smuzhiyun return -EIO;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun return nand->ops->erase(nand, pos);
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nanddev_erase);
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun /**
141*4882a593Smuzhiyun * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
142*4882a593Smuzhiyun * @mtd: MTD device
143*4882a593Smuzhiyun * @einfo: erase request
144*4882a593Smuzhiyun *
145*4882a593Smuzhiyun * This is a simple mtd->_erase() implementation iterating over all blocks
146*4882a593Smuzhiyun * concerned by @einfo and calling nand->ops->erase() on each of them.
147*4882a593Smuzhiyun *
148*4882a593Smuzhiyun * Note that mtd->_erase should not be directly assigned to this helper,
149*4882a593Smuzhiyun * because there's no locking here. NAND specialized layers should instead
150*4882a593Smuzhiyun * implement there own wrapper around nanddev_mtd_erase() taking the
151*4882a593Smuzhiyun * appropriate lock before calling nanddev_mtd_erase().
152*4882a593Smuzhiyun *
153*4882a593Smuzhiyun * Return: 0 in case of success, a negative error code otherwise.
154*4882a593Smuzhiyun */
nanddev_mtd_erase(struct mtd_info * mtd,struct erase_info * einfo)155*4882a593Smuzhiyun int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
156*4882a593Smuzhiyun {
157*4882a593Smuzhiyun struct nand_device *nand = mtd_to_nanddev(mtd);
158*4882a593Smuzhiyun struct nand_pos pos, last;
159*4882a593Smuzhiyun int ret;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun nanddev_offs_to_pos(nand, einfo->addr, &pos);
162*4882a593Smuzhiyun nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
163*4882a593Smuzhiyun while (nanddev_pos_cmp(&pos, &last) <= 0) {
164*4882a593Smuzhiyun ret = nanddev_erase(nand, &pos);
165*4882a593Smuzhiyun if (ret) {
166*4882a593Smuzhiyun einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun return ret;
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun nanddev_pos_next_eraseblock(nand, &pos);
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun return 0;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /**
179*4882a593Smuzhiyun * nanddev_init() - Initialize a NAND device
180*4882a593Smuzhiyun * @nand: NAND device
181*4882a593Smuzhiyun * @ops: NAND device operations
182*4882a593Smuzhiyun * @owner: NAND device owner
183*4882a593Smuzhiyun *
184*4882a593Smuzhiyun * Initializes a NAND device object. Consistency checks are done on @ops and
185*4882a593Smuzhiyun * @nand->memorg. Also takes care of initializing the BBT.
186*4882a593Smuzhiyun *
187*4882a593Smuzhiyun * Return: 0 in case of success, a negative error code otherwise.
188*4882a593Smuzhiyun */
nanddev_init(struct nand_device * nand,const struct nand_ops * ops,struct module * owner)189*4882a593Smuzhiyun int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
190*4882a593Smuzhiyun struct module *owner)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun struct mtd_info *mtd = nanddev_to_mtd(nand);
193*4882a593Smuzhiyun struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun if (!nand || !ops)
196*4882a593Smuzhiyun return -EINVAL;
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun if (!ops->erase || !ops->markbad || !ops->isbad)
199*4882a593Smuzhiyun return -EINVAL;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun if (!memorg->bits_per_cell || !memorg->pagesize ||
202*4882a593Smuzhiyun !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
203*4882a593Smuzhiyun !memorg->planes_per_lun || !memorg->luns_per_target ||
204*4882a593Smuzhiyun !memorg->ntargets)
205*4882a593Smuzhiyun return -EINVAL;
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun nand->rowconv.eraseblock_addr_shift =
208*4882a593Smuzhiyun fls(memorg->pages_per_eraseblock - 1);
209*4882a593Smuzhiyun nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
210*4882a593Smuzhiyun nand->rowconv.eraseblock_addr_shift;
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun nand->ops = ops;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun mtd->type = memorg->bits_per_cell == 1 ?
215*4882a593Smuzhiyun MTD_NANDFLASH : MTD_MLCNANDFLASH;
216*4882a593Smuzhiyun mtd->flags = MTD_CAP_NANDFLASH;
217*4882a593Smuzhiyun mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
218*4882a593Smuzhiyun mtd->writesize = memorg->pagesize;
219*4882a593Smuzhiyun mtd->writebufsize = memorg->pagesize;
220*4882a593Smuzhiyun mtd->oobsize = memorg->oobsize;
221*4882a593Smuzhiyun mtd->size = nanddev_size(nand);
222*4882a593Smuzhiyun mtd->owner = owner;
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun return nanddev_bbt_init(nand);
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nanddev_init);
227*4882a593Smuzhiyun
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (!nanddev_bbt_is_initialized(nand))
		return;

	nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);
240*4882a593Smuzhiyun
MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");