// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2018 Rockchip Electronics Co. Ltd. */

#include <linux/kernel.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "rkflash_blk.h"
#include "rkflash_debug.h"
#include "sfc_nand.h"
#include "sfc_nand_mtd.h"

#ifdef CONFIG_RK_SFC_NAND_MTD

static struct mtd_partition nand_parts[MAX_PART_COUNT];

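/* Map an mtd_info back to the snand_mtd_dev that embeds it. */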
static inline struct snand_mtd_dev *mtd_to_priv(struct mtd_info *ptr_mtd)
{
	return container_of(ptr_mtd, struct snand_mtd_dev, mtd);
}

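/*
 * Erase one block. @addr is a byte offset; the controller call takes a
 * page index, hence the shift by writesize_shift.
 */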
int sfc_nand_erase_mtd(struct mtd_info *mtd, u32 addr)
{
	int ret;

	ret = sfc_nand_erase_block(0, addr >> mtd->writesize_shift);
	if (ret) {
		rkflash_print_error("%s fail ret= %d\n", __func__, ret);
		ret = -EIO;
	}

	return ret;
}

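/*
 * Page-aligned write. Each page is staged in the DMA bounce buffer with
 * the OOB area padded to 0xff, then programmed as a raw page. OOB data
 * from the caller is not supported.
 */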
static int sfc_nand_write_mtd(struct mtd_info *mtd, loff_t to,
			      struct mtd_oob_ops *ops)
{
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	u8 *data = (u8 *)ops->datbuf;
	size_t remaining = ops->len;
	int ret = 0;

	rkflash_print_dio("%s addr= %llx len= %x\n", __func__, to, (u32)remaining);
	if ((to + remaining) > mtd->size || to & mtd->writesize_mask ||
	    remaining & mtd->writesize_mask || ops->ooblen) {
		rkflash_print_error("%s input error, %llx %x\n", __func__, to, (u32)remaining);

		return -EINVAL;
	}

	ops->retlen = 0;
	while (remaining) {
		memcpy(p_dev->dma_buf, data, mtd->writesize);
		memset(p_dev->dma_buf + mtd->writesize, 0xff, mtd->oobsize);
		ret = sfc_nand_prog_page_raw(0, to >> mtd->writesize_shift,
					     (u32 *)p_dev->dma_buf);
		if (ret != SFC_OK) {
			rkflash_print_error("%s addr %llx ret= %d\n",
					    __func__, to, ret);
			ret = -EIO;
			break;
		}

		data += mtd->writesize;
		ops->retlen += mtd->writesize;
		remaining -= mtd->writesize;
		to += mtd->writesize;
	}

	return ret;
}

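/*
 * Read with sub-page granularity. A hardware error aborts the transfer;
 * ECC failures are counted and reported as -EBADMSG once the loop ends,
 * while refresh hints bump the corrected counter and the bitflip count.
 */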
static int sfc_nand_read_mtd(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	u8 *data = (u8 *)ops->datbuf;
	size_t remaining = ops->len;
	int ret = 0;
	bool ecc_failed = false;
	size_t page, off, real_size;
	int max_bitflips = 0;

	rkflash_print_dio("%s addr= %llx len= %x\n", __func__, from, (u32)remaining);
	if ((from + remaining) > mtd->size || ops->ooblen) {
		rkflash_print_error("%s input error, from= %llx len= %x oob= %x\n",
				    __func__, from, (u32)remaining, (u32)ops->ooblen);

		return -EINVAL;
	}

	ops->retlen = 0;
	while (remaining) {
		page = from >> mtd->writesize_shift;
		off = from & mtd->writesize_mask;
		real_size = min_t(u32, remaining, mtd->writesize - off);

		ret = sfc_nand_read(page, (u32 *)data, off, real_size);
		if (ret == SFC_NAND_HW_ERROR) {
			rkflash_print_error("%s addr %llx ret= %d\n",
					    __func__, from, ret);
			ret = -EIO;
			break;
		} else if (ret == SFC_NAND_ECC_ERROR) {
			rkflash_print_error("%s addr %llx ret= %d\n",
					    __func__, from, ret);
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else if (ret == SFC_NAND_ECC_REFRESH) {
			rkflash_print_dio("%s addr %llx ret= %d\n",
					  __func__, from, ret);
			mtd->ecc_stats.corrected++;
			max_bitflips = 1;
		}

		ret = 0;
		data += real_size;
		ops->retlen += real_size;
		remaining -= real_size;
		from += real_size;
	}

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

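/*
 * Bad-block query backing mtd->_block_isbad. Prefers the in-memory BBT
 * and falls back to reading the on-flash bad-block marker, caching the
 * result in the BBT when one is present.
 */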
int sfc_nand_isbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);

	rkflash_print_dio("%s %llx\n", __func__, ofs);
	if (ofs & mtd->writesize_mask) {
		rkflash_print_error("%s %llx input error\n", __func__, ofs);

		return -EINVAL;
	}

	if (snanddev_bbt_is_initialized(p_dev)) {
		unsigned int entry;
		int status;

		entry = snanddev_bbt_pos_to_entry(p_dev, ofs);
		status = snanddev_bbt_get_block_status(p_dev, entry);
		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if (sfc_nand_check_bad_block(0, ofs >> mtd->writesize_shift))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			snanddev_bbt_set_block_status(p_dev, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	ret = (int)sfc_nand_check_bad_block(0, ofs >> mtd->writesize_shift);
	if (ret)
		pr_err("%s %llx is bad block\n", __func__, ofs);

	return ret;
}

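/*
 * Mark a block bad: erase it so the marker can be programmed, write the
 * bad-block marker, record the block as worn in the BBT, and finally
 * re-read the marker to confirm it stuck.
 */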
static int sfc_nand_markbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	unsigned int entry;

	rkflash_print_error("%s %llx\n", __func__, ofs);
	if (ofs & mtd->erasesize_mask) {
		rkflash_print_error("%s %llx input error\n", __func__, ofs);

		return -EINVAL;
	}

	if (sfc_nand_isbad_mtd(mtd, ofs))
		return 0;

	/* Erase block before marking it bad. */
	ret = sfc_nand_erase_block(0, ofs >> mtd->writesize_shift);
	if (ret)
		rkflash_print_error("%s erase fail ofs 0x%llx ret=%d\n",
				    __func__, ofs, ret);

	/* Mark bad. */
	ret = sfc_nand_mark_bad_block(0, ofs >> mtd->writesize_shift);
	if (ret)
		rkflash_print_error("%s mark fail ofs 0x%llx ret=%d\n",
				    __func__, ofs, ret);

	if (!snanddev_bbt_is_initialized(p_dev))
		goto out;

	entry = snanddev_bbt_pos_to_entry(p_dev, ofs);
	ret = snanddev_bbt_set_block_status(p_dev, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = snanddev_bbt_update(p_dev);
out:
	/* Recheck the marker to verify the block now reads back as bad. */
	if (sfc_nand_check_bad_block(0, ofs >> mtd->writesize_shift)) {
		mtd->ecc_stats.badblocks++;
		ret = 0;
	} else {
		rkflash_print_error("%s recheck fail ofs 0x%llx ret=%d\n",
				    __func__, ofs, ret);
		ret = -EIO;
	}

	return ret;
}

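/*
 * Erase a range of whole blocks, skipping any block the BBT already
 * records as worn or factory bad.
 */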
static int sfc_erase_mtd(struct mtd_info *mtd, struct erase_info *instr)
{
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	u64 addr, remaining;
	int ret = 0;

	mutex_lock(p_dev->lock);
	addr = instr->addr;
	remaining = instr->len;
	rkflash_print_dio("%s addr= %llx len= %llx\n", __func__, addr, remaining);
	if ((addr + remaining) > mtd->size || addr & mtd->erasesize_mask) {
		ret = -EINVAL;
		goto out;
	}

	while (remaining) {
		ret = snanddev_bbt_get_block_status(p_dev, addr >> mtd->erasesize_shift);
		if (ret == NAND_BBT_BLOCK_WORN ||
		    ret == NAND_BBT_BLOCK_FACTORY_BAD) {
			rkflash_print_error("attempt to erase a bad/reserved block @%llx\n",
					    addr >> mtd->erasesize_shift);
			/* Skip the block; don't leak the BBT status as a return value. */
			ret = 0;
			addr += mtd->erasesize;
			remaining -= mtd->erasesize;
			continue;
		}

		ret = sfc_nand_erase_mtd(mtd, addr);
		if (ret) {
			rkflash_print_error("%s fail addr 0x%llx ret=%d\n",
					    __func__, addr, ret);
			instr->fail_addr = addr;

			ret = -EIO;
			goto out;
		}

		addr += mtd->erasesize;
		remaining -= mtd->erasesize;
	}

out:
	mutex_unlock(p_dev->lock);

	return ret;
}

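/* Thin wrappers that serialize the MTD entry points with the shared device lock. */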
static int sfc_write_mtd(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	struct mtd_oob_ops ops;

	mutex_lock(p_dev->lock);
	memset(&ops, 0, sizeof(ops));
	ops.datbuf = (u8 *)buf;
	ops.len = len;
	ret = sfc_nand_write_mtd(mtd, to, &ops);
	*retlen = ops.retlen;
	mutex_unlock(p_dev->lock);

	return ret;
}

static int sfc_read_mtd(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	struct mtd_oob_ops ops;

	mutex_lock(p_dev->lock);
	memset(&ops, 0, sizeof(ops));
	ops.datbuf = buf;
	ops.len = len;
	ret = sfc_nand_read_mtd(mtd, from, &ops);
	*retlen = ops.retlen;
	mutex_unlock(p_dev->lock);

	return ret;
}

static int sfc_isbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);

	mutex_lock(p_dev->lock);
	ret = sfc_nand_isbad_mtd(mtd, ofs);
	mutex_unlock(p_dev->lock);

	return ret;
}

static int sfc_markbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);

	mutex_lock(p_dev->lock);
	ret = sfc_nand_markbad_mtd(mtd, ofs);
	mutex_unlock(p_dev->lock);

	return ret;
}

/*
 * If rk_partition is not supported and the layout is fixed, partitions
 * can be listed in struct def_nand_part, one entry per partition, e.g.:
 *	{"u-boot", 0x1000 * 512, 0x2000 * 512},
 * Note:
 * 1. Entry format is {name, size, offset}
 * 2. Unit: byte
 * 3. The last partition's 'size' may be set to 0xFFFFFFFF to claim all
 *    remaining space.
 */
static struct mtd_partition def_nand_part[] = {};
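
/*
 * Illustrative sketch only (names and offsets below are made up, not a
 * stock layout): a two-entry table where the trailing partition takes
 * whatever space is left, relying on the 0xFFFFFFFF expansion performed
 * in sfc_nand_mtd_init() below.
 *
 * static struct mtd_partition def_nand_part[] = {
 *	{ .name = "u-boot",   .size = 0x2000 * 512, .offset = 0x1000 * 512 },
 *	{ .name = "userdata", .size = 0xFFFFFFFF,   .offset = 0x3000 * 512 },
 * };
 */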

/**
 * sfc_nand_mtd_init - register the SPI NAND as an MTD device.
 * @p_dev: low-level SPI NAND device state.
 * @lock: mutex shared with the rkflash block-device front end.
 *
 * Fills in mtd_info from the chip geometry, sets up the bad-block table
 * and registers the device together with any entries in def_nand_part.
 */
int sfc_nand_mtd_init(struct SFNAND_DEV *p_dev, struct mutex *lock)
{
	int ret, i, part_num = 0;
	u64 capacity;
	struct snand_mtd_dev *nand = kzalloc(sizeof(*nand), GFP_KERNEL);

	if (!nand) {
		rkflash_print_error("%s %d alloc failed\n", __func__, __LINE__);
		return -ENOMEM;
	}

	nand->snand = p_dev;
	capacity = (u64)(1 << p_dev->capacity) << 9;
	nand->mtd.name = "spi-nand0";
	nand->mtd.type = MTD_NANDFLASH;
	nand->mtd.writesize = p_dev->page_size * SFC_NAND_SECTOR_SIZE;
	nand->mtd.flags = MTD_CAP_NANDFLASH;
	nand->mtd.size = capacity;
	nand->mtd._erase = sfc_erase_mtd;
	nand->mtd._read = sfc_read_mtd;
	nand->mtd._write = sfc_write_mtd;
	nand->mtd._block_isbad = sfc_isbad_mtd;
	nand->mtd._block_markbad = sfc_markbad_mtd;
	nand->mtd.oobsize = 16 * p_dev->page_size;
	nand->mtd.erasesize = p_dev->block_size * SFC_NAND_SECTOR_SIZE;
	nand->mtd.writebufsize = p_dev->page_size * SFC_NAND_SECTOR_SIZE;
	nand->mtd.erasesize_shift = ffs(nand->mtd.erasesize) - 1;
	nand->mtd.erasesize_mask = (1 << nand->mtd.erasesize_shift) - 1;
	nand->mtd.writesize_shift = ffs(nand->mtd.writesize) - 1;
	nand->mtd.writesize_mask = (1 << nand->mtd.writesize_shift) - 1;
	nand->mtd.bitflip_threshold = 1;
	nand->mtd.priv = nand;
	nand->lock = lock;
	nand->dma_buf = kmalloc(SFC_NAND_PAGE_MAX_SIZE, GFP_KERNEL | GFP_DMA);
	if (!nand->dma_buf) {
		rkflash_print_error("%s dma_buf alloc failed\n", __func__);
		ret = -ENOMEM;
		goto error_out;
	}

	nand->bbt.option |= NANDDEV_BBT_USE_FLASH;
	ret = snanddev_bbt_init(nand);
	if (ret) {
		rkflash_print_error("snanddev_bbt_init failed, ret= %d\n", ret);
		goto error_free_dma;
	}

	part_num = ARRAY_SIZE(def_nand_part);
	for (i = 0; i < part_num; i++) {
		nand_parts[i].name = kstrdup(def_nand_part[i].name, GFP_KERNEL);
		if (def_nand_part[i].size == 0xFFFFFFFF)
			def_nand_part[i].size = capacity -
						def_nand_part[i].offset;
		nand_parts[i].offset = def_nand_part[i].offset;
		nand_parts[i].size = def_nand_part[i].size;
		nand_parts[i].mask_flags = 0;
	}

	ret = mtd_device_register(&nand->mtd, nand_parts, part_num);
	if (ret) {
		pr_err("%s register mtd fail %d\n", __func__, ret);
	} else {
		pr_info("%s register mtd success\n", __func__);

		return 0;
	}

error_free_dma:
	kfree(nand->dma_buf);
error_out:
	kfree(nand);

	return ret;
}

#endif