// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2018 Rockchip Electronics Co. Ltd. */

#include <linux/kernel.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "rkflash_blk.h"
#include "rkflash_debug.h"
#include "sfc_nand.h"
#include "sfc_nand_mtd.h"

#ifdef CONFIG_RK_SFC_NAND_MTD

static struct mtd_partition nand_parts[MAX_PART_COUNT];

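/* Map an mtd_info embedded in a snand_mtd_dev back to its parent structure. */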
static inline struct snand_mtd_dev *mtd_to_priv(struct mtd_info *ptr_mtd)
{
	return (struct snand_mtd_dev *)((char *)ptr_mtd -
			offsetof(struct snand_mtd_dev, mtd));
}

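/*
 * Erase the block containing byte address @addr; the low-level erase API
 * takes a page index, hence the shift by writesize_shift.
 */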
int sfc_nand_erase_mtd(struct mtd_info *mtd, u32 addr)
{
	int ret;

	ret = sfc_nand_erase_block(0, addr >> mtd->writesize_shift);
	if (ret) {
		rkflash_print_error("%s fail ret= %d\n", __func__, ret);
		ret = -EIO;
	}

	return ret;
}

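/*
 * Page-aligned write: each page is staged in the DMA bounce buffer with the
 * OOB area filled with 0xff, then programmed with the raw page-program call.
 */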
static int sfc_nand_write_mtd(struct mtd_info *mtd, loff_t to,
			      struct mtd_oob_ops *ops)
{
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	u8 *data = (u8 *)ops->datbuf;
	size_t remaining = ops->len;
	u32 ret = 0;

	rkflash_print_dio("%s addr= %llx len= %x\n", __func__, to, (u32)remaining);
	if ((to + remaining) > mtd->size || to & mtd->writesize_mask ||
	    remaining & mtd->writesize_mask || ops->ooblen) {
		rkflash_print_error("%s input error, %llx %x\n", __func__, to, (u32)remaining);

		return -EINVAL;
	}

	ops->retlen = 0;
	while (remaining) {
		memcpy(p_dev->dma_buf, data, mtd->writesize);
		memset(p_dev->dma_buf + mtd->writesize, 0xff, mtd->oobsize);
		ret = sfc_nand_prog_page_raw(0, to >> mtd->writesize_shift,
					     (u32 *)p_dev->dma_buf);
		if (ret != SFC_OK) {
			rkflash_print_error("%s addr %llx ret= %d\n",
					    __func__, to, ret);
			ret = -EIO;
			break;
		}

		data += mtd->writesize;
		ops->retlen += mtd->writesize;
		remaining -= mtd->writesize;
		to += mtd->writesize;
	}

	return ret;
}

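/*
 * Read with ECC accounting: a hardware error aborts the transfer, an
 * uncorrectable ECC error is reported as -EBADMSG after the full read, and a
 * refresh hint bumps the corrected counter and the returned max_bitflips.
 */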
static int sfc_nand_read_mtd(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	u8 *data = (u8 *)ops->datbuf;
	size_t remaining = ops->len;
	u32 ret = 0;
	bool ecc_failed = false;
	size_t page, off, real_size;
	int max_bitflips = 0;

	rkflash_print_dio("%s addr= %llx len= %x\n", __func__, from, (u32)remaining);
	if ((from + remaining) > mtd->size || ops->ooblen) {
		rkflash_print_error("%s input error, from= %llx len= %x oob= %x\n",
				    __func__, from, (u32)remaining, (u32)ops->ooblen);

		return -EINVAL;
	}

	ops->retlen = 0;
	while (remaining) {
		page = from >> mtd->writesize_shift;
		off = from & mtd->writesize_mask;
		real_size = min_t(u32, remaining, mtd->writesize - off);

		ret = sfc_nand_read(page, (u32 *)data, off, real_size);
		if (ret == SFC_NAND_HW_ERROR) {
			rkflash_print_error("%s addr %llx ret= %d\n",
					    __func__, from, ret);
			ret = -EIO;
			break;
		} else if (ret == SFC_NAND_ECC_ERROR) {
			rkflash_print_error("%s addr %llx ret= %d\n",
					    __func__, from, ret);
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else if (ret == SFC_NAND_ECC_REFRESH) {
			rkflash_print_dio("%s addr %llx ret= %d\n",
					  __func__, from, ret);
			mtd->ecc_stats.corrected += 1;
			max_bitflips = 1;
		}

		ret = 0;
		data += real_size;
		ops->retlen += real_size;
		remaining -= real_size;
		from += real_size;
	}

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

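/*
 * Bad-block check: consult the in-memory BBT when it is initialized (reading
 * the flash marker lazily on first access), otherwise query the flash marker
 * directly.
 */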
int sfc_nand_isbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);

	rkflash_print_dio("%s %llx\n", __func__, ofs);
	if (ofs & mtd->writesize_mask) {
		rkflash_print_error("%s %llx input error\n", __func__, ofs);

		return -EINVAL;
	}

	if (snanddev_bbt_is_initialized(p_dev)) {
		unsigned int entry;
		int status;

		entry = snanddev_bbt_pos_to_entry(p_dev, ofs);
		status = snanddev_bbt_get_block_status(p_dev, entry);
		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if ((int)sfc_nand_check_bad_block(0, ofs >> mtd->writesize_shift))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			snanddev_bbt_set_block_status(p_dev, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	ret = (int)sfc_nand_check_bad_block(0, ofs >> mtd->writesize_shift);
	if (ret)
		pr_err("%s %llx is bad block\n", __func__, ofs);

	return ret;
}

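/*
 * Mark a block bad: erase it first, set the flash bad-block marker, record
 * the block as worn in the BBT when one is in use, then verify the marker.
 */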
static int sfc_nand_markbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	u32 ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	unsigned int entry;

	rkflash_print_error("%s %llx\n", __func__, ofs);
	if (ofs & mtd->erasesize_mask) {
		rkflash_print_error("%s %llx input error\n", __func__, ofs);

		return -EINVAL;
	}

	if (sfc_nand_isbad_mtd(mtd, ofs))
		return 0;

	/* Erase block before marking it bad. */
	ret = sfc_nand_erase_block(0, ofs >> mtd->writesize_shift);
	if (ret)
		rkflash_print_error("%s erase fail ofs 0x%llx ret=%d\n",
				    __func__, ofs, ret);

	/* Mark bad. */
	ret = sfc_nand_mark_bad_block(0, ofs >> mtd->writesize_shift);
	if (ret)
		rkflash_print_error("%s mark fail ofs 0x%llx ret=%d\n",
				    __func__, ofs, ret);

	if (!snanddev_bbt_is_initialized(p_dev))
		goto out;

	entry = snanddev_bbt_pos_to_entry(p_dev, ofs);
	ret = snanddev_bbt_set_block_status(p_dev, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = snanddev_bbt_update(p_dev);
out:
	/* Mark bad recheck */
	if (sfc_nand_check_bad_block(0, ofs >> mtd->writesize_shift)) {
		mtd->ecc_stats.badblocks++;
		ret = 0;
	} else {
		rkflash_print_error("%s recheck fail ofs 0x%llx ret=%d\n",
				    __func__, ofs, ret);
		ret = -EIO;
	}

	return ret;
}

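/*
 * Erase a range of blocks, skipping blocks the BBT reports as worn or
 * factory bad; access is serialized with the shared flash lock.
 */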
static int sfc_erase_mtd(struct mtd_info *mtd, struct erase_info *instr)
{
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	struct snand_mtd_dev *nand = mtd_to_snanddev(mtd);
	u64 addr, remaining;
	int ret = 0;

	mutex_lock(p_dev->lock);
	addr = instr->addr;
	remaining = instr->len;
	rkflash_print_dio("%s addr= %llx len= %llx\n", __func__, addr, remaining);
	if ((addr + remaining) > mtd->size || addr & mtd->erasesize_mask) {
		ret = -EINVAL;
		goto out;
	}

	while (remaining) {
		ret = snanddev_bbt_get_block_status(nand, addr >> mtd->erasesize_shift);
		if (ret == NAND_BBT_BLOCK_WORN ||
		    ret == NAND_BBT_BLOCK_FACTORY_BAD) {
			rkflash_print_error("attempt to erase a bad/reserved block @%llx\n",
					    addr >> mtd->erasesize_shift);
			addr += mtd->erasesize;
			remaining -= mtd->erasesize;
			continue;
		}

		ret = sfc_nand_erase_mtd(mtd, addr);
		if (ret) {
			rkflash_print_error("%s fail addr 0x%llx ret=%d\n",
					    __func__, addr, ret);
			instr->fail_addr = addr;

			ret = -EIO;
			goto out;
		}

		addr += mtd->erasesize;
		remaining -= mtd->erasesize;
	}

out:
	mutex_unlock(p_dev->lock);

	return ret;
}

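/*
 * The remaining mtd_info hooks are thin wrappers that take the shared flash
 * lock around the helpers above.
 */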
static int sfc_write_mtd(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	struct mtd_oob_ops ops;

	mutex_lock(p_dev->lock);
	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.datbuf = (u8 *)buf;
	ops.len = len;
	ret = sfc_nand_write_mtd(mtd, to, &ops);
	*retlen = ops.retlen;
	mutex_unlock(p_dev->lock);

	return ret;
}

static int sfc_read_mtd(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);
	struct mtd_oob_ops ops;

	mutex_lock(p_dev->lock);
	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.datbuf = buf;
	ops.len = len;
	ret = sfc_nand_read_mtd(mtd, from, &ops);
	*retlen = ops.retlen;
	mutex_unlock(p_dev->lock);

	return ret;
}

static int sfc_isbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	int ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);

	mutex_lock(p_dev->lock);
	ret = sfc_nand_isbad_mtd(mtd, ofs);
	mutex_unlock(p_dev->lock);

	return ret;
}

static int sfc_markbad_mtd(struct mtd_info *mtd, loff_t ofs)
{
	u32 ret;
	struct snand_mtd_dev *p_dev = mtd_to_priv(mtd);

	mutex_lock(p_dev->lock);
	ret = sfc_nand_markbad_mtd(mtd, ofs);
	mutex_unlock(p_dev->lock);

	return ret;
}

/*
 * If rk_partition is not supported and the partition layout is fixed, the
 * layout can be defined in def_nand_part by adding entries like the
 * following example:
 * {"u-boot", 0x1000 * 512, 0x2000 * 512},
 * Note:
 * 1. New partition format: {name, size, offset}
 * 2. Unit: Byte
 * 3. The last partition 'size' can be set to 0xFFFFFFFF to use all of the
 *    remaining space.
 */
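/*
 * A purely illustrative sketch of such a table (the partition names, sizes
 * and offsets below are made up, not taken from any real layout):
 *
 * static struct mtd_partition def_nand_part[] = {
 *	{ .name = "u-boot", .size = 0x1000 * 512, .offset = 0x2000 * 512 },
 *	{ .name = "rootfs", .size = 0xFFFFFFFF,   .offset = 0x3000 * 512 },
 * };
 */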
static struct mtd_partition def_nand_part[] = {};

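/*
 * Create and register the mtd device: fill in mtd_info from the probed chip
 * geometry, allocate the DMA bounce buffer, initialize the bad block table
 * and register the static partition table.
 */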
int sfc_nand_mtd_init(struct SFNAND_DEV *p_dev, struct mutex *lock)
{
	int ret, i, part_num = 0;
	int capacity;
	struct snand_mtd_dev *nand = kzalloc(sizeof(*nand), GFP_KERNEL);

	if (!nand) {
		rkflash_print_error("%s %d alloc failed\n", __func__, __LINE__);
		return -ENOMEM;
	}

	nand->snand = p_dev;
	capacity = (1 << p_dev->capacity) << 9;
	nand->mtd.name = "spi-nand0";
	nand->mtd.type = MTD_NANDFLASH;
	nand->mtd.writesize = p_dev->page_size * SFC_NAND_SECTOR_SIZE;
	nand->mtd.flags = MTD_CAP_NANDFLASH;
	nand->mtd.size = capacity;
	nand->mtd._erase = sfc_erase_mtd;
	nand->mtd._read = sfc_read_mtd;
	nand->mtd._write = sfc_write_mtd;
	nand->mtd._block_isbad = sfc_isbad_mtd;
	nand->mtd._block_markbad = sfc_markbad_mtd;
	nand->mtd.oobsize = 16 * p_dev->page_size;
	nand->mtd.erasesize = p_dev->block_size * SFC_NAND_SECTOR_SIZE;
	nand->mtd.writebufsize = p_dev->page_size * SFC_NAND_SECTOR_SIZE;
	nand->mtd.erasesize_shift = ffs(nand->mtd.erasesize) - 1;
	nand->mtd.erasesize_mask = (1 << nand->mtd.erasesize_shift) - 1;
	nand->mtd.writesize_shift = ffs(nand->mtd.writesize) - 1;
	nand->mtd.writesize_mask = (1 << nand->mtd.writesize_shift) - 1;
	nand->mtd.bitflip_threshold = 1;
	nand->mtd.priv = nand;
	nand->lock = lock;
	nand->dma_buf = kmalloc(SFC_NAND_PAGE_MAX_SIZE, GFP_KERNEL | GFP_DMA);
	if (!nand->dma_buf) {
		rkflash_print_error("%s dma_buf alloc failed\n", __func__);
		ret = -ENOMEM;
		goto error_out;
	}

	nand->bbt.option |= NANDDEV_BBT_USE_FLASH;
	ret = snanddev_bbt_init(nand);
	if (ret) {
		rkflash_print_error("snanddev_bbt_init failed, ret= %d\n", ret);
		goto error_free_dma;
	}

	part_num = ARRAY_SIZE(def_nand_part);
	for (i = 0; i < part_num; i++) {
		nand_parts[i].name =
			kstrdup(def_nand_part[i].name,
				GFP_KERNEL);
		if (def_nand_part[i].size == 0xFFFFFFFF)
			def_nand_part[i].size = capacity -
				def_nand_part[i].offset;
		nand_parts[i].offset =
			def_nand_part[i].offset;
		nand_parts[i].size =
			def_nand_part[i].size;
		nand_parts[i].mask_flags = 0;
	}

	ret = mtd_device_register(&nand->mtd, nand_parts, part_num);
	if (ret) {
		pr_err("%s register mtd fail %d\n", __func__, ret);
	} else {
		pr_info("%s register mtd success\n", __func__);

		return 0;
	}

error_free_dma:
	kfree(nand->dma_buf);
error_out:
	kfree(nand);

	return ret;
}

#endif