/*
 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#define pr_fmt(fmt) "rk_nand: " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/semaphore.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/version.h>
#include <linux/soc/rockchip/rk_vendor_storage.h>

#include "rk_nand_blk.h"
#include "rk_ftl_api.h"

#define PART_READONLY 0x85
#define PART_WRITEONLY 0x86
#define PART_NO_ACCESS 0x87

static unsigned long totle_read_data;
static unsigned long totle_write_data;
static unsigned long totle_read_count;
static unsigned long totle_write_count;
static int rk_nand_dev_initialised;
static unsigned long rk_ftl_gc_do;
static DECLARE_WAIT_QUEUE_HEAD(rknand_thread_wait);
static unsigned long rk_ftl_gc_jiffies;

static char *mtd_read_temp_buffer;
#define MTD_RW_SECTORS (512)

#define DISABLE_WRITE _IO('V', 0)
#define ENABLE_WRITE _IO('V', 1)
#define DISABLE_READ _IO('V', 2)
#define ENABLE_READ _IO('V', 3)
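
/*
 * /proc/rknand: dump the FTL's internal statistics (via rknand_proc_ftlread())
 * followed by this driver's own transfer counters. The data counters are kept
 * in 512-byte sectors, so ">> 1" converts them to KiB for display.
 */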
static int rknand_proc_show(struct seq_file *m, void *v)
{
	m->count = rknand_proc_ftlread(m->buf);
	seq_printf(m, "Totle Read %ld KB\n", totle_read_data >> 1);
	seq_printf(m, "Totle Write %ld KB\n", totle_write_data >> 1);
	seq_printf(m, "totle_write_count %ld\n", totle_write_count);
	seq_printf(m, "totle_read_count %ld\n", totle_read_count);
	return 0;
}

static int rknand_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rknand_proc_show, PDE_DATA(inode));
}

static const struct proc_ops rknand_proc_fops = {
	.proc_open = rknand_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
};

static int rknand_create_procfs(void)
{
	struct proc_dir_entry *ent;

	ent = proc_create_data("rknand", 0444, NULL, &rknand_proc_fops,
			       (void *)0);
	if (!ent)
		return -1;

	return 0;
}

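/*
 * g_rk_nand_ops_mutex serializes all calls into the FTL between the block
 * request path and the background garbage-collection thread.
 */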
static struct mutex g_rk_nand_ops_mutex;

static void rknand_device_lock_init(void)
{
	mutex_init(&g_rk_nand_ops_mutex);
}

void rknand_device_lock(void)
{
	mutex_lock(&g_rk_nand_ops_mutex);
}

int rknand_device_trylock(void)
{
	return mutex_trylock(&g_rk_nand_ops_mutex);
}

void rknand_device_unlock(void)
{
	mutex_unlock(&g_rk_nand_ops_mutex);
}

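/*
 * Perform a single read or write through the FTL. 'start' and 'nsector' are
 * in 512-byte sectors relative to the partition; per-partition access flags
 * (read-only, write-only, no-access) are enforced before the transfer.
 */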
static int nand_dev_transfer(struct nand_blk_dev *dev,
			     unsigned long start,
			     unsigned long nsector,
			     char *buf,
			     int cmd)
{
	int ret;

	if (dev->disable_access ||
	    ((cmd == WRITE) && dev->readonly) ||
	    ((cmd == READ) && dev->writeonly)) {
		return BLK_STS_IOERR;
	}

	start += dev->off_size;

	switch (cmd) {
	case READ:
		totle_read_data += nsector;
		totle_read_count++;
		ret = FtlRead(0, start, nsector, buf);
		if (ret)
			ret = BLK_STS_IOERR;
		break;

	case WRITE:
		totle_write_data += nsector;
		totle_write_count++;
		ret = FtlWrite(0, start, nsector, buf);
		if (ret)
			ret = BLK_STS_IOERR;
		break;

	default:
		ret = BLK_STS_IOERR;
		break;
	}

	return ret;
}

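/*
 * Check whether all segments of the request form one virtually contiguous
 * lowmem buffer. If so, *pbuf is pointed at it and the FTL can use it
 * directly; otherwise the caller falls back to the bounce buffer.
 */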
static int req_check_buffer_align(struct request *req, char **pbuf)
{
	int nr_vec = 0;
	struct bio_vec bv;
	struct req_iterator iter;
	char *buffer;
	void *firstbuf = NULL;
	char *nextbuffer = NULL;

	rq_for_each_segment(bv, req, iter) {
		/* highmem pages: return 0 and use the kernel bounce buffer */
		if (PageHighMem(bv.bv_page))
			return 0;

		buffer = page_address(bv.bv_page) + bv.bv_offset;
		if (!buffer)
			return 0;
		if (!firstbuf)
			firstbuf = buffer;
		nr_vec++;
		if (nextbuffer && nextbuffer != buffer)
			return 0;
		nextbuffer = buffer + bv.bv_len;
	}
	*pbuf = firstbuf;
	return 1;
}

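/*
 * Handle one request: DISCARD goes straight to FtlDiscard(); READ and WRITE
 * are issued through nand_dev_transfer(). When the request's pages are not
 * directly usable, data is staged through mtd_read_temp_buffer and copied
 * to or from the bio segments with kmap_atomic().
 */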
static blk_status_t do_blktrans_all_request(struct nand_blk_dev *dev,
					    struct request *req)
{
	unsigned long block, nsect;
	char *buf = NULL, *page_buf;
	struct req_iterator rq_iter;
	struct bio_vec bvec;
	int ret = BLK_STS_IOERR;
	unsigned long totle_nsect;

	block = blk_rq_pos(req);
	nsect = blk_rq_cur_bytes(req) >> 9;
	totle_nsect = (req->__data_len) >> 9;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		if (FtlDiscard(block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		buf = mtd_read_temp_buffer;
		req_check_buffer_align(req, &buf);
		ret = nand_dev_transfer(dev, block, totle_nsect, buf, REQ_OP_READ);
		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);

				memcpy(page_buf + bvec.bv_offset, p, bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;
	case REQ_OP_WRITE:
		buf = mtd_read_temp_buffer;
		req_check_buffer_align(req, &buf);

		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);
				memcpy(p, page_buf + bvec.bv_offset, bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}

		ret = nand_dev_transfer(dev, block, totle_nsect, buf, REQ_OP_WRITE);

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;

	default:
		return BLK_STS_IOERR;
	}
}

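/*
 * Pop the next request from the driver's private list (queue_lock held by
 * the caller) and mark it started for blk-mq accounting.
 */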
static struct request *rk_nand_next_request(struct nand_blk_dev *dev)
{
	struct nand_blk_ops *nand_ops = dev->nand_ops;
	struct request *rq;

	rq = list_first_entry_or_null(&nand_ops->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		return rq;
	}

	return NULL;
}

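/*
 * Drain the request list. queue_lock is dropped around each transfer so new
 * requests can be queued while the FTL (serialized by the device mutex) is
 * busy with the current one.
 */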
static void rk_nand_blktrans_work(struct nand_blk_dev *dev)
	__releases(&dev->nand_ops->queue_lock)
	__acquires(&dev->nand_ops->queue_lock)
{
	struct request *req = NULL;

	while (1) {
		blk_status_t res;

		req = rk_nand_next_request(dev);
		if (!req)
			break;

		spin_unlock_irq(&dev->nand_ops->queue_lock);

		rknand_device_lock();
		res = do_blktrans_all_request(dev, req);
		rknand_device_unlock();

		if (!blk_update_request(req, res, req->__data_len)) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		spin_lock_irq(&dev->nand_ops->queue_lock);
	}
}

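/*
 * blk-mq ->queue_rq(): enqueue the request on the private list and process
 * it synchronously (the queue is created with BLK_MQ_F_BLOCKING), then wake
 * the GC thread so it can run while the device is otherwise idle.
 */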
static blk_status_t rk_nand_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct nand_blk_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	rk_ftl_gc_do = 0;
	spin_lock_irq(&dev->nand_ops->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->nand_ops->rq_list);
	rk_nand_blktrans_work(dev);
	spin_unlock_irq(&dev->nand_ops->queue_lock);

	/* wake up gc thread */
	rk_ftl_gc_do = 1;
	wake_up(&dev->nand_ops->thread_wq);

	return BLK_STS_OK;
}

static const struct blk_mq_ops rk_nand_mq_ops = {
	.queue_rq = rk_nand_queue_rq,
};

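/*
 * Background thread: run FTL garbage collection while the device is idle and
 * periodically write back the FTL cache. The polling interval
 * (rk_ftl_gc_jiffies) adapts to how much GC work is left and how long the
 * request queue has been empty.
 */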
static int nand_gc_thread(void *arg)
{
	struct nand_blk_ops *nand_ops = arg;
	int ftl_gc_status = 0;
	int req_empty_times = 0;
	int gc_done_times = 0;

	rk_ftl_gc_jiffies = HZ / 10;
	rk_ftl_gc_do = 1;

	while (!nand_ops->quit) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue(&nand_ops->thread_wq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		if (rk_ftl_gc_do) {
			/* do garbage collect at idle state */
			if (rknand_device_trylock()) {
				ftl_gc_status = rk_ftl_garbage_collect(1, 0);
				rknand_device_unlock();
				rk_ftl_gc_jiffies = HZ / 50;
				if (ftl_gc_status == 0) {
					gc_done_times++;
					if (gc_done_times > 10)
						rk_ftl_gc_jiffies = 10 * HZ;
					else
						rk_ftl_gc_jiffies = 1 * HZ;
				} else {
					gc_done_times = 0;
				}
			} else {
				rk_ftl_gc_jiffies = 1 * HZ;
			}
			req_empty_times++;
			if (req_empty_times < 10)
				rk_ftl_gc_jiffies = HZ / 50;
			/* cache write back after 100ms */
			if (req_empty_times >= 5 && req_empty_times < 7) {
				rknand_device_lock();
				rk_ftl_cache_write_back();
				rknand_device_unlock();
			}
		} else {
			req_empty_times = 0;
			rk_ftl_gc_jiffies = 1 * HZ;
		}
		wait_event_timeout(nand_ops->thread_wq, nand_ops->quit,
				   rk_ftl_gc_jiffies);
		remove_wait_queue(&nand_ops->thread_wq, &wait);
		continue;
	}
	pr_info("nand gc quit\n");
	nand_ops->nand_th_quited = 1;
	complete_and_exit(&nand_ops->thread_exit, 0);
	return 0;
}

static int rknand_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void rknand_release(struct gendisk *disk, fmode_t mode)
{
}

static int rknand_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd,
			unsigned long arg)
{
	struct nand_blk_dev *dev = bdev->bd_disk->private_data;

	switch (cmd) {
	case ENABLE_WRITE:
		dev->disable_access = 0;
		dev->readonly = 0;
		set_disk_ro(dev->blkcore_priv, 0);
		return 0;

	case DISABLE_WRITE:
		dev->readonly = 1;
		set_disk_ro(dev->blkcore_priv, 1);
		return 0;

	case ENABLE_READ:
		dev->disable_access = 0;
		dev->writeonly = 0;
		return 0;

	case DISABLE_READ:
		dev->writeonly = 1;
		return 0;
	default:
		return -ENOTTY;
	}
}

const struct block_device_operations nand_blktrans_ops = {
	.owner = THIS_MODULE,
	.open = rknand_open,
	.release = rknand_release,
	.ioctl = rknand_ioctl,
};

static struct nand_blk_ops mytr = {
	.name = "rknand",
	.major = 31,
	.minorbits = 0,
	.owner = THIS_MODULE,
};

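/*
 * Allocate a gendisk for one FTL partition, apply its access-type flags
 * (read-only / write-only / no-access), attach it to the shared blk-mq
 * queue and register it with the block layer.
 */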
static int nand_add_dev(struct nand_blk_ops *nand_ops, struct nand_part *part)
{
	struct nand_blk_dev *dev;
	struct gendisk *gd;

	if (part->size == 0)
		return -1;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	gd = alloc_disk(1 << nand_ops->minorbits);
	if (!gd) {
		kfree(dev);
		return -ENOMEM;
	}
	nand_ops->rq->queuedata = dev;
	dev->nand_ops = nand_ops;
	dev->size = part->size;
	dev->off_size = part->offset;
	dev->devnum = nand_ops->last_dev_index;
	list_add_tail(&dev->list, &nand_ops->devs);
	nand_ops->last_dev_index++;

	gd->major = nand_ops->major;
	gd->first_minor = (dev->devnum) << nand_ops->minorbits;

	gd->fops = &nand_blktrans_ops;

	gd->flags = GENHD_FL_EXT_DEVT;
	gd->minors = 255;
	snprintf(gd->disk_name,
		 sizeof(gd->disk_name),
		 "%s%d",
		 nand_ops->name,
		 dev->devnum);

	set_capacity(gd, dev->size);

	gd->private_data = dev;
	dev->blkcore_priv = gd;
	gd->queue = nand_ops->rq;

	if (part->type == PART_NO_ACCESS)
		dev->disable_access = 1;

	if (part->type == PART_READONLY)
		dev->readonly = 1;

	if (part->type == PART_WRITEONLY)
		dev->writeonly = 1;

	if (dev->readonly)
		set_disk_ro(gd, 1);

	device_add_disk(g_nand_device, gd, NULL);

	return 0;
}

static int nand_remove_dev(struct nand_blk_dev *dev)
{
	struct gendisk *gd;

	gd = dev->blkcore_priv;
	list_del(&dev->list);
	gd->queue = NULL;
	del_gendisk(gd);
	put_disk(gd);
	kfree(dev);

	return 0;
}

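/*
 * Register the block driver: reserve the major number, allocate the bounce
 * buffer, set up a single-queue blk-mq request queue, start the GC thread,
 * publish one disk spanning the whole FTL capacity and hook up procfs and
 * vendor storage.
 */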
static int nand_blk_register(struct nand_blk_ops *nand_ops)
{
	struct nand_part part;
	int ret;

	rk_nand_schedule_enable_config(1);
	nand_ops->quit = 0;
	nand_ops->nand_th_quited = 0;

	ret = register_blkdev(nand_ops->major, nand_ops->name);
	if (ret)
		return ret;

	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512, GFP_KERNEL | GFP_DMA);
	if (!mtd_read_temp_buffer) {
		ret = -ENOMEM;
		goto mtd_buffer_error;
	}

	init_completion(&nand_ops->thread_exit);
	init_waitqueue_head(&nand_ops->thread_wq);
	rknand_device_lock_init();

	/* Create the request queue */
	spin_lock_init(&nand_ops->queue_lock);
	INIT_LIST_HEAD(&nand_ops->rq_list);

	nand_ops->tag_set = kzalloc(sizeof(*nand_ops->tag_set), GFP_KERNEL);
	if (!nand_ops->tag_set) {
		ret = -ENOMEM;
		goto tag_set_error;
	}

	nand_ops->rq = blk_mq_init_sq_queue(nand_ops->tag_set, &rk_nand_mq_ops, 1,
					    BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (IS_ERR(nand_ops->rq)) {
		ret = PTR_ERR(nand_ops->rq);
		nand_ops->rq = NULL;
		goto rq_init_error;
	}

	blk_queue_max_hw_sectors(nand_ops->rq, MTD_RW_SECTORS);
	blk_queue_max_segments(nand_ops->rq, MTD_RW_SECTORS);

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nand_ops->rq);
	blk_queue_max_discard_sectors(nand_ops->rq, UINT_MAX >> 9);
	/* discard_granularity is configured to one NAND page size (32KB) */
	nand_ops->rq->limits.discard_granularity = 64 << 9;

	INIT_LIST_HEAD(&nand_ops->devs);
	kthread_run(nand_gc_thread, (void *)nand_ops, "rknand_gc");

	nand_ops->last_dev_index = 0;
	part.offset = 0;
	part.size = rk_ftl_get_capacity();
	part.type = 0;
	part.name[0] = 0;
	nand_add_dev(nand_ops, &part);

	rknand_create_procfs();
	rk_ftl_storage_sys_init();

	ret = rk_ftl_vendor_storage_init();
	if (!ret) {
		rk_vendor_register(rk_ftl_vendor_read, rk_ftl_vendor_write);
		rknand_vendor_storage_init();
		pr_info("rknand vendor storage init ok!\n");
	} else {
		pr_info("rknand vendor storage init failed!\n");
	}

	return 0;

rq_init_error:
	kfree(nand_ops->tag_set);
tag_set_error:
	kfree(mtd_read_temp_buffer);
	mtd_read_temp_buffer = NULL;
mtd_buffer_error:
	unregister_blkdev(nand_ops->major, nand_ops->name);

	return ret;
}

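/*
 * Tear down in reverse order: stop the GC thread, remove every registered
 * disk, then release the queue and the major number.
 */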
static void nand_blk_unregister(struct nand_blk_ops *nand_ops)
{
	struct list_head *this, *next;

	if (!rk_nand_dev_initialised)
		return;
	nand_ops->quit = 1;
	wake_up(&nand_ops->thread_wq);
	wait_for_completion(&nand_ops->thread_exit);
	list_for_each_safe(this, next, &nand_ops->devs) {
		struct nand_blk_dev *dev
			= list_entry(this, struct nand_blk_dev, list);

		nand_remove_dev(dev);
	}
	blk_cleanup_queue(nand_ops->rq);
	unregister_blkdev(nand_ops->major, nand_ops->name);
}

void rknand_dev_flush(void)
{
	if (!rk_nand_dev_initialised)
		return;
	rknand_device_lock();
	rk_ftl_cache_write_back();
	rknand_device_unlock();
	pr_info("Nand flash flush ok!\n");
}

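/*
 * Probe-time entry point: check the NAND controller register mapping, bring
 * up the FTL and register the block layer glue above.
 */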
int __init rknand_dev_init(void)
{
	int ret;
	void __iomem *nandc0;
	void __iomem *nandc1;

	rknand_get_reg_addr((unsigned long *)&nandc0, (unsigned long *)&nandc1);
	if (!nandc0)
		return -1;

	ret = rk_ftl_init();
	if (ret) {
		pr_err("rk_ftl_init fail\n");
		return -1;
	}

	ret = nand_blk_register(&mytr);
	if (ret) {
		pr_err("nand_blk_register fail\n");
		return -1;
	}

	rk_nand_dev_initialised = 1;
	return ret;
}

int rknand_dev_exit(void)
{
	if (!rk_nand_dev_initialised)
		return -1;
	rk_nand_dev_initialised = 0;
	if (rknand_device_trylock()) {
		rk_ftl_cache_write_back();
		rknand_device_unlock();
	}
	nand_blk_unregister(&mytr);
	rk_ftl_de_init();
	pr_info("nand_blk_dev_exit:OK\n");
	return 0;
}

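/*
 * Suspend takes the device mutex and keeps it held until resume, so no FTL
 * operation can run while the controller is powered down; resume restores
 * the controller state before releasing the lock.
 */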
void rknand_dev_suspend(void)
{
	if (!rk_nand_dev_initialised)
		return;
	pr_info("rk_nand_suspend\n");
	rk_nand_schedule_enable_config(0);
	rknand_device_lock();
	rk_nand_suspend();
}

void rknand_dev_resume(void)
{
	if (!rk_nand_dev_initialised)
		return;
	pr_info("rk_nand_resume\n");
	rk_nand_resume();
	rknand_device_unlock();
	rk_nand_schedule_enable_config(1);
}

void rknand_dev_shutdown(void)
{
	pr_info("rknand_shutdown...\n");
	if (!rk_nand_dev_initialised)
		return;
	if (mytr.quit == 0) {
		mytr.quit = 1;
		wake_up(&mytr.thread_wq);
		wait_for_completion(&mytr.thread_exit);
		rk_ftl_de_init();
	}
	pr_info("rknand_shutdown:OK\n");
}