xref: /OK3568_Linux_fs/kernel/drivers/rk_nand/rk_nand_blk.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#define pr_fmt(fmt) "rk_nand: " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/semaphore.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/version.h>
#include <linux/soc/rockchip/rk_vendor_storage.h>

#include "rk_nand_blk.h"
#include "rk_ftl_api.h"

#define PART_READONLY 0x85
#define PART_WRITEONLY 0x86
#define PART_NO_ACCESS 0x87

static unsigned long totle_read_data;
static unsigned long totle_write_data;
static unsigned long totle_read_count;
static unsigned long totle_write_count;
static int rk_nand_dev_initialised;
static unsigned long rk_ftl_gc_do;
static DECLARE_WAIT_QUEUE_HEAD(rknand_thread_wait);
static unsigned long rk_ftl_gc_jiffies;

static char *mtd_read_temp_buffer;
#define MTD_RW_SECTORS (512)

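/*
 * Access-control ioctls for the rknand disk, handled in rknand_ioctl()
 * below.  A userspace caller could toggle them roughly like this
 * (illustrative sketch only; the device node name is an assumption based
 * on the "rknand%d" disk name set up in nand_add_dev()):
 *
 *   int fd = open("/dev/rknand0", O_RDWR);
 *   ioctl(fd, DISABLE_WRITE, 0);    // disk becomes read-only
 *   ioctl(fd, ENABLE_WRITE, 0);     // disk is writable again
 *   close(fd);
 */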
#define DISABLE_WRITE _IO('V', 0)
#define ENABLE_WRITE _IO('V', 1)
#define DISABLE_READ _IO('V', 2)
#define ENABLE_READ _IO('V', 3)
static int rknand_proc_show(struct seq_file *m, void *v)
{
	m->count = rknand_proc_ftlread(m->buf);
	seq_printf(m, "Totle Read %ld KB\n", totle_read_data >> 1);
	seq_printf(m, "Totle Write %ld KB\n", totle_write_data >> 1);
	seq_printf(m, "totle_write_count %ld\n", totle_write_count);
	seq_printf(m, "totle_read_count %ld\n", totle_read_count);
	return 0;
}

static int rknand_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rknand_proc_show, PDE_DATA(inode));
}

static const struct proc_ops rknand_proc_fops = {
	.proc_open	= rknand_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

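/*
 * Expose the counters above, together with the FTL's own state dump from
 * rknand_proc_ftlread(), as /proc/rknand.  A quick `cat /proc/rknand`
 * prints the transferred totals in KB plus the read/write request counts.
 */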
static int rknand_create_procfs(void)
{
	struct proc_dir_entry *ent;

	ent = proc_create_data("rknand", 0444, NULL, &rknand_proc_fops,
			       (void *)0);
	if (!ent)
		return -1;

	return 0;
}

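/*
 * g_rk_nand_ops_mutex serializes every call into the FTL: the block I/O
 * path, the background GC thread, cache flush and suspend/resume all take
 * it before touching the flash.
 */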
static struct mutex g_rk_nand_ops_mutex;

static void rknand_device_lock_init(void)
{
	mutex_init(&g_rk_nand_ops_mutex);
}

void rknand_device_lock(void)
{
	mutex_lock(&g_rk_nand_ops_mutex);
}

int rknand_device_trylock(void)
{
	return mutex_trylock(&g_rk_nand_ops_mutex);
}

void rknand_device_unlock(void)
{
	mutex_unlock(&g_rk_nand_ops_mutex);
}

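/*
 * Transfer 'nsector' 512-byte sectors starting at sector 'start' of 'dev'
 * to or from 'buf'.  The partition offset is applied here, the per-device
 * access flags are enforced, and the request is forwarded to the FTL via
 * FtlRead()/FtlWrite().  Returns a BLK_STS_* status value.
 */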
static int nand_dev_transfer(struct nand_blk_dev *dev,
			     unsigned long start,
			     unsigned long nsector,
			     char *buf,
			     int cmd)
{
	int ret;

	if (dev->disable_access ||
	    ((cmd == WRITE) && dev->readonly) ||
	    ((cmd == READ) && dev->writeonly)) {
		return BLK_STS_IOERR;
	}

	start += dev->off_size;

	switch (cmd) {
	case READ:
		totle_read_data += nsector;
		totle_read_count++;
		ret = FtlRead(0, start, nsector, buf);
		if (ret)
			ret = BLK_STS_IOERR;
		break;

	case WRITE:
		totle_write_data += nsector;
		totle_write_count++;
		ret = FtlWrite(0, start, nsector, buf);
		if (ret)
			ret = BLK_STS_IOERR;
		break;

	default:
		ret = BLK_STS_IOERR;
		break;
	}

	return ret;
}

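/*
 * Walk the bio segments of a request and check whether they form one
 * physically contiguous lowmem buffer.  If they do, *pbuf is pointed at
 * the first segment and 1 is returned so the FTL can use the bio pages
 * directly; otherwise 0 is returned and the caller falls back to the
 * mtd_read_temp_buffer bounce buffer.
 */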
static int req_check_buffer_align(struct request *req, char **pbuf)
{
	int nr_vec = 0;
	struct bio_vec bv;
	struct req_iterator iter;
	char *buffer;
	void *firstbuf = NULL;
	char *nextbuffer = NULL;

	rq_for_each_segment(bv, req, iter) {
		/* highmem pages: return 0 and fall back to the kernel bounce buffer */
		if (PageHighMem(bv.bv_page))
			return 0;

		buffer = page_address(bv.bv_page) + bv.bv_offset;
		if (!buffer)
			return 0;
		if (!firstbuf)
			firstbuf = buffer;
		nr_vec++;
		if (nextbuffer && nextbuffer != buffer)
			return 0;
		nextbuffer = buffer + bv.bv_len;
	}
	*pbuf = firstbuf;
	return 1;
}

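/*
 * Handle one request synchronously.  Sector counts are in 512-byte units
 * (blk_rq_pos() / __data_len >> 9).  Discards go straight to FtlDiscard().
 * For reads and writes the data moves through the bio pages directly when
 * req_check_buffer_align() finds them contiguous; otherwise it is staged
 * in mtd_read_temp_buffer and copied segment by segment with kmap_atomic().
 */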
static blk_status_t do_blktrans_all_request(struct nand_blk_dev *dev,
					    struct request *req)
{
	unsigned long block, nsect;
	char *buf = NULL, *page_buf;
	struct req_iterator rq_iter;
	struct bio_vec bvec;
	int ret = BLK_STS_IOERR;
	unsigned long totle_nsect;

	block = blk_rq_pos(req);
	nsect = blk_rq_cur_bytes(req) >> 9;
	totle_nsect = (req->__data_len) >> 9;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		if (FtlDiscard(block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		buf = mtd_read_temp_buffer;
		req_check_buffer_align(req, &buf);
		ret = nand_dev_transfer(dev, block, totle_nsect, buf, REQ_OP_READ);
		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);

				memcpy(page_buf + bvec.bv_offset, p, bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;
	case REQ_OP_WRITE:
		buf = mtd_read_temp_buffer;
		req_check_buffer_align(req, &buf);

		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);
				memcpy(p, page_buf + bvec.bv_offset, bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}

		ret = nand_dev_transfer(dev, block, totle_nsect, buf, REQ_OP_WRITE);

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;

	default:
		return BLK_STS_IOERR;
	}
}

static struct request *rk_nand_next_request(struct nand_blk_dev *dev)
{
	struct nand_blk_ops *nand_ops = dev->nand_ops;
	struct request *rq;

	rq = list_first_entry_or_null(&nand_ops->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		return rq;
	}

	return NULL;
}

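/*
 * Drain the shared rq_list.  queue_lock is dropped while each request is
 * processed so the (potentially slow) FTL transfer does not run under the
 * spinlock; the device mutex still serializes it against the GC thread.
 */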
static void rk_nand_blktrans_work(struct nand_blk_dev *dev)
	__releases(&dev->nand_ops->queue_lock)
	__acquires(&dev->nand_ops->queue_lock)
{
	struct request *req = NULL;

	while (1) {
		blk_status_t res;

		req = rk_nand_next_request(dev);
		if (!req)
			break;

		spin_unlock_irq(&dev->nand_ops->queue_lock);

		rknand_device_lock();
		res = do_blktrans_all_request(dev, req);
		rknand_device_unlock();

		if (!blk_update_request(req, res, req->__data_len)) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		spin_lock_irq(&dev->nand_ops->queue_lock);
	}
}

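/*
 * blk-mq ->queue_rq() handler.  The queue is created with a single
 * hardware queue and BLK_MQ_F_BLOCKING, so the request is simply appended
 * to rq_list and dispatched inline; the GC thread is woken afterwards so
 * it knows the device just finished a burst of I/O.
 */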
static blk_status_t rk_nand_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct nand_blk_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	rk_ftl_gc_do = 0;
	spin_lock_irq(&dev->nand_ops->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->nand_ops->rq_list);
	rk_nand_blktrans_work(dev);
	spin_unlock_irq(&dev->nand_ops->queue_lock);

	/* wake up gc thread */
	rk_ftl_gc_do = 1;
	wake_up(&dev->nand_ops->thread_wq);

	return BLK_STS_OK;
}

static const struct blk_mq_ops rk_nand_mq_ops = {
	.queue_rq	= rk_nand_queue_rq,
};

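/*
 * Background garbage-collection thread.  While the device is idle it runs
 * one FTL GC pass per wakeup, polling at HZ/50 for the first idle
 * iterations and backing off to 1 s and eventually 10 s once GC keeps
 * reporting nothing left to do.  After roughly 100 ms of idleness (5 fast
 * iterations) the FTL write cache is flushed as well.  rk_ftl_gc_do is
 * cleared by rk_nand_queue_rq() while a request is being queued, which
 * skips GC for that iteration and resets the idle counter.
 */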
static int nand_gc_thread(void *arg)
{
	struct nand_blk_ops *nand_ops = arg;
	int ftl_gc_status = 0;
	int req_empty_times = 0;
	int gc_done_times = 0;

	rk_ftl_gc_jiffies = HZ / 10;
	rk_ftl_gc_do = 1;

	while (!nand_ops->quit) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue(&nand_ops->thread_wq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		if (rk_ftl_gc_do) {
			/* do garbage collection while the device is idle */
			if (rknand_device_trylock()) {
				ftl_gc_status = rk_ftl_garbage_collect(1, 0);
				rknand_device_unlock();
				rk_ftl_gc_jiffies = HZ / 50;
				if (ftl_gc_status == 0) {
					gc_done_times++;
					if (gc_done_times > 10)
						rk_ftl_gc_jiffies = 10 * HZ;
					else
						rk_ftl_gc_jiffies = 1 * HZ;
				} else {
					gc_done_times = 0;
				}
			} else {
				rk_ftl_gc_jiffies = 1 * HZ;
			}
			req_empty_times++;
			if (req_empty_times < 10)
				rk_ftl_gc_jiffies = HZ / 50;
			/* write the FTL cache back after ~100 ms of idle time */
			if (req_empty_times >= 5 && req_empty_times < 7) {
				rknand_device_lock();
				rk_ftl_cache_write_back();
				rknand_device_unlock();
			}
		} else {
			req_empty_times = 0;
			rk_ftl_gc_jiffies = 1 * HZ;
		}
		wait_event_timeout(nand_ops->thread_wq, nand_ops->quit,
				   rk_ftl_gc_jiffies);
		remove_wait_queue(&nand_ops->thread_wq, &wait);
		continue;
	}
	pr_info("nand gc quited\n");
	nand_ops->nand_th_quited = 1;
	complete_and_exit(&nand_ops->thread_exit, 0);
	return 0;
}

static int rknand_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void rknand_release(struct gendisk *disk, fmode_t mode)
{
}

static int rknand_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd,
			unsigned long arg)
{
	struct nand_blk_dev *dev = bdev->bd_disk->private_data;

	switch (cmd) {
	case ENABLE_WRITE:
		dev->disable_access = 0;
		dev->readonly = 0;
		set_disk_ro(dev->blkcore_priv, 0);
		return 0;

	case DISABLE_WRITE:
		dev->readonly = 1;
		set_disk_ro(dev->blkcore_priv, 1);
		return 0;

	case ENABLE_READ:
		dev->disable_access = 0;
		dev->writeonly = 0;
		return 0;

	case DISABLE_READ:
		dev->writeonly = 1;
		return 0;
	default:
		return -ENOTTY;
	}
}

const struct block_device_operations nand_blktrans_ops = {
	.owner = THIS_MODULE,
	.open = rknand_open,
	.release = rknand_release,
	.ioctl = rknand_ioctl,
};

static struct nand_blk_ops mytr = {
	.name = "rknand",
	.major = 31,
	.minorbits = 0,
	.owner = THIS_MODULE,
};

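/*
 * Create one gendisk for a nand_part.  part->offset and part->size are in
 * 512-byte sectors; the partition type selects the read-only, write-only
 * or no-access policy enforced in nand_dev_transfer() and rknand_ioctl().
 */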
static int nand_add_dev(struct nand_blk_ops *nand_ops, struct nand_part *part)
{
	struct nand_blk_dev *dev;
	struct gendisk *gd;

	if (part->size == 0)
		return -1;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	gd = alloc_disk(1 << nand_ops->minorbits);
	if (!gd) {
		kfree(dev);
		return -ENOMEM;
	}
	nand_ops->rq->queuedata = dev;
	dev->nand_ops = nand_ops;
	dev->size = part->size;
	dev->off_size = part->offset;
	dev->devnum = nand_ops->last_dev_index;
	list_add_tail(&dev->list, &nand_ops->devs);
	nand_ops->last_dev_index++;

	gd->major = nand_ops->major;
	gd->first_minor = (dev->devnum) << nand_ops->minorbits;

	gd->fops = &nand_blktrans_ops;

	gd->flags = GENHD_FL_EXT_DEVT;
	gd->minors = 255;
	snprintf(gd->disk_name,
		 sizeof(gd->disk_name),
		 "%s%d",
		 nand_ops->name,
		 dev->devnum);

	set_capacity(gd, dev->size);

	gd->private_data = dev;
	dev->blkcore_priv = gd;
	gd->queue = nand_ops->rq;

	if (part->type == PART_NO_ACCESS)
		dev->disable_access = 1;

	if (part->type == PART_READONLY)
		dev->readonly = 1;

	if (part->type == PART_WRITEONLY)
		dev->writeonly = 1;

	if (dev->readonly)
		set_disk_ro(gd, 1);

	device_add_disk(g_nand_device, gd, NULL);

	return 0;
}

static int nand_remove_dev(struct nand_blk_dev *dev)
{
	struct gendisk *gd;

	gd = dev->blkcore_priv;
	list_del(&dev->list);
	gd->queue = NULL;
	del_gendisk(gd);
	put_disk(gd);
	kfree(dev);

	return 0;
}

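/*
 * Bring the block layer up: register major 31, allocate the
 * MTD_RW_SECTORS * 512 byte (256 KiB) DMA-able bounce buffer, create a
 * single-hw-queue blk-mq queue with discard support (32 KiB granularity),
 * start the GC kthread, publish one disk spanning the full FTL capacity,
 * then hook up procfs and the vendor storage interface.
 */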
static int nand_blk_register(struct nand_blk_ops *nand_ops)
{
	struct nand_part part;
	int ret;

	rk_nand_schedule_enable_config(1);
	nand_ops->quit = 0;
	nand_ops->nand_th_quited = 0;

	ret = register_blkdev(nand_ops->major, nand_ops->name);
	if (ret)
		return ret;

	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512, GFP_KERNEL | GFP_DMA);
	if (!mtd_read_temp_buffer) {
		ret = -ENOMEM;
		goto mtd_buffer_error;
	}

	init_completion(&nand_ops->thread_exit);
	init_waitqueue_head(&nand_ops->thread_wq);
	rknand_device_lock_init();

	/* Create the request queue */
	spin_lock_init(&nand_ops->queue_lock);
	INIT_LIST_HEAD(&nand_ops->rq_list);

	nand_ops->tag_set = kzalloc(sizeof(*nand_ops->tag_set), GFP_KERNEL);
	if (!nand_ops->tag_set) {
		ret = -ENOMEM;
		goto tag_set_error;
	}

	nand_ops->rq = blk_mq_init_sq_queue(nand_ops->tag_set, &rk_nand_mq_ops, 1,
					    BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (IS_ERR(nand_ops->rq)) {
		ret = PTR_ERR(nand_ops->rq);
		nand_ops->rq = NULL;
		goto rq_init_error;
	}

	blk_queue_max_hw_sectors(nand_ops->rq, MTD_RW_SECTORS);
	blk_queue_max_segments(nand_ops->rq, MTD_RW_SECTORS);

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nand_ops->rq);
	blk_queue_max_discard_sectors(nand_ops->rq, UINT_MAX >> 9);
	/* discard granularity is configured to one NAND page size (32 KB) */
	nand_ops->rq->limits.discard_granularity = 64 << 9;

	INIT_LIST_HEAD(&nand_ops->devs);
	kthread_run(nand_gc_thread, (void *)nand_ops, "rknand_gc");

	nand_ops->last_dev_index = 0;
	part.offset = 0;
	part.size = rk_ftl_get_capacity();
	part.type = 0;
	part.name[0] = 0;
	nand_add_dev(nand_ops, &part);

	rknand_create_procfs();
	rk_ftl_storage_sys_init();

	ret = rk_ftl_vendor_storage_init();
	if (!ret) {
		rk_vendor_register(rk_ftl_vendor_read, rk_ftl_vendor_write);
		rknand_vendor_storage_init();
		pr_info("rknand vendor storage init ok !\n");
	} else {
		pr_info("rknand vendor storage init failed !\n");
	}

	return 0;

rq_init_error:
	kfree(nand_ops->tag_set);
tag_set_error:
	kfree(mtd_read_temp_buffer);
	mtd_read_temp_buffer = NULL;
mtd_buffer_error:
	unregister_blkdev(nand_ops->major, nand_ops->name);

	return ret;
}

static void nand_blk_unregister(struct nand_blk_ops *nand_ops)
{
	struct list_head *this, *next;

	if (!rk_nand_dev_initialised)
		return;
	nand_ops->quit = 1;
	wake_up(&nand_ops->thread_wq);
	wait_for_completion(&nand_ops->thread_exit);
	list_for_each_safe(this, next, &nand_ops->devs) {
		struct nand_blk_dev *dev
			= list_entry(this, struct nand_blk_dev, list);

		nand_remove_dev(dev);
	}
	blk_cleanup_queue(nand_ops->rq);
	unregister_blkdev(nand_ops->major, nand_ops->name);
}

void rknand_dev_flush(void)
{
	if (!rk_nand_dev_initialised)
		return;
	rknand_device_lock();
	rk_ftl_cache_write_back();
	rknand_device_unlock();
	pr_info("Nand flash flush ok!\n");
}

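/*
 * Bring-up entry point: map the NAND controller registers via
 * rknand_get_reg_addr(), initialise the FTL, then register the block
 * layer.  rk_nand_dev_initialised gates the flush, suspend/resume, exit
 * and shutdown helpers below.
 */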
int __init rknand_dev_init(void)
{
	int ret;
	void __iomem *nandc0;
	void __iomem *nandc1;

	rknand_get_reg_addr((unsigned long *)&nandc0, (unsigned long *)&nandc1);
	if (!nandc0)
		return -1;

	ret = rk_ftl_init();
	if (ret) {
		pr_err("rk_ftl_init fail\n");
		return -1;
	}

	ret = nand_blk_register(&mytr);
	if (ret) {
		pr_err("nand_blk_register fail\n");
		return -1;
	}

	rk_nand_dev_initialised = 1;
	return ret;
}

int rknand_dev_exit(void)
{
	if (!rk_nand_dev_initialised)
		return -1;
	rk_nand_dev_initialised = 0;
	if (rknand_device_trylock()) {
		rk_ftl_cache_write_back();
		rknand_device_unlock();
	}
	nand_blk_unregister(&mytr);
	rk_ftl_de_init();
	pr_info("nand_blk_dev_exit:OK\n");
	return 0;
}

void rknand_dev_suspend(void)
{
	if (!rk_nand_dev_initialised)
		return;
	pr_info("rk_nand_suspend\n");
	rk_nand_schedule_enable_config(0);
	rknand_device_lock();
	rk_nand_suspend();
}

void rknand_dev_resume(void)
{
	if (!rk_nand_dev_initialised)
		return;
	pr_info("rk_nand_resume\n");
	rk_nand_resume();
	rknand_device_unlock();
	rk_nand_schedule_enable_config(1);
}

void rknand_dev_shutdown(void)
{
	pr_info("rknand_shutdown...\n");
	if (!rk_nand_dev_initialised)
		return;
	if (mytr.quit == 0) {
		mytr.quit = 1;
		wake_up(&mytr.thread_wq);
		wait_for_completion(&mytr.thread_exit);
		rk_ftl_de_init();
	}
	pr_info("rknand_shutdown:OK\n");
}