xref: /OK3568_Linux_fs/kernel/drivers/rkflash/rkflash_blk.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2018 Rockchip Electronics Co. Ltd. */

#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-mq.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/version.h>
#include <linux/soc/rockchip/rk_vendor_storage.h>
#include "../soc/rockchip/flash_vendor_storage.h"

#include "rkflash_blk.h"
#include "rkflash_debug.h"
#include "rk_sftl.h"

void __printf(1, 2) sftl_printk(char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}

/* For rkflash block dev private data */
static const struct flash_boot_ops *g_boot_ops;

static int g_flash_type = -1;
static struct flash_part disk_array[MAX_PART_COUNT];
static int g_max_part_num = 4;
#define FW_HEADER_PT_NAME		("fw_header_p")
static struct flash_part fw_header_p;

#define PART_READONLY 0x85
#define PART_WRITEONLY 0x86
#define PART_NO_ACCESS 0x87

static unsigned long total_read_data;
static unsigned long total_write_data;
static unsigned long total_read_count;
static unsigned long total_write_count;

static char *mtd_read_temp_buffer;
#define MTD_RW_SECTORS (512)

#define DISABLE_WRITE _IO('V', 0)
#define ENABLE_WRITE _IO('V', 1)
#define DISABLE_READ _IO('V', 2)
#define ENABLE_READ _IO('V', 3)

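/*
 * Illustrative user-space use of the access-control ioctls above. This is a
 * hedged sketch, not part of the driver: the device node name is an
 * assumption and depends on the parsed partition table.
 *
 *	int fd = open("/dev/block/userdata", O_RDWR);	// hypothetical node
 *
 *	if (fd >= 0) {
 *		ioctl(fd, DISABLE_WRITE);	// disk becomes read-only
 *		ioctl(fd, ENABLE_WRITE);	// restore write access
 *		close(fd);
 *	}
 */
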
/* Thread for gc operation */
static DECLARE_WAIT_QUEUE_HEAD(nand_gc_thread_wait);
static unsigned long nand_gc_do;
static struct task_struct *nand_gc_thread __read_mostly;

/* For rkflash dev private data, including mtd dev and block dev */
static int rkflash_dev_initialised;
static DEFINE_MUTEX(g_flash_ops_mutex);

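/*
 * Parse the Rockchip partition table from the first sectors of the flash
 * under g_flash_ops_mutex and fill @part. Entries are clamped to the device
 * capacity, and the synthetic "fw_header_p" partition covering the first
 * four sectors is prepared as well. Returns the number of table entries
 * found, or 0 on failure.
 */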
static unsigned int rk_partition_init(struct flash_part *part)
{
	int i, part_num = 0;
	u32 density;
	struct STRUCT_PART_INFO *g_part;  /* size 2KB */

	g_part = kmalloc(sizeof(*g_part), GFP_KERNEL | GFP_DMA);
	if (!g_part)
		return 0;
	mutex_lock(&g_flash_ops_mutex);
	if (g_boot_ops->read(0, 4, g_part) == 0) {
		if (g_part->hdr.ui_fw_tag == RK_PARTITION_TAG) {
			part_num = g_part->hdr.ui_part_entry_count;
			density = g_boot_ops->get_capacity();
			for (i = 0; i < part_num; i++) {
				memcpy(part[i].name,
				       g_part->part[i].sz_name,
				       32);
				part[i].offset = g_part->part[i].ui_pt_off;
				part[i].size = g_part->part[i].ui_pt_sz;
				part[i].type = 0;
				if (part[i].size == UINT_MAX)
					part[i].size = density - part[i].offset;
				if (part[i].offset + part[i].size > density) {
					part[i].size = density - part[i].offset;
					break;
				}
			}
		}
	}
	mutex_unlock(&g_flash_ops_mutex);
	kfree(g_part);

	memset(&fw_header_p, 0x0, sizeof(fw_header_p));
	memcpy(fw_header_p.name, FW_HEADER_PT_NAME, strlen(FW_HEADER_PT_NAME));
	fw_header_p.offset = 0x0;
	fw_header_p.size = 0x4;
	fw_header_p.type = 0;

	return part_num;
}

static int rkflash_blk_proc_show(struct seq_file *m, void *v)
{
	char *ftl_buf = kzalloc(4096, GFP_KERNEL);
#if IS_ENABLED(CONFIG_RK_SFTL)
	int real_size = 0;
#endif

	if (!ftl_buf)
		return -ENOMEM;

#if IS_ENABLED(CONFIG_RK_SFTL)
	real_size = rknand_proc_ftlread(4096, ftl_buf);
	if (real_size > 0)
		seq_printf(m, "%s", ftl_buf);
#endif
	seq_printf(m, "Total Read %ld KB\n", total_read_data >> 1);
	seq_printf(m, "Total Write %ld KB\n", total_write_data >> 1);
	seq_printf(m, "total_write_count %ld\n", total_write_count);
	seq_printf(m, "total_read_count %ld\n", total_read_count);
	kfree(ftl_buf);
	return 0;
}

static int rkflash_blk_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rkflash_blk_proc_show, PDE_DATA(inode));
}

static const struct proc_ops rkflash_blk_proc_fops = {
	.proc_open	= rkflash_blk_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int rkflash_blk_create_procfs(void)
{
	struct proc_dir_entry *ent;

	ent = proc_create_data("rkflash", 0664, NULL, &rkflash_blk_proc_fops,
			       (void *)0);
	if (!ent)
		return -1;

	return 0;
}

static int rkflash_blk_discard(u32 sec, u32 n_sec)
{
	int ret;

	if (g_boot_ops->discard)
		ret = g_boot_ops->discard(sec, n_sec);
	else
		ret = -EPERM;

	return ret;
}

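/*
 * Access-checked sector transfer against the backing flash. The per-disk
 * access flags toggled by the ioctls above are honoured first; @start is
 * then rebased by the partition offset before the read or write is issued.
 * Returns 0 on success or -EIO on failure. REQ_OP_READ/REQ_OP_WRITE are
 * passed in as @cmd and compare equal to the READ/WRITE values used here.
 */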
static int rkflash_blk_xfer(struct flash_blk_dev *dev,
			    unsigned long start,
			    unsigned long nsector,
			    char *buf,
			    int cmd)
{
	int ret;

	if (dev->disable_access ||
	    (cmd == WRITE && dev->readonly) ||
	    (cmd == READ && dev->writeonly)) {
		return -EIO;
	}

	start += dev->off_size;

	switch (cmd) {
	case READ:
		total_read_data += nsector;
		total_read_count++;
		rkflash_print_bio("rkflash r sec= %lx, n_sec= %lx\n",
				  start, nsector);
		ret = g_boot_ops->read(start, nsector, buf);
		if (ret)
			ret = -EIO;
		break;

	case WRITE:
		total_write_data += nsector;
		total_write_count++;
		rkflash_print_bio("rkflash w sec= %lx, n_sec= %lx\n",
				  start, nsector);
		ret = g_boot_ops->write(start, nsector, buf);
		if (ret)
			ret = -EIO;
		break;

	default:
		ret = -EIO;
		break;
	}

	return ret;
}

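/*
 * Decide whether the request's payload can be used directly. Returns 1 and
 * points *pbuf at the first segment when every bio segment sits in lowmem
 * and the segments are virtually contiguous, so the transfer can skip the
 * bounce buffer; returns 0 (leaving *pbuf untouched) when a copy through
 * mtd_read_temp_buffer is required.
 */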
static int rkflash_blk_check_buffer_align(struct request *req, char **pbuf)
{
	int nr_vec = 0;
	struct bio_vec bv;
	struct req_iterator iter;
	char *buffer;
	void *firstbuf = NULL;
	char *nextbuffer = NULL;

	rq_for_each_segment(bv, req, iter) {
		/* highmem pages force a fallback to the kernel bounce buffer */
		if (PageHighMem(bv.bv_page))
			return 0;

		buffer = page_address(bv.bv_page) + bv.bv_offset;
		if (!buffer)
			return 0;
		if (!firstbuf)
			firstbuf = buffer;
		nr_vec++;
		if (nextbuffer && nextbuffer != buffer)
			return 0;
		nextbuffer = buffer + bv.bv_len;
	}
	*pbuf = firstbuf;
	return 1;
}

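/*
 * Service one block request; the caller holds g_flash_ops_mutex. Reads and
 * writes always span the whole request (__data_len); when the payload is
 * not directly addressable, the preallocated bounce buffer is used and the
 * data is staged to or from the bio pages with kmap_atomic().
 */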
static blk_status_t do_blktrans_all_request(struct flash_blk_ops *tr,
			       struct flash_blk_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf = NULL, *page_buf;
	struct req_iterator rq_iter;
	struct bio_vec bvec;
	int ret;
	unsigned long total_nsect;

	block = blk_rq_pos(req);
	nsect = blk_rq_cur_bytes(req) >> 9;
	total_nsect = (req->__data_len) >> 9;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		rkflash_print_bio("%s discard\n", __func__);
		if (rkflash_blk_discard(block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		rkflash_print_bio("%s read block=%lx nsec=%lx\n", __func__, block, total_nsect);
		buf = mtd_read_temp_buffer;
		rkflash_blk_check_buffer_align(req, &buf);
		ret = rkflash_blk_xfer(dev,
				       block,
				       total_nsect,
				       buf,
				       REQ_OP_READ);
		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			/* copy the bounce buffer back into the bio pages */
			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);
				memcpy(page_buf +
				       bvec.bv_offset,
				       p,
				       bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;
	case REQ_OP_WRITE:
		rkflash_print_bio("%s write block=%lx nsec=%lx\n", __func__, block, total_nsect);

		buf = mtd_read_temp_buffer;
		rkflash_blk_check_buffer_align(req, &buf);
		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			/* gather the bio pages into the bounce buffer */
			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);
				memcpy(p,
				       page_buf +
				       bvec.bv_offset,
				       bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}
		ret = rkflash_blk_xfer(dev,
				       block,
				       total_nsect,
				       buf,
				       REQ_OP_WRITE);

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

static struct request *rkflash_next_request(struct flash_blk_dev *dev)
{
	struct request *rq;
	struct flash_blk_ops *tr = dev->blk_ops;

	rq = list_first_entry_or_null(&tr->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		return rq;
	}

	return NULL;
}

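/*
 * Drain the driver-private request list. Entered with queue_lock held; the
 * lock is dropped around each transfer so the (sleeping) flash ops can run
 * under g_flash_ops_mutex, then reacquired before picking the next request.
 */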
static void rkflash_blktrans_work(struct flash_blk_dev *dev)
	__releases(&dev->blk_ops->queue_lock)
	__acquires(&dev->blk_ops->queue_lock)
{
	struct flash_blk_ops *tr = dev->blk_ops;
	struct request *req = NULL;

	while (1) {
		blk_status_t res;

		req = rkflash_next_request(dev);
		if (!req)
			break;

		spin_unlock_irq(&dev->blk_ops->queue_lock);

		mutex_lock(&g_flash_ops_mutex);
		res = do_blktrans_all_request(tr, dev, req);
		mutex_unlock(&g_flash_ops_mutex);

		if (!blk_update_request(req, res, req->__data_len)) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		spin_lock_irq(&dev->blk_ops->queue_lock);
	}
}

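/*
 * blk-mq ->queue_rq: requests are chained onto a simple list and drained
 * inline under queue_lock rather than dispatched per hardware context; the
 * queue is created with a single hw queue and BLK_MQ_F_BLOCKING, so
 * sleeping in the drain path is allowed. Completion also nudges the NAND
 * garbage-collection thread.
 */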
static blk_status_t rkflash_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct flash_blk_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	nand_gc_do = 0;
	spin_lock_irq(&dev->blk_ops->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->blk_ops->rq_list);
	rkflash_blktrans_work(dev);
	spin_unlock_irq(&dev->blk_ops->queue_lock);

	/* wake up the gc thread */
	nand_gc_do = 1;
	wake_up(&nand_gc_thread_wait);

	return BLK_STS_OK;
}

static const struct blk_mq_ops rkflash_mq_ops = {
	.queue_rq	= rkflash_queue_rq,
};

static int nand_gc_has_work(void)
{
	return nand_gc_do;
}

static int nand_gc_do_work(void)
{
	int ret = nand_gc_has_work();

	/* do garbage collection only while the device is idle */
	if (ret) {
		mutex_lock(&g_flash_ops_mutex);
		ret = g_boot_ops->gc();
		rkflash_print_bio("%s gc result= %d\n", __func__, ret);
		mutex_unlock(&g_flash_ops_mutex);
	}

	return ret;
}

static void nand_gc_wait_work(void)
{
	unsigned long nand_gc_jiffies = HZ / 20;

	if (nand_gc_has_work())
		wait_event_freezable_timeout(nand_gc_thread_wait,
					     kthread_should_stop(),
					     nand_gc_jiffies);
	else
		wait_event_freezable(nand_gc_thread_wait,
				     kthread_should_stop() || nand_gc_has_work());
}

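/*
 * Background garbage-collection thread for NAND back ends. Each pass calls
 * into the FTL gc hook; after more than ten consecutive passes with nothing
 * left to collect, nand_gc_do is cleared so the thread parks until the next
 * request completion wakes it.
 */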
static int nand_gc_mythread(void *arg)
{
	int gc_done_times = 0;

	set_freezable();

	while (!kthread_should_stop()) {
		if (nand_gc_do_work() == 0) {
			gc_done_times++;
			if (gc_done_times > 10)
				nand_gc_do = 0;
		} else {
			gc_done_times = 0;
		}

		nand_gc_wait_work();
	}
	pr_info("nand gc exited\n");

	return 0;
}

static int rkflash_blk_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void rkflash_blk_release(struct gendisk *disk, fmode_t mode)
{
}

static int rkflash_blk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd,
			     unsigned long arg)
{
	struct flash_blk_dev *dev = bdev->bd_disk->private_data;

	switch (cmd) {
	case ENABLE_WRITE:
		dev->disable_access = 0;
		dev->readonly = 0;
		set_disk_ro(dev->blkcore_priv, 0);
		return 0;

	case DISABLE_WRITE:
		dev->readonly = 1;
		set_disk_ro(dev->blkcore_priv, 1);
		return 0;

	case ENABLE_READ:
		dev->disable_access = 0;
		dev->writeonly = 0;
		return 0;

	case DISABLE_READ:
		dev->writeonly = 1;
		return 0;
	default:
		return -ENOTTY;
	}
}

const struct block_device_operations rkflash_blk_trans_ops = {
	.owner = THIS_MODULE,
	.open = rkflash_blk_open,
	.release = rkflash_blk_release,
	.ioctl = rkflash_blk_ioctl,
};

static struct flash_blk_ops mytr = {
	.name = "rkflash",
	.major = 31,
	.minorbits = 0,
	.owner = THIS_MODULE,
};

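/*
 * Create and register one gendisk for @part. The disk inherits the shared
 * request queue owned by @blk_ops; named partitions use their table name as
 * the disk name, while the unnamed whole-device case falls back to
 * "<name><devnum>" with extended devt numbering. Partition type codes map
 * to the no-access/read-only/write-only flags checked in rkflash_blk_xfer().
 */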
static int rkflash_blk_add_dev(struct flash_blk_dev *dev,
			       struct flash_blk_ops *blk_ops,
			       struct flash_part *part)
{
	struct gendisk *gd;

	if (part->size == 0)
		return -1;

	gd = alloc_disk(1 << blk_ops->minorbits);
	if (!gd) {
		kfree(dev);
		return -ENOMEM;
	}

	dev->blk_ops = blk_ops;
	dev->size = part->size;
	dev->off_size = part->offset;
	dev->devnum = blk_ops->last_dev_index;
	list_add_tail(&dev->list, &blk_ops->devs);
	blk_ops->last_dev_index++;

	gd->major = blk_ops->major;
	gd->first_minor = (dev->devnum) << blk_ops->minorbits;
	gd->fops = &rkflash_blk_trans_ops;

	if (part->name[0]) {
		snprintf(gd->disk_name,
			 sizeof(gd->disk_name),
			 "%s",
			 part->name);
	} else {
		gd->flags = GENHD_FL_EXT_DEVT;
		gd->minors = 255;
		snprintf(gd->disk_name,
			 sizeof(gd->disk_name),
			 "%s%d",
			 blk_ops->name,
			 dev->devnum);
	}

	set_capacity(gd, dev->size);

	gd->private_data = dev;
	dev->blkcore_priv = gd;
	gd->queue = blk_ops->rq;

	if (part->type == PART_NO_ACCESS)
		dev->disable_access = 1;

	if (part->type == PART_READONLY)
		dev->readonly = 1;

	if (part->type == PART_WRITEONLY)
		dev->writeonly = 1;

	if (dev->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

static int rkflash_blk_remove_dev(struct flash_blk_dev *dev)
{
	struct gendisk *gd;

	gd = dev->blkcore_priv;
	list_del(&dev->list);
	gd->queue = NULL;
	del_gendisk(gd);
	put_disk(gd);
	kfree(dev);
	return 0;
}

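/*
 * Bring up the block layer side: register the major, create the
 * single-queue blk-mq request queue with a DMA-capable bounce buffer,
 * start the gc thread for NAND back ends, then publish one disk per
 * partition-table entry (entry 0, which holds vendor data, stays hidden)
 * plus the synthetic fw_header_p disk. Without a valid table, the whole
 * capacity is exposed as a single unnamed disk.
 */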
static int rkflash_blk_register(struct flash_blk_ops *blk_ops)
{
	int i, ret;
	u64 offset;
	struct flash_blk_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512,
				       GFP_KERNEL | GFP_DMA);
	if (!mtd_read_temp_buffer) {
		kfree(dev);
		return -ENOMEM;
	}

	ret = register_blkdev(blk_ops->major, blk_ops->name);
	if (ret) {
		kfree(dev);

		return -1;
	}

	/* Create the request queue */
	spin_lock_init(&blk_ops->queue_lock);
	INIT_LIST_HEAD(&blk_ops->rq_list);

	blk_ops->tag_set = kzalloc(sizeof(*blk_ops->tag_set), GFP_KERNEL);
	if (!blk_ops->tag_set) {
		ret = -ENOMEM;
		goto error1;
	}

	blk_ops->rq = blk_mq_init_sq_queue(blk_ops->tag_set, &rkflash_mq_ops, 1,
					   BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (IS_ERR(blk_ops->rq)) {
		ret = PTR_ERR(blk_ops->rq);
		blk_ops->rq = NULL;
		goto error2;
	}

	blk_ops->rq->queuedata = dev;

	blk_queue_max_hw_sectors(blk_ops->rq, MTD_RW_SECTORS);
	blk_queue_max_segments(blk_ops->rq, MTD_RW_SECTORS);

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, blk_ops->rq);
	blk_queue_max_discard_sectors(blk_ops->rq, UINT_MAX >> 9);
	blk_ops->rq->limits.discard_granularity = 64 << 9;

	if (g_flash_type == FLASH_TYPE_SFC_NAND || g_flash_type == FLASH_TYPE_NANDC_NAND)
		nand_gc_thread = kthread_run(nand_gc_mythread, (void *)blk_ops, "rkflash_gc");

	INIT_LIST_HEAD(&blk_ops->devs);
	g_max_part_num = rk_partition_init(disk_array);
	if (g_max_part_num) {
		/* partition 0 holds vendor data and must stay hidden */
		blk_ops->last_dev_index = 0;
		for (i = 1; i < g_max_part_num; i++) {
			offset = (u64)disk_array[i].offset;
			pr_info("%10s: 0x%09llx -- 0x%09llx (%llu MB)\n",
				disk_array[i].name,
				offset * 512,
				(u64)(offset + disk_array[i].size) * 512,
				(u64)disk_array[i].size / 2048);
			rkflash_blk_add_dev(dev, blk_ops, &disk_array[i]);
		}
		rkflash_blk_add_dev(dev, blk_ops, &fw_header_p);
	} else {
		struct flash_part part;

		part.offset = 0;
		part.size = g_boot_ops->get_capacity();
		part.type = 0;
		part.name[0] = 0;
		rkflash_blk_add_dev(dev, blk_ops, &part);
	}
	rkflash_blk_create_procfs();

	return 0;

error2:
	kfree(blk_ops->tag_set);
error1:
	unregister_blkdev(blk_ops->major, blk_ops->name);
	kfree(dev);

	return ret;
}

static void rkflash_blk_unregister(struct flash_blk_ops *blk_ops)
{
	struct list_head *this, *next;

	list_for_each_safe(this, next, &blk_ops->devs) {
		struct flash_blk_dev *dev =
			list_entry(this, struct flash_blk_dev, list);

		rkflash_blk_remove_dev(dev);
	}
	blk_cleanup_queue(blk_ops->rq);
	unregister_blkdev(blk_ops->major, blk_ops->name);
}

static int __maybe_unused rkflash_dev_vendor_read(u32 sec, u32 n_sec, void *p_data)
{
	int ret;

	if (g_boot_ops->vendor_read) {
		mutex_lock(&g_flash_ops_mutex);
		ret = g_boot_ops->vendor_read(sec, n_sec, p_data);
		mutex_unlock(&g_flash_ops_mutex);
	} else {
		ret = -EPERM;
	}

	return ret;
}

static int __maybe_unused rkflash_dev_vendor_write(u32 sec, u32 n_sec, void *p_data)
{
	int ret;

	if (g_boot_ops->vendor_write) {
		mutex_lock(&g_flash_ops_mutex);
		ret = g_boot_ops->vendor_write(sec,
					       n_sec,
					       p_data);
		mutex_unlock(&g_flash_ops_mutex);
	} else {
		ret = -EPERM;
	}

	return ret;
}

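/*
 * One-shot probe entry called by the rkflash platform code. After the
 * low-level ops init succeeds, a vendor-storage backend is registered
 * according to the flash type and kernel config, and the device is then
 * exposed either as an MTD device (SFC NOR/NAND with the MTD options
 * enabled) or as the block disks set up by rkflash_blk_register(). The
 * case fall-throughs below are intentional and config-dependent.
 */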
int rkflash_dev_init(void __iomem *reg_addr,
		     enum flash_type type,
		     const struct flash_boot_ops *ops)
{
	int ret = -1;

	pr_err("%s enter\n", __func__);
	if (rkflash_dev_initialised) {
		pr_err("rkflash has already been initialised as id[%d]\n", g_flash_type);
		return -1;
	}

	if (!ops->init)
		return -EINVAL;
	ret = ops->init(reg_addr);
	if (ret) {
		pr_err("rkflash[%d] is invalid\n", type);

		return -ENODEV;
	}
	pr_info("rkflash[%d] init success\n", type);
	g_boot_ops = ops;

	/* vendor part */
	switch (type) {
	case FLASH_TYPE_SFC_NOR:
#if IS_ENABLED(CONFIG_RK_SFC_NOR_MTD) && IS_ENABLED(CONFIG_ROCKCHIP_MTD_VENDOR_STORAGE)
		break;
#else
		flash_vendor_dev_ops_register(rkflash_dev_vendor_read,
					      rkflash_dev_vendor_write);
#endif
		break;
	case FLASH_TYPE_SFC_NAND:
#ifdef CONFIG_RK_SFC_NAND_MTD
		break;
#endif
		/* fall through when CONFIG_RK_SFC_NAND_MTD is not set */
	case FLASH_TYPE_NANDC_NAND:
#if defined(CONFIG_RK_SFTL)
		rk_sftl_vendor_dev_ops_register(rkflash_dev_vendor_read,
						rkflash_dev_vendor_write);
		ret = rk_sftl_vendor_storage_init();
		if (!ret) {
			rk_vendor_register(rk_sftl_vendor_read,
					   rk_sftl_vendor_write);
			rk_sftl_vendor_register();
			pr_info("rkflash vendor storage init ok\n");
		} else {
			pr_info("rkflash vendor storage init failed\n");
		}
		break;
#endif
	default:
		break;
	}

	switch (type) {
	case FLASH_TYPE_SFC_NOR:
#ifdef CONFIG_RK_SFC_NOR_MTD
		ret = sfc_nor_mtd_init(sfnor_dev, &g_flash_ops_mutex);
		pr_err("%s device register as mtd dev, ret= %d\n", __func__, ret);
		break;
#endif
		/* fall through when CONFIG_RK_SFC_NOR_MTD is not set */
	case FLASH_TYPE_SFC_NAND:
#ifdef CONFIG_RK_SFC_NAND_MTD
		ret = sfc_nand_mtd_init(sfnand_dev, &g_flash_ops_mutex);
		pr_err("%s device register as mtd dev, ret= %d\n", __func__, ret);
		break;
#endif
		/* fall through when CONFIG_RK_SFC_NAND_MTD is not set */
	case FLASH_TYPE_NANDC_NAND:
	default:
		g_flash_type = type;
		ret = rkflash_blk_register(&mytr);
		pr_err("%s device register as blk dev, ret= %d\n", __func__, ret);
		if (ret)
			g_flash_type = -1;
		break;
	}

	if (!ret)
		rkflash_dev_initialised = 1;

	return ret;
}

int rkflash_dev_exit(void)
{
	if (rkflash_dev_initialised)
		rkflash_dev_initialised = 0;
	if (g_flash_type != -1)
		rkflash_blk_unregister(&mytr);
	pr_info("%s:OK\n", __func__);

	return 0;
}

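/*
 * Suspend simply takes g_flash_ops_mutex so all flash ops are blocked
 * across the sleep; resume re-initialises the controller via the back
 * end's resume hook and releases the mutex, letting queued I/O proceed.
 */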
int rkflash_dev_suspend(void)
{
	mutex_lock(&g_flash_ops_mutex);

	return 0;
}

int rkflash_dev_resume(void __iomem *reg_addr)
{
	g_boot_ops->resume(reg_addr);
	mutex_unlock(&g_flash_ops_mutex);

	return 0;
}

void rkflash_dev_shutdown(void)
{
	pr_info("rkflash_shutdown...\n");
	if (g_flash_type == FLASH_TYPE_SFC_NAND || g_flash_type == FLASH_TYPE_NANDC_NAND)
		kthread_stop(nand_gc_thread);

	mutex_lock(&g_flash_ops_mutex);
	g_boot_ops->deinit();
	mutex_unlock(&g_flash_ops_mutex);
	pr_info("rkflash_shutdown:OK\n");
}
847