xref: /OK3568_Linux_fs/kernel/drivers/rkflash/rkflash_blk.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2018 Rockchip Electronics Co. Ltd. */

#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-mq.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/version.h>
#include <linux/soc/rockchip/rk_vendor_storage.h>
#include "../soc/rockchip/flash_vendor_storage.h"

#include "rkflash_blk.h"
#include "rkflash_debug.h"
#include "rk_sftl.h"

void __printf(1, 2) sftl_printk(char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}

/* For rkflash block dev private data */
static const struct flash_boot_ops *g_boot_ops;

static int g_flash_type = -1;
static struct flash_part disk_array[MAX_PART_COUNT];
static int g_max_part_num = 4;
#define FW_HEADER_PT_NAME		("fw_header_p")
static struct flash_part fw_header_p;

#define PART_READONLY 0x85
#define PART_WRITEONLY 0x86
#define PART_NO_ACCESS 0x87

/* I/O statistics, in 512-byte sectors and request counts */
static unsigned long total_read_data;
static unsigned long total_write_data;
static unsigned long total_read_count;
static unsigned long total_write_count;

static char *mtd_read_temp_buffer;
#define MTD_RW_SECTORS (512)

#define DISABLE_WRITE _IO('V', 0)
#define ENABLE_WRITE _IO('V', 1)
#define DISABLE_READ _IO('V', 2)
#define ENABLE_READ _IO('V', 3)
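
/*
 * Per-partition access control, toggled from user space with ioctl() on the
 * partition's block device node. Illustrative sketch only; the device path
 * below is hypothetical:
 *
 *	int fd = open("/dev/block/rkflash0", O_RDWR);
 *
 *	if (fd >= 0) {
 *		ioctl(fd, DISABLE_WRITE);	// partition becomes read-only
 *		ioctl(fd, ENABLE_WRITE);	// writes allowed again
 *		close(fd);
 *	}
 */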

/* Thread for gc operation */
static DECLARE_WAIT_QUEUE_HEAD(nand_gc_thread_wait);
static unsigned long nand_gc_do;
static struct task_struct *nand_gc_thread __read_mostly;

/* For rkflash dev private data, including mtd dev and block dev */
static int rkflash_dev_initialised;
static DEFINE_MUTEX(g_flash_ops_mutex);

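/*
 * Parse the RK partition table: read the first 4 sectors from flash, check
 * RK_PARTITION_TAG in the header and fill @part with name/offset/size (all
 * in 512-byte sectors). Entries that claim UINT_MAX sectors or overflow the
 * chip capacity are clamped to the remaining space.
 */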
static unsigned int rk_partition_init(struct flash_part *part)
{
	int i, part_num = 0;
	u32 density;
	struct STRUCT_PART_INFO *g_part;  /* size 2KB */

	g_part = kmalloc(sizeof(*g_part), GFP_KERNEL | GFP_DMA);
	if (!g_part)
		return 0;
	mutex_lock(&g_flash_ops_mutex);
	if (g_boot_ops->read(0, 4, g_part) == 0) {
		if (g_part->hdr.ui_fw_tag == RK_PARTITION_TAG) {
			part_num = g_part->hdr.ui_part_entry_count;
			density = g_boot_ops->get_capacity();
			for (i = 0; i < part_num; i++) {
				memcpy(part[i].name,
				       g_part->part[i].sz_name,
				       32);
				part[i].offset = g_part->part[i].ui_pt_off;
				part[i].size = g_part->part[i].ui_pt_sz;
				part[i].type = 0;
				if (part[i].size == UINT_MAX)
					part[i].size = density - part[i].offset;
				if (part[i].offset + part[i].size > density) {
					part[i].size = density - part[i].offset;
					break;
				}
			}
		}
	}
	mutex_unlock(&g_flash_ops_mutex);
	kfree(g_part);

	/* Synthetic partition covering the 4-sector firmware header at LBA 0 */
	memset(&fw_header_p, 0x0, sizeof(fw_header_p));
	memcpy(fw_header_p.name, FW_HEADER_PT_NAME, strlen(FW_HEADER_PT_NAME));
	fw_header_p.offset = 0x0;
	fw_header_p.size = 0x4;
	fw_header_p.type = 0;

	return part_num;
}

static int rkflash_blk_proc_show(struct seq_file *m, void *v)
{
	char *ftl_buf = kzalloc(4096, GFP_KERNEL);
#if IS_ENABLED(CONFIG_RK_SFTL)
	int real_size = 0;
#endif

	if (!ftl_buf)
		return -ENOMEM;

#if IS_ENABLED(CONFIG_RK_SFTL)
	real_size = rknand_proc_ftlread(4096, ftl_buf);
	if (real_size > 0)
		seq_printf(m, "%s", ftl_buf);
#endif
	seq_printf(m, "Total Read %ld KB\n", total_read_data >> 1);
	seq_printf(m, "Total Write %ld KB\n", total_write_data >> 1);
	seq_printf(m, "total_write_count %ld\n", total_write_count);
	seq_printf(m, "total_read_count %ld\n", total_read_count);
	kfree(ftl_buf);
	return 0;
}

static int rkflash_blk_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rkflash_blk_proc_show, PDE_DATA(inode));
}

static const struct proc_ops rkflash_blk_proc_fops = {
	.proc_open	= rkflash_blk_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int rkflash_blk_create_procfs(void)
{
	struct proc_dir_entry *ent;

	ent = proc_create_data("rkflash", 0664, NULL, &rkflash_blk_proc_fops,
			       (void *)0);
	if (!ent)
		return -1;

	return 0;
}

static int rkflash_blk_discard(u32 sec, u32 n_sec)
{
	int ret;

	if (g_boot_ops->discard)
		ret = g_boot_ops->discard(sec, n_sec);
	else
		ret = -EPERM;

	return ret;
}

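/*
 * Transfer @nsector 512-byte sectors between @buf and the flash, honouring
 * the per-partition access flags and translating the partition-relative
 * @start by the partition offset before calling into the low-level ops.
 */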
static int rkflash_blk_xfer(struct flash_blk_dev *dev,
			    unsigned long start,
			    unsigned long nsector,
			    char *buf,
			    int cmd)
{
	int ret;

	if (dev->disable_access ||
	    (cmd == WRITE && dev->readonly) ||
	    (cmd == READ && dev->writeonly)) {
		return -EIO;
	}

	start += dev->off_size;

	switch (cmd) {
	case READ:
		total_read_data += nsector;
		total_read_count++;
		rkflash_print_bio("rkflash r sec= %lx, n_sec= %lx\n",
				  start, nsector);
		ret = g_boot_ops->read(start, nsector, buf);
		if (ret)
			ret = -EIO;
		break;

	case WRITE:
		total_write_data += nsector;
		total_write_count++;
		rkflash_print_bio("rkflash w sec= %lx, n_sec= %lx\n",
				  start, nsector);
		ret = g_boot_ops->write(start, nsector, buf);
		if (ret)
			ret = -EIO;
		break;

	default:
		ret = -EIO;
		break;
	}

	return ret;
}

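/*
 * Check whether all segments of @req form one physically contiguous,
 * directly addressable (lowmem) buffer. If so, store its kernel virtual
 * address in *pbuf and return 1 so the caller can transfer straight to or
 * from the bio pages; otherwise return 0 and the caller falls back to the
 * bounce buffer.
 */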
static int rkflash_blk_check_buffer_align(struct request *req, char **pbuf)
{
	int nr_vec = 0;
	struct bio_vec bv;
	struct req_iterator iter;
	char *buffer;
	void *firstbuf = NULL;
	char *nextbuffer = NULL;

	rq_for_each_segment(bv, req, iter) {
		/* highmem pages cannot be addressed directly; use the bounce buffer */
		if (PageHighMem(bv.bv_page))
			return 0;

		buffer = page_address(bv.bv_page) + bv.bv_offset;
		if (!buffer)
			return 0;
		if (!firstbuf)
			firstbuf = buffer;
		nr_vec++;
		if (nextbuffer && nextbuffer != buffer)
			return 0;
		nextbuffer = buffer + bv.bv_len;
	}
	*pbuf = firstbuf;
	return 1;
}

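/*
 * Execute one request synchronously. Reads land in mtd_read_temp_buffer
 * (unless the bio is contiguous lowmem) and are then copied out to the bio
 * pages segment by segment; writes are gathered into the bounce buffer the
 * same way before being handed to rkflash_blk_xfer().
 */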
static blk_status_t do_blktrans_all_request(struct flash_blk_ops *tr,
			       struct flash_blk_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf = NULL, *page_buf;
	struct req_iterator rq_iter;
	struct bio_vec bvec;
	int ret;
	unsigned long total_nsect;

	block = blk_rq_pos(req);
	nsect = blk_rq_cur_bytes(req) >> 9;
	total_nsect = (req->__data_len) >> 9;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		rkflash_print_bio("%s discard\n", __func__);
		if (rkflash_blk_discard(block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		rkflash_print_bio("%s read block=%lx nsec=%lx\n", __func__, block, total_nsect);
		buf = mtd_read_temp_buffer;
		rkflash_blk_check_buffer_align(req, &buf);
		ret = rkflash_blk_xfer(dev,
				       block,
				       total_nsect,
				       buf,
				       REQ_OP_READ);
		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);
				memcpy(page_buf +
				       bvec.bv_offset,
				       p,
				       bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;
	case REQ_OP_WRITE:
		rkflash_print_bio("%s write block=%lx nsec=%lx\n", __func__, block, total_nsect);

		buf = mtd_read_temp_buffer;
		rkflash_blk_check_buffer_align(req, &buf);
		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);
				memcpy(p,
					page_buf +
					bvec.bv_offset,
					bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}
		ret = rkflash_blk_xfer(dev,
					block,
					total_nsect,
					buf,
					REQ_OP_WRITE);

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

static struct request *rkflash_next_request(struct flash_blk_dev *dev)
{
	struct request *rq;
	struct flash_blk_ops *tr = dev->blk_ops;

	rq = list_first_entry_or_null(&tr->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		return rq;
	}

	return NULL;
}

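/*
 * Drain the private request list. The queue_lock only protects the list;
 * it is dropped around the actual flash I/O, which is serialized by
 * g_flash_ops_mutex instead.
 */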
static void rkflash_blktrans_work(struct flash_blk_dev *dev)
	__releases(&dev->blk_ops->queue_lock)
	__acquires(&dev->blk_ops->queue_lock)
{
	struct flash_blk_ops *tr = dev->blk_ops;
	struct request *req = NULL;

	while (1) {
		blk_status_t res;

		req = rkflash_next_request(dev);
		if (!req)
			break;

		spin_unlock_irq(&dev->blk_ops->queue_lock);

		mutex_lock(&g_flash_ops_mutex);
		res = do_blktrans_all_request(tr, dev, req);
		mutex_unlock(&g_flash_ops_mutex);

		if (!blk_update_request(req, res, req->__data_len)) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		spin_lock_irq(&dev->blk_ops->queue_lock);
	}
}

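/*
 * blk-mq ->queue_rq(): append the request to the private list and process
 * it synchronously (the queue is created with BLK_MQ_F_BLOCKING), then set
 * nand_gc_do and wake the GC thread so garbage collection runs while the
 * device is otherwise idle.
 */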
static blk_status_t rkflash_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct flash_blk_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	nand_gc_do = 0;
	spin_lock_irq(&dev->blk_ops->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->blk_ops->rq_list);
	rkflash_blktrans_work(dev);
	spin_unlock_irq(&dev->blk_ops->queue_lock);

	/* wake up gc thread */
	nand_gc_do = 1;
	wake_up(&nand_gc_thread_wait);

	return BLK_STS_OK;
}

static const struct blk_mq_ops rkflash_mq_ops = {
	.queue_rq	= rkflash_queue_rq,
};

static int nand_gc_has_work(void)
{
	return nand_gc_do;
}

static int nand_gc_do_work(void)
{
	int ret = nand_gc_has_work();

	/* do garbage collection while the device is idle */
	if (ret) {
		mutex_lock(&g_flash_ops_mutex);
		ret = g_boot_ops->gc();
		rkflash_print_bio("%s gc result= %d\n", __func__, ret);
		mutex_unlock(&g_flash_ops_mutex);
	}

	return ret;
}

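/*
 * While GC work is pending, poll every HZ/20 jiffies (50 ms) so
 * nand_gc_do_work() keeps getting called between requests; otherwise sleep
 * until rkflash_queue_rq() signals new work or the thread is stopped.
 */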
static void nand_gc_wait_work(void)
{
	unsigned long nand_gc_jiffies = HZ / 20;

	if (nand_gc_has_work())
		wait_event_freezable_timeout(nand_gc_thread_wait,
					     kthread_should_stop(),
					     nand_gc_jiffies);
	else
		wait_event_freezable(nand_gc_thread_wait,
				     kthread_should_stop() || nand_gc_has_work());
}

static int nand_gc_mythread(void *arg)
{
	int gc_done_times = 0;

	set_freezable();

	while (!kthread_should_stop()) {
		if (nand_gc_do_work() == 0) {
			gc_done_times++;
			if (gc_done_times > 10)
				nand_gc_do = 0;
		} else {
			gc_done_times = 0;
		}

		nand_gc_wait_work();
	}
	pr_info("nand gc thread exited\n");

	return 0;
}

static int rkflash_blk_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void rkflash_blk_release(struct gendisk *disk, fmode_t mode)
{
}

static int rkflash_blk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd,
			 unsigned long arg)
{
	struct flash_blk_dev *dev = bdev->bd_disk->private_data;

	switch (cmd) {
	case ENABLE_WRITE:
		dev->disable_access = 0;
		dev->readonly = 0;
		set_disk_ro(dev->blkcore_priv, 0);
		return 0;

	case DISABLE_WRITE:
		dev->readonly = 1;
		set_disk_ro(dev->blkcore_priv, 1);
		return 0;

	case ENABLE_READ:
		dev->disable_access = 0;
		dev->writeonly = 0;
		return 0;

	case DISABLE_READ:
		dev->writeonly = 1;
		return 0;
	default:
		return -ENOTTY;
	}
}

const struct block_device_operations rkflash_blk_trans_ops = {
	.owner = THIS_MODULE,
	.open = rkflash_blk_open,
	.release = rkflash_blk_release,
	.ioctl = rkflash_blk_ioctl,
};

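/*
 * Block major 31 is the "ROM/flash memory card" major listed in
 * Documentation/admin-guide/devices.txt; with minorbits = 0 every
 * partition gets its own gendisk with a single minor.
 */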
static struct flash_blk_ops mytr = {
	.name =  "rkflash",
	.major = 31,
	.minorbits = 0,
	.owner = THIS_MODULE,
};

static int rkflash_blk_add_dev(struct flash_blk_dev *dev,
			       struct flash_blk_ops *blk_ops,
			       struct flash_part *part)
{
	struct gendisk *gd;

	if (part->size == 0)
		return -1;

	gd = alloc_disk(1 << blk_ops->minorbits);
	if (!gd) {
		kfree(dev);
		return -ENOMEM;
	}

	dev->blk_ops = blk_ops;
	dev->size = part->size;
	dev->off_size = part->offset;
	dev->devnum = blk_ops->last_dev_index;
	list_add_tail(&dev->list, &blk_ops->devs);
	blk_ops->last_dev_index++;

	gd->major = blk_ops->major;
	gd->first_minor = (dev->devnum) << blk_ops->minorbits;
	gd->fops = &rkflash_blk_trans_ops;

	if (part->name[0]) {
		snprintf(gd->disk_name,
			 sizeof(gd->disk_name),
			 "%s",
			 part->name);
	} else {
		gd->flags = GENHD_FL_EXT_DEVT;
		gd->minors = 255;
		snprintf(gd->disk_name,
			 sizeof(gd->disk_name),
			 "%s%d",
			 blk_ops->name,
			 dev->devnum);
	}

	set_capacity(gd, dev->size);

	gd->private_data = dev;
	dev->blkcore_priv = gd;
	gd->queue = blk_ops->rq;

	if (part->type == PART_NO_ACCESS)
		dev->disable_access = 1;

	if (part->type == PART_READONLY)
		dev->readonly = 1;

	if (part->type == PART_WRITEONLY)
		dev->writeonly = 1;

	if (dev->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

static int rkflash_blk_remove_dev(struct flash_blk_dev *dev)
{
	struct gendisk *gd;

	gd = dev->blkcore_priv;
	list_del(&dev->list);
	gd->queue = NULL;
	del_gendisk(gd);
	put_disk(gd);
	kfree(dev);
	return 0;
}

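/*
 * Bring up the block layer side: register the major, create one blk-mq
 * queue (single hardware queue, depth 1, BLK_MQ_F_BLOCKING) shared by all
 * partitions, start the GC thread for NAND back ends, then publish a disk
 * per partition from the RK partition table, or one disk spanning the whole
 * chip when no table is found.
 */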
static int rkflash_blk_register(struct flash_blk_ops *blk_ops)
{
	int i, ret;
	u64 offset;
	struct flash_blk_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512,
				       GFP_KERNEL | GFP_DMA);
	if (!mtd_read_temp_buffer) {
		kfree(dev);
		return -ENOMEM;
	}

	ret = register_blkdev(blk_ops->major, blk_ops->name);
	if (ret) {
		kfree(dev);

		return -1;
	}

	/* Create the request queue */
	spin_lock_init(&blk_ops->queue_lock);
	INIT_LIST_HEAD(&blk_ops->rq_list);

	blk_ops->tag_set = kzalloc(sizeof(*blk_ops->tag_set), GFP_KERNEL);
	if (!blk_ops->tag_set) {
		ret = -ENOMEM;
		goto error1;
	}

	blk_ops->rq = blk_mq_init_sq_queue(blk_ops->tag_set, &rkflash_mq_ops, 1,
					   BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (IS_ERR(blk_ops->rq)) {
		ret = PTR_ERR(blk_ops->rq);
		blk_ops->rq = NULL;
		goto error2;
	}

	blk_ops->rq->queuedata = dev;

	blk_queue_max_hw_sectors(blk_ops->rq, MTD_RW_SECTORS);
	blk_queue_max_segments(blk_ops->rq, MTD_RW_SECTORS);

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, blk_ops->rq);
	blk_queue_max_discard_sectors(blk_ops->rq, UINT_MAX >> 9);
	blk_ops->rq->limits.discard_granularity = 64 << 9;

	if (g_flash_type == FLASH_TYPE_SFC_NAND || g_flash_type == FLASH_TYPE_NANDC_NAND)
		nand_gc_thread = kthread_run(nand_gc_mythread, (void *)blk_ops, "rkflash_gc");

	INIT_LIST_HEAD(&blk_ops->devs);
	g_max_part_num = rk_partition_init(disk_array);
	if (g_max_part_num) {
		/* partition 0 stores vendor data and is kept hidden */
		blk_ops->last_dev_index = 0;
		for (i = 1; i < g_max_part_num; i++) {
			offset = (u64)disk_array[i].offset;
			pr_info("%10s: 0x%09llx -- 0x%09llx (%llu MB)\n",
				disk_array[i].name,
				offset * 512,
				(u64)(offset + disk_array[i].size) * 512,
				(u64)disk_array[i].size / 2048);
			rkflash_blk_add_dev(dev, blk_ops, &disk_array[i]);
		}
		rkflash_blk_add_dev(dev, blk_ops, &fw_header_p);
	} else {
		struct flash_part part;

		part.offset = 0;
		part.size = g_boot_ops->get_capacity();
		part.type = 0;
		part.name[0] = 0;
		rkflash_blk_add_dev(dev, blk_ops, &part);
	}
	rkflash_blk_create_procfs();

	return 0;

error2:
	kfree(blk_ops->tag_set);
error1:
	unregister_blkdev(blk_ops->major, blk_ops->name);
	kfree(dev);

	return ret;
}

static void rkflash_blk_unregister(struct flash_blk_ops *blk_ops)
{
	struct list_head *this, *next;

	list_for_each_safe(this, next, &blk_ops->devs) {
		struct flash_blk_dev *dev =
			list_entry(this, struct flash_blk_dev, list);

		rkflash_blk_remove_dev(dev);
	}
	blk_cleanup_queue(blk_ops->rq);
	unregister_blkdev(blk_ops->major, blk_ops->name);
}

static int __maybe_unused rkflash_dev_vendor_read(u32 sec, u32 n_sec, void *p_data)
{
	int ret;

	if (g_boot_ops->vendor_read) {
		mutex_lock(&g_flash_ops_mutex);
		ret = g_boot_ops->vendor_read(sec, n_sec, p_data);
		mutex_unlock(&g_flash_ops_mutex);
	} else {
		ret = -EPERM;
	}

	return ret;
}

static int __maybe_unused rkflash_dev_vendor_write(u32 sec, u32 n_sec, void *p_data)
{
	int ret;

	if (g_boot_ops->vendor_write) {
		mutex_lock(&g_flash_ops_mutex);
		ret = g_boot_ops->vendor_write(sec,
					       n_sec,
					       p_data);
		mutex_unlock(&g_flash_ops_mutex);
	} else {
		ret = -EPERM;
	}

	return ret;
}

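/*
 * Common entry point for the rkflash probe path: initialize the low-level
 * controller via @ops, hook up vendor storage for the detected flash type,
 * then expose the device either as MTD (when the matching CONFIG_RK_SFC_*_MTD
 * option is set) or as a block device through rkflash_blk_register(). The
 * case fall-throughs in both switches below are intentional.
 */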
int rkflash_dev_init(void __iomem *reg_addr,
		     enum flash_type type,
		     const struct flash_boot_ops *ops)
{
	int ret = -1;

	pr_info("%s enter\n", __func__);
	if (rkflash_dev_initialised) {
		pr_err("rkflash already initialized as type[%d]\n", g_flash_type);
		return -1;
	}

	if (!ops->init)
		return -EINVAL;
	ret = ops->init(reg_addr);
	if (ret) {
		pr_err("rkflash[%d] is invalid\n", type);

		return -ENODEV;
	}
	pr_info("rkflash[%d] init success\n", type);
	g_boot_ops = ops;

	/* vendor part */
	switch (type) {
	case FLASH_TYPE_SFC_NOR:
#if IS_ENABLED(CONFIG_RK_SFC_NOR_MTD) && IS_ENABLED(CONFIG_ROCKCHIP_MTD_VENDOR_STORAGE)
		break;
#else
		flash_vendor_dev_ops_register(rkflash_dev_vendor_read,
					      rkflash_dev_vendor_write);
#endif
		break;
	case FLASH_TYPE_SFC_NAND:
#ifdef CONFIG_RK_SFC_NAND_MTD
		break;
#endif
		/* fall through when CONFIG_RK_SFC_NAND_MTD is not set */
	case FLASH_TYPE_NANDC_NAND:
#if defined(CONFIG_RK_SFTL)
		rk_sftl_vendor_dev_ops_register(rkflash_dev_vendor_read,
						rkflash_dev_vendor_write);
		ret = rk_sftl_vendor_storage_init();
		if (!ret) {
			rk_vendor_register(rk_sftl_vendor_read,
					   rk_sftl_vendor_write);
			rk_sftl_vendor_register();
			pr_info("rkflash vendor storage init ok\n");
		} else {
			pr_info("rkflash vendor storage init failed\n");
		}
		break;
#endif
	default:
		break;
	}

	switch (type) {
	case FLASH_TYPE_SFC_NOR:
#ifdef CONFIG_RK_SFC_NOR_MTD
		ret = sfc_nor_mtd_init(sfnor_dev, &g_flash_ops_mutex);
		pr_info("%s device registered as mtd dev, ret= %d\n", __func__, ret);
		break;
#endif
		/* fall through when CONFIG_RK_SFC_NOR_MTD is not set */
	case FLASH_TYPE_SFC_NAND:
#ifdef CONFIG_RK_SFC_NAND_MTD
		ret = sfc_nand_mtd_init(sfnand_dev, &g_flash_ops_mutex);
		pr_info("%s device registered as mtd dev, ret= %d\n", __func__, ret);
		break;
#endif
		/* fall through when CONFIG_RK_SFC_NAND_MTD is not set */
	case FLASH_TYPE_NANDC_NAND:
	default:
		g_flash_type = type;
		ret = rkflash_blk_register(&mytr);
		pr_info("%s device registered as blk dev, ret= %d\n", __func__, ret);
		if (ret)
			g_flash_type = -1;
		break;
	}

	if (!ret)
		rkflash_dev_initialised = 1;

	return ret;
}

int rkflash_dev_exit(void)
{
	if (rkflash_dev_initialised)
		rkflash_dev_initialised = 0;
	if (g_flash_type != -1)
		rkflash_blk_unregister(&mytr);
	pr_info("%s:OK\n", __func__);

	return 0;
}

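/*
 * Suspend simply takes g_flash_ops_mutex so all flash ops (block I/O,
 * vendor storage, GC) are blocked until resume re-initializes the
 * controller and releases the mutex.
 */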
int rkflash_dev_suspend(void)
{
	mutex_lock(&g_flash_ops_mutex);

	return 0;
}

int rkflash_dev_resume(void __iomem *reg_addr)
{
	g_boot_ops->resume(reg_addr);
	mutex_unlock(&g_flash_ops_mutex);

	return 0;
}

void rkflash_dev_shutdown(void)
{
	pr_info("rkflash_shutdown...\n");
	if (g_flash_type == FLASH_TYPE_SFC_NAND || g_flash_type == FLASH_TYPE_NANDC_NAND)
		kthread_stop(nand_gc_thread);

	mutex_lock(&g_flash_ops_mutex);
	g_boot_ops->deinit();
	mutex_unlock(&g_flash_ops_mutex);
	pr_info("rkflash_shutdown:OK\n");
}
847