1 /*
2 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10 #include <asm/cacheflush.h>
11 #include <linux/clk.h>
12 #include <linux/debugfs.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/irq.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/miscdevice.h>
18 #include <linux/module.h>
19 #ifdef CONFIG_OF
20 #include <linux/of.h>
21 #endif
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <linux/uaccess.h>
27
28 #include "rk_nand_blk.h"
29 #include "rk_ftl_api.h"
30 #include "rk_nand_base.h"
31
32 #define RKNAND_VERSION_AND_DATE "rknandbase v1.2 2021-01-07"
33
/* Per-controller (NANDC0/NANDC1) register/irq/clock bookkeeping. */
static struct rk_nandc_info g_nandc_info[2];
/* Device used for DMA mapping; set in rknand_probe(). */
struct device *g_nand_device;
/* Copy of the 2KB IDB header read from controller 0 during probe. */
static char nand_idb_data[2048];
/* Non-zero: the wait_for_* helpers sleep on rk29_nandc_wait. */
static int rk_nand_wait_busy_schedule;
static int rk_nand_suspend_state;	/* 1 while the device is suspended */
static int rk_nand_shutdown_state;	/* 1 once rknand_dev_shutdown() ran */
/* Boot media code: 1:flash 2:emmc 4:sdcard0 8:sdcard1 */
static int rknand_boot_media = 2;
static DECLARE_WAIT_QUEUE_HEAD(rk29_nandc_wait);
static void rk_nand_iqr_timeout_hack(struct timer_list *unused);
/* Watchdog timer that wakes waiters if a controller irq is lost. */
static DEFINE_TIMER(rk_nand_iqr_timeout, rk_nand_iqr_timeout_hack);
static int nandc0_xfer_completed_flag;
static int nandc0_ready_completed_flag;
static int nandc1_xfer_completed_flag;
static int nandc1_ready_completed_flag;
static int rk_timer_add;	/* 1 while rk_nand_iqr_timeout is pending */
50
ftl_malloc(int size)51 void *ftl_malloc(int size)
52 {
53 return kmalloc(size, GFP_KERNEL | GFP_DMA);
54 }
55
/* Release a buffer obtained from ftl_malloc(); NULL is a no-op. */
void ftl_free(void *buf)
{
	kfree(buf);
}
60
rknand_get_clk_rate(int nandc_id)61 int rknand_get_clk_rate(int nandc_id)
62 {
63 return g_nandc_info[nandc_id].clk_rate;
64 }
65 EXPORT_SYMBOL(rknand_get_clk_rate);
66
rknand_dma_map_single(unsigned long ptr,int size,int dir)67 unsigned long rknand_dma_map_single(unsigned long ptr, int size, int dir)
68 {
69 return dma_map_single(g_nand_device, (void *)ptr, size
70 , dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
71 }
72 EXPORT_SYMBOL(rknand_dma_map_single);
73
rknand_dma_unmap_single(unsigned long ptr,int size,int dir)74 void rknand_dma_unmap_single(unsigned long ptr, int size, int dir)
75 {
76 dma_unmap_single(g_nand_device, (dma_addr_t)ptr, size
77 , dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
78 }
79 EXPORT_SYMBOL(rknand_dma_unmap_single);
80
/*
 * Chip-select setup hook for controller @id.  Nothing to do on this
 * platform; kept so the FTL interface stays intact.
 */
int rknand_flash_cs_init(int id)
{
	return 0;
}
EXPORT_SYMBOL(rknand_flash_cs_init);
86
rknand_get_reg_addr(unsigned long * p_nandc0,unsigned long * p_nandc1)87 int rknand_get_reg_addr(unsigned long *p_nandc0, unsigned long *p_nandc1)
88 {
89 *p_nandc0 = (unsigned long)g_nandc_info[0].reg_base;
90 *p_nandc1 = (unsigned long)g_nandc_info[1].reg_base;
91 return 0;
92 }
93 EXPORT_SYMBOL(rknand_get_reg_addr);
94
/*
 * Return the boot media code detected from the IDB header in
 * rknand_probe() (1:flash 2:emmc 4:sdcard0 8:sdcard1; default 2).
 */
int rknand_get_boot_media(void)
{
	return rknand_boot_media;
}
EXPORT_SYMBOL(rknand_get_boot_media);
100
/* Thin wrapper so the FTL blob can call copy_from_user(). */
unsigned long rk_copy_from_user(void *to, const void __user *from,
				unsigned long n)
{
	unsigned long ret;

	ret = copy_from_user(to, from, n);
	return ret;
}
106
/* Thin wrapper so the FTL blob can call copy_to_user(). */
unsigned long rk_copy_to_user(void __user *to, const void *from,
			      unsigned long n)
{
	unsigned long ret;

	ret = copy_to_user(to, from, n);
	return ret;
}
112
/* ioctl-only file operations for the rknand_sys_storage misc device. */
static const struct file_operations rknand_sys_storage_fops = {
	.compat_ioctl = rknand_sys_storage_ioctl,
	.unlocked_ioctl = rknand_sys_storage_ioctl,
};
117
/* /dev/rknand_sys_storage misc device, minor assigned dynamically. */
static struct miscdevice rknand_sys_storage_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "rknand_sys_storage",
	.fops = &rknand_sys_storage_fops,
};
123
rknand_sys_storage_init(void)124 int rknand_sys_storage_init(void)
125 {
126 return misc_register(&rknand_sys_storage_dev);
127 }
128
/* ioctl-only file operations for the vendor_storage misc device. */
static const struct file_operations rknand_vendor_storage_fops = {
	.compat_ioctl = rk_ftl_vendor_storage_ioctl,
	.unlocked_ioctl = rk_ftl_vendor_storage_ioctl,
};
133
/* /dev/vendor_storage misc device ("vender" typo is in the identifier only). */
static struct miscdevice rknand_vender_storage_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vendor_storage",
	.fops = &rknand_vendor_storage_fops,
};
139
rknand_vendor_storage_init(void)140 int rknand_vendor_storage_init(void)
141 {
142 return misc_register(&rknand_vender_storage_dev);
143 }
144
rk_nand_schedule_enable_config(int en)145 int rk_nand_schedule_enable_config(int en)
146 {
147 int tmp = rk_nand_wait_busy_schedule;
148
149 rk_nand_wait_busy_schedule = en;
150 return tmp;
151 }
152
rk_nand_iqr_timeout_hack(struct timer_list * unused)153 static void rk_nand_iqr_timeout_hack(struct timer_list *unused)
154 {
155 del_timer(&rk_nand_iqr_timeout);
156 rk_timer_add = 0;
157 nandc0_xfer_completed_flag = 1;
158 nandc0_ready_completed_flag = 1;
159 nandc1_xfer_completed_flag = 1;
160 nandc1_ready_completed_flag = 1;
161 wake_up(&rk29_nandc_wait);
162 }
163
rk_add_timer(void)164 static void rk_add_timer(void)
165 {
166 if (rk_timer_add == 0) {
167 rk_timer_add = 1;
168 rk_nand_iqr_timeout.expires = jiffies + HZ / 50;
169 add_timer(&rk_nand_iqr_timeout);
170 }
171 }
172
rk_del_timer(void)173 static void rk_del_timer(void)
174 {
175 if (rk_timer_add)
176 del_timer(&rk_nand_iqr_timeout);
177 rk_timer_add = 0;
178 }
179
rk_nandc_interrupt(int irq,void * dev_id)180 static irqreturn_t rk_nandc_interrupt(int irq, void *dev_id)
181 {
182 unsigned int irq_status = rk_nandc_get_irq_status(dev_id);
183
184 if (irq_status & (1 << 0)) {
185 rk_nandc_flash_xfer_completed(dev_id);
186 if (dev_id == g_nandc_info[0].reg_base)
187 nandc0_xfer_completed_flag = 1;
188 else
189 nandc1_xfer_completed_flag = 1;
190 }
191
192 if (irq_status & (1 << 1)) {
193 rk_nandc_flash_ready(dev_id);
194 if (dev_id == g_nandc_info[0].reg_base)
195 nandc0_ready_completed_flag = 1;
196 else
197 nandc1_ready_completed_flag = 1;
198 }
199
200 wake_up(&rk29_nandc_wait);
201 return IRQ_HANDLED;
202 }
203
rk_nandc_xfer_irq_flag_init(void * nandc_reg)204 void rk_nandc_xfer_irq_flag_init(void *nandc_reg)
205 {
206 if (nandc_reg == g_nandc_info[0].reg_base)
207 nandc0_xfer_completed_flag = 0;
208 else
209 nandc1_xfer_completed_flag = 0;
210 }
211
rk_nandc_rb_irq_flag_init(void * nandc_reg)212 void rk_nandc_rb_irq_flag_init(void *nandc_reg)
213 {
214 if (nandc_reg == g_nandc_info[0].reg_base)
215 nandc0_ready_completed_flag = 0;
216 else
217 nandc1_ready_completed_flag = 0;
218 }
219
wait_for_nandc_xfer_completed(void * nandc_reg)220 void wait_for_nandc_xfer_completed(void *nandc_reg)
221 {
222 if (rk_nand_wait_busy_schedule) {
223 rk_add_timer();
224 if (nandc_reg == g_nandc_info[0].reg_base)
225 wait_event(rk29_nandc_wait, nandc0_xfer_completed_flag);
226 else
227 wait_event(rk29_nandc_wait, nandc1_xfer_completed_flag);
228 rk_del_timer();
229 }
230 if (nandc_reg == g_nandc_info[0].reg_base)
231 nandc0_xfer_completed_flag = 0;
232 else
233 nandc1_xfer_completed_flag = 0;
234 }
235
wait_for_nand_flash_ready(void * nandc_reg)236 void wait_for_nand_flash_ready(void *nandc_reg)
237 {
238 if (rk_nand_wait_busy_schedule) {
239 rk_add_timer();
240 if (nandc_reg == g_nandc_info[0].reg_base)
241 wait_event(rk29_nandc_wait
242 , nandc0_ready_completed_flag);
243 else
244 wait_event(rk29_nandc_wait
245 , nandc1_ready_completed_flag);
246 rk_del_timer();
247 }
248 if (nandc_reg == g_nandc_info[0].reg_base)
249 nandc0_ready_completed_flag = 0;
250 else
251 nandc1_ready_completed_flag = 0;
252 }
253
rk_nandc_irq_config(int id,int mode,void * pfun)254 static int rk_nandc_irq_config(int id, int mode, void *pfun)
255 {
256 int ret = 0;
257 int irq = g_nandc_info[id].irq;
258
259 if (mode)
260 ret = request_irq(irq, pfun, 0, "nandc"
261 , g_nandc_info[id].reg_base);
262 else
263 free_irq(irq, NULL);
264 return ret;
265 }
266
rk_nandc_irq_init(void)267 int rk_nandc_irq_init(void)
268 {
269 int ret = 0;
270
271 rk_timer_add = 0;
272 nandc0_ready_completed_flag = 0;
273 nandc0_xfer_completed_flag = 0;
274 ret = rk_nandc_irq_config(0, 1, rk_nandc_interrupt);
275
276 if (!g_nandc_info[1].reg_base) {
277 nandc1_ready_completed_flag = 0;
278 nandc1_xfer_completed_flag = 0;
279 rk_nandc_irq_config(1, 1, rk_nandc_interrupt);
280 }
281 return ret;
282 }
283
rk_nandc_irq_deinit(void)284 int rk_nandc_irq_deinit(void)
285 {
286 rk_nandc_irq_config(0, 0, rk_nandc_interrupt);
287 if (!g_nandc_info[1].reg_base)
288 rk_nandc_irq_config(1, 0, rk_nandc_interrupt);
289 return 0;
290 }
291
rknand_probe(struct platform_device * pdev)292 static int rknand_probe(struct platform_device *pdev)
293 {
294 unsigned int id = 0;
295 int irq;
296 struct resource *mem;
297 void __iomem *membase;
298
299 g_nand_device = &pdev->dev;
300 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
301 membase = devm_ioremap_resource(&pdev->dev, mem);
302 if (!membase) {
303 dev_err(&pdev->dev, "no reg resource?\n");
304 return -1;
305 }
306
307 #ifdef CONFIG_OF
308 of_property_read_u32(pdev->dev.of_node, "nandc_id", &id);
309 pdev->id = id;
310 #endif
311
312 if (id == 0) {
313 memcpy(nand_idb_data, membase + 0x1000, 0x800);
314 if (*(int *)(&nand_idb_data[0]) == 0x44535953) {
315 rknand_boot_media = *(int *)(&nand_idb_data[8]);
316 if (rknand_boot_media == 2) /*boot from emmc*/
317 return -1;
318 }
319 }
320
321 irq = platform_get_irq(pdev, 0);
322 if (irq < 0) {
323 dev_err(&pdev->dev, "no irq resource?\n");
324 return irq;
325 }
326
327 g_nandc_info[id].id = id;
328 g_nandc_info[id].irq = irq;
329 g_nandc_info[id].reg_base = membase;
330
331 g_nandc_info[id].hclk = devm_clk_get(&pdev->dev, "hclk_nandc");
332 g_nandc_info[id].clk = devm_clk_get(&pdev->dev, "clk_nandc");
333 g_nandc_info[id].gclk = devm_clk_get(&pdev->dev, "g_clk_nandc");
334
335 if (unlikely(IS_ERR(g_nandc_info[id].hclk))) {
336 dev_err(&pdev->dev, "rknand_probe get hclk error\n");
337 return PTR_ERR(g_nandc_info[id].hclk);
338 }
339
340 if (!(IS_ERR(g_nandc_info[id].clk))) {
341 clk_set_rate(g_nandc_info[id].clk, 150 * 1000 * 1000);
342 g_nandc_info[id].clk_rate = clk_get_rate(g_nandc_info[id].clk);
343 clk_prepare_enable(g_nandc_info[id].clk);
344 dev_info(&pdev->dev,
345 "rknand_probe clk rate = %d\n",
346 g_nandc_info[id].clk_rate);
347 }
348
349 clk_prepare_enable(g_nandc_info[id].hclk);
350 if (!(IS_ERR(g_nandc_info[id].gclk)))
351 clk_prepare_enable(g_nandc_info[id].gclk);
352
353 pm_runtime_enable(&pdev->dev);
354 pm_runtime_get_sync(&pdev->dev);
355
356 return dma_set_mask(g_nand_device, DMA_BIT_MASK(32));
357 }
358
/* Legacy platform suspend hook; idempotent via rk_nand_suspend_state. */
static int rknand_suspend(struct platform_device *pdev, pm_message_t state)
{
	if (rk_nand_suspend_state)
		return 0;

	rk_nand_suspend_state = 1;
	rknand_dev_suspend();
	return 0;
}
367
rknand_resume(struct platform_device * pdev)368 static int rknand_resume(struct platform_device *pdev)
369 {
370 if (rk_nand_suspend_state == 1) {
371 rk_nand_suspend_state = 0;
372 rknand_dev_resume();
373 }
374 return 0;
375 }
376
rknand_shutdown(struct platform_device * pdev)377 static void rknand_shutdown(struct platform_device *pdev)
378 {
379 if (rk_nand_shutdown_state == 0) {
380 rk_nand_shutdown_state = 1;
381 rknand_dev_shutdown();
382 }
383 }
384
/* Flush the FTL's cached data to flash (delegates to rknand_dev_flush()). */
void rknand_dev_cache_flush(void)
{
	rknand_dev_flush();
}
389
rknand_pm_suspend(struct device * dev)390 static int rknand_pm_suspend(struct device *dev)
391 {
392 if (rk_nand_suspend_state == 0) {
393 rk_nand_suspend_state = 1;
394 rknand_dev_suspend();
395 pm_runtime_put(dev);
396 }
397 return 0;
398 }
399
rknand_pm_resume(struct device * dev)400 static int rknand_pm_resume(struct device *dev)
401 {
402 if (rk_nand_suspend_state == 1) {
403 rk_nand_suspend_state = 0;
404 pm_runtime_get_sync(dev);
405 rknand_dev_resume();
406 }
407 return 0;
408 }
409
/* System sleep callbacks (preferred over the legacy suspend/resume hooks). */
static const struct dev_pm_ops rknand_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(rknand_pm_suspend, rknand_pm_resume)
};
413
#ifdef CONFIG_OF
/* Device-tree match table: plain and v9 flavours of the controller. */
static const struct of_device_id of_rk_nandc_match[] = {
	{.compatible = "rockchip,rk-nandc"},
	{.compatible = "rockchip,rk-nandc-v9"},
	{}
};
#endif
421
/* Platform driver glue; .pm takes precedence over the legacy hooks. */
static struct platform_driver rknand_driver = {
	.probe = rknand_probe,
	.suspend = rknand_suspend,
	.resume = rknand_resume,
	.shutdown = rknand_shutdown,
	.driver = {
		.name = "rknand",
#ifdef CONFIG_OF
		.of_match_table = of_rk_nandc_match,
#endif
		.pm = &rknand_dev_pm_ops,
	},
};
435
/* Module exit: tear down the FTL core before unregistering the driver. */
static void __exit rknand_driver_exit(void)
{
	rknand_dev_exit();
	platform_driver_unregister(&rknand_driver);
}
441
rknand_driver_init(void)442 static int __init rknand_driver_init(void)
443 {
444 int ret = 0;
445
446 pr_err("%s\n", RKNAND_VERSION_AND_DATE);
447 ret = platform_driver_register(&rknand_driver);
448 if (ret == 0)
449 ret = rknand_dev_init();
450 return ret;
451 }
452
453 module_init(rknand_driver_init);
454 module_exit(rknand_driver_exit);
455 MODULE_ALIAS("rknand");
456 MODULE_LICENSE("GPL v2");
457