1 /*
2 * Copyright (C) 2013 ROCKCHIP, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/slab.h>
18 #include <linux/fs.h>
19 #include <linux/sched.h>
20 #include <linux/uaccess.h>
21 #include <linux/platform_device.h>
22 #include <linux/interrupt.h>
23 #include <linux/kthread.h>
24 #include <linux/poll.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/fb.h>
27 #include <linux/wakelock.h>
28 #include <linux/of.h>
29 #include <linux/of_platform.h>
30 #include <linux/io.h>
31 #include <linux/module.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/rockchip/cpu.h>
34 #include <linux/iommu.h>
35 #include <asm/cacheflush.h>
36 #include "iep_drv.h"
37 #include "hw_iep_reg.h"
38 #include "iep_iommu_ops.h"
39
/* Requested misc-device minor (despite the name, assigned to .minor below). */
#define IEP_MAJOR 255
/* Build with clock gating / runtime-PM management compiled in. */
#define IEP_CLK_ENABLE
/*#define IEP_TEST_CASE*/

static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug,
	"Debug level - higher value produces more verbose messages");

/* Size of the IEP register window. */
#define RK_IEP_SIZE 0x1000
/* Per-job completion wait timeout, in jiffies. */
#define IEP_TIMEOUT_DELAY 2*HZ
/* Idle interval before the delayed power-off fires, in jiffies. */
#define IEP_POWER_OFF_DELAY 4*HZ
52
/* Per-device state for the single IEP core. */
struct iep_drvdata {
	struct miscdevice miscdev;
	void *iep_base;		/* ioremapped register base */
	int irq0;		/* IEP interrupt line */

	struct clk *aclk_iep;	/* AXI bus clock */
	struct clk *hclk_iep;	/* AHB bus clock */
	struct clk *pd_iep;	/* optional power-domain clock (may be NULL) */
	struct clk *aclk_vio1;

	struct mutex mutex;

	/* direct path interface mode. true: enable, false: disable */
	bool dpi_mode;

	/* delayed power-off after an idle period, see iep_power_off_work() */
	struct delayed_work power_off_work;

	/* clk enable or disable */
	bool enable;
	struct wake_lock wake_lock;

	atomic_t iep_int;	/* frame-end IRQs pending for the threaded handler */
	atomic_t mmu_page_fault;
	atomic_t mmu_bus_error;

	/* capability for this iep device */
	struct IEP_CAP cap;
	struct device *dev;
};
82
/* Single global device instance; the SoC has only one IEP core. */
struct iep_drvdata *iep_drvdata1 = NULL;
/* Shared service state: job queues, locks and iommu handles (iep_drv.h). */
iep_service_info iep_service;
85
/*
 * Free one job: unmap and release every memory region attached to its
 * register table, unlink it from its session and status queues, and free it.
 * NOTE(review): all current callers hold iep_service.lock — keep it that way.
 */
static void iep_reg_deinit(struct iep_reg *reg)
{
	struct iep_mem_region *mem_region = NULL, *n;
	/* release memory regions attached to this register table */
	if (iep_service.iommu_dev) {
		list_for_each_entry_safe(mem_region, n, &reg->mem_region_list,
					 reg_lnk) {
			iep_iommu_unmap_iommu(iep_service.iommu_info,
					      reg->session, mem_region->hdl);
			iep_iommu_free(iep_service.iommu_info,
				       reg->session, mem_region->hdl);
			list_del_init(&mem_region->reg_lnk);
			kfree(mem_region);
		}
	}

	list_del_init(&reg->session_link);
	list_del_init(&reg->status_link);
	kfree(reg);
}
106
iep_reg_from_wait_to_ready(struct iep_reg * reg)107 static void iep_reg_from_wait_to_ready(struct iep_reg *reg)
108 {
109 list_del_init(®->status_link);
110 list_add_tail(®->status_link, &iep_service.ready);
111
112 list_del_init(®->session_link);
113 list_add_tail(®->session_link, ®->session->ready);
114 }
115
iep_reg_from_ready_to_running(struct iep_reg * reg)116 static void iep_reg_from_ready_to_running(struct iep_reg *reg)
117 {
118 list_del_init(®->status_link);
119 list_add_tail(®->status_link, &iep_service.running);
120
121 list_del_init(®->session_link);
122 list_add_tail(®->session_link, ®->session->running);
123 }
124
/*
 * Retire every job on the running list: drop the per-session and global
 * running counters, signal waiters whose session has no more queued work,
 * and free the job. Acquires iep_service.lock itself.
 */
static void iep_del_running_list(void)
{
	struct iep_reg *reg;
	int cnt = 0;

	mutex_lock(&iep_service.lock);

	while (!list_empty(&iep_service.running)) {
		/* only one job is ever started at a time (iep_try_start_frm) */
		BUG_ON(cnt != 0);
		reg = list_entry(iep_service.running.next,
				 struct iep_reg, status_link);

		atomic_dec(&reg->session->task_running);
		atomic_dec(&iep_service.total_running);

		/* last queued job of the session: wake sync/poll waiters */
		if (list_empty(&reg->session->waiting)) {
			atomic_set(&reg->session->done, 1);
			atomic_inc(&reg->session->num_done);
			wake_up(&reg->session->wait);
		}

		iep_reg_deinit(reg);
		cnt++;
	}

	mutex_unlock(&iep_service.lock);
}
152
iep_dump(void)153 static void iep_dump(void)
154 {
155 struct iep_status sts;
156
157 sts = iep_get_status(iep_drvdata1->iep_base);
158
159 IEP_INFO("scl_sts: %u, dil_sts %u, wyuv_sts %u, ryuv_sts %u, wrgb_sts %u, rrgb_sts %u, voi_sts %u\n",
160 sts.scl_sts, sts.dil_sts, sts.wyuv_sts, sts.ryuv_sts, sts.wrgb_sts, sts.rrgb_sts, sts.voi_sts); {
161 int *reg = (int *)iep_drvdata1->iep_base;
162 int i;
163
164 /* could not read validate data from address after base+0x40 */
165 for (i = 0; i < 0x40; i++) {
166 IEP_INFO("%08x ", reg[i]);
167
168 if ((i + 1) % 4 == 0) {
169 IEP_INFO("\n");
170 }
171 }
172
173 IEP_INFO("\n");
174 }
175 }
176
/* Flush timed-out jobs from the running list; acquires iep_service.lock itself. */
static void iep_del_running_list_timeout(void)
{
	struct iep_reg *reg;

	mutex_lock(&iep_service.lock);

	/* discard every stuck job; dump hardware state for post-mortem */
	while (!list_empty(&iep_service.running)) {
		reg = list_entry(iep_service.running.next, struct iep_reg, status_link);

		atomic_dec(&reg->session->task_running);
		atomic_dec(&iep_service.total_running);

		/* iep_soft_rst(iep_drvdata1->iep_base); */

		iep_dump();

		/* last queued job of the session: wake sync/poll waiters */
		if (list_empty(&reg->session->waiting)) {
			atomic_set(&reg->session->done, 1);
			wake_up(&reg->session->wait);
		}

		iep_reg_deinit(reg);
	}

	mutex_unlock(&iep_service.lock);
}
204
/* Arm the delayed power-off; iep_power_off_work() runs after IEP_POWER_OFF_DELAY. */
static inline void iep_queue_power_off_work(void)
{
	queue_delayed_work(system_wq, &iep_drvdata1->power_off_work, IEP_POWER_OFF_DELAY);
}
209
/*
 * Power up the IEP block: enable clocks, take the wake lock and attach the
 * IOMMU. Also re-arms the delayed power-off, rate-limited to once a second.
 * Idempotent: returns early if the block is already enabled.
 */
static void iep_power_on(void)
{
	static ktime_t last;
	ktime_t now = ktime_get();
	/* push the idle power-off window out, at most once per second */
	if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
		cancel_delayed_work_sync(&iep_drvdata1->power_off_work);
		iep_queue_power_off_work();
		last = now;
	}

	if (iep_service.enable)
		return;

	IEP_INFO("IEP Power ON\n");

	/* iep_soft_rst(iep_drvdata1->iep_base); */

#ifdef IEP_CLK_ENABLE
	pm_runtime_get_sync(iep_drvdata1->dev);
	/* pd_iep is optional; probe leaves it NULL when absent */
	if (iep_drvdata1->pd_iep)
		clk_prepare_enable(iep_drvdata1->pd_iep);
	clk_prepare_enable(iep_drvdata1->aclk_iep);
	clk_prepare_enable(iep_drvdata1->hclk_iep);
#endif

	wake_lock(&iep_drvdata1->wake_lock);

	iep_iommu_attach(iep_service.iommu_info);

	iep_service.enable = true;
}
241
/*
 * Power down the IEP block: detach the IOMMU, gate clocks and release the
 * wake lock. If tasks are still running, waits 50 ms and dumps state first.
 * Idempotent: returns early if already disabled.
 */
static void iep_power_off(void)
{
	int total_running;

	if (!iep_service.enable) {
		return;
	}

	IEP_INFO("IEP Power OFF\n");

	total_running = atomic_read(&iep_service.total_running);
	if (total_running) {
		/* best-effort grace period for in-flight work */
		IEP_WARNING("power off when %d task running!!\n", total_running);
		mdelay(50);
		IEP_WARNING("delay 50 ms for running task\n");
		iep_dump();
	}

	if (iep_service.iommu_dev) {
		iep_iommu_detach(iep_service.iommu_info);
	}

#ifdef IEP_CLK_ENABLE
	clk_disable_unprepare(iep_drvdata1->aclk_iep);
	clk_disable_unprepare(iep_drvdata1->hclk_iep);
	if (iep_drvdata1->pd_iep)
		clk_disable_unprepare(iep_drvdata1->pd_iep);
	pm_runtime_put(iep_drvdata1->dev);
#endif

	wake_unlock(&iep_drvdata1->wake_lock);
	iep_service.enable = false;
}
275
/*
 * Delayed-work handler: power the block off after an idle period, unless
 * direct-path (DPI) mode is active. If the service lock is contended the
 * device is busy, so simply retry later.
 */
static void iep_power_off_work(struct work_struct *work)
{
	if (mutex_trylock(&iep_service.lock)) {
		if (!iep_drvdata1->dpi_mode) {
			IEP_INFO("iep dpi mode inactivity\n");
			iep_power_off();
		}
		mutex_unlock(&iep_service.lock);
	} else {
		/* Come back later if the device is busy... */
		iep_queue_power_off_work();
	}
}
289
#ifdef CONFIG_FB_ROCKCHIP
/* Direct-path hooks exported by the Rockchip framebuffer driver. */
extern void rk_direct_fb_show(struct fb_info *fbi);
extern struct fb_info* rk_get_fb(int fb_id);
extern bool rk_fb_poll_wait_frame_complete(void);
extern int rk_fb_dpi_open(bool open);
extern int rk_fb_dpi_win_sel(int layer_id);
296
/*
 * Program the LCDC for direct-path output: select the overlay window, fill
 * in the fb_var pixel-format description for the job's output format, set
 * geometry/offsets and push the frame with rk_direct_fb_show().
 */
static void iep_config_lcdc(struct iep_reg *reg)
{
	struct fb_info *fb;
	int fbi = 0;
	int fmt = 0;

	/* layer 0 maps to fb0, everything else to fb1 */
	fbi = reg->layer == 0 ? 0 : 1;

	rk_fb_dpi_win_sel(fbi);

	fb = rk_get_fb(fbi);
#if 1
	switch (reg->format) {
	case IEP_FORMAT_ARGB_8888:
	case IEP_FORMAT_ABGR_8888:
		fmt = HAL_PIXEL_FORMAT_RGBA_8888;
		fb->var.bits_per_pixel = 32;

		fb->var.red.length = 8;
		fb->var.red.offset = 16;
		fb->var.red.msb_right = 0;

		fb->var.green.length = 8;
		fb->var.green.offset = 8;
		fb->var.green.msb_right = 0;

		fb->var.blue.length = 8;
		fb->var.blue.offset = 0;
		fb->var.blue.msb_right = 0;

		fb->var.transp.length = 8;
		fb->var.transp.offset = 24;
		fb->var.transp.msb_right = 0;

		break;
	case IEP_FORMAT_BGRA_8888:
		fmt = HAL_PIXEL_FORMAT_BGRA_8888;
		fb->var.bits_per_pixel = 32;
		break;
	case IEP_FORMAT_RGB_565:
		fmt = HAL_PIXEL_FORMAT_RGB_565;
		fb->var.bits_per_pixel = 16;

		fb->var.red.length = 5;
		fb->var.red.offset = 11;
		fb->var.red.msb_right = 0;

		fb->var.green.length = 6;
		fb->var.green.offset = 5;
		fb->var.green.msb_right = 0;

		fb->var.blue.length = 5;
		fb->var.blue.offset = 0;
		fb->var.blue.msb_right = 0;

		break;
	case IEP_FORMAT_YCbCr_422_SP:
		fmt = HAL_PIXEL_FORMAT_YCbCr_422_SP;
		fb->var.bits_per_pixel = 16;
		break;
	case IEP_FORMAT_YCbCr_420_SP:
		fmt = HAL_PIXEL_FORMAT_YCrCb_NV12;
		fb->var.bits_per_pixel = 16;
		break;
	case IEP_FORMAT_YCbCr_422_P:
	case IEP_FORMAT_YCrCb_422_SP:
	case IEP_FORMAT_YCrCb_422_P:
	case IEP_FORMAT_YCrCb_420_SP:
	case IEP_FORMAT_YCbCr_420_P:
	case IEP_FORMAT_YCrCb_420_P:
	case IEP_FORMAT_RGBA_8888:
	case IEP_FORMAT_BGR_565:
		/* unsupported format */
		IEP_ERR("unsupported format %d\n", reg->format);
		break;
	default:
		;
	}

	fb->var.xoffset = 0;
	fb->var.yoffset = 0;
	fb->var.xres = reg->act_width;
	fb->var.yres = reg->act_height;
	fb->var.xres_virtual = reg->act_width;
	fb->var.yres_virtual = reg->act_height;
	/* nonstd packs window offsets and the HAL format into one word */
	fb->var.nonstd = ((reg->off_y & 0xFFF) << 20) +
			 ((reg->off_x & 0xFFF) << 8) + (fmt & 0xFF);
	fb->var.grayscale =
		((reg->vir_height & 0xFFF) << 20) +
		((reg->vir_width & 0xFFF) << 8) + 0;/*win0 xsize & ysize*/
#endif
	rk_direct_fb_show(fb);
}
390
iep_switch_dpi(struct iep_reg * reg)391 static int iep_switch_dpi(struct iep_reg *reg)
392 {
393 if (reg->dpi_en) {
394 if (!iep_drvdata1->dpi_mode) {
395 /* Turn on dpi */
396 rk_fb_dpi_open(true);
397 iep_drvdata1->dpi_mode = true;
398 }
399 iep_config_lcdc(reg);
400 } else {
401 if (iep_drvdata1->dpi_mode) {
402 /* Turn off dpi */
403 /* wait_lcdc_dpi_close(); */
404 bool status;
405 rk_fb_dpi_open(false);
406 status = rk_fb_poll_wait_frame_complete();
407 iep_drvdata1->dpi_mode = false;
408 IEP_INFO("%s %d, iep dpi inactivated\n",
409 __func__, __LINE__);
410 }
411 }
412
413 return 0;
414 }
415 #endif
416
/*
 * Copy the prepared register image (config, command and address sections)
 * into the hardware register window, then issue a dsb() barrier.
 * NOTE(review): plain stores are used instead of writel() on what is
 * presumably an ioremapped device mapping — confirm ordering is sufficient
 * on weakly ordered cores before reusing this pattern.
 */
static void iep_reg_copy_to_hw(struct iep_reg *reg)
{
	int i;

	u32 *pbase = (u32 *)iep_drvdata1->iep_base;

	/* config registers */
	for (i = 0; i < IEP_CNF_REG_LEN; i++)
		pbase[IEP_CNF_REG_BASE + i] = reg->reg[IEP_CNF_REG_BASE + i];

	/* command registers */
	for (i = 0; i < IEP_CMD_REG_LEN; i++)
		pbase[IEP_CMD_REG_BASE + i] = reg->reg[IEP_CMD_REG_BASE + i];

	/* address registers */
	for (i = 0; i < IEP_ADD_REG_LEN; i++)
		pbase[IEP_ADD_REG_BASE + i] = reg->reg[IEP_ADD_REG_BASE + i];

	/* dmac_flush_range(&pbase[0], &pbase[IEP_REG_LEN]); */
	/* outer_flush_range(virt_to_phys(&pbase[0]),virt_to_phys(&pbase[IEP_REG_LEN])); */

	/* ensure all register writes are visible before the frame is started */
	dsb(sy);
}
440
/* Switch the deinterlace field order before the next LCDC frame starts. */
/*
 * Toggle the deinterlacer between top-field and bottom-field output so the
 * hardware alternates fields across LCDC frames, then kick a direct-path
 * frame when the Rockchip fb driver is available.
 */
static void iep_switch_fields_order(void)
{
	void *pbase = (void *)iep_drvdata1->iep_base;
	int mode = iep_get_deinterlace_mode(pbase);
#ifdef CONFIG_FB_ROCKCHIP
	struct fb_info *fb;
#endif
	/* flip T<->B for both the I4O1 and I2O1 deinterlace modes */
	switch (mode) {
	case dein_mode_I4O1B:
		iep_set_deinterlace_mode(dein_mode_I4O1T, pbase);
		break;
	case dein_mode_I4O1T:
		iep_set_deinterlace_mode(dein_mode_I4O1B, pbase);
		break;
	case dein_mode_I2O1B:
		iep_set_deinterlace_mode(dein_mode_I2O1T, pbase);
		break;
	case dein_mode_I2O1T:
		iep_set_deinterlace_mode(dein_mode_I2O1B, pbase);
		break;
	default:
		;
	}
#ifdef CONFIG_FB_ROCKCHIP
	fb = rk_get_fb(1);
	rk_direct_fb_show(fb);
#endif
	/*iep_switch_input_address(pbase);*/
}
472
/* Move the next waiting job to the ready queue; acquires iep_service.lock itself. */
static void iep_try_set_reg(void)
{
	struct iep_reg *reg;

	mutex_lock(&iep_service.lock);

	/* only stage a new job when nothing is already staged */
	if (list_empty(&iep_service.ready)) {
		if (!list_empty(&iep_service.waiting)) {
			reg = list_entry(iep_service.waiting.next, struct iep_reg, status_link);

			iep_power_on();
			udelay(1);

			iep_reg_from_wait_to_ready(reg);
			atomic_dec(&iep_service.waitcnt);

			/*iep_soft_rst(iep_drvdata1->iep_base);*/

			/* write the job's register image into the hardware */
			iep_reg_copy_to_hw(reg);
		}
	} else {
		/* a job is already staged; in DPI mode keep fields alternating */
		if (iep_drvdata1->dpi_mode)
			iep_switch_fields_order();
	}

	mutex_unlock(&iep_service.lock);
}
501
/*
 * Start the next ready job on idle hardware: move it to the running queues,
 * enable the frame-end interrupt, bump the running counters and trigger the
 * frame. No-op when a job is already running or nothing is ready.
 */
static void iep_try_start_frm(void)
{
	struct iep_reg *reg;

	mutex_lock(&iep_service.lock);

	if (list_empty(&iep_service.running)) {
		if (!list_empty(&iep_service.ready)) {
			reg = list_entry(iep_service.ready.next, struct iep_reg, status_link);
#ifdef CONFIG_FB_ROCKCHIP
			iep_switch_dpi(reg);
#endif
			iep_reg_from_ready_to_running(reg);
			iep_config_frame_end_int_en(iep_drvdata1->iep_base);
			iep_config_done(iep_drvdata1->iep_base);

			/* Start proc */
			atomic_inc(&reg->session->task_running);
			atomic_inc(&iep_service.total_running);
			iep_config_frm_start(iep_drvdata1->iep_base);
		}
	}

	mutex_unlock(&iep_service.lock);
}
527
/*
 * Threaded IRQ handler (bottom half). For each frame-end interrupt counted
 * by iep_irq(): retire the finished job, keep DPI field order alternating
 * when the waiting queue is drained, then stage and start the next job.
 */
static irqreturn_t iep_isr(int irq, void *dev_id)
{
	if (atomic_read(&iep_drvdata1->iep_int) > 0) {
		if (iep_service.enable) {
			if (list_empty(&iep_service.waiting)) {
				if (iep_drvdata1->dpi_mode) {
					iep_switch_fields_order();
				}
			}
			iep_del_running_list();
		}

		iep_try_set_reg();
		iep_try_start_frm();

		/* consume the event counted by the hard-IRQ handler */
		atomic_dec(&iep_drvdata1->iep_int);
	}

	return IRQ_HANDLED;
}
548
/*
 * Hard IRQ handler (top half): acknowledge the frame-end interrupt and count
 * it for the threaded handler, then wake iep_isr().
 */
static irqreturn_t iep_irq(int irq, void *dev_id)
{
	/*clear INT */
	void *pbase = (void *)iep_drvdata1->iep_base;

	if (iep_probe_int(pbase)) {
		iep_config_frame_end_int_clr(pbase);
		atomic_inc(&iep_drvdata1->iep_int);
	}

	return IRQ_WAKE_THREAD;
}
561
/*
 * Tear down every job still attached to a session (waiting, ready and
 * running). Called from iep_release() under iep_service.lock.
 */
static void iep_service_session_clear(iep_session *session)
{
	struct iep_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
		iep_reg_deinit(reg);
	}

	list_for_each_entry_safe(reg, n, &session->ready, session_link) {
		iep_reg_deinit(reg);
	}

	list_for_each_entry_safe(reg, n, &session->running, session_link) {
		iep_reg_deinit(reg);
	}
}
578
iep_open(struct inode * inode,struct file * filp)579 static int iep_open(struct inode *inode, struct file *filp)
580 {
581 //DECLARE_WAITQUEUE(wait, current);
582 iep_session *session = kzalloc(sizeof(*session), GFP_KERNEL);
583 if (NULL == session) {
584 IEP_ERR("unable to allocate memory for iep_session.\n");
585 return -ENOMEM;
586 }
587
588 session->pid = current->pid;
589 INIT_LIST_HEAD(&session->waiting);
590 INIT_LIST_HEAD(&session->ready);
591 INIT_LIST_HEAD(&session->running);
592 INIT_LIST_HEAD(&session->list_session);
593 init_waitqueue_head(&session->wait);
594 /*add_wait_queue(&session->wait, wait);*/
595 /* no need to protect */
596 mutex_lock(&iep_service.lock);
597 list_add_tail(&session->list_session, &iep_service.session);
598 mutex_unlock(&iep_service.lock);
599 atomic_set(&session->task_running, 0);
600 atomic_set(&session->num_done, 0);
601
602 filp->private_data = (void *)session;
603
604 return nonseekable_open(inode, filp);
605 }
606
/*
 * Close /dev/iep: give still-running tasks a 100 ms grace period, wake any
 * waiters, then unlink the session, free all of its jobs and iommu mappings,
 * and free the session itself.
 */
static int iep_release(struct inode *inode, struct file *filp)
{
	int task_running;
	iep_session *session = (iep_session *)filp->private_data;

	if (NULL == session)
		return -EINVAL;

	task_running = atomic_read(&session->task_running);

	if (task_running) {
		IEP_ERR("iep_service session %d still "
			"has %d task running when closing\n",
			session->pid, task_running);
		msleep(100);
		/*synchronization*/
	}

	wake_up(&session->wait);
	/* hardware must be on for the iommu cleanup below */
	iep_power_on();
	mutex_lock(&iep_service.lock);
	list_del(&session->list_session);
	iep_service_session_clear(session);
	iep_iommu_clear(iep_service.iommu_info, session);
	kfree(session);
	mutex_unlock(&iep_service.lock);

	return 0;
}
636
/*
 * poll() entry point: report the session readable once its work is done.
 * Fix: the original returned POLL_ERR / POLL_IN, which are SIGIO si_code
 * values, not poll(2) event flags — the correct masks are POLLERR and
 * POLLIN | POLLRDNORM.
 */
static unsigned int iep_poll(struct file *filp, poll_table *wait)
{
	int mask = 0;
	iep_session *session = (iep_session *)filp->private_data;

	if (NULL == session)
		return POLLERR;
	poll_wait(filp, &session->wait, wait);
	if (atomic_read(&session->done))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
649
/*
 * Kick the hardware and block until the session's work completes or
 * IEP_TIMEOUT_DELAY expires. On timeout, flush the stuck job and restart
 * the queue, returning -ETIMEDOUT.
 */
static int iep_get_result_sync(iep_session *session)
{
	int ret = 0;

	iep_try_start_frm();

	ret = wait_event_timeout(session->wait,
				 atomic_read(&session->done), IEP_TIMEOUT_DELAY);

	/* NOTE(review): wait_event_timeout() (non-interruptible) never returns
	 * a negative value, so this branch looks unreachable — confirm before
	 * relying on it. */
	if (unlikely(ret < 0)) {
		IEP_ERR("sync pid %d wait task ret %d\n", session->pid, ret);
		iep_del_running_list();
	} else if (0 == ret) {
		/* timed out: discard the stuck job and restart the pipeline */
		IEP_ERR("sync pid %d wait %d task done timeout\n",
			session->pid, atomic_read(&session->task_running));
		iep_del_running_list_timeout();
		iep_try_set_reg();
		iep_try_start_frm();
		ret = -ETIMEDOUT;
	}

	return ret;
}
673
/* Asynchronous variant: just kick the hardware; completion is observed via poll(). */
static void iep_get_result_async(iep_session *session)
{
	iep_try_start_frm();
}
679
/*
 * ioctl dispatcher for /dev/iep. Serialised by iep_service.mutex.
 * Fix: the original left ret == 0 when kzalloc() failed, then passed the
 * NULL msg to iep_config() — a NULL dereference. Allocation failure now
 * yields -ENOMEM and the message is never used.
 */
static long iep_ioctl(struct file *filp, uint32_t cmd, unsigned long arg)
{
	int ret = 0;
	iep_session *session = (iep_session *)filp->private_data;

	if (NULL == session) {
		IEP_ERR("%s [%d] iep thread session is null\n",
			__FUNCTION__, __LINE__);
		return -EINVAL;
	}

	mutex_lock(&iep_service.mutex);

	switch (cmd) {
	case IEP_SET_PARAMETER:
	{
		struct IEP_MSG *msg;

		msg = kzalloc(sizeof(*msg), GFP_KERNEL);
		if (!msg) {
			ret = -ENOMEM;
		} else if (copy_from_user(msg, (struct IEP_MSG *)arg,
					  sizeof(struct IEP_MSG))) {
			IEP_ERR("copy_from_user failure\n");
			ret = -EFAULT;
		}

		if (ret == 0) {
			/* cap the backlog at 10 queued jobs */
			if (atomic_read(&iep_service.waitcnt) < 10) {
				iep_power_on();
				iep_config(session, msg);
				atomic_inc(&iep_service.waitcnt);
			} else {
				IEP_ERR("iep task queue full\n");
				ret = -EFAULT;
			}
		}

		/** REGISTER CONFIG must accord to Timing When DPI mode
		 * enable */
		if (!iep_drvdata1->dpi_mode)
			iep_try_set_reg();
		kfree(msg);
	}
	break;
	case IEP_GET_RESULT_SYNC:
		if (0 > iep_get_result_sync(session)) {
			ret = -ETIMEDOUT;
		}
		break;
	case IEP_GET_RESULT_ASYNC:
		iep_get_result_async(session);
		break;
	case IEP_RELEASE_CURRENT_TASK:
		/* force-drop whatever is running and restart the pipeline */
		iep_del_running_list_timeout();
		iep_try_set_reg();
		iep_try_start_frm();
		break;
	case IEP_GET_IOMMU_STATE:
	{
		int iommu_enable = 0;

		iommu_enable = iep_service.iommu_dev ? 1 : 0;

		if (copy_to_user((void __user *)arg, &iommu_enable,
				 sizeof(int))) {
			IEP_ERR("error: copy_to_user failed\n");
			ret = -EFAULT;
		}
	}
	break;
	case IEP_QUERY_CAP:
		if (copy_to_user((void __user *)arg, &iep_drvdata1->cap,
				 sizeof(struct IEP_CAP))) {
			IEP_ERR("error: copy_to_user failed\n");
			ret = -EFAULT;
		}
		break;
	default:
		IEP_ERR("unknown ioctl cmd!\n");
		ret = -EINVAL;
	}
	mutex_unlock(&iep_service.mutex);

	return ret;
}
765
766 #ifdef CONFIG_COMPAT
compat_iep_ioctl(struct file * filp,uint32_t cmd,unsigned long arg)767 static long compat_iep_ioctl(struct file *filp, uint32_t cmd,
768 unsigned long arg)
769 {
770 int ret = 0;
771 iep_session *session = (iep_session *)filp->private_data;
772
773 if (NULL == session) {
774 IEP_ERR("%s [%d] iep thread session is null\n",
775 __func__, __LINE__);
776 return -EINVAL;
777 }
778
779 mutex_lock(&iep_service.mutex);
780
781 switch (cmd) {
782 case COMPAT_IEP_SET_PARAMETER:
783 {
784 struct IEP_MSG *msg;
785
786 msg = kzalloc(sizeof(*msg), GFP_KERNEL);
787
788 if (msg) {
789 if (copy_from_user
790 (msg, compat_ptr((compat_uptr_t)arg),
791 sizeof(struct IEP_MSG))) {
792 IEP_ERR("copy_from_user failure\n");
793 ret = -EFAULT;
794 }
795 }
796
797 if (ret == 0) {
798 if (atomic_read(&iep_service.waitcnt) < 10) {
799 iep_power_on();
800 iep_config(session, msg);
801 atomic_inc(&iep_service.waitcnt);
802 } else {
803 IEP_ERR("iep task queue full\n");
804 ret = -EFAULT;
805 }
806 }
807
808 /** REGISTER CONFIG must accord to Timing When DPI mode
809 * enable */
810 if (!iep_drvdata1->dpi_mode)
811 iep_try_set_reg();
812 kfree(msg);
813 }
814 break;
815 case COMPAT_IEP_GET_RESULT_SYNC:
816 if (0 > iep_get_result_sync(session))
817 ret = -ETIMEDOUT;
818 break;
819 case COMPAT_IEP_GET_RESULT_ASYNC:
820 iep_get_result_async(session);
821 break;
822 case COMPAT_IEP_RELEASE_CURRENT_TASK:
823 iep_del_running_list_timeout();
824 iep_try_set_reg();
825 iep_try_start_frm();
826 break;
827 case COMPAT_IEP_GET_IOMMU_STATE:
828 {
829 int iommu_enable = 0;
830
831 iommu_enable = iep_service.iommu_dev ? 1 : 0;
832
833 if (copy_to_user((void __user *)arg, &iommu_enable,
834 sizeof(int))) {
835 IEP_ERR("error: copy_to_user failed\n");
836 ret = -EFAULT;
837 }
838 }
839 break;
840 case COMPAT_IEP_QUERY_CAP:
841 if (copy_to_user((void __user *)arg, &iep_drvdata1->cap,
842 sizeof(struct IEP_CAP))) {
843 IEP_ERR("error: copy_to_user failed\n");
844 ret = -EFAULT;
845 }
846 break;
847 default:
848 IEP_ERR("unknown ioctl cmd!\n");
849 ret = -EINVAL;
850 }
851 mutex_unlock(&iep_service.mutex);
852
853 return ret;
854 }
855 #endif
856
/* Character-device entry points for /dev/iep. */
struct file_operations iep_fops = {
	.owner = THIS_MODULE,
	.open = iep_open,
	.release = iep_release,
	.poll = iep_poll,
	.unlocked_ioctl = iep_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_iep_ioctl,
#endif
};
867
/* Misc device /dev/iep; IEP_MAJOR (255) is used as the requested minor. */
static struct miscdevice iep_dev = {
	.minor = IEP_MAJOR,
	.name = "iep",
	.fops = &iep_fops,
};
873
iep_sysmmu_fault_handler(struct iommu_domain * domain,struct device * iommu_dev,unsigned long iova,int status,void * arg)874 static int iep_sysmmu_fault_handler(struct iommu_domain *domain,
875 struct device *iommu_dev,
876 unsigned long iova, int status, void *arg)
877 {
878 struct iep_reg *reg = list_entry(iep_service.running.next,
879 struct iep_reg, status_link);
880 if (reg != NULL) {
881 struct iep_mem_region *mem, *n;
882 int i = 0;
883 pr_info("iep, fault addr 0x%08x\n", (u32)iova);
884 list_for_each_entry_safe(mem, n,
885 ®->mem_region_list,
886 reg_lnk) {
887 pr_info("iep, mem region [%02d] 0x%08x %ld\n",
888 i, (u32)mem->iova, mem->len);
889 i++;
890 }
891
892 pr_alert("iep, page fault occur\n");
893
894 iep_del_running_list();
895 }
896
897 return 0;
898 }
899
iep_drv_probe(struct platform_device * pdev)900 static int iep_drv_probe(struct platform_device *pdev)
901 {
902 struct iep_drvdata *data;
903 int ret = 0;
904 struct resource *res = NULL;
905 u32 version;
906 struct device_node *np = pdev->dev.of_node;
907 struct platform_device *sub_dev = NULL;
908 struct device_node *sub_np = NULL;
909 u32 iommu_en = 0;
910 struct iommu_domain *domain;
911
912 of_property_read_u32(np, "iommu_enabled", &iommu_en);
913
914 data = devm_kzalloc(&pdev->dev, sizeof(*data),
915 GFP_KERNEL);
916 if (NULL == data) {
917 IEP_ERR("failed to allocate driver data.\n");
918 return -ENOMEM;
919 }
920
921 iep_drvdata1 = data;
922
923 INIT_LIST_HEAD(&iep_service.waiting);
924 INIT_LIST_HEAD(&iep_service.ready);
925 INIT_LIST_HEAD(&iep_service.running);
926 INIT_LIST_HEAD(&iep_service.done);
927 INIT_LIST_HEAD(&iep_service.session);
928 atomic_set(&iep_service.waitcnt, 0);
929 mutex_init(&iep_service.lock);
930 atomic_set(&iep_service.total_running, 0);
931 iep_service.enable = false;
932
933 #ifdef IEP_CLK_ENABLE
934 data->pd_iep = devm_clk_get(&pdev->dev, "pd_iep");
935 if (IS_ERR(data->pd_iep)) {
936 IEP_ERR("failed to find iep power down clock source.\n");
937 data->pd_iep = NULL;
938 }
939
940 data->aclk_iep = devm_clk_get(&pdev->dev, "aclk_iep");
941 if (IS_ERR(data->aclk_iep)) {
942 IEP_ERR("failed to find iep axi clock source.\n");
943 ret = -ENOENT;
944 goto err_clock;
945 }
946
947 data->hclk_iep = devm_clk_get(&pdev->dev, "hclk_iep");
948 if (IS_ERR(data->hclk_iep)) {
949 IEP_ERR("failed to find iep ahb clock source.\n");
950 ret = -ENOENT;
951 goto err_clock;
952 }
953 #endif
954
955 iep_service.enable = false;
956 INIT_DELAYED_WORK(&data->power_off_work, iep_power_off_work);
957 wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "iep");
958
959 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
960
961 data->iep_base = (void *)devm_ioremap_resource(&pdev->dev, res);
962 if (data->iep_base == NULL) {
963 IEP_ERR("iep ioremap failed\n");
964 ret = -ENOENT;
965 goto err_ioremap;
966 }
967
968 atomic_set(&data->iep_int, 0);
969 atomic_set(&data->mmu_page_fault, 0);
970 atomic_set(&data->mmu_bus_error, 0);
971
972 /* get the IRQ */
973 data->irq0 = platform_get_irq(pdev, 0);
974 if (data->irq0 <= 0) {
975 IEP_ERR("failed to get iep irq resource (%d).\n", data->irq0);
976 ret = data->irq0;
977 goto err_irq;
978 }
979
980 /* request the IRQ */
981 ret = devm_request_threaded_irq(&pdev->dev, data->irq0, iep_irq,
982 iep_isr, IRQF_SHARED, dev_name(&pdev->dev), pdev);
983 if (ret) {
984 IEP_ERR("iep request_irq failed (%d).\n", ret);
985 goto err_irq;
986 }
987
988 mutex_init(&iep_service.mutex);
989
990 if (of_property_read_u32(np, "version", &version)) {
991 version = 0;
992 }
993
994 data->cap.scaling_supported = 0;
995 data->cap.i4_deinterlace_supported = 1;
996 data->cap.i2_deinterlace_supported = 1;
997 data->cap.compression_noise_reduction_supported = 1;
998 data->cap.sampling_noise_reduction_supported = 1;
999 data->cap.hsb_enhancement_supported = 1;
1000 data->cap.cg_enhancement_supported = 1;
1001 data->cap.direct_path_supported = 1;
1002 data->cap.max_dynamic_width = 1920;
1003 data->cap.max_dynamic_height = 1088;
1004 data->cap.max_static_width = 8192;
1005 data->cap.max_static_height = 8192;
1006 data->cap.max_enhance_radius = 3;
1007
1008 switch (version) {
1009 case 0:
1010 data->cap.scaling_supported = 1;
1011 break;
1012 case 1:
1013 data->cap.compression_noise_reduction_supported = 0;
1014 data->cap.sampling_noise_reduction_supported = 0;
1015 if (soc_is_rk3126b() || soc_is_rk3126c()) {
1016 data->cap.i4_deinterlace_supported = 0;
1017 data->cap.hsb_enhancement_supported = 0;
1018 data->cap.cg_enhancement_supported = 0;
1019 }
1020 break;
1021 case 2:
1022 data->cap.max_dynamic_width = 4096;
1023 data->cap.max_dynamic_height = 2340;
1024 data->cap.max_enhance_radius = 2;
1025 break;
1026 default:
1027 ;
1028 }
1029
1030 platform_set_drvdata(pdev, data);
1031
1032 ret = misc_register(&iep_dev);
1033 if (ret) {
1034 IEP_ERR("cannot register miscdev (%d)\n", ret);
1035 goto err_misc_register;
1036 }
1037
1038 data->dev = &pdev->dev;
1039 #ifdef IEP_CLK_ENABLE
1040 pm_runtime_enable(data->dev);
1041 #endif
1042
1043 iep_service.iommu_dev = NULL;
1044 sub_np = of_parse_phandle(np, "iommus", 0);
1045 if (sub_np) {
1046 sub_dev = of_find_device_by_node(sub_np);
1047 iep_service.iommu_dev = &sub_dev->dev;
1048 domain = iommu_get_domain_for_dev(&pdev->dev);
1049 iommu_set_fault_handler(domain, iep_sysmmu_fault_handler, data);
1050 }
1051
1052 of_property_read_u32(np, "allocator", (u32 *)&iep_service.alloc_type);
1053 iep_power_on();
1054 iep_service.iommu_info = iep_iommu_info_create(data->dev,
1055 iep_service.iommu_dev,
1056 iep_service.alloc_type);
1057 iep_power_off();
1058
1059 IEP_INFO("IEP Driver loaded succesfully\n");
1060
1061 return 0;
1062
1063 err_misc_register:
1064 free_irq(data->irq0, pdev);
1065 err_irq:
1066 err_ioremap:
1067 wake_lock_destroy(&data->wake_lock);
1068 #ifdef IEP_CLK_ENABLE
1069 err_clock:
1070 #endif
1071 return ret;
1072 }
1073
iep_drv_remove(struct platform_device * pdev)1074 static int iep_drv_remove(struct platform_device *pdev)
1075 {
1076 struct iep_drvdata *data = platform_get_drvdata(pdev);
1077
1078 iep_iommu_info_destroy(iep_service.iommu_info);
1079 iep_service.iommu_info = NULL;
1080
1081 wake_lock_destroy(&data->wake_lock);
1082
1083 misc_deregister(&(data->miscdev));
1084 free_irq(data->irq0, &data->miscdev);
1085
1086 #ifdef IEP_CLK_ENABLE
1087 pm_runtime_disable(data->dev);
1088 #endif
1089
1090 return 0;
1091 }
1092
1093 #if defined(CONFIG_OF)
/* Device-tree match table; NOTE(review): no MODULE_DEVICE_TABLE(of, ...),
 * so module autoloading from DT will not work — confirm if intended. */
static const struct of_device_id iep_dt_ids[] = {
	{ .compatible = "rockchip,iep", },
	{ },
};
1098 #endif
1099
/* Platform driver glue; matched by name or by the DT table above. */
static struct platform_driver iep_driver = {
	.probe = iep_drv_probe,
	.remove = iep_drv_remove,
	.driver = {
		.name = "iep",
#if defined(CONFIG_OF)
		.of_match_table = of_match_ptr(iep_dt_ids),
#endif
	},
};
1110
1111 #ifdef CONFIG_PROC_FS
1112 #include <linux/proc_fs.h>
1113 #include <linux/seq_file.h>
1114
/*
 * /proc/iep seq_file show: power the block on and print the module status
 * bits plus the first 0x40 hardware registers.
 * NOTE(review): runs without iep_service.mutex (commented out) — racy
 * against concurrent ioctls; confirm whether that is acceptable.
 */
static int proc_iep_show(struct seq_file *s, void *v)
{
	struct iep_status sts;
	//mutex_lock(&iep_service.mutex);
	iep_power_on();
	seq_printf(s, "\nIEP Modules Status:\n");
	sts = iep_get_status(iep_drvdata1->iep_base);
	seq_printf(s, "scl_sts: %u, dil_sts %u, wyuv_sts %u, "
		   "ryuv_sts %u, wrgb_sts %u, rrgb_sts %u, voi_sts %u\n",
		   sts.scl_sts, sts.dil_sts, sts.wyuv_sts, sts.ryuv_sts,
		   sts.wrgb_sts, sts.rrgb_sts, sts.voi_sts); {
		int *reg = (int *)iep_drvdata1->iep_base;
		int i;

		/* could not read validate data from address after base+0x40 */
		for (i = 0; i < 0x40; i++) {
			seq_printf(s, "%08x ", reg[i]);

			if ((i + 1) % 4 == 0)
				seq_printf(s, "\n");
		}

		seq_printf(s, "\n");
	}

	//mutex_unlock(&iep_service.mutex);

	return 0;
}
1144
proc_iep_open(struct inode * inode,struct file * file)1145 static int proc_iep_open(struct inode *inode, struct file *file)
1146 {
1147 return single_open(file, proc_iep_show, NULL);
1148 }
1149
/* proc_ops for the read-only /proc/iep debug entry. */
static const struct proc_ops proc_iep_fops = {
	.proc_open = proc_iep_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
};
1156
iep_proc_init(void)1157 static int __init iep_proc_init(void)
1158 {
1159 proc_create("iep", 0, NULL, &proc_iep_fops);
1160 return 0;
1161 }
1162
/* Remove the /proc/iep entry on module unload. */
static void __exit iep_proc_release(void)
{
	remove_proc_entry("iep", NULL);
}
1167 #endif
1168
1169 #ifdef IEP_TEST_CASE
1170 void iep_test_case0(void);
1171 #endif
1172
iep_init(void)1173 static int __init iep_init(void)
1174 {
1175 int ret;
1176
1177 if ((ret = platform_driver_register(&iep_driver)) != 0) {
1178 IEP_ERR("Platform device register failed (%d).\n", ret);
1179 return ret;
1180 }
1181
1182 #ifdef CONFIG_PROC_FS
1183 iep_proc_init();
1184 #endif
1185
1186 IEP_INFO("Module initialized.\n");
1187
1188 #ifdef IEP_TEST_CASE
1189 iep_test_case0();
1190 #endif
1191
1192 return 0;
1193 }
1194
iep_exit(void)1195 static void __exit iep_exit(void)
1196 {
1197 IEP_ERR("%s IN\n", __func__);
1198 #ifdef CONFIG_PROC_FS
1199 iep_proc_release();
1200 #endif
1201
1202 iep_power_off();
1203 platform_driver_unregister(&iep_driver);
1204 }
1205
1206 module_init(iep_init);
1207 module_exit(iep_exit);
1208
1209 /* Module information */
1210 MODULE_AUTHOR("ljf@rock-chips.com");
1211 MODULE_DESCRIPTION("Driver for iep device");
1212 MODULE_LICENSE("GPL");
1213
1214 #ifdef IEP_TEST_CASE
1215
/*
 * This case tests only the IEP core, not its IOMMU, so the "iommus"
 * handle must be removed from the device tree when running it.
 */
1219
1220 #include "yuv420sp_480x480_interlaced.h"
1221 #include "yuv420sp_480x480_deinterlaced_i2o1.h"
1222
1223 //unsigned char tmp_buf[480*480*3/2];
1224
iep_test_case0(void)1225 void iep_test_case0(void)
1226 {
1227 struct IEP_MSG msg;
1228 iep_session session;
1229 unsigned int phy_src, phy_tmp;
1230 int i;
1231 int ret = 0;
1232 unsigned char *tmp_buf;
1233
1234 tmp_buf = kmalloc(480 * 480 * 3 / 2, GFP_KERNEL);
1235
1236 session.pid = current->pid;
1237 INIT_LIST_HEAD(&session.waiting);
1238 INIT_LIST_HEAD(&session.ready);
1239 INIT_LIST_HEAD(&session.running);
1240 INIT_LIST_HEAD(&session.list_session);
1241 init_waitqueue_head(&session.wait);
1242 list_add_tail(&session.list_session, &iep_service.session);
1243 atomic_set(&session.task_running, 0);
1244 atomic_set(&session.num_done, 0);
1245
1246 memset(&msg, 0, sizeof(struct IEP_MSG));
1247 memset(tmp_buf, 0xCC, 480 * 480 * 3 / 2);
1248
1249 #ifdef CONFIG_ARM
1250 dmac_flush_range(&yuv420sp_480x480_interlaced[0],
1251 &yuv420sp_480x480_interlaced[480 * 480 * 3 / 2]);
1252 outer_flush_range(virt_to_phys(&yuv420sp_480x480_interlaced[0]),
1253 virt_to_phys(&yuv420sp_480x480_interlaced[480 * 480 * 3 / 2]));
1254
1255 dmac_flush_range(&tmp_buf[0], &tmp_buf[480 * 480 * 3 / 2]);
1256 outer_flush_range(virt_to_phys(&tmp_buf[0]), virt_to_phys(&tmp_buf[480 * 480 * 3 / 2]));
1257 #elif defined(CONFIG_ARM64)
1258 __dma_flush_area(&yuv420sp_480x480_interlaced[0], 480 * 480 * 3 / 2);
1259 __dma_flush_area(&tmp_buf[0], 480 * 480 * 3 / 2);
1260 #endif
1261
1262 phy_src = virt_to_phys(&yuv420sp_480x480_interlaced[0]);
1263 phy_tmp = virt_to_phys(&tmp_buf[0]);
1264
1265 IEP_INFO("*********** IEP MSG GENARATE ************\n");
1266
1267 msg.src.act_w = 480;
1268 msg.src.act_h = 480;
1269 msg.src.x_off = 0;
1270 msg.src.y_off = 0;
1271 msg.src.vir_w = 480;
1272 msg.src.vir_h = 480;
1273 msg.src.format = IEP_FORMAT_YCbCr_420_SP;
1274 msg.src.mem_addr = phy_src;
1275 msg.src.uv_addr = (phy_src + 480 * 480);
1276 msg.src.v_addr = 0;
1277
1278 msg.dst.act_w = 480;
1279 msg.dst.act_h = 480;
1280 msg.dst.x_off = 0;
1281 msg.dst.y_off = 0;
1282 msg.dst.vir_w = 480;
1283 msg.dst.vir_h = 480;
1284 msg.dst.format = IEP_FORMAT_YCbCr_420_SP;
1285 msg.dst.mem_addr = phy_tmp;
1286 msg.dst.uv_addr = (phy_tmp + 480 * 480);
1287 msg.dst.v_addr = 0;
1288
1289 msg.dein_mode = IEP_DEINTERLACE_MODE_I2O1;
1290 msg.field_order = FIELD_ORDER_BOTTOM_FIRST;
1291
1292 IEP_INFO("*********** IEP TEST CASE 0 ************\n");
1293
1294 iep_config(&session, &msg);
1295 iep_try_set_reg();
1296 if (0 > iep_get_result_sync(&session)) {
1297 IEP_INFO("%s failed, timeout\n", __func__);
1298 ret = -ETIMEDOUT;
1299 }
1300
1301 mdelay(10);
1302
1303 IEP_INFO("*********** RESULT CHECKING ************\n");
1304
1305 for (i = 0; i < 480 * 480 * 3 / 2; i++) {
1306 if (tmp_buf[i] != yuv420sp_480x480_deinterlaced_i2o1[i]) {
1307 IEP_INFO("diff occur position %d, 0x%02x 0x%02x\n", i, tmp_buf[i], yuv420sp_480x480_deinterlaced_i2o1[i]);
1308
1309 if (i > 10) {
1310 iep_dump();
1311 break;
1312 }
1313 }
1314 }
1315
1316 if (i == 480 * 480 * 3 / 2)
1317 IEP_INFO("IEP pass the checking\n");
1318 }
1319
1320 #endif
1321