1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4 *
5 * author:
6 * Alpha Lin, alpha.lin@rock-chips.com
7 * Randy Li, randy.li@rock-chips.com
8 * Ding Wei, leo.ding@rock-chips.com
9 *
10 */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_platform.h>
21 #include <linux/of_irq.h>
22 #include <linux/proc_fs.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/poll.h>
25 #include <linux/regmap.h>
26 #include <linux/rwsem.h>
27 #include <linux/mfd/syscon.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/uaccess.h>
31 #include <linux/nospec.h>
32
33 #include <soc/rockchip/pm_domains.h>
34
35 #include "mpp_debug.h"
36 #include "mpp_common.h"
37 #include "mpp_iommu.h"
38
39 #define MPP_WAIT_TIMEOUT_DELAY (2000)
40
41 /* Use 'v' as magic number */
42 #define MPP_IOC_MAGIC 'v'
43
44 #define MPP_IOC_CFG_V1 _IOW(MPP_IOC_MAGIC, 1, unsigned int)
45 #define MPP_IOC_CFG_V2 _IOW(MPP_IOC_MAGIC, 2, unsigned int)
46
47 /* input parameter structure for version 1 */
48 struct mpp_msg_v1 {
49 __u32 cmd;
50 __u32 flags;
51 __u32 size;
52 __u32 offset;
53 __u64 data_ptr;
54 };
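/*
 * Illustrative userspace sketch (not part of the driver): the ioctl
 * handler below parses struct mpp_msg_v1 entries one after another,
 * chaining messages while MPP_FLAGS_MULTI_MSG is set and stopping at
 * the entry carrying MPP_FLAGS_LAST_MSG. The device node path and the
 * exact payload are assumptions made only for this example.
 *
 *	__u32 client_type = 0;
 *	struct mpp_msg_v1 msg = {
 *		.cmd      = MPP_CMD_INIT_CLIENT_TYPE,
 *		.flags    = 0,		// single message, implicitly the last one
 *		.size     = sizeof(client_type),
 *		.offset   = 0,
 *		.data_ptr = (__u64)(uintptr_t)&client_type,
 *	};
 *	int fd = open("/dev/mpp_service", O_RDWR);	// assumed node name
 *	ioctl(fd, MPP_IOC_CFG_V1, &msg);
 */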
55
56 #define MPP_BAT_MSG_DONE (0x00000001)
57
58 struct mpp_bat_msg {
59 __u64 flag;
60 __u32 fd;
61 __s32 ret;
62 };
63
64 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
65 const char *mpp_device_name[MPP_DEVICE_BUTT] = {
66 [MPP_DEVICE_VDPU1] = "VDPU1",
67 [MPP_DEVICE_VDPU2] = "VDPU2",
68 [MPP_DEVICE_VDPU1_PP] = "VDPU1_PP",
69 [MPP_DEVICE_VDPU2_PP] = "VDPU2_PP",
70 [MPP_DEVICE_AV1DEC] = "AV1DEC",
71 [MPP_DEVICE_HEVC_DEC] = "HEVC_DEC",
72 [MPP_DEVICE_RKVDEC] = "RKVDEC",
73 [MPP_DEVICE_AVSPLUS_DEC] = "AVSPLUS_DEC",
74 [MPP_DEVICE_RKJPEGD] = "RKJPEGD",
75 [MPP_DEVICE_RKVENC] = "RKVENC",
76 [MPP_DEVICE_VEPU1] = "VEPU1",
77 [MPP_DEVICE_VEPU2] = "VEPU2",
78 [MPP_DEVICE_VEPU2_JPEG] = "VEPU2",
79 [MPP_DEVICE_VEPU22] = "VEPU22",
80 [MPP_DEVICE_IEP2] = "IEP2",
81 [MPP_DEVICE_VDPP] = "VDPP",
82 };
83
84 const char *enc_info_item_name[ENC_INFO_BUTT] = {
85 [ENC_INFO_BASE] = "null",
86 [ENC_INFO_WIDTH] = "width",
87 [ENC_INFO_HEIGHT] = "height",
88 [ENC_INFO_FORMAT] = "format",
89 [ENC_INFO_FPS_IN] = "fps_in",
90 [ENC_INFO_FPS_OUT] = "fps_out",
91 [ENC_INFO_RC_MODE] = "rc_mode",
92 [ENC_INFO_BITRATE] = "bitrate",
93 [ENC_INFO_GOP_SIZE] = "gop_size",
94 [ENC_INFO_FPS_CALC] = "fps_calc",
95 [ENC_INFO_PROFILE] = "profile",
96 };
97
98 #endif
99
100 static void mpp_attach_workqueue(struct mpp_dev *mpp,
101 struct mpp_taskqueue *queue);
102
103 static int
104 mpp_taskqueue_pop_pending(struct mpp_taskqueue *queue,
105 struct mpp_task *task)
106 {
107 if (!task->session || !task->session->mpp)
108 return -EINVAL;
109
110 mutex_lock(&queue->pending_lock);
111 list_del_init(&task->queue_link);
112 mutex_unlock(&queue->pending_lock);
113 kref_put(&task->ref, mpp_free_task);
114
115 return 0;
116 }
117
118 static struct mpp_task *
119 mpp_taskqueue_get_pending_task(struct mpp_taskqueue *queue)
120 {
121 struct mpp_task *task = NULL;
122
123 mutex_lock(&queue->pending_lock);
124 task = list_first_entry_or_null(&queue->pending_list,
125 struct mpp_task,
126 queue_link);
127 mutex_unlock(&queue->pending_lock);
128
129 return task;
130 }
131
132 static bool
133 mpp_taskqueue_is_running(struct mpp_taskqueue *queue)
134 {
135 unsigned long flags;
136 bool flag;
137
138 spin_lock_irqsave(&queue->running_lock, flags);
139 flag = !list_empty(&queue->running_list);
140 spin_unlock_irqrestore(&queue->running_lock, flags);
141
142 return flag;
143 }
144
145 int mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue, struct mpp_task *task)
146 {
147 unsigned long flags;
148
149 mutex_lock(&queue->pending_lock);
150 spin_lock_irqsave(&queue->running_lock, flags);
151 list_move_tail(&task->queue_link, &queue->running_list);
152 spin_unlock_irqrestore(&queue->running_lock, flags);
153
154 mutex_unlock(&queue->pending_lock);
155
156 return 0;
157 }
158
159 static struct mpp_task *
160 mpp_taskqueue_get_running_task(struct mpp_taskqueue *queue)
161 {
162 unsigned long flags;
163 struct mpp_task *task = NULL;
164
165 spin_lock_irqsave(&queue->running_lock, flags);
166 task = list_first_entry_or_null(&queue->running_list,
167 struct mpp_task,
168 queue_link);
169 spin_unlock_irqrestore(&queue->running_lock, flags);
170
171 return task;
172 }
173
174 static int
175 mpp_taskqueue_pop_running(struct mpp_taskqueue *queue,
176 struct mpp_task *task)
177 {
178 unsigned long flags;
179
180 if (!task->session || !task->session->mpp)
181 return -EINVAL;
182
183 spin_lock_irqsave(&queue->running_lock, flags);
184 list_del_init(&task->queue_link);
185 spin_unlock_irqrestore(&queue->running_lock, flags);
186 kref_put(&task->ref, mpp_free_task);
187
188 return 0;
189 }
190
191 static void
192 mpp_taskqueue_trigger_work(struct mpp_dev *mpp)
193 {
194 kthread_queue_work(&mpp->queue->worker, &mpp->work);
195 }
196
197 int mpp_power_on(struct mpp_dev *mpp)
198 {
199 pm_runtime_get_sync(mpp->dev);
200 pm_stay_awake(mpp->dev);
201
202 if (mpp->hw_ops->clk_on)
203 mpp->hw_ops->clk_on(mpp);
204
205 return 0;
206 }
207
208 int mpp_power_off(struct mpp_dev *mpp)
209 {
210 if (mpp->hw_ops->clk_off)
211 mpp->hw_ops->clk_off(mpp);
212
213 pm_relax(mpp->dev);
214 if (mpp_taskqueue_get_pending_task(mpp->queue) ||
215 mpp_taskqueue_get_running_task(mpp->queue)) {
216 pm_runtime_mark_last_busy(mpp->dev);
217 pm_runtime_put_autosuspend(mpp->dev);
218 } else {
219 pm_runtime_put_sync_suspend(mpp->dev);
220 }
221
222 return 0;
223 }
224
225 static void task_msgs_reset(struct mpp_task_msgs *msgs)
226 {
227 list_del_init(&msgs->list);
228
229 msgs->flags = 0;
230 msgs->req_cnt = 0;
231 msgs->set_cnt = 0;
232 msgs->poll_cnt = 0;
233 }
234
235 static void task_msgs_init(struct mpp_task_msgs *msgs, struct mpp_session *session)
236 {
237 INIT_LIST_HEAD(&msgs->list);
238
239 msgs->session = session;
240 msgs->queue = NULL;
241 msgs->task = NULL;
242 msgs->mpp = NULL;
243
244 msgs->ext_fd = -1;
245
246 task_msgs_reset(msgs);
247 }
248
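/*
 * get_task_msgs() recycles message containers: it first tries to reuse
 * an entry from the session's idle list under lock_msgs and only
 * allocates (and counts) a new container when the idle list is empty.
 * put_task_msgs() returns the container to the idle list.
 */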
249 static struct mpp_task_msgs *get_task_msgs(struct mpp_session *session)
250 {
251 unsigned long flags;
252 struct mpp_task_msgs *msgs;
253
254 spin_lock_irqsave(&session->lock_msgs, flags);
255 msgs = list_first_entry_or_null(&session->list_msgs_idle,
256 struct mpp_task_msgs, list_session);
257 if (msgs) {
258 list_move_tail(&msgs->list_session, &session->list_msgs);
259 spin_unlock_irqrestore(&session->lock_msgs, flags);
260
261 return msgs;
262 }
263 spin_unlock_irqrestore(&session->lock_msgs, flags);
264
265 msgs = kzalloc(sizeof(*msgs), GFP_KERNEL);
266 task_msgs_init(msgs, session);
267 INIT_LIST_HEAD(&msgs->list_session);
268
269 spin_lock_irqsave(&session->lock_msgs, flags);
270 list_move_tail(&msgs->list_session, &session->list_msgs);
271 session->msgs_cnt++;
272 spin_unlock_irqrestore(&session->lock_msgs, flags);
273
274 mpp_debug_func(DEBUG_TASK_INFO, "session %d:%d msgs cnt %d\n",
275 session->pid, session->index, session->msgs_cnt);
276
277 return msgs;
278 }
279
280 static void put_task_msgs(struct mpp_task_msgs *msgs)
281 {
282 struct mpp_session *session = msgs->session;
283 unsigned long flags;
284
285 if (!session) {
286 pr_err("invalid msgs without session\n");
287 return;
288 }
289
290 if (msgs->ext_fd >= 0) {
291 fdput(msgs->f);
292 msgs->ext_fd = -1;
293 }
294
295 task_msgs_reset(msgs);
296
297 spin_lock_irqsave(&session->lock_msgs, flags);
298 list_move_tail(&msgs->list_session, &session->list_msgs_idle);
299 spin_unlock_irqrestore(&session->lock_msgs, flags);
300 }
301
302 static void clear_task_msgs(struct mpp_session *session)
303 {
304 struct mpp_task_msgs *msgs, *n;
305 LIST_HEAD(list_to_free);
306 unsigned long flags;
307
308 spin_lock_irqsave(&session->lock_msgs, flags);
309
310 list_for_each_entry_safe(msgs, n, &session->list_msgs, list_session)
311 list_move_tail(&msgs->list_session, &list_to_free);
312
313 list_for_each_entry_safe(msgs, n, &session->list_msgs_idle, list_session)
314 list_move_tail(&msgs->list_session, &list_to_free);
315
316 spin_unlock_irqrestore(&session->lock_msgs, flags);
317
318 list_for_each_entry_safe(msgs, n, &list_to_free, list_session)
319 kfree(msgs);
320 }
321
322 static void mpp_session_clear_pending(struct mpp_session *session)
323 {
324 struct mpp_task *task = NULL, *n;
325
326 /* clear session pending list */
327 mutex_lock(&session->pending_lock);
328 list_for_each_entry_safe(task, n,
329 &session->pending_list,
330 pending_link) {
331 /* abort task in taskqueue */
332 atomic_inc(&task->abort_request);
333 list_del_init(&task->pending_link);
334 kref_put(&task->ref, mpp_free_task);
335 }
336 mutex_unlock(&session->pending_lock);
337 }
338
339 void mpp_session_cleanup_detach(struct mpp_taskqueue *queue, struct kthread_work *work)
340 {
341 struct mpp_session *session, *n;
342
343 if (!atomic_read(&queue->detach_count))
344 return;
345
346 mutex_lock(&queue->session_lock);
347 list_for_each_entry_safe(session, n, &queue->session_detach, session_link) {
348 s32 task_count = atomic_read(&session->task_count);
349
350 if (!task_count) {
351 list_del_init(&session->session_link);
352 atomic_dec(&queue->detach_count);
353 }
354
355 mutex_unlock(&queue->session_lock);
356
357 if (task_count) {
358 mpp_dbg_session("session %d:%d task not finished %d\n",
359 session->pid, session->index,
360 atomic_read(&queue->detach_count));
361
362 mpp_session_clear_pending(session);
363 } else {
364 mpp_dbg_session("queue detach %d\n",
365 atomic_read(&queue->detach_count));
366
367 mpp_session_deinit(session);
368 }
369
370 mutex_lock(&queue->session_lock);
371 }
372 mutex_unlock(&queue->session_lock);
373
374 if (atomic_read(&queue->detach_count)) {
375 mpp_dbg_session("queue detach %d again\n",
376 atomic_read(&queue->detach_count));
377
378 kthread_queue_work(&queue->worker, work);
379 }
380 }
381
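/*
 * Allocate and initialize a per-open session: record the caller's pid,
 * set up the pending/service/session links and the message lists, and
 * zero the task and release counters. The caller installs the default
 * process/wait/deinit hooks afterwards (see mpp_dev_open()).
 */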
382 static struct mpp_session *mpp_session_init(void)
383 {
384 struct mpp_session *session = kzalloc(sizeof(*session), GFP_KERNEL);
385
386 if (!session)
387 return NULL;
388
389 session->pid = current->pid;
390
391 mutex_init(&session->pending_lock);
392 INIT_LIST_HEAD(&session->pending_list);
393 INIT_LIST_HEAD(&session->service_link);
394 INIT_LIST_HEAD(&session->session_link);
395
396 atomic_set(&session->task_count, 0);
397 atomic_set(&session->release_request, 0);
398
399 INIT_LIST_HEAD(&session->list_msgs);
400 INIT_LIST_HEAD(&session->list_msgs_idle);
401 spin_lock_init(&session->lock_msgs);
402
403 mpp_dbg_session("session %p init\n", session);
404 return session;
405 }
406
407 static void mpp_session_deinit_default(struct mpp_session *session)
408 {
409 if (session->mpp) {
410 struct mpp_dev *mpp = session->mpp;
411
412 if (mpp->dev_ops->free_session)
413 mpp->dev_ops->free_session(session);
414
415 mpp_session_clear_pending(session);
416
417 if (session->dma) {
418 mpp_iommu_down_read(mpp->iommu_info);
419 mpp_dma_session_destroy(session->dma);
420 mpp_iommu_up_read(mpp->iommu_info);
421 session->dma = NULL;
422 }
423 }
424
425 if (session->srv) {
426 struct mpp_service *srv = session->srv;
427
428 mutex_lock(&srv->session_lock);
429 list_del_init(&session->service_link);
430 mutex_unlock(&srv->session_lock);
431 }
432
433 list_del_init(&session->session_link);
434 }
435
436 void mpp_session_deinit(struct mpp_session *session)
437 {
438 mpp_dbg_session("session %d:%d task %d deinit\n", session->pid,
439 session->index, atomic_read(&session->task_count));
440
441 if (likely(session->deinit))
442 session->deinit(session);
443 else
444 pr_err("invalid NULL session deinit function\n");
445
446 clear_task_msgs(session);
447
448 kfree(session);
449 }
450
451 static void mpp_session_attach_workqueue(struct mpp_session *session,
452 struct mpp_taskqueue *queue)
453 {
454 mpp_dbg_session("session %d:%d attach\n", session->pid, session->index);
455 mutex_lock(&queue->session_lock);
456 list_add_tail(&session->session_link, &queue->session_attach);
457 mutex_unlock(&queue->session_lock);
458 }
459
460 static void mpp_session_detach_workqueue(struct mpp_session *session)
461 {
462 struct mpp_taskqueue *queue;
463 struct mpp_dev *mpp;
464
465 if (!session->mpp || !session->mpp->queue)
466 return;
467
468 mpp_dbg_session("session %d:%d detach\n", session->pid, session->index);
469 mpp = session->mpp;
470 queue = mpp->queue;
471
472 mutex_lock(&queue->session_lock);
473 list_del_init(&session->session_link);
474 list_add_tail(&session->session_link, &queue->session_detach);
475 atomic_inc(&queue->detach_count);
476 mutex_unlock(&queue->session_lock);
477
478 mpp_taskqueue_trigger_work(mpp);
479 }
480
481 static int
482 mpp_session_push_pending(struct mpp_session *session,
483 struct mpp_task *task)
484 {
485 kref_get(&task->ref);
486 mutex_lock(&session->pending_lock);
487 if (session->srv->timing_en) {
488 task->on_pending = ktime_get();
489 set_bit(TASK_TIMING_PENDING, &task->state);
490 }
491 list_add_tail(&task->pending_link, &session->pending_list);
492 mutex_unlock(&session->pending_lock);
493
494 return 0;
495 }
496
497 static int
498 mpp_session_pop_pending(struct mpp_session *session,
499 struct mpp_task *task)
500 {
501 mutex_lock(&session->pending_lock);
502 list_del_init(&task->pending_link);
503 mutex_unlock(&session->pending_lock);
504 kref_put(&task->ref, mpp_free_task);
505
506 return 0;
507 }
508
509 static struct mpp_task *
510 mpp_session_get_pending_task(struct mpp_session *session)
511 {
512 struct mpp_task *task = NULL;
513
514 mutex_lock(&session->pending_lock);
515 task = list_first_entry_or_null(&session->pending_list,
516 struct mpp_task,
517 pending_link);
518 mutex_unlock(&session->pending_lock);
519
520 return task;
521 }
522
523 void mpp_free_task(struct kref *ref)
524 {
525 struct mpp_dev *mpp;
526 struct mpp_session *session;
527 struct mpp_task *task = container_of(ref, struct mpp_task, ref);
528
529 if (!task->session) {
530 mpp_err("task %p, task->session is null.\n", task);
531 return;
532 }
533 session = task->session;
534
535 mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d free state 0x%lx abort %d\n",
536 session->index, task->task_id, task->state,
537 atomic_read(&task->abort_request));
538
539 mpp = mpp_get_task_used_device(task, session);
540 if (mpp->dev_ops->free_task)
541 mpp->dev_ops->free_task(session, task);
542
543 /* Decrease reference count */
544 atomic_dec(&session->task_count);
545 atomic_dec(&mpp->task_count);
546 }
547
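/*
 * Delayed-work handler armed in mpp_task_run_begin(): if the hardware
 * never signals completion, mark the task as handled, dump its timing,
 * mask the core and iommu interrupts, reset and power off the device,
 * mark the task TIMEOUT/DONE, wake any waiter, drop the task from the
 * running list and kick the worker again.
 */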
548 static void mpp_task_timeout_work(struct work_struct *work_s)
549 {
550 struct mpp_dev *mpp;
551 struct mpp_session *session;
552 struct mpp_task *task = container_of(to_delayed_work(work_s),
553 struct mpp_task,
554 timeout_work);
555
556 if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
557 mpp_err("task has been handled\n");
558 return;
559 }
560
561 if (!task->session) {
562 mpp_err("task %p, task->session is null.\n", task);
563 return;
564 }
565
566 session = task->session;
567 mpp_err("task %d:%d:%d processing time out!\n", session->pid,
568 session->index, task->task_id);
569
570 if (!session->mpp) {
571 mpp_err("session %d:%d, session mpp is null.\n", session->pid,
572 session->index);
573 return;
574 }
575
576 mpp_task_dump_timing(task, ktime_us_delta(ktime_get(), task->on_create));
577
578 mpp = mpp_get_task_used_device(task, session);
579
580 /* disable core irq */
581 disable_irq(mpp->irq);
582 /* disable mmu irq */
583 if (mpp->iommu_info && mpp->iommu_info->got_irq)
584 disable_irq(mpp->iommu_info->irq);
585
586 /* hardware may be dead, reset it */
587 mpp_reset_up_read(mpp->reset_group);
588 mpp_dev_reset(mpp);
589 mpp_power_off(mpp);
590
591 set_bit(TASK_STATE_TIMEOUT, &task->state);
592 set_bit(TASK_STATE_DONE, &task->state);
593 /* Wake up the GET thread */
594 wake_up(&task->wait);
595
596 /* remove task from taskqueue running list */
597 mpp_taskqueue_pop_running(mpp->queue, task);
598
599 /* enable core irq */
600 enable_irq(mpp->irq);
601 /* enable mmu irq */
602 if (mpp->iommu_info && mpp->iommu_info->got_irq)
603 enable_irq(mpp->iommu_info->irq);
604
605 mpp_taskqueue_trigger_work(mpp);
606 }
607
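/*
 * Default process_task hook: ask the device to allocate a task from the
 * collected messages, stamp the optional timing information, assign the
 * per-device and per-queue task indices, set up the timeout work, and
 * push the task onto the session pending list before it is queued.
 */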
608 static int mpp_process_task_default(struct mpp_session *session,
609 struct mpp_task_msgs *msgs)
610 {
611 struct mpp_task *task = NULL;
612 struct mpp_dev *mpp = session->mpp;
613 u32 timing_en;
614 ktime_t on_create;
615
616 if (unlikely(!mpp)) {
617 mpp_err("pid %d client %d found invalid process function\n",
618 session->pid, session->device_type);
619 return -EINVAL;
620 }
621
622 timing_en = session->srv->timing_en;
623 if (timing_en)
624 on_create = ktime_get();
625
626 if (mpp->dev_ops->alloc_task)
627 task = mpp->dev_ops->alloc_task(session, msgs);
628 if (!task) {
629 mpp_err("alloc_task failed.\n");
630 return -ENOMEM;
631 }
632
633 if (timing_en) {
634 task->on_create_end = ktime_get();
635 task->on_create = on_create;
636 set_bit(TASK_TIMING_CREATE_END, &task->state);
637 set_bit(TASK_TIMING_CREATE, &task->state);
638 }
639
640 /* ensure current device */
641 mpp = mpp_get_task_used_device(task, session);
642
643 kref_init(&task->ref);
644 init_waitqueue_head(&task->wait);
645 atomic_set(&task->abort_request, 0);
646 task->task_index = atomic_fetch_inc(&mpp->task_index);
647 task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
648 INIT_DELAYED_WORK(&task->timeout_work, mpp_task_timeout_work);
649
650 if (mpp->auto_freq_en && mpp->hw_ops->get_freq)
651 mpp->hw_ops->get_freq(mpp, task);
652
653 msgs->queue = mpp->queue;
654 msgs->task = task;
655 msgs->mpp = mpp;
656
657 /*
658 * The task must be pushed to the session before it is pushed to the
659 * queue. Otherwise, when mpp_task_finish completes and the worker
660 * thread runs the task worker, it may pick up a task that is already
661 * in the queue but not yet in the session, which causes errors.
662 */
663 atomic_inc(&session->task_count);
664 mpp_session_push_pending(session, task);
665
666 return 0;
667 }
668
669 static int mpp_process_task(struct mpp_session *session,
670 struct mpp_task_msgs *msgs)
671 {
672 if (likely(session->process_task))
673 return session->process_task(session, msgs);
674
675 pr_err("invalid NULL process task function\n");
676 return -EINVAL;
677 }
678
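/*
 * mpp_reset_control_get() first looks for a reset named exactly <name>
 * in "reset-names"; if it is absent it falls back to "shared_<name>",
 * which is cached in the per-queue reset group. An illustrative dtsi
 * fragment (the property values are assumptions, only the naming
 * scheme comes from the code below):
 *
 *	resets = <&cru SRST_A_VDPU>, <&cru SRST_H_VDPU>;
 *	reset-names = "video_a", "shared_video_h";
 */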
679 struct reset_control *
680 mpp_reset_control_get(struct mpp_dev *mpp, enum MPP_RESET_TYPE type, const char *name)
681 {
682 int index;
683 struct reset_control *rst = NULL;
684 char shared_name[32] = "shared_";
685 struct mpp_reset_group *group;
686
687 /* check whether the reset belongs to this device alone */
688 index = of_property_match_string(mpp->dev->of_node, "reset-names", name);
689 if (index >= 0) {
690 rst = devm_reset_control_get(mpp->dev, name);
691 mpp_safe_unreset(rst);
692
693 return rst;
694 }
695
696 /* check whether the reset is shared */
697 strncat(shared_name, name,
698 sizeof(shared_name) - strlen(shared_name) - 1);
699 index = of_property_match_string(mpp->dev->of_node,
700 "reset-names", shared_name);
701 if (index < 0) {
702 dev_err(mpp->dev, "%s is not found!\n", shared_name);
703 return NULL;
704 }
705
706 if (!mpp->reset_group) {
707 dev_err(mpp->dev, "reset group is empty!\n");
708 return NULL;
709 }
710 group = mpp->reset_group;
711
712 down_write(&group->rw_sem);
713 rst = group->resets[type];
714 if (!rst) {
715 rst = devm_reset_control_get(mpp->dev, shared_name);
716 mpp_safe_unreset(rst);
717 group->resets[type] = rst;
718 group->queue = mpp->queue;
719 }
720 /* if the reset is not in the same queue, different devices
721 * may reset at the same time, so rw_sem_on must be set true.
722 */
723 group->rw_sem_on |= (group->queue != mpp->queue) ? true : false;
724 dev_info(mpp->dev, "reset_group->rw_sem_on=%d\n", group->rw_sem_on);
725 up_write(&group->rw_sem);
726
727 return rst;
728 }
729
730 int mpp_dev_reset(struct mpp_dev *mpp)
731 {
732 dev_info(mpp->dev, "resetting...\n");
733
734 /*
735 * before running, we have to switch the grf ctrl bit to ensure
736 * we are working on the current hardware
737 */
738 if (mpp->hw_ops->set_grf)
739 mpp->hw_ops->set_grf(mpp);
740 else
741 mpp_set_grf(mpp->grf_info);
742
743 if (mpp->auto_freq_en && mpp->hw_ops->reduce_freq)
744 mpp->hw_ops->reduce_freq(mpp);
745 /* FIXME lock resource lock of the other devices in combo */
746 mpp_iommu_down_write(mpp->iommu_info);
747 mpp_reset_down_write(mpp->reset_group);
748 atomic_set(&mpp->reset_request, 0);
749
750 if (mpp->hw_ops->reset)
751 mpp->hw_ops->reset(mpp);
752
753 /* Note: if the domain does not change, iommu attach returns as an
754 * empty operation. Therefore, force a detach and then a re-attach to
755 * update the domain so that it can really be attached.
756 */
757 mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
758
759 mpp_reset_up_write(mpp->reset_group);
760 mpp_iommu_up_write(mpp->iommu_info);
761
762 dev_info(mpp->dev, "reset done\n");
763
764 return 0;
765 }
766
767 void mpp_task_run_begin(struct mpp_task *task, u32 timing_en, u32 timeout)
768 {
769 preempt_disable();
770
771 set_bit(TASK_STATE_START, &task->state);
772
773 mpp_time_record(task);
774 schedule_delayed_work(&task->timeout_work, msecs_to_jiffies(timeout));
775
776 if (timing_en) {
777 task->on_sched_timeout = ktime_get();
778 set_bit(TASK_TIMING_TO_SCHED, &task->state);
779 }
780 }
781
782 void mpp_task_run_end(struct mpp_task *task, u32 timing_en)
783 {
784 if (timing_en) {
785 task->on_run_end = ktime_get();
786 set_bit(TASK_TIMING_RUN_END, &task->state);
787 }
788
789 #ifdef MODULE
790 preempt_enable();
791 #else
792 preempt_enable_no_resched();
793 #endif
794 }
795
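/*
 * Hand one task to the hardware: switch the grf selection bit, attach
 * the (possibly shared) iommu, power the device on, optionally raise
 * the clock rate, take the reset-group read lock (released at finish),
 * activate the iommu for this core and call the device run hook.
 */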
796 static int mpp_task_run(struct mpp_dev *mpp,
797 struct mpp_task *task)
798 {
799 int ret;
800 u32 timing_en;
801
802 mpp_debug_enter();
803
804 timing_en = mpp->srv->timing_en;
805 if (timing_en) {
806 task->on_run = ktime_get();
807 set_bit(TASK_TIMING_RUN, &task->state);
808 }
809
810 /*
811 * before running, we have to switch the grf ctrl bit to ensure
812 * we are working on the current hardware
813 */
814 if (mpp->hw_ops->set_grf) {
815 ret = mpp->hw_ops->set_grf(mpp);
816 if (ret) {
817 dev_err(mpp->dev, "set grf failed\n");
818 return ret;
819 }
820 } else {
821 mpp_set_grf(mpp->grf_info);
822 }
823 /*
824 * for hardware sharing an iommu, attach it to ensure
825 * we are working on the current device
826 */
827 ret = mpp_iommu_attach(mpp->iommu_info);
828 if (ret) {
829 dev_err(mpp->dev, "mpp_iommu_attach failed\n");
830 return -ENODATA;
831 }
832
833 mpp_power_on(mpp);
834 mpp_debug_func(DEBUG_TASK_INFO, "pid %d run %s\n",
835 task->session->pid, dev_name(mpp->dev));
836
837 if (mpp->auto_freq_en && mpp->hw_ops->set_freq)
838 mpp->hw_ops->set_freq(mpp, task);
839 /*
840 * TODO: Lock the reader locker of the device resource lock here,
841 * release at the finish operation
842 */
843 mpp_reset_down_read(mpp->reset_group);
844
845 mpp_iommu_dev_activate(mpp->iommu_info, mpp);
846 if (mpp->dev_ops->run)
847 mpp->dev_ops->run(mpp, task);
848
849 mpp_debug_leave();
850
851 return 0;
852 }
853
854 static void mpp_task_worker_default(struct kthread_work *work_s)
855 {
856 struct mpp_task *task;
857 struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
858 struct mpp_taskqueue *queue = mpp->queue;
859
860 mpp_debug_enter();
861
862 again:
863 task = mpp_taskqueue_get_pending_task(queue);
864 if (!task)
865 goto done;
866
867 /* if the task timed out and was aborted, remove it */
868 if (atomic_read(&task->abort_request) > 0) {
869 mpp_taskqueue_pop_pending(queue, task);
870 goto again;
871 }
872
873 /* get device for current task */
874 mpp = task->session->mpp;
875
876 /*
877 * In link table mode, the prepare function of the device
878 * checks whether a new task can be inserted into the device.
879 * If the device supports task status query (like the HEVC
880 * encoder), it can report whether the device is busy.
881 * If the device does not support multiple tasks or task status
882 * query, leave this job to the mpp service.
883 */
884 if (mpp->dev_ops->prepare)
885 task = mpp->dev_ops->prepare(mpp, task);
886 else if (mpp_taskqueue_is_running(queue))
887 task = NULL;
888
889 /*
890 * FIXME: even if the hardware supports task query, we still need to
891 * lock the running list and the mpp service in the current state.
892 */
893 /* Push a pending task to running queue */
894 if (task) {
895 struct mpp_dev *task_mpp = mpp_get_task_used_device(task, task->session);
896
897 atomic_inc(&task_mpp->task_count);
898 mpp_taskqueue_pending_to_run(queue, task);
899 set_bit(TASK_STATE_RUNNING, &task->state);
900 if (mpp_task_run(task_mpp, task))
901 mpp_taskqueue_pop_running(queue, task);
902 else
903 goto again;
904 }
905
906 done:
907 mpp_session_cleanup_detach(queue, work_s);
908 }
909
910 static int mpp_wait_result_default(struct mpp_session *session,
911 struct mpp_task_msgs *msgs)
912 {
913 int ret;
914 struct mpp_task *task;
915 struct mpp_dev *mpp;
916
917 task = mpp_session_get_pending_task(session);
918 if (!task) {
919 mpp_err("session %d:%d pending list is empty!\n",
920 session->pid, session->index);
921 return -EIO;
922 }
923 mpp = mpp_get_task_used_device(task, session);
924
925 ret = wait_event_timeout(task->wait,
926 test_bit(TASK_STATE_DONE, &task->state),
927 msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY));
928 if (ret > 0) {
929 if (mpp->dev_ops->result)
930 ret = mpp->dev_ops->result(mpp, task, msgs);
931 } else {
932 atomic_inc(&task->abort_request);
933 set_bit(TASK_STATE_ABORT, &task->state);
934 mpp_err("timeout, pid %d session %d:%d count %d cur_task %p id %d\n",
935 session->pid, session->pid, session->index,
936 atomic_read(&session->task_count), task,
937 task->task_id);
938 }
939
940 mpp_debug_func(DEBUG_TASK_INFO, "task %d kref_%d\n",
941 task->task_id, kref_read(&task->ref));
942
943 mpp_session_pop_pending(session, task);
944
945 return ret;
946 }
947
948 static int mpp_wait_result(struct mpp_session *session,
949 struct mpp_task_msgs *msgs)
950 {
951 if (likely(session->wait_result))
952 return session->wait_result(session, msgs);
953
954 pr_err("invalid NULL wait result function\n");
955 return -EINVAL;
956 }
957
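/*
 * mpp_attach_service() resolves the "rockchip,srv" phandle to find the
 * shared service, then picks the task queue and the optional reset
 * group from "rockchip,taskqueue-node" and "rockchip,resetgroup-node".
 * A minimal illustrative dtsi fragment (the indices are assumptions,
 * the property names come from the code below):
 *
 *	rockchip,srv = <&mpp_srv>;
 *	rockchip,taskqueue-node = <0>;
 *	rockchip,resetgroup-node = <0>;
 */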
958 static int mpp_attach_service(struct mpp_dev *mpp, struct device *dev)
959 {
960 u32 taskqueue_node = 0;
961 u32 reset_group_node = 0;
962 struct device_node *np = NULL;
963 struct platform_device *pdev = NULL;
964 struct mpp_taskqueue *queue = NULL;
965 int ret = 0;
966
967 np = of_parse_phandle(dev->of_node, "rockchip,srv", 0);
968 if (!np || !of_device_is_available(np)) {
969 dev_err(dev, "failed to get the mpp service node\n");
970 return -ENODEV;
971 }
972
973 pdev = of_find_device_by_node(np);
974 of_node_put(np);
975 if (!pdev) {
976 dev_err(dev, "failed to get mpp service from node\n");
977 return -ENODEV;
978 }
979
980 mpp->srv = platform_get_drvdata(pdev);
981 platform_device_put(pdev);
982 if (!mpp->srv) {
983 dev_err(dev, "failed attach service\n");
984 return -EINVAL;
985 }
986
987 ret = of_property_read_u32(dev->of_node,
988 "rockchip,taskqueue-node", &taskqueue_node);
989 if (ret) {
990 dev_err(dev, "failed to get taskqueue-node\n");
991 return ret;
992 } else if (taskqueue_node >= mpp->srv->taskqueue_cnt) {
993 dev_err(dev, "taskqueue-node %d must less than %d\n",
994 taskqueue_node, mpp->srv->taskqueue_cnt);
995 return -ENODEV;
996 }
997 /* set taskqueue according to the dtsi */
998 queue = mpp->srv->task_queues[taskqueue_node];
999 if (!queue) {
1000 dev_err(dev, "taskqueue attach to invalid node %d\n",
1001 taskqueue_node);
1002 return -ENODEV;
1003 }
1004 mpp_attach_workqueue(mpp, queue);
1005
1006 ret = of_property_read_u32(dev->of_node,
1007 "rockchip,resetgroup-node", &reset_group_node);
1008 if (!ret) {
1009 /* set resetgroup according to the dtsi */
1010 if (reset_group_node >= mpp->srv->reset_group_cnt) {
1011 dev_err(dev, "resetgroup-node %d must less than %d\n",
1012 reset_group_node, mpp->srv->reset_group_cnt);
1013 return -ENODEV;
1014 } else {
1015 mpp->reset_group = mpp->srv->reset_groups[reset_group_node];
1016 }
1017 }
1018
1019 return 0;
1020 }
1021
1022 struct mpp_taskqueue *mpp_taskqueue_init(struct device *dev)
1023 {
1024 struct mpp_taskqueue *queue = devm_kzalloc(dev, sizeof(*queue),
1025 GFP_KERNEL);
1026 if (!queue)
1027 return NULL;
1028
1029 mutex_init(&queue->session_lock);
1030 mutex_init(&queue->pending_lock);
1031 spin_lock_init(&queue->running_lock);
1032 mutex_init(&queue->mmu_lock);
1033 mutex_init(&queue->dev_lock);
1034 INIT_LIST_HEAD(&queue->session_attach);
1035 INIT_LIST_HEAD(&queue->session_detach);
1036 INIT_LIST_HEAD(&queue->pending_list);
1037 INIT_LIST_HEAD(&queue->running_list);
1038 INIT_LIST_HEAD(&queue->mmu_list);
1039 INIT_LIST_HEAD(&queue->dev_list);
1040
1041 /* default taskqueue has max 16 task capacity */
1042 queue->task_capacity = MPP_MAX_TASK_CAPACITY;
1043 atomic_set(&queue->reset_request, 0);
1044 atomic_set(&queue->detach_count, 0);
1045 atomic_set(&queue->task_id, 0);
1046 queue->dev_active_flags = 0;
1047
1048 return queue;
1049 }
1050
1051 static void mpp_attach_workqueue(struct mpp_dev *mpp,
1052 struct mpp_taskqueue *queue)
1053 {
1054 s32 core_id;
1055
1056 INIT_LIST_HEAD(&mpp->queue_link);
1057
1058 mutex_lock(&queue->dev_lock);
1059
1060 if (mpp->core_id >= 0)
1061 core_id = mpp->core_id;
1062 else
1063 core_id = queue->core_count;
1064
1065 if (core_id < 0 || core_id >= MPP_MAX_CORE_NUM) {
1066 dev_err(mpp->dev, "invalid core id %d\n", core_id);
1067 goto done;
1068 }
1069
1070 /*
1071 * multiple devices without multiple cores share one queue,
1072 * and the core_id keeps its default value 0.
1073 */
1074 if (queue->cores[core_id]) {
1075 if (queue->cores[core_id] == mpp)
1076 goto done;
1077
1078 core_id = queue->core_count;
1079 }
1080
1081 queue->cores[core_id] = mpp;
1082 queue->core_count++;
1083
1084 set_bit(core_id, &queue->core_idle);
1085 list_add_tail(&mpp->queue_link, &queue->dev_list);
1086 if (queue->core_id_max < (u32)core_id)
1087 queue->core_id_max = (u32)core_id;
1088
1089 mpp->core_id = core_id;
1090 mpp->queue = queue;
1091
1092 mpp_dbg_core("%s attach queue as core %d\n",
1093 dev_name(mpp->dev), mpp->core_id);
1094
1095 if (queue->task_capacity > mpp->task_capacity)
1096 queue->task_capacity = mpp->task_capacity;
1097
1098 done:
1099 mutex_unlock(&queue->dev_lock);
1100 }
1101
1102 static void mpp_detach_workqueue(struct mpp_dev *mpp)
1103 {
1104 struct mpp_taskqueue *queue = mpp->queue;
1105
1106 if (queue) {
1107 mutex_lock(&queue->dev_lock);
1108
1109 queue->cores[mpp->core_id] = NULL;
1110 queue->core_count--;
1111
1112 clear_bit(mpp->core_id, &queue->core_idle);
1113 list_del_init(&mpp->queue_link);
1114
1115 mpp->queue = NULL;
1116
1117 mutex_unlock(&queue->dev_lock);
1118 }
1119 }
1120
1121 static int mpp_check_cmd_v1(__u32 cmd)
1122 {
1123 bool found;
1124
1125 found = (cmd < MPP_CMD_QUERY_BUTT) ? true : false;
1126 found = (cmd >= MPP_CMD_INIT_BASE && cmd < MPP_CMD_INIT_BUTT) ? true : found;
1127 found = (cmd >= MPP_CMD_SEND_BASE && cmd < MPP_CMD_SEND_BUTT) ? true : found;
1128 found = (cmd >= MPP_CMD_POLL_BASE && cmd < MPP_CMD_POLL_BUTT) ? true : found;
1129 found = (cmd >= MPP_CMD_CONTROL_BASE && cmd < MPP_CMD_CONTROL_BUTT) ? true : found;
1130
1131 return found ? 0 : -EINVAL;
1132 }
1133
1134 static inline int mpp_msg_is_last(struct mpp_request *req)
1135 {
1136 int flag;
1137
1138 if (req->flags & MPP_FLAGS_MULTI_MSG)
1139 flag = (req->flags & MPP_FLAGS_LAST_MSG) ? 1 : 0;
1140 else
1141 flag = 1;
1142
1143 return flag;
1144 }
1145
1146 static __u32 mpp_get_cmd_butt(__u32 cmd)
1147 {
1148 __u32 mask = 0;
1149
1150 switch (cmd) {
1151 case MPP_CMD_QUERY_BASE:
1152 mask = MPP_CMD_QUERY_BUTT;
1153 break;
1154 case MPP_CMD_INIT_BASE:
1155 mask = MPP_CMD_INIT_BUTT;
1156 break;
1157
1158 case MPP_CMD_SEND_BASE:
1159 mask = MPP_CMD_SEND_BUTT;
1160 break;
1161 case MPP_CMD_POLL_BASE:
1162 mask = MPP_CMD_POLL_BUTT;
1163 break;
1164 case MPP_CMD_CONTROL_BASE:
1165 mask = MPP_CMD_CONTROL_BUTT;
1166 break;
1167 default:
1168 mpp_err("unknown dev cmd 0x%x\n", cmd);
1169 break;
1170 }
1171
1172 return mask;
1173 }
1174
1175 static int mpp_process_request(struct mpp_session *session,
1176 struct mpp_service *srv,
1177 struct mpp_request *req,
1178 struct mpp_task_msgs *msgs)
1179 {
1180 int ret;
1181 struct mpp_dev *mpp;
1182
1183 mpp_debug(DEBUG_IOCTL, "cmd %x process\n", req->cmd);
1184
1185 switch (req->cmd) {
1186 case MPP_CMD_QUERY_HW_SUPPORT: {
1187 u32 hw_support = srv->hw_support;
1188
1189 mpp_debug(DEBUG_IOCTL, "hw_support %08x\n", hw_support);
1190 if (put_user(hw_support, (u32 __user *)req->data))
1191 return -EFAULT;
1192 } break;
1193 case MPP_CMD_QUERY_HW_ID: {
1194 struct mpp_hw_info *hw_info;
1195
1196 mpp = NULL;
1197 if (session && session->mpp) {
1198 mpp = session->mpp;
1199 } else {
1200 u32 client_type;
1201
1202 if (get_user(client_type, (u32 __user *)req->data))
1203 return -EFAULT;
1204
1205 mpp_debug(DEBUG_IOCTL, "client %d\n", client_type);
1206 client_type = array_index_nospec(client_type, MPP_DEVICE_BUTT);
1207 if (test_bit(client_type, &srv->hw_support))
1208 mpp = srv->sub_devices[client_type];
1209 }
1210
1211 if (!mpp)
1212 return -EINVAL;
1213
1214 hw_info = mpp->var->hw_info;
1215 mpp_debug(DEBUG_IOCTL, "hw_id %08x\n", hw_info->hw_id);
1216 if (put_user(hw_info->hw_id, (u32 __user *)req->data))
1217 return -EFAULT;
1218 } break;
1219 case MPP_CMD_QUERY_CMD_SUPPORT: {
1220 __u32 cmd = 0;
1221
1222 if (get_user(cmd, (u32 __user *)req->data))
1223 return -EINVAL;
1224
1225 if (put_user(mpp_get_cmd_butt(cmd), (u32 __user *)req->data))
1226 return -EFAULT;
1227 } break;
1228 case MPP_CMD_INIT_CLIENT_TYPE: {
1229 u32 client_type;
1230
1231 if (get_user(client_type, (u32 __user *)req->data))
1232 return -EFAULT;
1233
1234 mpp_debug(DEBUG_IOCTL, "client %d\n", client_type);
1235 if (client_type >= MPP_DEVICE_BUTT) {
1236 mpp_err("client_type must less than %d\n",
1237 MPP_DEVICE_BUTT);
1238 return -EINVAL;
1239 }
1240 client_type = array_index_nospec(client_type, MPP_DEVICE_BUTT);
1241 mpp = srv->sub_devices[client_type];
1242 if (!mpp)
1243 return -EINVAL;
1244
1245 session->device_type = (enum MPP_DEVICE_TYPE)client_type;
1246 session->dma = mpp_dma_session_create(mpp->dev, mpp->session_max_buffers);
1247 session->mpp = mpp;
1248 if (mpp->dev_ops) {
1249 if (mpp->dev_ops->process_task)
1250 session->process_task =
1251 mpp->dev_ops->process_task;
1252
1253 if (mpp->dev_ops->wait_result)
1254 session->wait_result =
1255 mpp->dev_ops->wait_result;
1256
1257 if (mpp->dev_ops->deinit)
1258 session->deinit = mpp->dev_ops->deinit;
1259 }
1260 session->index = atomic_fetch_inc(&mpp->session_index);
1261 if (mpp->dev_ops && mpp->dev_ops->init_session) {
1262 ret = mpp->dev_ops->init_session(session);
1263 if (ret)
1264 return ret;
1265 }
1266
1267 mpp_session_attach_workqueue(session, mpp->queue);
1268 } break;
1269 case MPP_CMD_INIT_DRIVER_DATA: {
1270 u32 val;
1271
1272 mpp = session->mpp;
1273 if (!mpp)
1274 return -EINVAL;
1275 if (get_user(val, (u32 __user *)req->data))
1276 return -EFAULT;
1277 if (mpp->grf_info->grf)
1278 regmap_write(mpp->grf_info->grf, 0x5d8, val);
1279 } break;
1280 case MPP_CMD_INIT_TRANS_TABLE: {
1281 if (session && req->size) {
1282 int trans_tbl_size = sizeof(session->trans_table);
1283
1284 if (req->size > trans_tbl_size) {
1285 mpp_err("init table size %d more than %d\n",
1286 req->size, trans_tbl_size);
1287 return -ENOMEM;
1288 }
1289
1290 if (copy_from_user(session->trans_table,
1291 req->data, req->size)) {
1292 mpp_err("copy_from_user failed\n");
1293 return -EINVAL;
1294 }
1295 session->trans_count =
1296 req->size / sizeof(session->trans_table[0]);
1297 }
1298 } break;
1299 case MPP_CMD_SET_REG_WRITE:
1300 case MPP_CMD_SET_REG_READ:
1301 case MPP_CMD_SET_REG_ADDR_OFFSET:
1302 case MPP_CMD_SET_RCB_INFO: {
1303 msgs->flags |= req->flags;
1304 msgs->set_cnt++;
1305 } break;
1306 case MPP_CMD_POLL_HW_FINISH: {
1307 msgs->flags |= req->flags;
1308 msgs->poll_cnt++;
1309 msgs->poll_req = NULL;
1310 } break;
1311 case MPP_CMD_POLL_HW_IRQ: {
1312 if (msgs->poll_cnt || msgs->poll_req)
1313 mpp_err("Do NOT poll hw irq when previous call not return\n");
1314
1315 msgs->flags |= req->flags;
1316 msgs->poll_cnt++;
1317
1318 if (req->size && req->data) {
1319 if (!msgs->poll_req)
1320 msgs->poll_req = req;
1321 } else {
1322 msgs->poll_req = NULL;
1323 }
1324 } break;
1325 case MPP_CMD_RESET_SESSION: {
1326 int ret;
1327 int val;
1328
1329 ret = readx_poll_timeout(atomic_read,
1330 &session->task_count,
1331 val, val == 0, 1000, 500000);
1332 if (ret == -ETIMEDOUT) {
1333 mpp_err("wait task running time out\n");
1334 } else {
1335 mpp = session->mpp;
1336 if (!mpp)
1337 return -EINVAL;
1338
1339 mpp_session_clear_pending(session);
1340 mpp_iommu_down_write(mpp->iommu_info);
1341 ret = mpp_dma_session_destroy(session->dma);
1342 mpp_iommu_up_write(mpp->iommu_info);
1343 }
1344 return ret;
1345 } break;
1346 case MPP_CMD_TRANS_FD_TO_IOVA: {
1347 u32 i;
1348 u32 count;
1349 u32 data[MPP_MAX_REG_TRANS_NUM];
1350
1351 mpp = session->mpp;
1352 if (!mpp)
1353 return -EINVAL;
1354
1355 if (req->size <= 0 ||
1356 req->size > sizeof(data))
1357 return -EINVAL;
1358
1359 memset(data, 0, sizeof(data));
1360 if (copy_from_user(data, req->data, req->size)) {
1361 mpp_err("copy_from_user failed.\n");
1362 return -EINVAL;
1363 }
1364 count = req->size / sizeof(u32);
1365 for (i = 0; i < count; i++) {
1366 struct mpp_dma_buffer *buffer;
1367 int fd = data[i];
1368
1369 mpp_iommu_down_read(mpp->iommu_info);
1370 buffer = mpp_dma_import_fd(mpp->iommu_info,
1371 session->dma, fd);
1372 mpp_iommu_up_read(mpp->iommu_info);
1373 if (IS_ERR_OR_NULL(buffer)) {
1374 mpp_err("can not import fd %d\n", fd);
1375 return -EINVAL;
1376 }
1377 data[i] = (u32)buffer->iova;
1378 mpp_debug(DEBUG_IOMMU, "fd %d => iova %08x\n",
1379 fd, data[i]);
1380 }
1381 if (copy_to_user(req->data, data, req->size)) {
1382 mpp_err("copy_to_user failed.\n");
1383 return -EINVAL;
1384 }
1385 } break;
1386 case MPP_CMD_RELEASE_FD: {
1387 u32 i;
1388 int ret;
1389 u32 count;
1390 u32 data[MPP_MAX_REG_TRANS_NUM];
1391
1392 if (req->size <= 0 ||
1393 req->size > sizeof(data))
1394 return -EINVAL;
1395
1396 memset(data, 0, sizeof(data));
1397 if (copy_from_user(data, req->data, req->size)) {
1398 mpp_err("copy_from_user failed.\n");
1399 return -EINVAL;
1400 }
1401 count = req->size / sizeof(u32);
1402 for (i = 0; i < count; i++) {
1403 ret = mpp_dma_release_fd(session->dma, data[i]);
1404 if (ret) {
1405 mpp_err("release fd %d failed.\n", data[i]);
1406 return ret;
1407 }
1408 }
1409 } break;
1410 default: {
1411 mpp = session->mpp;
1412 if (!mpp) {
1413 mpp_err("pid %d not find client %d\n",
1414 session->pid, session->device_type);
1415 return -EINVAL;
1416 }
1417 if (mpp->dev_ops->ioctl)
1418 return mpp->dev_ops->ioctl(session, req);
1419
1420 mpp_debug(DEBUG_IOCTL, "unknown mpp ioctl cmd %x\n", req->cmd);
1421 } break;
1422 }
1423
1424 return 0;
1425 }
1426
1427 static void task_msgs_add(struct mpp_task_msgs *msgs, struct list_head *head)
1428 {
1429 struct mpp_session *session = msgs->session;
1430 int ret = 0;
1431
1432 /* process each task */
1433 if (msgs->set_cnt) {
1434 /* NOTE: update msg_flags for fd over 1024 */
1435 session->msg_flags = msgs->flags;
1436 ret = mpp_process_task(session, msgs);
1437 }
1438
1439 if (!ret) {
1440 INIT_LIST_HEAD(&msgs->list);
1441 list_add_tail(&msgs->list, head);
1442 } else {
1443 put_task_msgs(msgs);
1444 }
1445 }
1446
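/*
 * Parse the MPP_IOC_CFG_V1 payload: copy one struct mpp_msg_v1 at a
 * time, validate the command and accumulate the requests into a
 * mpp_task_msgs container. MPP_CMD_SET_SESSION_FD switches the target
 * session via the supplied fd; MPP_FLAGS_LAST_MSG (or a single message
 * without MPP_FLAGS_MULTI_MSG) terminates the loop, and the finished
 * container is added to the caller's list by task_msgs_add().
 */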
1447 static int mpp_collect_msgs(struct list_head *head, struct mpp_session *session,
1448 unsigned int cmd, void __user *msg)
1449 {
1450 struct mpp_msg_v1 msg_v1;
1451 struct mpp_request *req;
1452 struct mpp_task_msgs *msgs = NULL;
1453 int last = 1;
1454 int ret;
1455
1456 if (cmd != MPP_IOC_CFG_V1) {
1457 mpp_err("unknown ioctl cmd %x\n", cmd);
1458 return -EINVAL;
1459 }
1460
1461 next:
1462 /* first, parse to fixed struct */
1463 if (copy_from_user(&msg_v1, msg, sizeof(msg_v1)))
1464 return -EFAULT;
1465
1466 msg += sizeof(msg_v1);
1467
1468 mpp_debug(DEBUG_IOCTL, "cmd %x collect flags %08x, size %d, offset %x\n",
1469 msg_v1.cmd, msg_v1.flags, msg_v1.size, msg_v1.offset);
1470
1471 if (mpp_check_cmd_v1(msg_v1.cmd)) {
1472 mpp_err("mpp cmd %x is not supported.\n", msg_v1.cmd);
1473 return -EFAULT;
1474 }
1475
1476 if (msg_v1.flags & MPP_FLAGS_MULTI_MSG)
1477 last = (msg_v1.flags & MPP_FLAGS_LAST_MSG) ? 1 : 0;
1478 else
1479 last = 1;
1480
1481 /* check whether the cmd switches the msgs session */
1482 if (msg_v1.cmd == MPP_CMD_SET_SESSION_FD) {
1483 struct mpp_bat_msg bat_msg;
1484 struct mpp_bat_msg __user *usr_cmd;
1485 struct fd f;
1486
1487 /* try session switch here */
1488 usr_cmd = (struct mpp_bat_msg __user *)(unsigned long)msg_v1.data_ptr;
1489
1490 if (copy_from_user(&bat_msg, usr_cmd, sizeof(bat_msg)))
1491 return -EFAULT;
1492
1493 /* skip finished message */
1494 if (bat_msg.flag & MPP_BAT_MSG_DONE)
1495 goto session_switch_done;
1496
1497 f = fdget(bat_msg.fd);
1498 if (!f.file) {
1499 int ret = -EBADF;
1500
1501 mpp_err("fd %d get session failed\n", bat_msg.fd);
1502
1503 if (copy_to_user(&usr_cmd->ret, &ret, sizeof(usr_cmd->ret)))
1504 mpp_err("copy_to_user failed.\n");
1505 goto session_switch_done;
1506 }
1507
1508 /* NOTE: queue the previously collected ready messages and drop empty ones */
1509 if (msgs) {
1510 if (msgs->req_cnt)
1511 task_msgs_add(msgs, head);
1512 else
1513 put_task_msgs(msgs);
1514
1515 msgs = NULL;
1516 }
1517
1518 /* switch session */
1519 session = f.file->private_data;
1520 msgs = get_task_msgs(session);
1521
1522 if (f.file->private_data == session)
1523 msgs->ext_fd = bat_msg.fd;
1524
1525 msgs->f = f;
1526
1527 mpp_debug(DEBUG_IOCTL, "fd %d, session %d msg_cnt %d\n",
1528 bat_msg.fd, session->index, session->msgs_cnt);
1529
1530 session_switch_done:
1531 /* session id should NOT be the last message */
1532 if (last)
1533 return 0;
1534
1535 goto next;
1536 }
1537
1538 if (!msgs)
1539 msgs = get_task_msgs(session);
1540
1541 if (!msgs) {
1542 pr_err("session %d:%d failed to get task msgs",
1543 session->pid, session->index);
1544 return -EINVAL;
1545 }
1546
1547 if (msgs->req_cnt >= MPP_MAX_MSG_NUM) {
1548 mpp_err("session %d message count %d more than %d.\n",
1549 session->index, msgs->req_cnt, MPP_MAX_MSG_NUM);
1550 return -EINVAL;
1551 }
1552
1553 req = &msgs->reqs[msgs->req_cnt++];
1554 req->cmd = msg_v1.cmd;
1555 req->flags = msg_v1.flags;
1556 req->size = msg_v1.size;
1557 req->offset = msg_v1.offset;
1558 req->data = (void __user *)(unsigned long)msg_v1.data_ptr;
1559
1560 ret = mpp_process_request(session, session->srv, req, msgs);
1561 if (ret) {
1562 mpp_err("session %d process cmd %x ret %d\n",
1563 session->index, req->cmd, ret);
1564 return ret;
1565 }
1566
1567 if (!last)
1568 goto next;
1569
1570 task_msgs_add(msgs, head);
1571 msgs = NULL;
1572
1573 return 0;
1574 }
1575
1576 static void mpp_msgs_trigger(struct list_head *msgs_list)
1577 {
1578 struct mpp_task_msgs *msgs, *n;
1579 struct mpp_dev *mpp_prev = NULL;
1580 struct mpp_taskqueue *queue_prev = NULL;
1581
1582 /* push task to queue */
1583 list_for_each_entry_safe(msgs, n, msgs_list, list) {
1584 struct mpp_dev *mpp;
1585 struct mpp_task *task;
1586 struct mpp_taskqueue *queue;
1587
1588 if (!msgs->set_cnt || !msgs->queue)
1589 continue;
1590
1591 mpp = msgs->mpp;
1592 task = msgs->task;
1593 queue = msgs->queue;
1594
1595 if (queue_prev != queue) {
1596 if (queue_prev && mpp_prev) {
1597 mutex_unlock(&queue_prev->pending_lock);
1598 mpp_taskqueue_trigger_work(mpp_prev);
1599 }
1600
1601 if (queue)
1602 mutex_lock(&queue->pending_lock);
1603
1604 mpp_prev = mpp;
1605 queue_prev = queue;
1606 }
1607
1608 if (test_bit(TASK_STATE_ABORT, &task->state))
1609 pr_info("try to trigger abort task %d\n", task->task_id);
1610
1611 set_bit(TASK_STATE_PENDING, &task->state);
1612 list_add_tail(&task->queue_link, &queue->pending_list);
1613 }
1614
1615 if (mpp_prev && queue_prev) {
1616 mutex_unlock(&queue_prev->pending_lock);
1617 mpp_taskqueue_trigger_work(mpp_prev);
1618 }
1619 }
1620
1621 static void mpp_msgs_wait(struct list_head *msgs_list)
1622 {
1623 struct mpp_task_msgs *msgs, *n;
1624
1625 /* poll and release each task */
1626 list_for_each_entry_safe(msgs, n, msgs_list, list) {
1627 struct mpp_session *session = msgs->session;
1628
1629 if (msgs->poll_cnt) {
1630 int ret = mpp_wait_result(session, msgs);
1631
1632 if (ret) {
1633 mpp_err("session %d wait result ret %d\n",
1634 session->index, ret);
1635 }
1636 }
1637
1638 put_task_msgs(msgs);
1639
1640 }
1641 }
1642
1643 static long mpp_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1644 {
1645 struct mpp_service *srv;
1646 struct mpp_session *session = (struct mpp_session *)filp->private_data;
1647 struct list_head msgs_list;
1648 int ret = 0;
1649
1650 mpp_debug_enter();
1651
1652 if (!session || !session->srv) {
1653 mpp_err("session %p\n", session);
1654 return -EINVAL;
1655 }
1656
1657 srv = session->srv;
1658
1659 if (atomic_read(&session->release_request) > 0) {
1660 mpp_debug(DEBUG_IOCTL, "release session had request\n");
1661 return -EBUSY;
1662 }
1663 if (atomic_read(&srv->shutdown_request) > 0) {
1664 mpp_debug(DEBUG_IOCTL, "shutdown had request\n");
1665 return -EBUSY;
1666 }
1667
1668 INIT_LIST_HEAD(&msgs_list);
1669
1670 ret = mpp_collect_msgs(&msgs_list, session, cmd, (void __user *)arg);
1671 if (ret)
1672 mpp_err("collect msgs failed %d\n", ret);
1673
1674 mpp_msgs_trigger(&msgs_list);
1675
1676 mpp_msgs_wait(&msgs_list);
1677
1678 mpp_debug_leave();
1679
1680 return ret;
1681 }
1682
1683 static int mpp_dev_open(struct inode *inode, struct file *filp)
1684 {
1685 struct mpp_session *session = NULL;
1686 struct mpp_service *srv = container_of(inode->i_cdev,
1687 struct mpp_service,
1688 mpp_cdev);
1689 mpp_debug_enter();
1690
1691 session = mpp_session_init();
1692 if (!session)
1693 return -ENOMEM;
1694
1695 session->srv = srv;
1696
1697 if (session->srv) {
1698 mutex_lock(&srv->session_lock);
1699 list_add_tail(&session->service_link, &srv->session_list);
1700 mutex_unlock(&srv->session_lock);
1701 }
1702 session->process_task = mpp_process_task_default;
1703 session->wait_result = mpp_wait_result_default;
1704 session->deinit = mpp_session_deinit_default;
1705 filp->private_data = (void *)session;
1706
1707 mpp_debug_leave();
1708
1709 return nonseekable_open(inode, filp);
1710 }
1711
1712 static int mpp_dev_release(struct inode *inode, struct file *filp)
1713 {
1714 struct mpp_session *session = filp->private_data;
1715
1716 mpp_debug_enter();
1717
1718 if (!session) {
1719 mpp_err("session is null\n");
1720 return -EINVAL;
1721 }
1722
1723 /* wait for all tasks to be done */
1724 atomic_inc(&session->release_request);
1725
1726 if (session->mpp || atomic_read(&session->task_count))
1727 mpp_session_detach_workqueue(session);
1728 else
1729 mpp_session_deinit(session);
1730
1731 filp->private_data = NULL;
1732
1733 mpp_debug_leave();
1734 return 0;
1735 }
1736
1737 const struct file_operations rockchip_mpp_fops = {
1738 .open = mpp_dev_open,
1739 .release = mpp_dev_release,
1740 .unlocked_ioctl = mpp_dev_ioctl,
1741 #ifdef CONFIG_COMPAT
1742 .compat_ioctl = mpp_dev_ioctl,
1743 #endif
1744 };
1745
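/*
 * Import a dma-buf fd into the task: if the fd was already attached to
 * this task, the existing region is duplicated (is_dup) instead of
 * being re-imported; otherwise the buffer is mapped through the
 * session's dma context and its iova recorded for register translation.
 */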
1746 struct mpp_mem_region *
1747 mpp_task_attach_fd(struct mpp_task *task, int fd)
1748 {
1749 struct mpp_mem_region *mem_region = NULL, *loop = NULL, *n;
1750 struct mpp_dma_buffer *buffer = NULL;
1751 struct mpp_dev *mpp = task->session->mpp;
1752 struct mpp_dma_session *dma = task->session->dma;
1753 u32 mem_num = ARRAY_SIZE(task->mem_regions);
1754 bool found = false;
1755
1756 if (fd <= 0 || !dma || !mpp)
1757 return ERR_PTR(-EINVAL);
1758
1759 if (task->mem_count > mem_num) {
1760 mpp_err("mem_count %d must less than %d\n", task->mem_count, mem_num);
1761 return ERR_PTR(-ENOMEM);
1762 }
1763
1764 /* check whether the fd has already been imported */
1765 list_for_each_entry_safe_reverse(loop, n, &task->mem_region_list, reg_link) {
1766 if (loop->fd == fd) {
1767 found = true;
1768 break;
1769 }
1770 }
1771
1772 mem_region = &task->mem_regions[task->mem_count];
1773 if (found) {
1774 memcpy(mem_region, loop, sizeof(*loop));
1775 mem_region->is_dup = true;
1776 } else {
1777 mpp_iommu_down_read(mpp->iommu_info);
1778 buffer = mpp_dma_import_fd(mpp->iommu_info, dma, fd);
1779 mpp_iommu_up_read(mpp->iommu_info);
1780 if (IS_ERR(buffer)) {
1781 mpp_err("can't import dma-buf %d\n", fd);
1782 return ERR_CAST(buffer);
1783 }
1784
1785 mem_region->hdl = buffer;
1786 mem_region->iova = buffer->iova;
1787 mem_region->len = buffer->size;
1788 mem_region->fd = fd;
1789 mem_region->is_dup = false;
1790 }
1791 task->mem_count++;
1792 INIT_LIST_HEAD(&mem_region->reg_link);
1793 list_add_tail(&mem_region->reg_link, &task->mem_region_list);
1794
1795 return mem_region;
1796 }
1797
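/*
 * Register translation: each entry of the translation table names a
 * register that carries a dma-buf fd. Unless MPP_FLAGS_REG_NO_OFFSET is
 * set, userspace packs the fd in the low 10 bits and a byte offset in
 * the upper bits, i.e. reg = (offset << 10) | fd, and the driver
 * rewrites the register to buffer->iova + offset. Illustrative packing
 * (fd 5 and offset 0x100 are assumed values for the example):
 *
 *	reg[tbl[i]] = (0x100 << 10) | 5;
 */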
1798 int mpp_translate_reg_address(struct mpp_session *session,
1799 struct mpp_task *task, int fmt,
1800 u32 *reg, struct reg_offset_info *off_inf)
1801 {
1802 int i;
1803 int cnt;
1804 const u16 *tbl;
1805
1806 mpp_debug_enter();
1807
1808 if (session->trans_count > 0) {
1809 cnt = session->trans_count;
1810 tbl = session->trans_table;
1811 } else {
1812 struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
1813 struct mpp_trans_info *trans_info = mpp->var->trans_info;
1814
1815 cnt = trans_info[fmt].count;
1816 tbl = trans_info[fmt].table;
1817 }
1818
1819 for (i = 0; i < cnt; i++) {
1820 int usr_fd;
1821 u32 offset;
1822 struct mpp_mem_region *mem_region = NULL;
1823
1824 if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
1825 usr_fd = reg[tbl[i]];
1826 offset = 0;
1827 } else {
1828 usr_fd = reg[tbl[i]] & 0x3ff;
1829 offset = reg[tbl[i]] >> 10;
1830 }
1831
1832 if (usr_fd == 0)
1833 continue;
1834
1835 mem_region = mpp_task_attach_fd(task, usr_fd);
1836 if (IS_ERR(mem_region)) {
1837 mpp_err("reg[%3d]: 0x%08x fd %d failed\n",
1838 tbl[i], reg[tbl[i]], usr_fd);
1839 return PTR_ERR(mem_region);
1840 }
1841 mpp_debug(DEBUG_IOMMU,
1842 "reg[%3d]: %d => %pad, offset %10d, size %lx\n",
1843 tbl[i], usr_fd, &mem_region->iova,
1844 offset, mem_region->len);
1845 mem_region->reg_idx = tbl[i];
1846 reg[tbl[i]] = mem_region->iova + offset;
1847 }
1848
1849 mpp_debug_leave();
1850
1851 return 0;
1852 }
1853
1854 int mpp_check_req(struct mpp_request *req, int base,
1855 int max_size, u32 off_s, u32 off_e)
1856 {
1857 int req_off;
1858
1859 if (req->offset < base) {
1860 mpp_err("error: base %x, offset %x\n",
1861 base, req->offset);
1862 return -EINVAL;
1863 }
1864 req_off = req->offset - base;
1865 if ((req_off + req->size) < off_s) {
1866 mpp_err("error: req_off %x, req_size %x, off_s %x\n",
1867 req_off, req->size, off_s);
1868 return -EINVAL;
1869 }
1870 if (max_size < off_e) {
1871 mpp_err("error: off_e %x, max_size %x\n",
1872 off_e, max_size);
1873 return -EINVAL;
1874 }
1875 if (req_off > max_size) {
1876 mpp_err("error: req_off %x, max_size %x\n",
1877 req_off, max_size);
1878 return -EINVAL;
1879 }
1880 if ((req_off + req->size) > max_size) {
1881 mpp_err("error: req_off %x, req_size %x, max_size %x\n",
1882 req_off, req->size, max_size);
1883 req->size = req_off + req->size - max_size;
1884 }
1885
1886 return 0;
1887 }
1888
1889 int mpp_extract_reg_offset_info(struct reg_offset_info *off_inf,
1890 struct mpp_request *req)
1891 {
1892 int max_size = ARRAY_SIZE(off_inf->elem);
1893 int cnt = req->size / sizeof(off_inf->elem[0]);
1894
1895 if ((cnt + off_inf->cnt) > max_size) {
1896 mpp_err("count %d, total %d, max_size %d\n",
1897 cnt, off_inf->cnt, max_size);
1898 return -EINVAL;
1899 }
1900 if (copy_from_user(&off_inf->elem[off_inf->cnt],
1901 req->data, req->size)) {
1902 mpp_err("copy_from_user failed\n");
1903 return -EINVAL;
1904 }
1905 off_inf->cnt += cnt;
1906
1907 return 0;
1908 }
1909
1910 int mpp_query_reg_offset_info(struct reg_offset_info *off_inf,
1911 u32 index)
1912 {
1913 mpp_debug_enter();
1914 if (off_inf) {
1915 int i;
1916
1917 for (i = 0; i < off_inf->cnt; i++) {
1918 if (off_inf->elem[i].index == index)
1919 return off_inf->elem[i].offset;
1920 }
1921 }
1922 mpp_debug_leave();
1923
1924 return 0;
1925 }
1926
1927 int mpp_translate_reg_offset_info(struct mpp_task *task,
1928 struct reg_offset_info *off_inf,
1929 u32 *reg)
1930 {
1931 mpp_debug_enter();
1932
1933 if (off_inf) {
1934 int i;
1935
1936 for (i = 0; i < off_inf->cnt; i++) {
1937 mpp_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1938 off_inf->elem[i].index,
1939 off_inf->elem[i].offset);
1940 reg[off_inf->elem[i].index] += off_inf->elem[i].offset;
1941 }
1942 }
1943 mpp_debug_leave();
1944
1945 return 0;
1946 }
1947
1948 int mpp_task_init(struct mpp_session *session, struct mpp_task *task)
1949 {
1950 INIT_LIST_HEAD(&task->pending_link);
1951 INIT_LIST_HEAD(&task->queue_link);
1952 INIT_LIST_HEAD(&task->mem_region_list);
1953 task->state = 0;
1954 task->mem_count = 0;
1955 task->session = session;
1956
1957 return 0;
1958 }
1959
1960 int mpp_task_finish(struct mpp_session *session,
1961 struct mpp_task *task)
1962 {
1963 struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
1964
1965 if (mpp->dev_ops->finish)
1966 mpp->dev_ops->finish(mpp, task);
1967
1968 mpp_reset_up_read(mpp->reset_group);
1969 if (atomic_read(&mpp->reset_request) > 0)
1970 mpp_dev_reset(mpp);
1971 mpp_power_off(mpp);
1972
1973 set_bit(TASK_STATE_FINISH, &task->state);
1974 set_bit(TASK_STATE_DONE, &task->state);
1975
1976 if (session->srv->timing_en) {
1977 s64 time_diff;
1978
1979 task->on_finish = ktime_get();
1980 set_bit(TASK_TIMING_FINISH, &task->state);
1981
1982 time_diff = ktime_us_delta(task->on_finish, task->on_create);
1983
1984 if (mpp->timing_check && time_diff > (s64)mpp->timing_check)
1985 mpp_task_dump_timing(task, time_diff);
1986 }
1987
1988 /* Wake up the GET thread */
1989 wake_up(&task->wait);
1990 mpp_taskqueue_pop_running(mpp->queue, task);
1991
1992 return 0;
1993 }
1994
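/*
 * Release every memory region attached to the task; duplicated regions
 * are only unlinked, their buffers are not released here.
 */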
1995 int mpp_task_finalize(struct mpp_session *session,
1996 struct mpp_task *task)
1997 {
1998 struct mpp_mem_region *mem_region = NULL, *n;
1999 struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
2000
2001 /* release the memory regions attached to this register table. */
2002 list_for_each_entry_safe(mem_region, n,
2003 &task->mem_region_list,
2004 reg_link) {
2005 if (!mem_region->is_dup) {
2006 mpp_iommu_down_read(mpp->iommu_info);
2007 mpp_dma_release(session->dma, mem_region->hdl);
2008 mpp_iommu_up_read(mpp->iommu_info);
2009 }
2010 list_del_init(&mem_region->reg_link);
2011 }
2012
2013 return 0;
2014 }
2015
2016 int mpp_task_dump_mem_region(struct mpp_dev *mpp,
2017 struct mpp_task *task)
2018 {
2019 struct mpp_mem_region *mem = NULL, *n;
2020
2021 if (!task)
2022 return -EIO;
2023
2024 mpp_err("--- dump task %d mem region ---\n", task->task_index);
2025 if (!list_empty(&task->mem_region_list)) {
2026 list_for_each_entry_safe(mem, n,
2027 &task->mem_region_list,
2028 reg_link) {
2029 mpp_err("reg[%3d]: %pad, size %lx\n",
2030 mem->reg_idx, &mem->iova, mem->len);
2031 }
2032 } else {
2033 dev_err(mpp->dev, "no memory region mapped\n");
2034 }
2035
2036 return 0;
2037 }
2038
2039 int mpp_task_dump_reg(struct mpp_dev *mpp,
2040 struct mpp_task *task)
2041 {
2042 if (!task)
2043 return -EIO;
2044
2045 if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
2046 mpp_err("--- dump task register ---\n");
2047 if (task->reg) {
2048 u32 i;
2049 u32 s = task->hw_info->reg_start;
2050 u32 e = task->hw_info->reg_end;
2051
2052 for (i = s; i <= e; i++) {
2053 u32 reg = i * sizeof(u32);
2054
2055 mpp_err("reg[%03d]: %04x: 0x%08x\n",
2056 i, reg, task->reg[i]);
2057 }
2058 }
2059 }
2060
2061 return 0;
2062 }
2063
2064 int mpp_task_dump_hw_reg(struct mpp_dev *mpp)
2065 {
2066 u32 i;
2067 u32 s = mpp->var->hw_info->reg_start;
2068 u32 e = mpp->var->hw_info->reg_end;
2069
2070 mpp_err("--- dump hardware register ---\n");
2071 for (i = s; i <= e; i++) {
2072 u32 reg = i * sizeof(u32);
2073
2074 mpp_err("reg[%03d]: %04x: 0x%08x\n",
2075 i, reg, readl_relaxed(mpp->reg_base + reg));
2076 }
2077
2078 return 0;
2079 }
2080
2081 void mpp_reg_show(struct mpp_dev *mpp, u32 offset)
2082 {
2083 if (!mpp)
2084 return;
2085
2086 dev_err(mpp->dev, "reg[%03d]: %04x: 0x%08x\n",
2087 offset >> 2, offset, mpp_read_relaxed(mpp, offset));
2088 }
2089
2090 void mpp_reg_show_range(struct mpp_dev *mpp, u32 start, u32 end)
2091 {
2092 u32 offset;
2093
2094 if (!mpp)
2095 return;
2096
2097 for (offset = start; offset < end; offset += sizeof(u32))
2098 mpp_reg_show(mpp, offset);
2099 }
2100
2101 /* The device will do more probing work after this */
2102 int mpp_dev_probe(struct mpp_dev *mpp,
2103 struct platform_device *pdev)
2104 {
2105 int ret;
2106 struct resource *res = NULL;
2107 struct device *dev = &pdev->dev;
2108 struct device_node *np = dev->of_node;
2109 struct mpp_hw_info *hw_info = mpp->var->hw_info;
2110
2111 /* Get the disable-auto-frequency flag from the devicetree */
2112 mpp->auto_freq_en = !device_property_read_bool(dev, "rockchip,disable-auto-freq");
2113 /* read the flag for skipping the pmu idle request */
2114 mpp->skip_idle = device_property_read_bool(dev, "rockchip,skip-pmu-idle-request");
2115
2116 /* read link table capacity */
2117 ret = of_property_read_u32(np, "rockchip,task-capacity",
2118 &mpp->task_capacity);
2119 if (ret)
2120 mpp->task_capacity = 1;
2121
2122 mpp->dev = dev;
2123 mpp->hw_ops = mpp->var->hw_ops;
2124 mpp->dev_ops = mpp->var->dev_ops;
2125
2126 /* Get and attach to service */
2127 ret = mpp_attach_service(mpp, dev);
2128 if (ret) {
2129 dev_err(dev, "failed to attach service\n");
2130 return -ENODEV;
2131 }
2132
2133 /* power domain autosuspend delay 2s */
2134 pm_runtime_set_autosuspend_delay(dev, 2000);
2135 pm_runtime_use_autosuspend(dev);
2136
2137 kthread_init_work(&mpp->work, mpp_task_worker_default);
2138
2139 atomic_set(&mpp->reset_request, 0);
2140 atomic_set(&mpp->session_index, 0);
2141 atomic_set(&mpp->task_count, 0);
2142 atomic_set(&mpp->task_index, 0);
2143
2144 device_init_wakeup(dev, true);
2145 pm_runtime_enable(dev);
2146 mpp->irq = platform_get_irq(pdev, 0);
2147 if (mpp->irq < 0) {
2148 dev_err(dev, "No interrupt resource found\n");
2149 ret = -ENODEV;
2150 goto failed;
2151 }
2152
2153 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2154 if (!res) {
2155 dev_err(&pdev->dev, "no memory resource defined\n");
2156 ret = -ENODEV;
2157 goto failed;
2158 }
2159 /*
2160 * Note: devm_ioremap_resource() cannot be used here. The reason is
2161 * that hevc and vdpu map the same register address region on rk3368,
2162 * while devm_ioremap_resource() calls devm_request_mem_region() to
2163 * claim the region exclusively, which would fail for the overlap.
2164 * Using devm_ioremap() avoids that check.
2165 */
2166 mpp->reg_base = devm_ioremap(dev, res->start, resource_size(res));
2167 if (!mpp->reg_base) {
2168 dev_err(dev, "ioremap failed for resource %pR\n", res);
2169 ret = -ENOMEM;
2170 goto failed;
2171 }
2172 mpp->io_base = res->start;
2173
2174 /*
2175 * TODO: probe the iommu here or in the device-specific code; some
2176 * devices have no iommu, so handling it per device may be better.
2177 */
2178 mpp->iommu_info = mpp_iommu_probe(dev);
2179 if (IS_ERR(mpp->iommu_info)) {
2180 dev_err(dev, "failed to attach iommu\n");
2181 mpp->iommu_info = NULL;
2182 }
2183 if (mpp->hw_ops->init) {
2184 ret = mpp->hw_ops->init(mpp);
2185 if (ret)
2186 goto failed;
2187 }
2188
2189 /* read hardware id */
2190 if (hw_info->reg_id >= 0) {
2191 pm_runtime_get_sync(dev);
2192 if (mpp->hw_ops->clk_on)
2193 mpp->hw_ops->clk_on(mpp);
2194
2195 hw_info->hw_id = mpp_read(mpp, hw_info->reg_id * sizeof(u32));
2196 if (mpp->hw_ops->clk_off)
2197 mpp->hw_ops->clk_off(mpp);
2198 pm_runtime_put_sync(dev);
2199 }
2200
2201 return ret;
2202 failed:
2203 mpp_detach_workqueue(mpp);
2204 device_init_wakeup(dev, false);
2205 pm_runtime_disable(dev);
2206
2207 return ret;
2208 }
2209
2210 int mpp_dev_remove(struct mpp_dev *mpp)
2211 {
2212 if (mpp->hw_ops->exit)
2213 mpp->hw_ops->exit(mpp);
2214
2215 mpp_iommu_remove(mpp->iommu_info);
2216 mpp_detach_workqueue(mpp);
2217 device_init_wakeup(mpp->dev, false);
2218 pm_runtime_disable(mpp->dev);
2219
2220 return 0;
2221 }
2222
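/*
 * Shutdown hook: flag the service so no new work is started, then poll
 * task_count until it drains (20 ms interval, 200 ms timeout).
 */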
2223 void mpp_dev_shutdown(struct platform_device *pdev)
2224 {
2225 int ret;
2226 int val;
2227 struct device *dev = &pdev->dev;
2228 struct mpp_dev *mpp = dev_get_drvdata(dev);
2229
2230 dev_info(dev, "shutdown device\n");
2231
2232 atomic_inc(&mpp->srv->shutdown_request);
2233 ret = readx_poll_timeout(atomic_read,
2234 &mpp->task_count,
2235 val, val == 0, 20000, 200000);
2236 if (ret == -ETIMEDOUT)
2237 dev_err(dev, "timed out waiting for %d running tasks\n",
2238 atomic_read(&mpp->task_count));
2239 else
2240 dev_info(dev, "shutdown success\n");
2241 }
2242
2243 int mpp_dev_register_srv(struct mpp_dev *mpp, struct mpp_service *srv)
2244 {
2245 enum MPP_DEVICE_TYPE device_type = mpp->var->device_type;
2246
2247 srv->sub_devices[device_type] = mpp;
2248 set_bit(device_type, &srv->hw_support);
2249
2250 return 0;
2251 }
2252
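/*
 * Hard irq handler: timestamp the irq, call the device-specific irq hook
 * and, when it asks for the threaded handler, claim the task, cancel its
 * timeout work and deactivate the iommu before returning IRQ_WAKE_THREAD.
 */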
2253 irqreturn_t mpp_dev_irq(int irq, void *param)
2254 {
2255 struct mpp_dev *mpp = param;
2256 struct mpp_task *task = mpp->cur_task;
2257 irqreturn_t irq_ret = IRQ_NONE;
2258 u32 timing_en = mpp->srv->timing_en;
2259
2260 if (task && timing_en) {
2261 task->on_irq = ktime_get();
2262 set_bit(TASK_TIMING_IRQ, &task->state);
2263 }
2264
2265 if (mpp->dev_ops->irq)
2266 irq_ret = mpp->dev_ops->irq(mpp);
2267
2268 if (task) {
2269 if (irq_ret == IRQ_WAKE_THREAD) {
2270 /* if the wait or the delayed work has already timed out, an abort
2271 * request is raised; the isr must not respond and the timeout path
2272 * handles the task in the delayed work instead */
2273 if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
2274 mpp_err("error, task has been handled, irq_status %08x\n",
2275 mpp->irq_status);
2276 irq_ret = IRQ_HANDLED;
2277 goto done;
2278 }
2279 if (timing_en) {
2280 task->on_cancel_timeout = ktime_get();
2281 set_bit(TASK_TIMING_TO_CANCEL, &task->state);
2282 }
2283 cancel_delayed_work(&task->timeout_work);
2284 /* normal condition, set state and wake up isr thread */
2285 set_bit(TASK_STATE_IRQ, &task->state);
2286 }
2287
2288 if (irq_ret == IRQ_WAKE_THREAD)
2289 mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
2290 } else {
2291 mpp_debug(DEBUG_IRQ_CHECK, "error, task is null\n");
2292 }
2293 done:
2294 return irq_ret;
2295 }
2296
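/*
 * Threaded isr: optionally drop the clock rate when nothing is pending,
 * run the device-specific isr hook and kick the task queue so the next
 * task can start.
 */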
2297 irqreturn_t mpp_dev_isr_sched(int irq, void *param)
2298 {
2299 irqreturn_t ret = IRQ_NONE;
2300 struct mpp_dev *mpp = param;
2301 struct mpp_task *task = mpp->cur_task;
2302
2303 if (task && mpp->srv->timing_en) {
2304 task->on_isr = ktime_get();
2305 set_bit(TASK_TIMING_ISR, &task->state);
2306 }
2307
2308 if (mpp->auto_freq_en &&
2309 mpp->hw_ops->reduce_freq &&
2310 list_empty(&mpp->queue->pending_list))
2311 mpp->hw_ops->reduce_freq(mpp);
2312
2313 if (mpp->dev_ops->isr)
2314 ret = mpp->dev_ops->isr(mpp);
2315
2316 /* trigger current queue to run next task */
2317 mpp_taskqueue_trigger_work(mpp);
2318
2319 return ret;
2320 }
2321
2322 u32 mpp_get_grf(struct mpp_grf_info *grf_info)
2323 {
2324 u32 val = 0;
2325
2326 if (grf_info && grf_info->grf && grf_info->val)
2327 regmap_read(grf_info->grf, grf_info->offset, &val);
2328
2329 return (val & MPP_GRF_VAL_MASK);
2330 }
2331
2332 bool mpp_grf_is_changed(struct mpp_grf_info *grf_info)
2333 {
2334 bool changed = false;
2335
2336 if (grf_info && grf_info->grf && grf_info->val) {
2337 u32 grf_status = mpp_get_grf(grf_info);
2338 u32 grf_val = grf_info->val & MPP_GRF_VAL_MASK;
2339
2340 changed = (grf_status != grf_val);
2341 }
2342
2343 return changed;
2344 }
2345
2346 int mpp_set_grf(struct mpp_grf_info *grf_info)
2347 {
2348 if (grf_info && grf_info->grf && grf_info->val)
2349 regmap_write(grf_info->grf, grf_info->offset, grf_info->val);
2350
2351 return 0;
2352 }
2353
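/* The timing helpers below only collect data when DEBUG_TIMING is enabled. */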
2354 int mpp_time_record(struct mpp_task *task)
2355 {
2356 if (mpp_debug_unlikely(DEBUG_TIMING) && task) {
2357 task->start = ktime_get();
2358 task->part = task->start;
2359 }
2360
2361 return 0;
2362 }
2363
2364 int mpp_time_part_diff(struct mpp_task *task)
2365 {
2366 if (mpp_debug_unlikely(DEBUG_TIMING)) {
2367 ktime_t end;
2368 struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
2369
2370 end = ktime_get();
2371 mpp_debug(DEBUG_PART_TIMING, "%s:%d session %d:%d part time: %lld us\n",
2372 dev_name(mpp->dev), task->core_id, task->session->pid,
2373 task->session->index, ktime_us_delta(end, task->part));
2374 task->part = end;
2375 }
2376
2377 return 0;
2378 }
2379
2380 int mpp_time_diff(struct mpp_task *task)
2381 {
2382 if (mpp_debug_unlikely(DEBUG_TIMING)) {
2383 ktime_t end;
2384 struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
2385
2386 end = ktime_get();
2387 mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n",
2388 dev_name(mpp->dev), task->core_id, task->session->pid,
2389 task->session->index, ktime_us_delta(end, task->start));
2390 }
2391
2392 return 0;
2393 }
2394
2395 int mpp_time_diff_with_hw_time(struct mpp_task *task, u32 clk_hz)
2396 {
2397 if (mpp_debug_unlikely(DEBUG_TIMING)) {
2398 ktime_t end;
2399 struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
2400
2401 end = ktime_get();
2402
2403 if (clk_hz)
2404 mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us hw %d us\n",
2405 dev_name(mpp->dev), task->core_id, task->session->pid,
2406 task->session->index, ktime_us_delta(end, task->start),
2407 task->hw_cycles / (clk_hz / 1000000));
2408 else
2409 mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n",
2410 dev_name(mpp->dev), task->core_id, task->session->pid,
2411 task->session->index, ktime_us_delta(end, task->start));
2412 }
2413
2414 return 0;
2415 }
2416
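/*
 * Print one timing stage relative to @base, or mark it invalid when the
 * corresponding TASK_TIMING_* bit was never set in @state.
 */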
2417 #define LOG_TIMING(state, id, stage, time, base) \
2418 do { \
2419 if (test_bit(id, &state)) \
2420 pr_info("timing: %-14s : %lld us\n", stage, ktime_us_delta(time, base)); \
2421 else \
2422 pr_info("timing: %-14s : invalid\n", stage); \
2423 } while (0)
2424
2425 void mpp_task_dump_timing(struct mpp_task *task, s64 time_diff)
2426 {
2427 ktime_t s = task->on_create;
2428 unsigned long state = task->state;
2429
2430 pr_info("task %d dump timing at %lld us:", task->task_id, time_diff);
2431
2432 pr_info("timing: %-14s : %lld us\n", "create", ktime_to_us(s));
2433 LOG_TIMING(state, TASK_TIMING_CREATE_END, "create end", task->on_create_end, s);
2434 LOG_TIMING(state, TASK_TIMING_PENDING, "pending", task->on_pending, s);
2435 LOG_TIMING(state, TASK_TIMING_RUN, "run", task->on_run, s);
2436 LOG_TIMING(state, TASK_TIMING_TO_SCHED, "timeout start", task->on_sched_timeout, s);
2437 LOG_TIMING(state, TASK_TIMING_RUN_END, "run end", task->on_run_end, s);
2438 LOG_TIMING(state, TASK_TIMING_IRQ, "irq", task->on_irq, s);
2439 LOG_TIMING(state, TASK_TIMING_TO_CANCEL, "timeout cancel", task->on_cancel_timeout, s);
2440 LOG_TIMING(state, TASK_TIMING_ISR, "isr", task->on_isr, s);
2441 LOG_TIMING(state, TASK_TIMING_FINISH, "finish", task->on_finish, s);
2442 }
2443
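/*
 * Write a register range to hardware while skipping the register at
 * @en_idx, typically so the caller can write the start/enable bit last.
 */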
2444 int mpp_write_req(struct mpp_dev *mpp, u32 *regs,
2445 u32 start_idx, u32 end_idx, u32 en_idx)
2446 {
2447 int i;
2448
2449 for (i = start_idx; i < end_idx; i++) {
2450 if (i == en_idx)
2451 continue;
2452 mpp_write_relaxed(mpp, i * sizeof(u32), regs[i]);
2453 }
2454
2455 return 0;
2456 }
2457
2458 int mpp_read_req(struct mpp_dev *mpp, u32 *regs,
2459 u32 start_idx, u32 end_idx)
2460 {
2461 int i;
2462
2463 for (i = start_idx; i < end_idx; i++)
2464 regs[i] = mpp_read_relaxed(mpp, i * sizeof(u32));
2465
2466 return 0;
2467 }
2468
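/*
 * Resolve one named clock from "clock-names" and read its optional
 * normal/advanced rates from the "rockchip,normal-rates" and
 * "rockchip,advanced-rates" properties at the same index.
 */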
2469 int mpp_get_clk_info(struct mpp_dev *mpp,
2470 struct mpp_clk_info *clk_info,
2471 const char *name)
2472 {
2473 int index = of_property_match_string(mpp->dev->of_node,
2474 "clock-names", name);
2475
2476 if (index < 0)
2477 return -EINVAL;
2478
2479 clk_info->clk = devm_clk_get(mpp->dev, name);
2480 of_property_read_u32_index(mpp->dev->of_node,
2481 "rockchip,normal-rates",
2482 index,
2483 &clk_info->normal_rate_hz);
2484 of_property_read_u32_index(mpp->dev->of_node,
2485 "rockchip,advanced-rates",
2486 index,
2487 &clk_info->advanced_rate_hz);
2488
2489 return 0;
2490 }
2491
2492 int mpp_set_clk_info_rate_hz(struct mpp_clk_info *clk_info,
2493 enum MPP_CLOCK_MODE mode,
2494 unsigned long val)
2495 {
2496 if (!clk_info->clk || !val)
2497 return 0;
2498
2499 switch (mode) {
2500 case CLK_MODE_DEBUG:
2501 clk_info->debug_rate_hz = val;
2502 break;
2503 case CLK_MODE_REDUCE:
2504 clk_info->reduce_rate_hz = val;
2505 break;
2506 case CLK_MODE_NORMAL:
2507 clk_info->normal_rate_hz = val;
2508 break;
2509 case CLK_MODE_ADVANCED:
2510 clk_info->advanced_rate_hz = val;
2511 break;
2512 case CLK_MODE_DEFAULT:
2513 clk_info->default_rate_hz = val;
2514 break;
2515 default:
2516 mpp_err("error mode %d\n", mode);
2517 break;
2518 }
2519
2520 return 0;
2521 }
2522
2523 #define MPP_REDUCE_RATE_HZ (50 * MHZ)
2524
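/*
 * Pick the rate for a clock mode. A non-zero debug rate always wins;
 * otherwise the order is the mode-specific rate, then the normal rate
 * (for CLK_MODE_ADVANCED), then the default rate, with CLK_MODE_REDUCE
 * falling back to MPP_REDUCE_RATE_HZ. Illustrative call, as used by
 * mpp_clk_set_rate() below:
 *
 *   unsigned long hz = mpp_get_clk_info_rate_hz(clk_info, CLK_MODE_NORMAL);
 */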
2525 unsigned long mpp_get_clk_info_rate_hz(struct mpp_clk_info *clk_info,
2526 enum MPP_CLOCK_MODE mode)
2527 {
2528 unsigned long clk_rate_hz = 0;
2529
2530 if (!clk_info->clk)
2531 return 0;
2532
2533 if (clk_info->debug_rate_hz)
2534 return clk_info->debug_rate_hz;
2535
2536 switch (mode) {
2537 case CLK_MODE_REDUCE: {
2538 if (clk_info->reduce_rate_hz)
2539 clk_rate_hz = clk_info->reduce_rate_hz;
2540 else
2541 clk_rate_hz = MPP_REDUCE_RATE_HZ;
2542 } break;
2543 case CLK_MODE_NORMAL: {
2544 if (clk_info->normal_rate_hz)
2545 clk_rate_hz = clk_info->normal_rate_hz;
2546 else
2547 clk_rate_hz = clk_info->default_rate_hz;
2548 } break;
2549 case CLK_MODE_ADVANCED: {
2550 if (clk_info->advanced_rate_hz)
2551 clk_rate_hz = clk_info->advanced_rate_hz;
2552 else if (clk_info->normal_rate_hz)
2553 clk_rate_hz = clk_info->normal_rate_hz;
2554 else
2555 clk_rate_hz = clk_info->default_rate_hz;
2556 } break;
2557 case CLK_MODE_DEFAULT:
2558 default: {
2559 clk_rate_hz = clk_info->default_rate_hz;
2560 } break;
2561 }
2562
2563 return clk_rate_hz;
2564 }
2565
2566 int mpp_clk_set_rate(struct mpp_clk_info *clk_info,
2567 enum MPP_CLOCK_MODE mode)
2568 {
2569 unsigned long clk_rate_hz;
2570
2571 if (!clk_info->clk)
2572 return -EINVAL;
2573
2574 clk_rate_hz = mpp_get_clk_info_rate_hz(clk_info, mode);
2575 if (clk_rate_hz) {
2576 clk_info->used_rate_hz = clk_rate_hz;
2577 clk_set_rate(clk_info->clk, clk_rate_hz);
2578 clk_info->real_rate_hz = clk_get_rate(clk_info->clk);
2579 }
2580
2581 return 0;
2582 }
2583
2584 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
2585 static int fops_show_u32(struct seq_file *file, void *v)
2586 {
2587 u32 *val = file->private;
2588
2589 seq_printf(file, "%d\n", *val);
2590
2591 return 0;
2592 }
2593
2594 static int fops_open_u32(struct inode *inode, struct file *file)
2595 {
2596 return single_open(file, fops_show_u32, PDE_DATA(inode));
2597 }
2598
2599 static ssize_t fops_write_u32(struct file *file, const char __user *buf,
2600 size_t count, loff_t *ppos)
2601 {
2602 int rc;
2603 struct seq_file *priv = file->private_data;
2604
2605 rc = kstrtou32_from_user(buf, count, 0, priv->private);
2606 if (rc)
2607 return rc;
2608
2609 return count;
2610 }
2611
2612 static const struct proc_ops procfs_fops_u32 = {
2613 .proc_open = fops_open_u32,
2614 .proc_read = seq_read,
2615 .proc_release = single_release,
2616 .proc_write = fops_write_u32,
2617 };
2618
2619 struct proc_dir_entry *
2620 mpp_procfs_create_u32(const char *name, umode_t mode,
2621 struct proc_dir_entry *parent, void *data)
2622 {
2623 return proc_create_data(name, mode, parent, &procfs_fops_u32, data);
2624 }
2625
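/*
 * Create the procfs nodes shared by all mpp devices. Each node is a
 * plain u32 backed by procfs_fops_u32: reads print the value through
 * seq_printf() and writes go through kstrtou32_from_user(), e.g.
 *
 *   mpp_procfs_create_u32("timing_check", 0644, parent, &mpp->timing_check);
 */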
2626 void mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp)
2627 {
2628 mpp_procfs_create_u32("disable_work", 0644, parent, &mpp->disable);
2629 mpp_procfs_create_u32("timing_check", 0644, parent, &mpp->timing_check);
2630 }
2631 #endif
2632