// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/proc_fs.h>
#include <linux/pm_runtime.h>
#include <linux/poll.h>
#include <linux/regmap.h>
#include <linux/rwsem.h>
#include <linux/mfd/syscon.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include <soc/rockchip/pm_domains.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"

#define MPP_WAIT_TIMEOUT_DELAY		(2000)

/* Use 'v' as magic number */
#define MPP_IOC_MAGIC		'v'

#define MPP_IOC_CFG_V1	_IOW(MPP_IOC_MAGIC, 1, unsigned int)
#define MPP_IOC_CFG_V2	_IOW(MPP_IOC_MAGIC, 2, unsigned int)

/* input parameter structure for version 1 */
struct mpp_msg_v1 {
	__u32 cmd;
	__u32 flags;
	__u32 size;
	__u32 offset;
	__u64 data_ptr;
};

#define MPP_BAT_MSG_DONE		(0x00000001)

struct mpp_bat_msg {
	__u64 flag;
	__u32 fd;
	__s32 ret;
};

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
const char *mpp_device_name[MPP_DEVICE_BUTT] = {
	[MPP_DEVICE_VDPU1]		= "VDPU1",
	[MPP_DEVICE_VDPU2]		= "VDPU2",
	[MPP_DEVICE_VDPU1_PP]		= "VDPU1_PP",
	[MPP_DEVICE_VDPU2_PP]		= "VDPU2_PP",
	[MPP_DEVICE_AV1DEC]		= "AV1DEC",
	[MPP_DEVICE_HEVC_DEC]		= "HEVC_DEC",
	[MPP_DEVICE_RKVDEC]		= "RKVDEC",
	[MPP_DEVICE_AVSPLUS_DEC]	= "AVSPLUS_DEC",
	[MPP_DEVICE_RKJPEGD]		= "RKJPEGD",
	[MPP_DEVICE_RKVENC]		= "RKVENC",
	[MPP_DEVICE_VEPU1]		= "VEPU1",
	[MPP_DEVICE_VEPU2]		= "VEPU2",
	[MPP_DEVICE_VEPU2_JPEG]		= "VEPU2",
	[MPP_DEVICE_VEPU22]		= "VEPU22",
	[MPP_DEVICE_IEP2]		= "IEP2",
	[MPP_DEVICE_VDPP]		= "VDPP",
};

const char *enc_info_item_name[ENC_INFO_BUTT] = {
	[ENC_INFO_BASE]		= "null",
	[ENC_INFO_WIDTH]	= "width",
	[ENC_INFO_HEIGHT]	= "height",
	[ENC_INFO_FORMAT]	= "format",
	[ENC_INFO_FPS_IN]	= "fps_in",
	[ENC_INFO_FPS_OUT]	= "fps_out",
	[ENC_INFO_RC_MODE]	= "rc_mode",
	[ENC_INFO_BITRATE]	= "bitrate",
	[ENC_INFO_GOP_SIZE]	= "gop_size",
	[ENC_INFO_FPS_CALC]	= "fps_calc",
	[ENC_INFO_PROFILE]	= "profile",
};

#endif

static void mpp_attach_workqueue(struct mpp_dev *mpp,
				 struct mpp_taskqueue *queue);

static int
mpp_taskqueue_pop_pending(struct mpp_taskqueue *queue,
			  struct mpp_task *task)
{
	if (!task->session || !task->session->mpp)
		return -EINVAL;

	mutex_lock(&queue->pending_lock);
	list_del_init(&task->queue_link);
	mutex_unlock(&queue->pending_lock);
	kref_put(&task->ref, mpp_free_task);

	return 0;
}

static struct mpp_task *
mpp_taskqueue_get_pending_task(struct mpp_taskqueue *queue)
{
	struct mpp_task *task = NULL;

	mutex_lock(&queue->pending_lock);
	task = list_first_entry_or_null(&queue->pending_list,
					struct mpp_task,
					queue_link);
	mutex_unlock(&queue->pending_lock);

	return task;
}

static bool
mpp_taskqueue_is_running(struct mpp_taskqueue *queue)
{
	unsigned long flags;
	bool flag;

	spin_lock_irqsave(&queue->running_lock, flags);
	flag = !list_empty(&queue->running_list);
	spin_unlock_irqrestore(&queue->running_lock, flags);

	return flag;
}

int mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue, struct mpp_task *task)
{
	unsigned long flags;

	mutex_lock(&queue->pending_lock);
	spin_lock_irqsave(&queue->running_lock, flags);
	list_move_tail(&task->queue_link, &queue->running_list);
	spin_unlock_irqrestore(&queue->running_lock, flags);

	mutex_unlock(&queue->pending_lock);

	return 0;
}

static struct mpp_task *
mpp_taskqueue_get_running_task(struct mpp_taskqueue *queue)
{
	unsigned long flags;
	struct mpp_task *task = NULL;

	spin_lock_irqsave(&queue->running_lock, flags);
	task = list_first_entry_or_null(&queue->running_list,
					struct mpp_task,
					queue_link);
	spin_unlock_irqrestore(&queue->running_lock, flags);

	return task;
}

static int
mpp_taskqueue_pop_running(struct mpp_taskqueue *queue,
			  struct mpp_task *task)
{
	unsigned long flags;

	if (!task->session || !task->session->mpp)
		return -EINVAL;

	spin_lock_irqsave(&queue->running_lock, flags);
	list_del_init(&task->queue_link);
	spin_unlock_irqrestore(&queue->running_lock, flags);
	kref_put(&task->ref, mpp_free_task);

	return 0;
}

static void
mpp_taskqueue_trigger_work(struct mpp_dev *mpp)
{
	kthread_queue_work(&mpp->queue->worker, &mpp->work);
}

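/*
 * mpp_power_on()/mpp_power_off() bracket each hardware run: they take or
 * release the runtime PM and wakeup references and switch the device clocks
 * through the optional hw_ops clk_on/clk_off hooks. When tasks are still
 * pending or running, power-off defers to runtime autosuspend instead of
 * suspending immediately.
 */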
int mpp_power_on(struct mpp_dev *mpp)
{
	pm_runtime_get_sync(mpp->dev);
	pm_stay_awake(mpp->dev);

	if (mpp->hw_ops->clk_on)
		mpp->hw_ops->clk_on(mpp);

	return 0;
}

int mpp_power_off(struct mpp_dev *mpp)
{
	if (mpp->hw_ops->clk_off)
		mpp->hw_ops->clk_off(mpp);

	pm_relax(mpp->dev);
	if (mpp_taskqueue_get_pending_task(mpp->queue) ||
	    mpp_taskqueue_get_running_task(mpp->queue)) {
		pm_runtime_mark_last_busy(mpp->dev);
		pm_runtime_put_autosuspend(mpp->dev);
	} else {
		pm_runtime_put_sync_suspend(mpp->dev);
	}

	return 0;
}

static void task_msgs_reset(struct mpp_task_msgs *msgs)
{
	list_del_init(&msgs->list);

	msgs->flags = 0;
	msgs->req_cnt = 0;
	msgs->set_cnt = 0;
	msgs->poll_cnt = 0;
}

static void task_msgs_init(struct mpp_task_msgs *msgs, struct mpp_session *session)
{
	INIT_LIST_HEAD(&msgs->list);

	msgs->session = session;
	msgs->queue = NULL;
	msgs->task = NULL;
	msgs->mpp = NULL;

	msgs->ext_fd = -1;

	task_msgs_reset(msgs);
}

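/*
 * Task message containers are recycled per session: get_task_msgs() reuses an
 * entry from list_msgs_idle when one is available and only allocates a new
 * one otherwise, while put_task_msgs() resets the container and returns it to
 * the idle list.
 */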
static struct mpp_task_msgs *get_task_msgs(struct mpp_session *session)
{
	unsigned long flags;
	struct mpp_task_msgs *msgs;

	spin_lock_irqsave(&session->lock_msgs, flags);
	msgs = list_first_entry_or_null(&session->list_msgs_idle,
					struct mpp_task_msgs, list_session);
	if (msgs) {
		list_move_tail(&msgs->list_session, &session->list_msgs);
		spin_unlock_irqrestore(&session->lock_msgs, flags);

		return msgs;
	}
	spin_unlock_irqrestore(&session->lock_msgs, flags);

	msgs = kzalloc(sizeof(*msgs), GFP_KERNEL);
	if (!msgs)
		return NULL;

	task_msgs_init(msgs, session);
	INIT_LIST_HEAD(&msgs->list_session);

	spin_lock_irqsave(&session->lock_msgs, flags);
	list_move_tail(&msgs->list_session, &session->list_msgs);
	session->msgs_cnt++;
	spin_unlock_irqrestore(&session->lock_msgs, flags);

	mpp_debug_func(DEBUG_TASK_INFO, "session %d:%d msgs cnt %d\n",
		       session->pid, session->index, session->msgs_cnt);

	return msgs;
}

static void put_task_msgs(struct mpp_task_msgs *msgs)
{
	struct mpp_session *session = msgs->session;
	unsigned long flags;

	if (!session) {
		pr_err("invalid msgs without session\n");
		return;
	}

	if (msgs->ext_fd >= 0) {
		fdput(msgs->f);
		msgs->ext_fd = -1;
	}

	task_msgs_reset(msgs);

	spin_lock_irqsave(&session->lock_msgs, flags);
	list_move_tail(&msgs->list_session, &session->list_msgs_idle);
	spin_unlock_irqrestore(&session->lock_msgs, flags);
}

static void clear_task_msgs(struct mpp_session *session)
{
	struct mpp_task_msgs *msgs, *n;
	LIST_HEAD(list_to_free);
	unsigned long flags;

	spin_lock_irqsave(&session->lock_msgs, flags);

	list_for_each_entry_safe(msgs, n, &session->list_msgs, list_session)
		list_move_tail(&msgs->list_session, &list_to_free);

	list_for_each_entry_safe(msgs, n, &session->list_msgs_idle, list_session)
		list_move_tail(&msgs->list_session, &list_to_free);

	spin_unlock_irqrestore(&session->lock_msgs, flags);

	list_for_each_entry_safe(msgs, n, &list_to_free, list_session)
		kfree(msgs);
}

static void mpp_session_clear_pending(struct mpp_session *session)
{
	struct mpp_task *task = NULL, *n;

	/* clear session pending list */
	mutex_lock(&session->pending_lock);
	list_for_each_entry_safe(task, n,
				 &session->pending_list,
				 pending_link) {
		/* abort task in taskqueue */
		atomic_inc(&task->abort_request);
		list_del_init(&task->pending_link);
		kref_put(&task->ref, mpp_free_task);
	}
	mutex_unlock(&session->pending_lock);
}

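/*
 * Called from the taskqueue worker to reap sessions that were moved onto the
 * detach list: sessions with no outstanding tasks are deinitialized, busy
 * ones only get their pending tasks aborted, and the work is requeued while
 * detached sessions remain.
 */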
void mpp_session_cleanup_detach(struct mpp_taskqueue *queue, struct kthread_work *work)
{
	struct mpp_session *session, *n;

	if (!atomic_read(&queue->detach_count))
		return;

	mutex_lock(&queue->session_lock);
	list_for_each_entry_safe(session, n, &queue->session_detach, session_link) {
		s32 task_count = atomic_read(&session->task_count);

		if (!task_count) {
			list_del_init(&session->session_link);
			atomic_dec(&queue->detach_count);
		}

		mutex_unlock(&queue->session_lock);

		if (task_count) {
			mpp_dbg_session("session %d:%d task not finished %d\n",
					session->pid, session->index,
					atomic_read(&queue->detach_count));

			mpp_session_clear_pending(session);
		} else {
			mpp_dbg_session("queue detach %d\n",
					atomic_read(&queue->detach_count));

			mpp_session_deinit(session);
		}

		mutex_lock(&queue->session_lock);
	}
	mutex_unlock(&queue->session_lock);

	if (atomic_read(&queue->detach_count)) {
		mpp_dbg_session("queue detach %d again\n",
				atomic_read(&queue->detach_count));

		kthread_queue_work(&queue->worker, work);
	}
}

static struct mpp_session *mpp_session_init(void)
{
	struct mpp_session *session = kzalloc(sizeof(*session), GFP_KERNEL);

	if (!session)
		return NULL;

	session->pid = current->pid;

	mutex_init(&session->pending_lock);
	INIT_LIST_HEAD(&session->pending_list);
	INIT_LIST_HEAD(&session->service_link);
	INIT_LIST_HEAD(&session->session_link);

	atomic_set(&session->task_count, 0);
	atomic_set(&session->release_request, 0);

	INIT_LIST_HEAD(&session->list_msgs);
	INIT_LIST_HEAD(&session->list_msgs_idle);
	spin_lock_init(&session->lock_msgs);

	mpp_dbg_session("session %p init\n", session);
	return session;
}

static void mpp_session_deinit_default(struct mpp_session *session)
{
	if (session->mpp) {
		struct mpp_dev *mpp = session->mpp;

		if (mpp->dev_ops->free_session)
			mpp->dev_ops->free_session(session);

		mpp_session_clear_pending(session);

		if (session->dma) {
			mpp_iommu_down_read(mpp->iommu_info);
			mpp_dma_session_destroy(session->dma);
			mpp_iommu_up_read(mpp->iommu_info);
			session->dma = NULL;
		}
	}

	if (session->srv) {
		struct mpp_service *srv = session->srv;

		mutex_lock(&srv->session_lock);
		list_del_init(&session->service_link);
		mutex_unlock(&srv->session_lock);
	}

	list_del_init(&session->session_link);
}

void mpp_session_deinit(struct mpp_session *session)
{
	mpp_dbg_session("session %d:%d task %d deinit\n", session->pid,
			session->index, atomic_read(&session->task_count));

	if (likely(session->deinit))
		session->deinit(session);
	else
		pr_err("invalid NULL session deinit function\n");

	clear_task_msgs(session);

	kfree(session);
}

static void mpp_session_attach_workqueue(struct mpp_session *session,
					 struct mpp_taskqueue *queue)
{
	mpp_dbg_session("session %d:%d attach\n", session->pid, session->index);
	mutex_lock(&queue->session_lock);
	list_add_tail(&session->session_link, &queue->session_attach);
	mutex_unlock(&queue->session_lock);
}

static void mpp_session_detach_workqueue(struct mpp_session *session)
{
	struct mpp_taskqueue *queue;
	struct mpp_dev *mpp;

	if (!session->mpp || !session->mpp->queue)
		return;

	mpp_dbg_session("session %d:%d detach\n", session->pid, session->index);
	mpp = session->mpp;
	queue = mpp->queue;

	mutex_lock(&queue->session_lock);
	list_del_init(&session->session_link);
	list_add_tail(&session->session_link, &queue->session_detach);
	atomic_inc(&queue->detach_count);
	mutex_unlock(&queue->session_lock);

	mpp_taskqueue_trigger_work(mpp);
}

static int
mpp_session_push_pending(struct mpp_session *session,
			 struct mpp_task *task)
{
	kref_get(&task->ref);
	mutex_lock(&session->pending_lock);
	if (session->srv->timing_en) {
		task->on_pending = ktime_get();
		set_bit(TASK_TIMING_PENDING, &task->state);
	}
	list_add_tail(&task->pending_link, &session->pending_list);
	mutex_unlock(&session->pending_lock);

	return 0;
}

static int
mpp_session_pop_pending(struct mpp_session *session,
			struct mpp_task *task)
{
	mutex_lock(&session->pending_lock);
	list_del_init(&task->pending_link);
	mutex_unlock(&session->pending_lock);
	kref_put(&task->ref, mpp_free_task);

	return 0;
}

static struct mpp_task *
mpp_session_get_pending_task(struct mpp_session *session)
{
	struct mpp_task *task = NULL;

	mutex_lock(&session->pending_lock);
	task = list_first_entry_or_null(&session->pending_list,
					struct mpp_task,
					pending_link);
	mutex_unlock(&session->pending_lock);

	return task;
}

void mpp_free_task(struct kref *ref)
{
	struct mpp_dev *mpp;
	struct mpp_session *session;
	struct mpp_task *task = container_of(ref, struct mpp_task, ref);

	if (!task->session) {
		mpp_err("task %p, task->session is null.\n", task);
		return;
	}
	session = task->session;

	mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d free state 0x%lx abort %d\n",
		       session->index, task->task_id, task->state,
		       atomic_read(&task->abort_request));

	mpp = mpp_get_task_used_device(task, session);
	if (mpp->dev_ops->free_task)
		mpp->dev_ops->free_task(session, task);

	/* Decrease reference count */
	atomic_dec(&session->task_count);
	atomic_dec(&mpp->task_count);
}

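/*
 * Delayed-work handler armed in mpp_task_run_begin(): if the hardware does
 * not signal completion in time, mask the core and MMU interrupts, reset the
 * device, mark the task as timed out and done, wake any waiter, drop the task
 * from the running list and kick the worker again.
 */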
static void mpp_task_timeout_work(struct work_struct *work_s)
{
	struct mpp_dev *mpp;
	struct mpp_session *session;
	struct mpp_task *task = container_of(to_delayed_work(work_s),
					     struct mpp_task,
					     timeout_work);

	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
		mpp_err("task has been handled\n");
		return;
	}

	if (!task->session) {
		mpp_err("task %p, task->session is null.\n", task);
		return;
	}

	session = task->session;
	mpp_err("task %d:%d:%d processing time out!\n", session->pid,
		session->index, task->task_id);

	if (!session->mpp) {
		mpp_err("session %d:%d, session mpp is null.\n", session->pid,
			session->index);
		return;
	}

	mpp_task_dump_timing(task, ktime_us_delta(ktime_get(), task->on_create));

	mpp = mpp_get_task_used_device(task, session);

	/* disable core irq */
	disable_irq(mpp->irq);
	/* disable mmu irq */
	if (mpp->iommu_info && mpp->iommu_info->got_irq)
		disable_irq(mpp->iommu_info->irq);

	/* hardware may be dead, reset it */
	mpp_reset_up_read(mpp->reset_group);
	mpp_dev_reset(mpp);
	mpp_power_off(mpp);

	set_bit(TASK_STATE_TIMEOUT, &task->state);
	set_bit(TASK_STATE_DONE, &task->state);
	/* Wake up the GET thread */
	wake_up(&task->wait);

	/* remove task from taskqueue running list */
	mpp_taskqueue_pop_running(mpp->queue, task);

	/* enable core irq */
	enable_irq(mpp->irq);
	/* enable mmu irq */
	if (mpp->iommu_info && mpp->iommu_info->got_irq)
		enable_irq(mpp->iommu_info->irq);

	mpp_taskqueue_trigger_work(mpp);
}

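/*
 * Default MPP_CMD_SEND handling: allocate a device-specific task from the
 * decoded messages, record optional timing marks and register the task on
 * the session pending list. Pushing onto the taskqueue happens afterwards,
 * in the order required by the note inside this function.
 */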
static int mpp_process_task_default(struct mpp_session *session,
				    struct mpp_task_msgs *msgs)
{
	struct mpp_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;
	u32 timing_en;
	ktime_t on_create;

	if (unlikely(!mpp)) {
		mpp_err("pid %d client %d found invalid process function\n",
			session->pid, session->device_type);
		return -EINVAL;
	}

	timing_en = session->srv->timing_en;
	if (timing_en)
		on_create = ktime_get();

	if (mpp->dev_ops->alloc_task)
		task = mpp->dev_ops->alloc_task(session, msgs);
	if (!task) {
		mpp_err("alloc_task failed.\n");
		return -ENOMEM;
	}

	if (timing_en) {
		task->on_create_end = ktime_get();
		task->on_create = on_create;
		set_bit(TASK_TIMING_CREATE_END, &task->state);
		set_bit(TASK_TIMING_CREATE, &task->state);
	}

	/* ensure current device */
	mpp = mpp_get_task_used_device(task, session);

	kref_init(&task->ref);
	init_waitqueue_head(&task->wait);
	atomic_set(&task->abort_request, 0);
	task->task_index = atomic_fetch_inc(&mpp->task_index);
	task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
	INIT_DELAYED_WORK(&task->timeout_work, mpp_task_timeout_work);

	if (mpp->auto_freq_en && mpp->hw_ops->get_freq)
		mpp->hw_ops->get_freq(mpp, task);

	msgs->queue = mpp->queue;
	msgs->task = task;
	msgs->mpp = mpp;

	/*
	 * The task must be pushed to the session before it is pushed to the
	 * taskqueue. Otherwise, when mpp_task_finish completes and the worker
	 * thread runs the task worker, it may pick up a task that is already
	 * in the queue but not yet in the session, which causes errors.
	 */
	atomic_inc(&session->task_count);
	mpp_session_push_pending(session, task);

	return 0;
}

static int mpp_process_task(struct mpp_session *session,
			    struct mpp_task_msgs *msgs)
{
	if (likely(session->process_task))
		return session->process_task(session, msgs);

	pr_err("invalid NULL process task function\n");
	return -EINVAL;
}

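/*
 * Look up a reset control for the device: a reset listed under its plain name
 * in "reset-names" is private to this device, while a "shared_"-prefixed
 * entry is stored in the device's reset group so several devices can share
 * the same reset line.
 */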
struct reset_control *
mpp_reset_control_get(struct mpp_dev *mpp, enum MPP_RESET_TYPE type, const char *name)
{
	int index;
	struct reset_control *rst = NULL;
	char shared_name[32] = "shared_";
	struct mpp_reset_group *group;

	/* check whether the reset belongs to this device alone */
	index = of_property_match_string(mpp->dev->of_node, "reset-names", name);
	if (index >= 0) {
		rst = devm_reset_control_get(mpp->dev, name);
		mpp_safe_unreset(rst);

		return rst;
	}

	/* check whether the reset is shared */
	strncat(shared_name, name,
		sizeof(shared_name) - strlen(shared_name) - 1);
	index = of_property_match_string(mpp->dev->of_node,
					 "reset-names", shared_name);
	if (index < 0) {
		dev_err(mpp->dev, "%s is not found!\n", shared_name);
		return NULL;
	}

	if (!mpp->reset_group) {
		dev_err(mpp->dev, "reset group is empty!\n");
		return NULL;
	}
	group = mpp->reset_group;

	down_write(&group->rw_sem);
	rst = group->resets[type];
	if (!rst) {
		rst = devm_reset_control_get(mpp->dev, shared_name);
		mpp_safe_unreset(rst);
		group->resets[type] = rst;
		group->queue = mpp->queue;
	}
	/*
	 * If the resets in the group do not all belong to the same queue,
	 * devices on different queues may reset at the same time, so
	 * rw_sem_on must be set true to serialize them.
	 */
	group->rw_sem_on |= (group->queue != mpp->queue) ? true : false;
	dev_info(mpp->dev, "reset_group->rw_sem_on=%d\n", group->rw_sem_on);
	up_write(&group->rw_sem);

	return rst;
}

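/*
 * Full device recovery: reprogram the GRF selection, drop the clock rate if
 * auto frequency scaling is enabled, then assert the hardware reset and
 * refresh the IOMMU attachment while holding the IOMMU and reset-group write
 * locks.
 */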
int mpp_dev_reset(struct mpp_dev *mpp)
{
	dev_info(mpp->dev, "resetting...\n");

	/*
	 * Before running, the grf ctrl bit has to be switched to ensure
	 * the current hardware is selected.
	 */
	if (mpp->hw_ops->set_grf)
		mpp->hw_ops->set_grf(mpp);
	else
		mpp_set_grf(mpp->grf_info);

	if (mpp->auto_freq_en && mpp->hw_ops->reduce_freq)
		mpp->hw_ops->reduce_freq(mpp);
	/* FIXME lock resource lock of the other devices in combo */
	mpp_iommu_down_write(mpp->iommu_info);
	mpp_reset_down_write(mpp->reset_group);
	atomic_set(&mpp->reset_request, 0);

	if (mpp->hw_ops->reset)
		mpp->hw_ops->reset(mpp);

	/*
	 * Note: if the domain does not change, iommu attach returns as an
	 * empty operation. Therefore force a detach and re-attach so that
	 * the domain is updated and really attached again.
	 */
	mpp_iommu_refresh(mpp->iommu_info, mpp->dev);

	mpp_reset_up_write(mpp->reset_group);
	mpp_iommu_up_write(mpp->iommu_info);

	dev_info(mpp->dev, "reset done\n");

	return 0;
}

void mpp_task_run_begin(struct mpp_task *task, u32 timing_en, u32 timeout)
{
	preempt_disable();

	set_bit(TASK_STATE_START, &task->state);

	mpp_time_record(task);
	schedule_delayed_work(&task->timeout_work, msecs_to_jiffies(timeout));

	if (timing_en) {
		task->on_sched_timeout = ktime_get();
		set_bit(TASK_TIMING_TO_SCHED, &task->state);
	}
}

void mpp_task_run_end(struct mpp_task *task, u32 timing_en)
{
	if (timing_en) {
		task->on_run_end = ktime_get();
		set_bit(TASK_TIMING_RUN_END, &task->state);
	}

#ifdef MODULE
	preempt_enable();
#else
	preempt_enable_no_resched();
#endif
}

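/*
 * Hand one task to the hardware: select the device via GRF, make sure the
 * shared IOMMU is attached to this device, power on, optionally raise the
 * clock, take the reset-group read lock and finally call the device run()
 * hook.
 */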
static int mpp_task_run(struct mpp_dev *mpp,
			struct mpp_task *task)
{
	int ret;
	u32 timing_en;

	mpp_debug_enter();

	timing_en = mpp->srv->timing_en;
	if (timing_en) {
		task->on_run = ktime_get();
		set_bit(TASK_TIMING_RUN, &task->state);
	}

	/*
	 * Before running, the grf ctrl bit has to be switched to ensure
	 * the current hardware is selected.
	 */
	if (mpp->hw_ops->set_grf) {
		ret = mpp->hw_ops->set_grf(mpp);
		if (ret) {
			dev_err(mpp->dev, "set grf failed\n");
			return ret;
		}
	} else {
		mpp_set_grf(mpp->grf_info);
	}
	/*
	 * For hardware sharing one iommu, attach here to ensure the iommu
	 * is working for the current device.
	 */
	ret = mpp_iommu_attach(mpp->iommu_info);
	if (ret) {
		dev_err(mpp->dev, "mpp_iommu_attach failed\n");
		return -ENODATA;
	}

	mpp_power_on(mpp);
	mpp_debug_func(DEBUG_TASK_INFO, "pid %d run %s\n",
		       task->session->pid, dev_name(mpp->dev));

	if (mpp->auto_freq_en && mpp->hw_ops->set_freq)
		mpp->hw_ops->set_freq(mpp, task);
	/*
	 * TODO: Lock the reader locker of the device resource lock here,
	 * release at the finish operation
	 */
	mpp_reset_down_read(mpp->reset_group);

	mpp_iommu_dev_activate(mpp->iommu_info, mpp);
	if (mpp->dev_ops->run)
		mpp->dev_ops->run(mpp, task);

	mpp_debug_leave();

	return 0;
}

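/*
 * kthread worker body for a taskqueue: take the next pending task, drop it if
 * an abort was requested, let the device prepare() hook decide whether it can
 * be dispatched (or fall back to "one task at a time"), move it to the
 * running list and start it, then reap any detached sessions.
 */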
static void mpp_task_worker_default(struct kthread_work *work_s)
{
	struct mpp_task *task;
	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
	struct mpp_taskqueue *queue = mpp->queue;

	mpp_debug_enter();

again:
	task = mpp_taskqueue_get_pending_task(queue);
	if (!task)
		goto done;

	/* if task timeout and aborted, remove it */
	if (atomic_read(&task->abort_request) > 0) {
		mpp_taskqueue_pop_pending(queue, task);
		goto again;
	}

	/* get device for current task */
	mpp = task->session->mpp;

	/*
	 * In the link table mode, the prepare function of the device
	 * checks whether a new task can be inserted into the device.
	 * If the device supports task status query (like the HEVC
	 * encoder), it can report whether the device is busy.
	 * If the device does not support multiple tasks or task status
	 * query, leave this job to the mpp service.
	 */
	if (mpp->dev_ops->prepare)
		task = mpp->dev_ops->prepare(mpp, task);
	else if (mpp_taskqueue_is_running(queue))
		task = NULL;

	/*
	 * FIXME if the hardware supports task query, but we still need to lock
	 * the running list and lock the mpp service in the current state.
	 */
	/* Push a pending task to running queue */
	if (task) {
		struct mpp_dev *task_mpp = mpp_get_task_used_device(task, task->session);

		atomic_inc(&task_mpp->task_count);
		mpp_taskqueue_pending_to_run(queue, task);
		set_bit(TASK_STATE_RUNNING, &task->state);
		if (mpp_task_run(task_mpp, task))
			mpp_taskqueue_pop_running(queue, task);
		else
			goto again;
	}

done:
	mpp_session_cleanup_detach(queue, work_s);
}

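/*
 * Default MPP_CMD_POLL handling: wait on the oldest pending task of the
 * session for up to MPP_WAIT_TIMEOUT_DELAY, collect the result through the
 * device result() hook on success, or mark the task aborted on timeout, then
 * drop it from the session pending list.
 */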
static int mpp_wait_result_default(struct mpp_session *session,
				   struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *task;
	struct mpp_dev *mpp;

	task = mpp_session_get_pending_task(session);
	if (!task) {
		mpp_err("session %d:%d pending list is empty!\n",
			session->pid, session->index);
		return -EIO;
	}
	mpp = mpp_get_task_used_device(task, session);

	ret = wait_event_timeout(task->wait,
				 test_bit(TASK_STATE_DONE, &task->state),
				 msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY));
	if (ret > 0) {
		if (mpp->dev_ops->result)
			ret = mpp->dev_ops->result(mpp, task, msgs);
	} else {
		atomic_inc(&task->abort_request);
		set_bit(TASK_STATE_ABORT, &task->state);
		mpp_err("timeout, pid %d session %d:%d count %d cur_task %p id %d\n",
			session->pid, session->pid, session->index,
			atomic_read(&session->task_count), task,
			task->task_id);
	}

	mpp_debug_func(DEBUG_TASK_INFO, "task %d kref_%d\n",
		       task->task_id, kref_read(&task->ref));

	mpp_session_pop_pending(session, task);

	return ret;
}

static int mpp_wait_result(struct mpp_session *session,
			   struct mpp_task_msgs *msgs)
{
	if (likely(session->wait_result))
		return session->wait_result(session, msgs);

	pr_err("invalid NULL wait result function\n");
	return -EINVAL;
}

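/*
 * Bind a device to the common mpp service at probe time: resolve the
 * "rockchip,srv" phandle, attach the device to the taskqueue selected by
 * "rockchip,taskqueue-node" and, when present, to the reset group selected
 * by "rockchip,resetgroup-node".
 */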
static int mpp_attach_service(struct mpp_dev *mpp, struct device *dev)
{
	u32 taskqueue_node = 0;
	u32 reset_group_node = 0;
	struct device_node *np = NULL;
	struct platform_device *pdev = NULL;
	struct mpp_taskqueue *queue = NULL;
	int ret = 0;

	np = of_parse_phandle(dev->of_node, "rockchip,srv", 0);
	if (!np || !of_device_is_available(np)) {
		dev_err(dev, "failed to get the mpp service node\n");
		return -ENODEV;
	}

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		dev_err(dev, "failed to get mpp service from node\n");
		return -ENODEV;
	}

	mpp->srv = platform_get_drvdata(pdev);
	platform_device_put(pdev);
	if (!mpp->srv) {
		dev_err(dev, "failed attach service\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(dev->of_node,
				   "rockchip,taskqueue-node", &taskqueue_node);
	if (ret) {
		dev_err(dev, "failed to get taskqueue-node\n");
		return ret;
	} else if (taskqueue_node >= mpp->srv->taskqueue_cnt) {
		dev_err(dev, "taskqueue-node %d must less than %d\n",
			taskqueue_node, mpp->srv->taskqueue_cnt);
		return -ENODEV;
	}
	/* set taskqueue according to the dts */
	queue = mpp->srv->task_queues[taskqueue_node];
	if (!queue) {
		dev_err(dev, "taskqueue attach to invalid node %d\n",
			taskqueue_node);
		return -ENODEV;
	}
	mpp_attach_workqueue(mpp, queue);

	ret = of_property_read_u32(dev->of_node,
				   "rockchip,resetgroup-node", &reset_group_node);
	if (!ret) {
		/* set resetgroup according to the dts */
		if (reset_group_node >= mpp->srv->reset_group_cnt) {
			dev_err(dev, "resetgroup-node %d must less than %d\n",
				reset_group_node, mpp->srv->reset_group_cnt);
			return -ENODEV;
		} else {
			mpp->reset_group = mpp->srv->reset_groups[reset_group_node];
		}
	}

	return 0;
}

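/*
 * Allocate and initialize a taskqueue: set up the session, pending, running,
 * mmu and device lists with their locks, and start with the default
 * MPP_MAX_TASK_CAPACITY task capacity (later clamped to the smallest attached
 * device capacity in mpp_attach_workqueue()).
 */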
struct mpp_taskqueue *mpp_taskqueue_init(struct device *dev)
{
	struct mpp_taskqueue *queue = devm_kzalloc(dev, sizeof(*queue),
						   GFP_KERNEL);
	if (!queue)
		return NULL;

	mutex_init(&queue->session_lock);
	mutex_init(&queue->pending_lock);
	spin_lock_init(&queue->running_lock);
	mutex_init(&queue->mmu_lock);
	mutex_init(&queue->dev_lock);
	INIT_LIST_HEAD(&queue->session_attach);
	INIT_LIST_HEAD(&queue->session_detach);
	INIT_LIST_HEAD(&queue->pending_list);
	INIT_LIST_HEAD(&queue->running_list);
	INIT_LIST_HEAD(&queue->mmu_list);
	INIT_LIST_HEAD(&queue->dev_list);

	/* default taskqueue has max 16 task capacity */
	queue->task_capacity = MPP_MAX_TASK_CAPACITY;
	atomic_set(&queue->reset_request, 0);
	atomic_set(&queue->detach_count, 0);
	atomic_set(&queue->task_id, 0);
	queue->dev_active_flags = 0;

	return queue;
}

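/*
 * Register a device as a core of a taskqueue: pick the core id from the
 * device tree (or the next free slot), mark the core idle, link the device
 * into the queue's device list and shrink the queue task capacity to the
 * smallest capacity among its cores.
 */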
static void mpp_attach_workqueue(struct mpp_dev *mpp,
				 struct mpp_taskqueue *queue)
{
	s32 core_id;

	INIT_LIST_HEAD(&mpp->queue_link);

	mutex_lock(&queue->dev_lock);

	if (mpp->core_id >= 0)
		core_id = mpp->core_id;
	else
		core_id = queue->core_count;

	if (core_id < 0 || core_id >= MPP_MAX_CORE_NUM) {
		dev_err(mpp->dev, "invalid core id %d\n", core_id);
		goto done;
	}

	/*
	 * Multiple single-core devices may share one queue; in that case
	 * their core_id keeps the default value 0.
	 */
	if (queue->cores[core_id]) {
		if (queue->cores[core_id] == mpp)
			goto done;

		core_id = queue->core_count;
	}

	queue->cores[core_id] = mpp;
	queue->core_count++;

	set_bit(core_id, &queue->core_idle);
	list_add_tail(&mpp->queue_link, &queue->dev_list);
	if (queue->core_id_max < (u32)core_id)
		queue->core_id_max = (u32)core_id;

	mpp->core_id = core_id;
	mpp->queue = queue;

	mpp_dbg_core("%s attach queue as core %d\n",
			dev_name(mpp->dev), mpp->core_id);

	if (queue->task_capacity > mpp->task_capacity)
		queue->task_capacity = mpp->task_capacity;

done:
	mutex_unlock(&queue->dev_lock);
}

static void mpp_detach_workqueue(struct mpp_dev *mpp)
{
	struct mpp_taskqueue *queue = mpp->queue;

	if (queue) {
		mutex_lock(&queue->dev_lock);

		queue->cores[mpp->core_id] = NULL;
		queue->core_count--;

		clear_bit(mpp->core_id, &queue->core_idle);
		list_del_init(&mpp->queue_link);

		mpp->queue = NULL;

		mutex_unlock(&queue->dev_lock);
	}
}

mpp_check_cmd_v1(__u32 cmd)1121*4882a593Smuzhiyun static int mpp_check_cmd_v1(__u32 cmd)
1122*4882a593Smuzhiyun {
1123*4882a593Smuzhiyun 	bool found;
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	found = (cmd < MPP_CMD_QUERY_BUTT) ? true : false;
1126*4882a593Smuzhiyun 	found = (cmd >= MPP_CMD_INIT_BASE && cmd < MPP_CMD_INIT_BUTT) ? true : found;
1127*4882a593Smuzhiyun 	found = (cmd >= MPP_CMD_SEND_BASE && cmd < MPP_CMD_SEND_BUTT) ? true : found;
1128*4882a593Smuzhiyun 	found = (cmd >= MPP_CMD_POLL_BASE && cmd < MPP_CMD_POLL_BUTT) ? true : found;
1129*4882a593Smuzhiyun 	found = (cmd >= MPP_CMD_CONTROL_BASE && cmd < MPP_CMD_CONTROL_BUTT) ? true : found;
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	return found ? 0 : -EINVAL;
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun 
mpp_msg_is_last(struct mpp_request * req)1134*4882a593Smuzhiyun static inline int mpp_msg_is_last(struct mpp_request *req)
1135*4882a593Smuzhiyun {
1136*4882a593Smuzhiyun 	int flag;
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	if (req->flags & MPP_FLAGS_MULTI_MSG)
1139*4882a593Smuzhiyun 		flag = (req->flags & MPP_FLAGS_LAST_MSG) ? 1 : 0;
1140*4882a593Smuzhiyun 	else
1141*4882a593Smuzhiyun 		flag = 1;
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun 	return flag;
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun 
mpp_get_cmd_butt(__u32 cmd)1146*4882a593Smuzhiyun static __u32 mpp_get_cmd_butt(__u32 cmd)
1147*4882a593Smuzhiyun {
1148*4882a593Smuzhiyun 	__u32 mask = 0;
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	switch (cmd) {
1151*4882a593Smuzhiyun 	case MPP_CMD_QUERY_BASE:
1152*4882a593Smuzhiyun 		mask = MPP_CMD_QUERY_BUTT;
1153*4882a593Smuzhiyun 		break;
1154*4882a593Smuzhiyun 	case MPP_CMD_INIT_BASE:
1155*4882a593Smuzhiyun 		mask = MPP_CMD_INIT_BUTT;
1156*4882a593Smuzhiyun 		break;
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 	case MPP_CMD_SEND_BASE:
1159*4882a593Smuzhiyun 		mask = MPP_CMD_SEND_BUTT;
1160*4882a593Smuzhiyun 		break;
1161*4882a593Smuzhiyun 	case MPP_CMD_POLL_BASE:
1162*4882a593Smuzhiyun 		mask = MPP_CMD_POLL_BUTT;
1163*4882a593Smuzhiyun 		break;
1164*4882a593Smuzhiyun 	case MPP_CMD_CONTROL_BASE:
1165*4882a593Smuzhiyun 		mask = MPP_CMD_CONTROL_BUTT;
1166*4882a593Smuzhiyun 		break;
1167*4882a593Smuzhiyun 	default:
1168*4882a593Smuzhiyun 		mpp_err("unknown dev cmd 0x%x\n", cmd);
1169*4882a593Smuzhiyun 		break;
1170*4882a593Smuzhiyun 	}
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	return mask;
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun 
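/*
 * Handle one request collected from an MPP_IOC_CFG_V1 ioctl. Query and
 * init commands are served immediately, register set/poll commands are
 * only accounted in @msgs and turned into a task later, and any other
 * command is forwarded to the device specific ioctl hook.
 */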
1175*4882a593Smuzhiyun static int mpp_process_request(struct mpp_session *session,
1176*4882a593Smuzhiyun 			       struct mpp_service *srv,
1177*4882a593Smuzhiyun 			       struct mpp_request *req,
1178*4882a593Smuzhiyun 			       struct mpp_task_msgs *msgs)
1179*4882a593Smuzhiyun {
1180*4882a593Smuzhiyun 	int ret;
1181*4882a593Smuzhiyun 	struct mpp_dev *mpp;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	mpp_debug(DEBUG_IOCTL, "cmd %x process\n", req->cmd);
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	switch (req->cmd) {
1186*4882a593Smuzhiyun 	case MPP_CMD_QUERY_HW_SUPPORT: {
1187*4882a593Smuzhiyun 		u32 hw_support = srv->hw_support;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 		mpp_debug(DEBUG_IOCTL, "hw_support %08x\n", hw_support);
1190*4882a593Smuzhiyun 		if (put_user(hw_support, (u32 __user *)req->data))
1191*4882a593Smuzhiyun 			return -EFAULT;
1192*4882a593Smuzhiyun 	} break;
1193*4882a593Smuzhiyun 	case MPP_CMD_QUERY_HW_ID: {
1194*4882a593Smuzhiyun 		struct mpp_hw_info *hw_info;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 		mpp = NULL;
1197*4882a593Smuzhiyun 		if (session && session->mpp) {
1198*4882a593Smuzhiyun 			mpp = session->mpp;
1199*4882a593Smuzhiyun 		} else {
1200*4882a593Smuzhiyun 			u32 client_type;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 			if (get_user(client_type, (u32 __user *)req->data))
1203*4882a593Smuzhiyun 				return -EFAULT;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 			mpp_debug(DEBUG_IOCTL, "client %d\n", client_type);
1206*4882a593Smuzhiyun 			client_type = array_index_nospec(client_type, MPP_DEVICE_BUTT);
1207*4882a593Smuzhiyun 			if (test_bit(client_type, &srv->hw_support))
1208*4882a593Smuzhiyun 				mpp = srv->sub_devices[client_type];
1209*4882a593Smuzhiyun 		}
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 		if (!mpp)
1212*4882a593Smuzhiyun 			return -EINVAL;
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 		hw_info = mpp->var->hw_info;
1215*4882a593Smuzhiyun 		mpp_debug(DEBUG_IOCTL, "hw_id %08x\n", hw_info->hw_id);
1216*4882a593Smuzhiyun 		if (put_user(hw_info->hw_id, (u32 __user *)req->data))
1217*4882a593Smuzhiyun 			return -EFAULT;
1218*4882a593Smuzhiyun 	} break;
1219*4882a593Smuzhiyun 	case MPP_CMD_QUERY_CMD_SUPPORT: {
1220*4882a593Smuzhiyun 		__u32 cmd = 0;
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 		if (get_user(cmd, (u32 __user *)req->data))
1223*4882a593Smuzhiyun 			return -EINVAL;
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 		if (put_user(mpp_get_cmd_butt(cmd), (u32 __user *)req->data))
1226*4882a593Smuzhiyun 			return -EFAULT;
1227*4882a593Smuzhiyun 	} break;
1228*4882a593Smuzhiyun 	case MPP_CMD_INIT_CLIENT_TYPE: {
1229*4882a593Smuzhiyun 		u32 client_type;
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 		if (get_user(client_type, (u32 __user *)req->data))
1232*4882a593Smuzhiyun 			return -EFAULT;
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 		mpp_debug(DEBUG_IOCTL, "client %d\n", client_type);
1235*4882a593Smuzhiyun 		if (client_type >= MPP_DEVICE_BUTT) {
1236*4882a593Smuzhiyun 			mpp_err("client_type must be less than %d\n",
1237*4882a593Smuzhiyun 				MPP_DEVICE_BUTT);
1238*4882a593Smuzhiyun 			return -EINVAL;
1239*4882a593Smuzhiyun 		}
1240*4882a593Smuzhiyun 		client_type = array_index_nospec(client_type, MPP_DEVICE_BUTT);
1241*4882a593Smuzhiyun 		mpp = srv->sub_devices[client_type];
1242*4882a593Smuzhiyun 		if (!mpp)
1243*4882a593Smuzhiyun 			return -EINVAL;
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 		session->device_type = (enum MPP_DEVICE_TYPE)client_type;
1246*4882a593Smuzhiyun 		session->dma = mpp_dma_session_create(mpp->dev, mpp->session_max_buffers);
1247*4882a593Smuzhiyun 		session->mpp = mpp;
1248*4882a593Smuzhiyun 		if (mpp->dev_ops) {
1249*4882a593Smuzhiyun 			if (mpp->dev_ops->process_task)
1250*4882a593Smuzhiyun 				session->process_task =
1251*4882a593Smuzhiyun 					mpp->dev_ops->process_task;
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 			if (mpp->dev_ops->wait_result)
1254*4882a593Smuzhiyun 				session->wait_result =
1255*4882a593Smuzhiyun 					mpp->dev_ops->wait_result;
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 			if (mpp->dev_ops->deinit)
1258*4882a593Smuzhiyun 				session->deinit = mpp->dev_ops->deinit;
1259*4882a593Smuzhiyun 		}
1260*4882a593Smuzhiyun 		session->index = atomic_fetch_inc(&mpp->session_index);
1261*4882a593Smuzhiyun 		if (mpp->dev_ops && mpp->dev_ops->init_session) {
1262*4882a593Smuzhiyun 			ret = mpp->dev_ops->init_session(session);
1263*4882a593Smuzhiyun 			if (ret)
1264*4882a593Smuzhiyun 				return ret;
1265*4882a593Smuzhiyun 		}
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 		mpp_session_attach_workqueue(session, mpp->queue);
1268*4882a593Smuzhiyun 	} break;
1269*4882a593Smuzhiyun 	case MPP_CMD_INIT_DRIVER_DATA: {
1270*4882a593Smuzhiyun 		u32 val;
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 		mpp = session->mpp;
1273*4882a593Smuzhiyun 		if (!mpp)
1274*4882a593Smuzhiyun 			return -EINVAL;
1275*4882a593Smuzhiyun 		if (get_user(val, (u32 __user *)req->data))
1276*4882a593Smuzhiyun 			return -EFAULT;
1277*4882a593Smuzhiyun 		if (mpp->grf_info->grf)
1278*4882a593Smuzhiyun 			regmap_write(mpp->grf_info->grf, 0x5d8, val);
1279*4882a593Smuzhiyun 	} break;
1280*4882a593Smuzhiyun 	case MPP_CMD_INIT_TRANS_TABLE: {
1281*4882a593Smuzhiyun 		if (session && req->size) {
1282*4882a593Smuzhiyun 			int trans_tbl_size = sizeof(session->trans_table);
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 			if (req->size > trans_tbl_size) {
1285*4882a593Smuzhiyun 				mpp_err("init table size %d more than %d\n",
1286*4882a593Smuzhiyun 					req->size, trans_tbl_size);
1287*4882a593Smuzhiyun 				return -ENOMEM;
1288*4882a593Smuzhiyun 			}
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 			if (copy_from_user(session->trans_table,
1291*4882a593Smuzhiyun 					   req->data, req->size)) {
1292*4882a593Smuzhiyun 				mpp_err("copy_from_user failed\n");
1293*4882a593Smuzhiyun 				return -EINVAL;
1294*4882a593Smuzhiyun 			}
1295*4882a593Smuzhiyun 			session->trans_count =
1296*4882a593Smuzhiyun 				req->size / sizeof(session->trans_table[0]);
1297*4882a593Smuzhiyun 		}
1298*4882a593Smuzhiyun 	} break;
1299*4882a593Smuzhiyun 	case MPP_CMD_SET_REG_WRITE:
1300*4882a593Smuzhiyun 	case MPP_CMD_SET_REG_READ:
1301*4882a593Smuzhiyun 	case MPP_CMD_SET_REG_ADDR_OFFSET:
1302*4882a593Smuzhiyun 	case MPP_CMD_SET_RCB_INFO: {
1303*4882a593Smuzhiyun 		msgs->flags |= req->flags;
1304*4882a593Smuzhiyun 		msgs->set_cnt++;
1305*4882a593Smuzhiyun 	} break;
1306*4882a593Smuzhiyun 	case MPP_CMD_POLL_HW_FINISH: {
1307*4882a593Smuzhiyun 		msgs->flags |= req->flags;
1308*4882a593Smuzhiyun 		msgs->poll_cnt++;
1309*4882a593Smuzhiyun 		msgs->poll_req = NULL;
1310*4882a593Smuzhiyun 	} break;
1311*4882a593Smuzhiyun 	case MPP_CMD_POLL_HW_IRQ: {
1312*4882a593Smuzhiyun 		if (msgs->poll_cnt || msgs->poll_req)
1313*4882a593Smuzhiyun 			mpp_err("Do NOT poll hw irq while a previous poll has not returned\n");
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 		msgs->flags |= req->flags;
1316*4882a593Smuzhiyun 		msgs->poll_cnt++;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 		if (req->size && req->data) {
1319*4882a593Smuzhiyun 			if (!msgs->poll_req)
1320*4882a593Smuzhiyun 				msgs->poll_req = req;
1321*4882a593Smuzhiyun 		} else {
1322*4882a593Smuzhiyun 			msgs->poll_req = NULL;
1323*4882a593Smuzhiyun 		}
1324*4882a593Smuzhiyun 	} break;
1325*4882a593Smuzhiyun 	case MPP_CMD_RESET_SESSION: {
1326*4882a593Smuzhiyun 		int ret;
1327*4882a593Smuzhiyun 		int val;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 		ret = readx_poll_timeout(atomic_read,
1330*4882a593Smuzhiyun 					 &session->task_count,
1331*4882a593Smuzhiyun 					 val, val == 0, 1000, 500000);
1332*4882a593Smuzhiyun 		if (ret == -ETIMEDOUT) {
1333*4882a593Smuzhiyun 			mpp_err("wait task running time out\n");
1334*4882a593Smuzhiyun 		} else {
1335*4882a593Smuzhiyun 			mpp = session->mpp;
1336*4882a593Smuzhiyun 			if (!mpp)
1337*4882a593Smuzhiyun 				return -EINVAL;
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 			mpp_session_clear_pending(session);
1340*4882a593Smuzhiyun 			mpp_iommu_down_write(mpp->iommu_info);
1341*4882a593Smuzhiyun 			ret = mpp_dma_session_destroy(session->dma);
1342*4882a593Smuzhiyun 			mpp_iommu_up_write(mpp->iommu_info);
1343*4882a593Smuzhiyun 		}
1344*4882a593Smuzhiyun 		return ret;
1345*4882a593Smuzhiyun 	} break;
1346*4882a593Smuzhiyun 	case MPP_CMD_TRANS_FD_TO_IOVA: {
1347*4882a593Smuzhiyun 		u32 i;
1348*4882a593Smuzhiyun 		u32 count;
1349*4882a593Smuzhiyun 		u32 data[MPP_MAX_REG_TRANS_NUM];
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 		mpp = session->mpp;
1352*4882a593Smuzhiyun 		if (!mpp)
1353*4882a593Smuzhiyun 			return -EINVAL;
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 		if (req->size <= 0 ||
1356*4882a593Smuzhiyun 		    req->size > sizeof(data))
1357*4882a593Smuzhiyun 			return -EINVAL;
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun 		memset(data, 0, sizeof(data));
1360*4882a593Smuzhiyun 		if (copy_from_user(data, req->data, req->size)) {
1361*4882a593Smuzhiyun 			mpp_err("copy_from_user failed.\n");
1362*4882a593Smuzhiyun 			return -EINVAL;
1363*4882a593Smuzhiyun 		}
1364*4882a593Smuzhiyun 		count = req->size / sizeof(u32);
1365*4882a593Smuzhiyun 		for (i = 0; i < count; i++) {
1366*4882a593Smuzhiyun 			struct mpp_dma_buffer *buffer;
1367*4882a593Smuzhiyun 			int fd = data[i];
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 			mpp_iommu_down_read(mpp->iommu_info);
1370*4882a593Smuzhiyun 			buffer = mpp_dma_import_fd(mpp->iommu_info,
1371*4882a593Smuzhiyun 						   session->dma, fd);
1372*4882a593Smuzhiyun 			mpp_iommu_up_read(mpp->iommu_info);
1373*4882a593Smuzhiyun 			if (IS_ERR_OR_NULL(buffer)) {
1374*4882a593Smuzhiyun 				mpp_err("can not import fd %d\n", fd);
1375*4882a593Smuzhiyun 				return -EINVAL;
1376*4882a593Smuzhiyun 			}
1377*4882a593Smuzhiyun 			data[i] = (u32)buffer->iova;
1378*4882a593Smuzhiyun 			mpp_debug(DEBUG_IOMMU, "fd %d => iova %08x\n",
1379*4882a593Smuzhiyun 				  fd, data[i]);
1380*4882a593Smuzhiyun 		}
1381*4882a593Smuzhiyun 		if (copy_to_user(req->data, data, req->size)) {
1382*4882a593Smuzhiyun 			mpp_err("copy_to_user failed.\n");
1383*4882a593Smuzhiyun 			return -EINVAL;
1384*4882a593Smuzhiyun 		}
1385*4882a593Smuzhiyun 	} break;
1386*4882a593Smuzhiyun 	case MPP_CMD_RELEASE_FD: {
1387*4882a593Smuzhiyun 		u32 i;
1388*4882a593Smuzhiyun 		int ret;
1389*4882a593Smuzhiyun 		u32 count;
1390*4882a593Smuzhiyun 		u32 data[MPP_MAX_REG_TRANS_NUM];
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 		if (req->size <= 0 ||
1393*4882a593Smuzhiyun 		    req->size > sizeof(data))
1394*4882a593Smuzhiyun 			return -EINVAL;
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 		memset(data, 0, sizeof(data));
1397*4882a593Smuzhiyun 		if (copy_from_user(data, req->data, req->size)) {
1398*4882a593Smuzhiyun 			mpp_err("copy_from_user failed.\n");
1399*4882a593Smuzhiyun 			return -EINVAL;
1400*4882a593Smuzhiyun 		}
1401*4882a593Smuzhiyun 		count = req->size / sizeof(u32);
1402*4882a593Smuzhiyun 		for (i = 0; i < count; i++) {
1403*4882a593Smuzhiyun 			ret = mpp_dma_release_fd(session->dma, data[i]);
1404*4882a593Smuzhiyun 			if (ret) {
1405*4882a593Smuzhiyun 				mpp_err("release fd %d failed.\n", data[i]);
1406*4882a593Smuzhiyun 				return ret;
1407*4882a593Smuzhiyun 			}
1408*4882a593Smuzhiyun 		}
1409*4882a593Smuzhiyun 	} break;
1410*4882a593Smuzhiyun 	default: {
1411*4882a593Smuzhiyun 		mpp = session->mpp;
1412*4882a593Smuzhiyun 		if (!mpp) {
1413*4882a593Smuzhiyun 			mpp_err("pid %d not find client %d\n",
1414*4882a593Smuzhiyun 				session->pid, session->device_type);
1415*4882a593Smuzhiyun 			return -EINVAL;
1416*4882a593Smuzhiyun 		}
1417*4882a593Smuzhiyun 		if (mpp->dev_ops->ioctl)
1418*4882a593Smuzhiyun 			return mpp->dev_ops->ioctl(session, req);
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 		mpp_debug(DEBUG_IOCTL, "unknown mpp ioctl cmd %x\n", req->cmd);
1421*4882a593Smuzhiyun 	} break;
1422*4882a593Smuzhiyun 	}
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	return 0;
1425*4882a593Smuzhiyun }
1426*4882a593Smuzhiyun 
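/*
 * Queue a fully collected message group: when it contains register
 * writes a task is created via mpp_process_task(), then the msgs are
 * linked to the caller's list for the trigger/wait stages, or released
 * again if task creation failed.
 */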
1427*4882a593Smuzhiyun static void task_msgs_add(struct mpp_task_msgs *msgs, struct list_head *head)
1428*4882a593Smuzhiyun {
1429*4882a593Smuzhiyun 	struct mpp_session *session = msgs->session;
1430*4882a593Smuzhiyun 	int ret = 0;
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	/* process each task */
1433*4882a593Smuzhiyun 	if (msgs->set_cnt) {
1434*4882a593Smuzhiyun 		/* NOTE: update msg_flags for fd over 1024 */
1435*4882a593Smuzhiyun 		session->msg_flags = msgs->flags;
1436*4882a593Smuzhiyun 		ret = mpp_process_task(session, msgs);
1437*4882a593Smuzhiyun 	}
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	if (!ret) {
1440*4882a593Smuzhiyun 		INIT_LIST_HEAD(&msgs->list);
1441*4882a593Smuzhiyun 		list_add_tail(&msgs->list, head);
1442*4882a593Smuzhiyun 	} else {
1443*4882a593Smuzhiyun 		put_task_msgs(msgs);
1444*4882a593Smuzhiyun 	}
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun 
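/*
 * Walk the user supplied message chain of an MPP_IOC_CFG_V1 ioctl and
 * turn it into mpp_task_msgs entries on @head. Each element is a
 * struct mpp_msg_v1; MPP_FLAGS_MULTI_MSG chains several of them and
 * MPP_FLAGS_LAST_MSG marks the end of the chain. MPP_CMD_SET_SESSION_FD
 * switches the target session (batch mode) for the messages that follow.
 *
 * For illustration only (userspace side, not part of this driver), a
 * typical one-frame call could look roughly like:
 *
 *	struct mpp_msg_v1 msgs[2] = {
 *		{ .cmd = MPP_CMD_SET_REG_WRITE,
 *		  .flags = MPP_FLAGS_MULTI_MSG,
 *		  .size = reg_size, .data_ptr = (uintptr_t)regs },
 *		{ .cmd = MPP_CMD_POLL_HW_FINISH,
 *		  .flags = MPP_FLAGS_MULTI_MSG | MPP_FLAGS_LAST_MSG },
 *	};
 *	ioctl(fd, MPP_IOC_CFG_V1, msgs);
 *
 * where fd, regs and reg_size are placeholders; the exact layout used by
 * the userspace mpp library may differ, the sketch only shows how the
 * flags chain messages together.
 */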
1447*4882a593Smuzhiyun static int mpp_collect_msgs(struct list_head *head, struct mpp_session *session,
1448*4882a593Smuzhiyun 			    unsigned int cmd, void __user *msg)
1449*4882a593Smuzhiyun {
1450*4882a593Smuzhiyun 	struct mpp_msg_v1 msg_v1;
1451*4882a593Smuzhiyun 	struct mpp_request *req;
1452*4882a593Smuzhiyun 	struct mpp_task_msgs *msgs = NULL;
1453*4882a593Smuzhiyun 	int last = 1;
1454*4882a593Smuzhiyun 	int ret;
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	if (cmd != MPP_IOC_CFG_V1) {
1457*4882a593Smuzhiyun 		mpp_err("unknown ioctl cmd %x\n", cmd);
1458*4882a593Smuzhiyun 		return -EINVAL;
1459*4882a593Smuzhiyun 	}
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun next:
1462*4882a593Smuzhiyun 	/* first, parse to fixed struct */
1463*4882a593Smuzhiyun 	if (copy_from_user(&msg_v1, msg, sizeof(msg_v1)))
1464*4882a593Smuzhiyun 		return -EFAULT;
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	msg += sizeof(msg_v1);
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	mpp_debug(DEBUG_IOCTL, "cmd %x collect flags %08x, size %d, offset %x\n",
1469*4882a593Smuzhiyun 		  msg_v1.cmd, msg_v1.flags, msg_v1.size, msg_v1.offset);
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	if (mpp_check_cmd_v1(msg_v1.cmd)) {
1472*4882a593Smuzhiyun 		mpp_err("mpp cmd %x is not supported.\n", msg_v1.cmd);
1473*4882a593Smuzhiyun 		return -EFAULT;
1474*4882a593Smuzhiyun 	}
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	if (msg_v1.flags & MPP_FLAGS_MULTI_MSG)
1477*4882a593Smuzhiyun 		last = (msg_v1.flags & MPP_FLAGS_LAST_MSG) ? 1 : 0;
1478*4882a593Smuzhiyun 	else
1479*4882a593Smuzhiyun 		last = 1;
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	/* check cmd for change msgs session */
1482*4882a593Smuzhiyun 	if (msg_v1.cmd == MPP_CMD_SET_SESSION_FD) {
1483*4882a593Smuzhiyun 		struct mpp_bat_msg bat_msg;
1484*4882a593Smuzhiyun 		struct mpp_bat_msg __user *usr_cmd;
1485*4882a593Smuzhiyun 		struct fd f;
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 		/* try session switch here */
1488*4882a593Smuzhiyun 		usr_cmd = (struct mpp_bat_msg __user *)(unsigned long)msg_v1.data_ptr;
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 		if (copy_from_user(&bat_msg, usr_cmd, sizeof(bat_msg)))
1491*4882a593Smuzhiyun 			return -EFAULT;
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 		/* skip finished message */
1494*4882a593Smuzhiyun 		if (bat_msg.flag & MPP_BAT_MSG_DONE)
1495*4882a593Smuzhiyun 			goto session_switch_done;
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 		f = fdget(bat_msg.fd);
1498*4882a593Smuzhiyun 		if (!f.file) {
1499*4882a593Smuzhiyun 			int ret = -EBADF;
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 			mpp_err("fd %d get session failed\n", bat_msg.fd);
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun 			if (copy_to_user(&usr_cmd->ret, &ret, sizeof(usr_cmd->ret)))
1504*4882a593Smuzhiyun 				mpp_err("copy_to_user failed.\n");
1505*4882a593Smuzhiyun 			goto session_switch_done;
1506*4882a593Smuzhiyun 		}
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 		/* NOTE: queue the previously collected task and drop it if empty */
1509*4882a593Smuzhiyun 		if (msgs) {
1510*4882a593Smuzhiyun 			if (msgs->req_cnt)
1511*4882a593Smuzhiyun 				task_msgs_add(msgs, head);
1512*4882a593Smuzhiyun 			else
1513*4882a593Smuzhiyun 				put_task_msgs(msgs);
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 			msgs = NULL;
1516*4882a593Smuzhiyun 		}
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 		/* switch session */
1519*4882a593Smuzhiyun 		session = f.file->private_data;
1520*4882a593Smuzhiyun 		msgs = get_task_msgs(session);
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 		if (f.file->private_data == session)
1523*4882a593Smuzhiyun 			msgs->ext_fd = bat_msg.fd;
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 		msgs->f = f;
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 		mpp_debug(DEBUG_IOCTL, "fd %d, session %d msg_cnt %d\n",
1528*4882a593Smuzhiyun 				bat_msg.fd, session->index, session->msgs_cnt);
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun session_switch_done:
1531*4882a593Smuzhiyun 		/* session id should NOT be the last message */
1532*4882a593Smuzhiyun 		if (last)
1533*4882a593Smuzhiyun 			return 0;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 		goto next;
1536*4882a593Smuzhiyun 	}
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	if (!msgs)
1539*4882a593Smuzhiyun 		msgs = get_task_msgs(session);
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	if (!msgs) {
1542*4882a593Smuzhiyun 		pr_err("session %d:%d failed to get task msgs\n",
1543*4882a593Smuzhiyun 		       session->pid, session->index);
1544*4882a593Smuzhiyun 		return -EINVAL;
1545*4882a593Smuzhiyun 	}
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	if (msgs->req_cnt >= MPP_MAX_MSG_NUM) {
1548*4882a593Smuzhiyun 		mpp_err("session %d message count %d more than %d.\n",
1549*4882a593Smuzhiyun 			session->index, msgs->req_cnt, MPP_MAX_MSG_NUM);
1550*4882a593Smuzhiyun 		return -EINVAL;
1551*4882a593Smuzhiyun 	}
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	req = &msgs->reqs[msgs->req_cnt++];
1554*4882a593Smuzhiyun 	req->cmd = msg_v1.cmd;
1555*4882a593Smuzhiyun 	req->flags = msg_v1.flags;
1556*4882a593Smuzhiyun 	req->size = msg_v1.size;
1557*4882a593Smuzhiyun 	req->offset = msg_v1.offset;
1558*4882a593Smuzhiyun 	req->data = (void __user *)(unsigned long)msg_v1.data_ptr;
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	ret = mpp_process_request(session, session->srv, req, msgs);
1561*4882a593Smuzhiyun 	if (ret) {
1562*4882a593Smuzhiyun 		mpp_err("session %d process cmd %x ret %d\n",
1563*4882a593Smuzhiyun 			session->index, req->cmd, ret);
1564*4882a593Smuzhiyun 		return ret;
1565*4882a593Smuzhiyun 	}
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun 	if (!last)
1568*4882a593Smuzhiyun 		goto next;
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	task_msgs_add(msgs, head);
1571*4882a593Smuzhiyun 	msgs = NULL;
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	return 0;
1574*4882a593Smuzhiyun }
1575*4882a593Smuzhiyun 
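/*
 * Push every collected task onto its queue's pending list. Consecutive
 * msgs that share a queue are batched under one pending_lock, and the
 * worker of the previous queue is kicked whenever the queue changes,
 * plus once more at the end for the last queue used.
 */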
1576*4882a593Smuzhiyun static void mpp_msgs_trigger(struct list_head *msgs_list)
1577*4882a593Smuzhiyun {
1578*4882a593Smuzhiyun 	struct mpp_task_msgs *msgs, *n;
1579*4882a593Smuzhiyun 	struct mpp_dev *mpp_prev = NULL;
1580*4882a593Smuzhiyun 	struct mpp_taskqueue *queue_prev = NULL;
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	/* push task to queue */
1583*4882a593Smuzhiyun 	list_for_each_entry_safe(msgs, n, msgs_list, list) {
1584*4882a593Smuzhiyun 		struct mpp_dev *mpp;
1585*4882a593Smuzhiyun 		struct mpp_task *task;
1586*4882a593Smuzhiyun 		struct mpp_taskqueue *queue;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 		if (!msgs->set_cnt || !msgs->queue)
1589*4882a593Smuzhiyun 			continue;
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 		mpp = msgs->mpp;
1592*4882a593Smuzhiyun 		task = msgs->task;
1593*4882a593Smuzhiyun 		queue = msgs->queue;
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 		if (queue_prev != queue) {
1596*4882a593Smuzhiyun 			if (queue_prev && mpp_prev) {
1597*4882a593Smuzhiyun 				mutex_unlock(&queue_prev->pending_lock);
1598*4882a593Smuzhiyun 				mpp_taskqueue_trigger_work(mpp_prev);
1599*4882a593Smuzhiyun 			}
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 			if (queue)
1602*4882a593Smuzhiyun 				mutex_lock(&queue->pending_lock);
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 			mpp_prev = mpp;
1605*4882a593Smuzhiyun 			queue_prev = queue;
1606*4882a593Smuzhiyun 		}
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 		if (test_bit(TASK_STATE_ABORT, &task->state))
1609*4882a593Smuzhiyun 			pr_info("try to trigger abort task %d\n", task->task_id);
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 		set_bit(TASK_STATE_PENDING, &task->state);
1612*4882a593Smuzhiyun 		list_add_tail(&task->queue_link, &queue->pending_list);
1613*4882a593Smuzhiyun 	}
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	if (mpp_prev && queue_prev) {
1616*4882a593Smuzhiyun 		mutex_unlock(&queue_prev->pending_lock);
1617*4882a593Smuzhiyun 		mpp_taskqueue_trigger_work(mpp_prev);
1618*4882a593Smuzhiyun 	}
1619*4882a593Smuzhiyun }
1620*4882a593Smuzhiyun 
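/*
 * Wait stage of the ioctl: for every msgs entry that contains a poll
 * request, block until the task result is available, then release the
 * msgs.
 */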
1621*4882a593Smuzhiyun static void mpp_msgs_wait(struct list_head *msgs_list)
1622*4882a593Smuzhiyun {
1623*4882a593Smuzhiyun 	struct mpp_task_msgs *msgs, *n;
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 	/* poll and release each task */
1626*4882a593Smuzhiyun 	list_for_each_entry_safe(msgs, n, msgs_list, list) {
1627*4882a593Smuzhiyun 		struct mpp_session *session = msgs->session;
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 		if (msgs->poll_cnt) {
1630*4882a593Smuzhiyun 			int ret = mpp_wait_result(session, msgs);
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 			if (ret) {
1633*4882a593Smuzhiyun 				mpp_err("session %d wait result ret %d\n",
1634*4882a593Smuzhiyun 					session->index, ret);
1635*4882a593Smuzhiyun 			}
1636*4882a593Smuzhiyun 		}
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 		put_task_msgs(msgs);
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	}
1641*4882a593Smuzhiyun }
1642*4882a593Smuzhiyun 
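/*
 * Main ioctl entry: collect the user messages, trigger the resulting
 * tasks and wait for those that asked to be polled. Rejected early when
 * the session or the whole service is being torn down.
 */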
1643*4882a593Smuzhiyun static long mpp_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1644*4882a593Smuzhiyun {
1645*4882a593Smuzhiyun 	struct mpp_service *srv;
1646*4882a593Smuzhiyun 	struct mpp_session *session = (struct mpp_session *)filp->private_data;
1647*4882a593Smuzhiyun 	struct list_head msgs_list;
1648*4882a593Smuzhiyun 	int ret = 0;
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	mpp_debug_enter();
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	if (!session || !session->srv) {
1653*4882a593Smuzhiyun 		mpp_err("session %p\n", session);
1654*4882a593Smuzhiyun 		return -EINVAL;
1655*4882a593Smuzhiyun 	}
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	srv = session->srv;
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	if (atomic_read(&session->release_request) > 0) {
1660*4882a593Smuzhiyun 		mpp_debug(DEBUG_IOCTL, "release session had request\n");
1661*4882a593Smuzhiyun 		return -EBUSY;
1662*4882a593Smuzhiyun 	}
1663*4882a593Smuzhiyun 	if (atomic_read(&srv->shutdown_request) > 0) {
1664*4882a593Smuzhiyun 		mpp_debug(DEBUG_IOCTL, "shutdown had request\n");
1665*4882a593Smuzhiyun 		return -EBUSY;
1666*4882a593Smuzhiyun 	}
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 	INIT_LIST_HEAD(&msgs_list);
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	ret = mpp_collect_msgs(&msgs_list, session, cmd, (void __user *)arg);
1671*4882a593Smuzhiyun 	if (ret)
1672*4882a593Smuzhiyun 		mpp_err("collect msgs failed %d\n", ret);
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun 	mpp_msgs_trigger(&msgs_list);
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	mpp_msgs_wait(&msgs_list);
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 	mpp_debug_leave();
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 	return ret;
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun static int mpp_dev_open(struct inode *inode, struct file *filp)
1684*4882a593Smuzhiyun {
1685*4882a593Smuzhiyun 	struct mpp_session *session = NULL;
1686*4882a593Smuzhiyun 	struct mpp_service *srv = container_of(inode->i_cdev,
1687*4882a593Smuzhiyun 					       struct mpp_service,
1688*4882a593Smuzhiyun 					       mpp_cdev);
1689*4882a593Smuzhiyun 	mpp_debug_enter();
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	session = mpp_session_init();
1692*4882a593Smuzhiyun 	if (!session)
1693*4882a593Smuzhiyun 		return -ENOMEM;
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 	session->srv = srv;
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	if (session->srv) {
1698*4882a593Smuzhiyun 		mutex_lock(&srv->session_lock);
1699*4882a593Smuzhiyun 		list_add_tail(&session->service_link, &srv->session_list);
1700*4882a593Smuzhiyun 		mutex_unlock(&srv->session_lock);
1701*4882a593Smuzhiyun 	}
1702*4882a593Smuzhiyun 	session->process_task = mpp_process_task_default;
1703*4882a593Smuzhiyun 	session->wait_result = mpp_wait_result_default;
1704*4882a593Smuzhiyun 	session->deinit = mpp_session_deinit_default;
1705*4882a593Smuzhiyun 	filp->private_data = (void *)session;
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	mpp_debug_leave();
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	return nonseekable_open(inode, filp);
1710*4882a593Smuzhiyun }
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun static int mpp_dev_release(struct inode *inode, struct file *filp)
1713*4882a593Smuzhiyun {
1714*4882a593Smuzhiyun 	struct mpp_session *session = filp->private_data;
1715*4882a593Smuzhiyun 
1716*4882a593Smuzhiyun 	mpp_debug_enter();
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 	if (!session) {
1719*4882a593Smuzhiyun 		mpp_err("session is null\n");
1720*4882a593Smuzhiyun 		return -EINVAL;
1721*4882a593Smuzhiyun 	}
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun 	/* wait for all tasks to be done */
1724*4882a593Smuzhiyun 	atomic_inc(&session->release_request);
1725*4882a593Smuzhiyun 
1726*4882a593Smuzhiyun 	if (session->mpp || atomic_read(&session->task_count))
1727*4882a593Smuzhiyun 		mpp_session_detach_workqueue(session);
1728*4882a593Smuzhiyun 	else
1729*4882a593Smuzhiyun 		mpp_session_deinit(session);
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun 	filp->private_data = NULL;
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 	mpp_debug_leave();
1734*4882a593Smuzhiyun 	return 0;
1735*4882a593Smuzhiyun }
1736*4882a593Smuzhiyun 
1737*4882a593Smuzhiyun const struct file_operations rockchip_mpp_fops = {
1738*4882a593Smuzhiyun 	.open		= mpp_dev_open,
1739*4882a593Smuzhiyun 	.release	= mpp_dev_release,
1740*4882a593Smuzhiyun 	.unlocked_ioctl = mpp_dev_ioctl,
1741*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
1742*4882a593Smuzhiyun 	.compat_ioctl   = mpp_dev_ioctl,
1743*4882a593Smuzhiyun #endif
1744*4882a593Smuzhiyun };
1745*4882a593Smuzhiyun 
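/*
 * Import a dma-buf fd into a task. If the same fd was already attached
 * to this task the existing mapping is duplicated, otherwise the buffer
 * is imported through the iommu and appended to the task's region list.
 */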
1746*4882a593Smuzhiyun struct mpp_mem_region *
1747*4882a593Smuzhiyun mpp_task_attach_fd(struct mpp_task *task, int fd)
1748*4882a593Smuzhiyun {
1749*4882a593Smuzhiyun 	struct mpp_mem_region *mem_region = NULL, *loop = NULL, *n;
1750*4882a593Smuzhiyun 	struct mpp_dma_buffer *buffer = NULL;
1751*4882a593Smuzhiyun 	struct mpp_dev *mpp = task->session->mpp;
1752*4882a593Smuzhiyun 	struct mpp_dma_session *dma = task->session->dma;
1753*4882a593Smuzhiyun 	u32 mem_num = ARRAY_SIZE(task->mem_regions);
1754*4882a593Smuzhiyun 	bool found = false;
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 	if (fd <= 0 || !dma || !mpp)
1757*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	if (task->mem_count > mem_num) {
1760*4882a593Smuzhiyun 		mpp_err("mem_count %d must be less than %d\n", task->mem_count, mem_num);
1761*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1762*4882a593Smuzhiyun 	}
1763*4882a593Smuzhiyun 
1764*4882a593Smuzhiyun 	/* check whether this fd has already been imported */
1765*4882a593Smuzhiyun 	list_for_each_entry_safe_reverse(loop, n, &task->mem_region_list, reg_link) {
1766*4882a593Smuzhiyun 		if (loop->fd == fd) {
1767*4882a593Smuzhiyun 			found = true;
1768*4882a593Smuzhiyun 			break;
1769*4882a593Smuzhiyun 		}
1770*4882a593Smuzhiyun 	}
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	mem_region = &task->mem_regions[task->mem_count];
1773*4882a593Smuzhiyun 	if (found) {
1774*4882a593Smuzhiyun 		memcpy(mem_region, loop, sizeof(*loop));
1775*4882a593Smuzhiyun 		mem_region->is_dup = true;
1776*4882a593Smuzhiyun 	} else {
1777*4882a593Smuzhiyun 		mpp_iommu_down_read(mpp->iommu_info);
1778*4882a593Smuzhiyun 		buffer = mpp_dma_import_fd(mpp->iommu_info, dma, fd);
1779*4882a593Smuzhiyun 		mpp_iommu_up_read(mpp->iommu_info);
1780*4882a593Smuzhiyun 		if (IS_ERR(buffer)) {
1781*4882a593Smuzhiyun 			mpp_err("can't import dma-buf %d\n", fd);
1782*4882a593Smuzhiyun 			return ERR_CAST(buffer);
1783*4882a593Smuzhiyun 		}
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 		mem_region->hdl = buffer;
1786*4882a593Smuzhiyun 		mem_region->iova = buffer->iova;
1787*4882a593Smuzhiyun 		mem_region->len = buffer->size;
1788*4882a593Smuzhiyun 		mem_region->fd = fd;
1789*4882a593Smuzhiyun 		mem_region->is_dup = false;
1790*4882a593Smuzhiyun 	}
1791*4882a593Smuzhiyun 	task->mem_count++;
1792*4882a593Smuzhiyun 	INIT_LIST_HEAD(&mem_region->reg_link);
1793*4882a593Smuzhiyun 	list_add_tail(&mem_region->reg_link, &task->mem_region_list);
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	return mem_region;
1796*4882a593Smuzhiyun }
1797*4882a593Smuzhiyun 
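/*
 * Replace dma-buf fds stored in the register payload with device iova
 * addresses. The registers to patch come either from the per-session
 * translation table or from the format specific table of the device.
 * Unless MPP_FLAGS_REG_NO_OFFSET is set, a register value packs the fd
 * in its low 10 bits and a byte offset in the remaining high bits.
 */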
1798*4882a593Smuzhiyun int mpp_translate_reg_address(struct mpp_session *session,
1799*4882a593Smuzhiyun 			      struct mpp_task *task, int fmt,
1800*4882a593Smuzhiyun 			      u32 *reg, struct reg_offset_info *off_inf)
1801*4882a593Smuzhiyun {
1802*4882a593Smuzhiyun 	int i;
1803*4882a593Smuzhiyun 	int cnt;
1804*4882a593Smuzhiyun 	const u16 *tbl;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	mpp_debug_enter();
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	if (session->trans_count > 0) {
1809*4882a593Smuzhiyun 		cnt = session->trans_count;
1810*4882a593Smuzhiyun 		tbl = session->trans_table;
1811*4882a593Smuzhiyun 	} else {
1812*4882a593Smuzhiyun 		struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
1813*4882a593Smuzhiyun 		struct mpp_trans_info *trans_info = mpp->var->trans_info;
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 		cnt = trans_info[fmt].count;
1816*4882a593Smuzhiyun 		tbl = trans_info[fmt].table;
1817*4882a593Smuzhiyun 	}
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 	for (i = 0; i < cnt; i++) {
1820*4882a593Smuzhiyun 		int usr_fd;
1821*4882a593Smuzhiyun 		u32 offset;
1822*4882a593Smuzhiyun 		struct mpp_mem_region *mem_region = NULL;
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
1825*4882a593Smuzhiyun 			usr_fd = reg[tbl[i]];
1826*4882a593Smuzhiyun 			offset = 0;
1827*4882a593Smuzhiyun 		} else {
1828*4882a593Smuzhiyun 			usr_fd = reg[tbl[i]] & 0x3ff;
1829*4882a593Smuzhiyun 			offset = reg[tbl[i]] >> 10;
1830*4882a593Smuzhiyun 		}
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 		if (usr_fd == 0)
1833*4882a593Smuzhiyun 			continue;
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 		mem_region = mpp_task_attach_fd(task, usr_fd);
1836*4882a593Smuzhiyun 		if (IS_ERR(mem_region)) {
1837*4882a593Smuzhiyun 			mpp_err("reg[%3d]: 0x%08x fd %d failed\n",
1838*4882a593Smuzhiyun 				tbl[i], reg[tbl[i]], usr_fd);
1839*4882a593Smuzhiyun 			return PTR_ERR(mem_region);
1840*4882a593Smuzhiyun 		}
1841*4882a593Smuzhiyun 		mpp_debug(DEBUG_IOMMU,
1842*4882a593Smuzhiyun 			  "reg[%3d]: %d => %pad, offset %10d, size %lx\n",
1843*4882a593Smuzhiyun 			  tbl[i], usr_fd, &mem_region->iova,
1844*4882a593Smuzhiyun 			  offset, mem_region->len);
1845*4882a593Smuzhiyun 		mem_region->reg_idx = tbl[i];
1846*4882a593Smuzhiyun 		reg[tbl[i]] = mem_region->iova + offset;
1847*4882a593Smuzhiyun 	}
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 	mpp_debug_leave();
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	return 0;
1852*4882a593Smuzhiyun }
1853*4882a593Smuzhiyun 
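/*
 * Sanity check a register read/write request (offset and size relative
 * to @base) against the expected register window and the register file
 * size @max_size. A request running past @max_size only gets its size
 * adjusted instead of being rejected.
 */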
1854*4882a593Smuzhiyun int mpp_check_req(struct mpp_request *req, int base,
1855*4882a593Smuzhiyun 		  int max_size, u32 off_s, u32 off_e)
1856*4882a593Smuzhiyun {
1857*4882a593Smuzhiyun 	int req_off;
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 	if (req->offset < base) {
1860*4882a593Smuzhiyun 		mpp_err("error: base %x, offset %x\n",
1861*4882a593Smuzhiyun 			base, req->offset);
1862*4882a593Smuzhiyun 		return -EINVAL;
1863*4882a593Smuzhiyun 	}
1864*4882a593Smuzhiyun 	req_off = req->offset - base;
1865*4882a593Smuzhiyun 	if ((req_off + req->size) < off_s) {
1866*4882a593Smuzhiyun 		mpp_err("error: req_off %x, req_size %x, off_s %x\n",
1867*4882a593Smuzhiyun 			req_off, req->size, off_s);
1868*4882a593Smuzhiyun 		return -EINVAL;
1869*4882a593Smuzhiyun 	}
1870*4882a593Smuzhiyun 	if (max_size < off_e) {
1871*4882a593Smuzhiyun 		mpp_err("error: off_e %x, max_size %x\n",
1872*4882a593Smuzhiyun 			off_e, max_size);
1873*4882a593Smuzhiyun 		return -EINVAL;
1874*4882a593Smuzhiyun 	}
1875*4882a593Smuzhiyun 	if (req_off > max_size) {
1876*4882a593Smuzhiyun 		mpp_err("error: req_off %x, max_size %x\n",
1877*4882a593Smuzhiyun 			req_off, max_size);
1878*4882a593Smuzhiyun 		return -EINVAL;
1879*4882a593Smuzhiyun 	}
1880*4882a593Smuzhiyun 	if ((req_off + req->size) > max_size) {
1881*4882a593Smuzhiyun 		mpp_err("error: req_off %x, req_size %x, max_size %x\n",
1882*4882a593Smuzhiyun 			req_off, req->size, max_size);
1883*4882a593Smuzhiyun 		req->size = req_off + req->size - max_size;
1884*4882a593Smuzhiyun 	}
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 	return 0;
1887*4882a593Smuzhiyun }
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun int mpp_extract_reg_offset_info(struct reg_offset_info *off_inf,
1890*4882a593Smuzhiyun 				struct mpp_request *req)
1891*4882a593Smuzhiyun {
1892*4882a593Smuzhiyun 	int max_size = ARRAY_SIZE(off_inf->elem);
1893*4882a593Smuzhiyun 	int cnt = req->size / sizeof(off_inf->elem[0]);
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun 	if ((cnt + off_inf->cnt) > max_size) {
1896*4882a593Smuzhiyun 		mpp_err("count %d, total %d, max_size %d\n",
1897*4882a593Smuzhiyun 			cnt, off_inf->cnt, max_size);
1898*4882a593Smuzhiyun 		return -EINVAL;
1899*4882a593Smuzhiyun 	}
1900*4882a593Smuzhiyun 	if (copy_from_user(&off_inf->elem[off_inf->cnt],
1901*4882a593Smuzhiyun 			   req->data, req->size)) {
1902*4882a593Smuzhiyun 		mpp_err("copy_from_user failed\n");
1903*4882a593Smuzhiyun 		return -EINVAL;
1904*4882a593Smuzhiyun 	}
1905*4882a593Smuzhiyun 	off_inf->cnt += cnt;
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 	return 0;
1908*4882a593Smuzhiyun }
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun int mpp_query_reg_offset_info(struct reg_offset_info *off_inf,
1911*4882a593Smuzhiyun 			      u32 index)
1912*4882a593Smuzhiyun {
1913*4882a593Smuzhiyun 	mpp_debug_enter();
1914*4882a593Smuzhiyun 	if (off_inf) {
1915*4882a593Smuzhiyun 		int i;
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 		for (i = 0; i < off_inf->cnt; i++) {
1918*4882a593Smuzhiyun 			if (off_inf->elem[i].index == index)
1919*4882a593Smuzhiyun 				return off_inf->elem[i].offset;
1920*4882a593Smuzhiyun 		}
1921*4882a593Smuzhiyun 	}
1922*4882a593Smuzhiyun 	mpp_debug_leave();
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 	return 0;
1925*4882a593Smuzhiyun }
1926*4882a593Smuzhiyun 
1927*4882a593Smuzhiyun int mpp_translate_reg_offset_info(struct mpp_task *task,
1928*4882a593Smuzhiyun 				  struct reg_offset_info *off_inf,
1929*4882a593Smuzhiyun 				  u32 *reg)
1930*4882a593Smuzhiyun {
1931*4882a593Smuzhiyun 	mpp_debug_enter();
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 	if (off_inf) {
1934*4882a593Smuzhiyun 		int i;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 		for (i = 0; i < off_inf->cnt; i++) {
1937*4882a593Smuzhiyun 			mpp_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1938*4882a593Smuzhiyun 				  off_inf->elem[i].index,
1939*4882a593Smuzhiyun 				  off_inf->elem[i].offset);
1940*4882a593Smuzhiyun 			reg[off_inf->elem[i].index] += off_inf->elem[i].offset;
1941*4882a593Smuzhiyun 		}
1942*4882a593Smuzhiyun 	}
1943*4882a593Smuzhiyun 	mpp_debug_leave();
1944*4882a593Smuzhiyun 
1945*4882a593Smuzhiyun 	return 0;
1946*4882a593Smuzhiyun }
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun int mpp_task_init(struct mpp_session *session, struct mpp_task *task)
1949*4882a593Smuzhiyun {
1950*4882a593Smuzhiyun 	INIT_LIST_HEAD(&task->pending_link);
1951*4882a593Smuzhiyun 	INIT_LIST_HEAD(&task->queue_link);
1952*4882a593Smuzhiyun 	INIT_LIST_HEAD(&task->mem_region_list);
1953*4882a593Smuzhiyun 	task->state = 0;
1954*4882a593Smuzhiyun 	task->mem_count = 0;
1955*4882a593Smuzhiyun 	task->session = session;
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 	return 0;
1958*4882a593Smuzhiyun }
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun int mpp_task_finish(struct mpp_session *session,
1961*4882a593Smuzhiyun 		    struct mpp_task *task)
1962*4882a593Smuzhiyun {
1963*4882a593Smuzhiyun 	struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 	if (mpp->dev_ops->finish)
1966*4882a593Smuzhiyun 		mpp->dev_ops->finish(mpp, task);
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun 	mpp_reset_up_read(mpp->reset_group);
1969*4882a593Smuzhiyun 	if (atomic_read(&mpp->reset_request) > 0)
1970*4882a593Smuzhiyun 		mpp_dev_reset(mpp);
1971*4882a593Smuzhiyun 	mpp_power_off(mpp);
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun 	set_bit(TASK_STATE_FINISH, &task->state);
1974*4882a593Smuzhiyun 	set_bit(TASK_STATE_DONE, &task->state);
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 	if (session->srv->timing_en) {
1977*4882a593Smuzhiyun 		s64 time_diff;
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 		task->on_finish = ktime_get();
1980*4882a593Smuzhiyun 		set_bit(TASK_TIMING_FINISH, &task->state);
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 		time_diff = ktime_us_delta(task->on_finish, task->on_create);
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 		if (mpp->timing_check && time_diff > (s64)mpp->timing_check)
1985*4882a593Smuzhiyun 			mpp_task_dump_timing(task, time_diff);
1986*4882a593Smuzhiyun 	}
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	/* Wake up the GET thread */
1989*4882a593Smuzhiyun 	wake_up(&task->wait);
1990*4882a593Smuzhiyun 	mpp_taskqueue_pop_running(mpp->queue, task);
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 	return 0;
1993*4882a593Smuzhiyun }
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun int mpp_task_finalize(struct mpp_session *session,
1996*4882a593Smuzhiyun 		      struct mpp_task *task)
1997*4882a593Smuzhiyun {
1998*4882a593Smuzhiyun 	struct mpp_mem_region *mem_region = NULL, *n;
1999*4882a593Smuzhiyun 	struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	/* release the memory regions attached to this register table. */
2002*4882a593Smuzhiyun 	list_for_each_entry_safe(mem_region, n,
2003*4882a593Smuzhiyun 				 &task->mem_region_list,
2004*4882a593Smuzhiyun 				 reg_link) {
2005*4882a593Smuzhiyun 		if (!mem_region->is_dup) {
2006*4882a593Smuzhiyun 			mpp_iommu_down_read(mpp->iommu_info);
2007*4882a593Smuzhiyun 			mpp_dma_release(session->dma, mem_region->hdl);
2008*4882a593Smuzhiyun 			mpp_iommu_up_read(mpp->iommu_info);
2009*4882a593Smuzhiyun 		}
2010*4882a593Smuzhiyun 		list_del_init(&mem_region->reg_link);
2011*4882a593Smuzhiyun 	}
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	return 0;
2014*4882a593Smuzhiyun }
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun int mpp_task_dump_mem_region(struct mpp_dev *mpp,
2017*4882a593Smuzhiyun 			     struct mpp_task *task)
2018*4882a593Smuzhiyun {
2019*4882a593Smuzhiyun 	struct mpp_mem_region *mem = NULL, *n;
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 	if (!task)
2022*4882a593Smuzhiyun 		return -EIO;
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 	mpp_err("--- dump task %d mem region ---\n", task->task_index);
2025*4882a593Smuzhiyun 	if (!list_empty(&task->mem_region_list)) {
2026*4882a593Smuzhiyun 		list_for_each_entry_safe(mem, n,
2027*4882a593Smuzhiyun 					 &task->mem_region_list,
2028*4882a593Smuzhiyun 					 reg_link) {
2029*4882a593Smuzhiyun 			mpp_err("reg[%3d]: %pad, size %lx\n",
2030*4882a593Smuzhiyun 				mem->reg_idx, &mem->iova, mem->len);
2031*4882a593Smuzhiyun 		}
2032*4882a593Smuzhiyun 	} else {
2033*4882a593Smuzhiyun 		dev_err(mpp->dev, "no memory region mapped\n");
2034*4882a593Smuzhiyun 	}
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 	return 0;
2037*4882a593Smuzhiyun }
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun int mpp_task_dump_reg(struct mpp_dev *mpp,
2040*4882a593Smuzhiyun 		      struct mpp_task *task)
2041*4882a593Smuzhiyun {
2042*4882a593Smuzhiyun 	if (!task)
2043*4882a593Smuzhiyun 		return -EIO;
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun 	if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
2046*4882a593Smuzhiyun 		mpp_err("--- dump task register ---\n");
2047*4882a593Smuzhiyun 		if (task->reg) {
2048*4882a593Smuzhiyun 			u32 i;
2049*4882a593Smuzhiyun 			u32 s = task->hw_info->reg_start;
2050*4882a593Smuzhiyun 			u32 e = task->hw_info->reg_end;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 			for (i = s; i <= e; i++) {
2053*4882a593Smuzhiyun 				u32 reg = i * sizeof(u32);
2054*4882a593Smuzhiyun 
2055*4882a593Smuzhiyun 				mpp_err("reg[%03d]: %04x: 0x%08x\n",
2056*4882a593Smuzhiyun 					i, reg, task->reg[i]);
2057*4882a593Smuzhiyun 			}
2058*4882a593Smuzhiyun 		}
2059*4882a593Smuzhiyun 	}
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	return 0;
2062*4882a593Smuzhiyun }
2063*4882a593Smuzhiyun 
2064*4882a593Smuzhiyun int mpp_task_dump_hw_reg(struct mpp_dev *mpp)
2065*4882a593Smuzhiyun {
2066*4882a593Smuzhiyun 	u32 i;
2067*4882a593Smuzhiyun 	u32 s = mpp->var->hw_info->reg_start;
2068*4882a593Smuzhiyun 	u32 e = mpp->var->hw_info->reg_end;
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 	mpp_err("--- dump hardware register ---\n");
2071*4882a593Smuzhiyun 	for (i = s; i <= e; i++) {
2072*4882a593Smuzhiyun 		u32 reg = i * sizeof(u32);
2073*4882a593Smuzhiyun 
2074*4882a593Smuzhiyun 		mpp_err("reg[%03d]: %04x: 0x%08x\n",
2075*4882a593Smuzhiyun 				i, reg, readl_relaxed(mpp->reg_base + reg));
2076*4882a593Smuzhiyun 	}
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun 	return 0;
2079*4882a593Smuzhiyun }
2080*4882a593Smuzhiyun 
2081*4882a593Smuzhiyun void mpp_reg_show(struct mpp_dev *mpp, u32 offset)
2082*4882a593Smuzhiyun {
2083*4882a593Smuzhiyun 	if (!mpp)
2084*4882a593Smuzhiyun 		return;
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	dev_err(mpp->dev, "reg[%03d]: %04x: 0x%08x\n",
2087*4882a593Smuzhiyun 		offset >> 2, offset, mpp_read_relaxed(mpp, offset));
2088*4882a593Smuzhiyun }
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun void mpp_reg_show_range(struct mpp_dev *mpp, u32 start, u32 end)
2091*4882a593Smuzhiyun {
2092*4882a593Smuzhiyun 	u32 offset;
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun 	if (!mpp)
2095*4882a593Smuzhiyun 		return;
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	for (offset = start; offset < end; offset += sizeof(u32))
2098*4882a593Smuzhiyun 		mpp_reg_show(mpp, offset);
2099*4882a593Smuzhiyun }
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun /* The device will do more probing work after this */
2102*4882a593Smuzhiyun int mpp_dev_probe(struct mpp_dev *mpp,
2103*4882a593Smuzhiyun 		  struct platform_device *pdev)
2104*4882a593Smuzhiyun {
2105*4882a593Smuzhiyun 	int ret;
2106*4882a593Smuzhiyun 	struct resource *res = NULL;
2107*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
2108*4882a593Smuzhiyun 	struct device_node *np = dev->of_node;
2109*4882a593Smuzhiyun 	struct mpp_hw_info *hw_info = mpp->var->hw_info;
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	/* Get the disable-auto-frequency flag from the dtsi */
2112*4882a593Smuzhiyun 	mpp->auto_freq_en = !device_property_read_bool(dev, "rockchip,disable-auto-freq");
2113*4882a593Smuzhiyun 	/* read flag for pmu idle request */
2114*4882a593Smuzhiyun 	mpp->skip_idle = device_property_read_bool(dev, "rockchip,skip-pmu-idle-request");
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 	/* read link table capacity */
2117*4882a593Smuzhiyun 	ret = of_property_read_u32(np, "rockchip,task-capacity",
2118*4882a593Smuzhiyun 				   &mpp->task_capacity);
2119*4882a593Smuzhiyun 	if (ret)
2120*4882a593Smuzhiyun 		mpp->task_capacity = 1;
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	mpp->dev = dev;
2123*4882a593Smuzhiyun 	mpp->hw_ops = mpp->var->hw_ops;
2124*4882a593Smuzhiyun 	mpp->dev_ops = mpp->var->dev_ops;
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	/* Get and attach to service */
2127*4882a593Smuzhiyun 	ret = mpp_attach_service(mpp, dev);
2128*4882a593Smuzhiyun 	if (ret) {
2129*4882a593Smuzhiyun 		dev_err(dev, "failed to attach service\n");
2130*4882a593Smuzhiyun 		return -ENODEV;
2131*4882a593Smuzhiyun 	}
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 	/* power domain autosuspend delay 2s */
2134*4882a593Smuzhiyun 	pm_runtime_set_autosuspend_delay(dev, 2000);
2135*4882a593Smuzhiyun 	pm_runtime_use_autosuspend(dev);
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun 	kthread_init_work(&mpp->work, mpp_task_worker_default);
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	atomic_set(&mpp->reset_request, 0);
2140*4882a593Smuzhiyun 	atomic_set(&mpp->session_index, 0);
2141*4882a593Smuzhiyun 	atomic_set(&mpp->task_count, 0);
2142*4882a593Smuzhiyun 	atomic_set(&mpp->task_index, 0);
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	device_init_wakeup(dev, true);
2145*4882a593Smuzhiyun 	pm_runtime_enable(dev);
2146*4882a593Smuzhiyun 	mpp->irq = platform_get_irq(pdev, 0);
2147*4882a593Smuzhiyun 	if (mpp->irq < 0) {
2148*4882a593Smuzhiyun 		dev_err(dev, "No interrupt resource found\n");
2149*4882a593Smuzhiyun 		ret = -ENODEV;
2150*4882a593Smuzhiyun 		goto failed;
2151*4882a593Smuzhiyun 	}
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2154*4882a593Smuzhiyun 	if (!res) {
2155*4882a593Smuzhiyun 		dev_err(&pdev->dev, "no memory resource defined\n");
2156*4882a593Smuzhiyun 		ret = -ENODEV;
2157*4882a593Smuzhiyun 		goto failed;
2158*4882a593Smuzhiyun 	}
2159*4882a593Smuzhiyun 	/*
2160*4882a593Smuzhiyun 	 * Note: devm_ioremap_resource() can not be used here. The reason is
2161*4882a593Smuzhiyun 	 * that hevc and vdpu map the same register address region on rk3368,
2162*4882a593Smuzhiyun 	 * and devm_ioremap_resource() calls devm_request_mem_region() to
2163*4882a593Smuzhiyun 	 * claim the region exclusively. Using devm_ioremap() instead
2164*4882a593Smuzhiyun 	 * avoids that conflict.
2165*4882a593Smuzhiyun 	 */
2166*4882a593Smuzhiyun 	mpp->reg_base = devm_ioremap(dev, res->start, resource_size(res));
2167*4882a593Smuzhiyun 	if (!mpp->reg_base) {
2168*4882a593Smuzhiyun 		dev_err(dev, "ioremap failed for resource %pR\n", res);
2169*4882a593Smuzhiyun 		ret = -ENOMEM;
2170*4882a593Smuzhiyun 		goto failed;
2171*4882a593Smuzhiyun 	}
2172*4882a593Smuzhiyun 	mpp->io_base = res->start;
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 	/*
2175*4882a593Smuzhiyun 	 * TODO: probe the iommu here or in the device driver itself; some
2176*4882a593Smuzhiyun 	 * devices have no iommu, so doing it in the device may be better.
2177*4882a593Smuzhiyun 	 */
2178*4882a593Smuzhiyun 	mpp->iommu_info = mpp_iommu_probe(dev);
2179*4882a593Smuzhiyun 	if (IS_ERR(mpp->iommu_info)) {
2180*4882a593Smuzhiyun 		dev_err(dev, "failed to attach iommu\n");
2181*4882a593Smuzhiyun 		mpp->iommu_info = NULL;
2182*4882a593Smuzhiyun 	}
2183*4882a593Smuzhiyun 	if (mpp->hw_ops->init) {
2184*4882a593Smuzhiyun 		ret = mpp->hw_ops->init(mpp);
2185*4882a593Smuzhiyun 		if (ret)
2186*4882a593Smuzhiyun 			goto failed;
2187*4882a593Smuzhiyun 	}
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 	/* read hardware id */
2190*4882a593Smuzhiyun 	if (hw_info->reg_id >= 0) {
2191*4882a593Smuzhiyun 		pm_runtime_get_sync(dev);
2192*4882a593Smuzhiyun 		if (mpp->hw_ops->clk_on)
2193*4882a593Smuzhiyun 			mpp->hw_ops->clk_on(mpp);
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 		hw_info->hw_id = mpp_read(mpp, hw_info->reg_id * sizeof(u32));
2196*4882a593Smuzhiyun 		if (mpp->hw_ops->clk_off)
2197*4882a593Smuzhiyun 			mpp->hw_ops->clk_off(mpp);
2198*4882a593Smuzhiyun 		pm_runtime_put_sync(dev);
2199*4882a593Smuzhiyun 	}
2200*4882a593Smuzhiyun 
2201*4882a593Smuzhiyun 	return ret;
2202*4882a593Smuzhiyun failed:
2203*4882a593Smuzhiyun 	mpp_detach_workqueue(mpp);
2204*4882a593Smuzhiyun 	device_init_wakeup(dev, false);
2205*4882a593Smuzhiyun 	pm_runtime_disable(dev);
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	return ret;
2208*4882a593Smuzhiyun }
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun int mpp_dev_remove(struct mpp_dev *mpp)
2211*4882a593Smuzhiyun {
2212*4882a593Smuzhiyun 	if (mpp->hw_ops->exit)
2213*4882a593Smuzhiyun 		mpp->hw_ops->exit(mpp);
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 	mpp_iommu_remove(mpp->iommu_info);
2216*4882a593Smuzhiyun 	mpp_detach_workqueue(mpp);
2217*4882a593Smuzhiyun 	device_init_wakeup(mpp->dev, false);
2218*4882a593Smuzhiyun 	pm_runtime_disable(mpp->dev);
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 	return 0;
2221*4882a593Smuzhiyun }
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun void mpp_dev_shutdown(struct platform_device *pdev)
2224*4882a593Smuzhiyun {
2225*4882a593Smuzhiyun 	int ret;
2226*4882a593Smuzhiyun 	int val;
2227*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
2228*4882a593Smuzhiyun 	struct mpp_dev *mpp = dev_get_drvdata(dev);
2229*4882a593Smuzhiyun 
2230*4882a593Smuzhiyun 	dev_info(dev, "shutdown device\n");
2231*4882a593Smuzhiyun 
2232*4882a593Smuzhiyun 	atomic_inc(&mpp->srv->shutdown_request);
2233*4882a593Smuzhiyun 	ret = readx_poll_timeout(atomic_read,
2234*4882a593Smuzhiyun 				 &mpp->task_count,
2235*4882a593Smuzhiyun 				 val, val == 0, 20000, 200000);
2236*4882a593Smuzhiyun 	if (ret == -ETIMEDOUT)
2237*4882a593Smuzhiyun 		dev_err(dev, "wait total %d running time out\n",
2238*4882a593Smuzhiyun 			atomic_read(&mpp->task_count));
2239*4882a593Smuzhiyun 	else
2240*4882a593Smuzhiyun 		dev_info(dev, "shutdown success\n");
2241*4882a593Smuzhiyun }
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun int mpp_dev_register_srv(struct mpp_dev *mpp, struct mpp_service *srv)
2244*4882a593Smuzhiyun {
2245*4882a593Smuzhiyun 	enum MPP_DEVICE_TYPE device_type = mpp->var->device_type;
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	srv->sub_devices[device_type] = mpp;
2248*4882a593Smuzhiyun 	set_bit(device_type, &srv->hw_support);
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	return 0;
2251*4882a593Smuzhiyun }
2252*4882a593Smuzhiyun 
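/*
 * Hard irq handler shared by all mpp devices. The device specific irq
 * hook decides whether the threaded isr must run; in that case the
 * timeout work of the current task is cancelled, the task is marked as
 * handled and the iommu is deactivated before returning IRQ_WAKE_THREAD.
 */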
2253*4882a593Smuzhiyun irqreturn_t mpp_dev_irq(int irq, void *param)
2254*4882a593Smuzhiyun {
2255*4882a593Smuzhiyun 	struct mpp_dev *mpp = param;
2256*4882a593Smuzhiyun 	struct mpp_task *task = mpp->cur_task;
2257*4882a593Smuzhiyun 	irqreturn_t irq_ret = IRQ_NONE;
2258*4882a593Smuzhiyun 	u32 timing_en = mpp->srv->timing_en;
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 	if (task && timing_en) {
2261*4882a593Smuzhiyun 		task->on_irq = ktime_get();
2262*4882a593Smuzhiyun 		set_bit(TASK_TIMING_IRQ, &task->state);
2263*4882a593Smuzhiyun 	}
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun 	if (mpp->dev_ops->irq)
2266*4882a593Smuzhiyun 		irq_ret = mpp->dev_ops->irq(mpp);
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun 	if (task) {
2269*4882a593Smuzhiyun 		if (irq_ret == IRQ_WAKE_THREAD) {
2270*4882a593Smuzhiyun 			/* if the wait or the delayed work has timed out, the abort request
2271*4882a593Smuzhiyun 			 * is set; the isr must not respond and the delayed work handles it
2272*4882a593Smuzhiyun 			 */
2273*4882a593Smuzhiyun 			if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
2274*4882a593Smuzhiyun 				mpp_err("error, task has been handled, irq_status %08x\n",
2275*4882a593Smuzhiyun 					mpp->irq_status);
2276*4882a593Smuzhiyun 				irq_ret = IRQ_HANDLED;
2277*4882a593Smuzhiyun 				goto done;
2278*4882a593Smuzhiyun 			}
2279*4882a593Smuzhiyun 			if (timing_en) {
2280*4882a593Smuzhiyun 				task->on_cancel_timeout = ktime_get();
2281*4882a593Smuzhiyun 				set_bit(TASK_TIMING_TO_CANCEL, &task->state);
2282*4882a593Smuzhiyun 			}
2283*4882a593Smuzhiyun 			cancel_delayed_work(&task->timeout_work);
2284*4882a593Smuzhiyun 			/* normal condition, set state and wake up isr thread */
2285*4882a593Smuzhiyun 			set_bit(TASK_STATE_IRQ, &task->state);
2286*4882a593Smuzhiyun 		}
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun 		if (irq_ret == IRQ_WAKE_THREAD)
2289*4882a593Smuzhiyun 			mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
2290*4882a593Smuzhiyun 	} else {
2291*4882a593Smuzhiyun 		mpp_debug(DEBUG_IRQ_CHECK, "error, task is null\n");
2292*4882a593Smuzhiyun 	}
2293*4882a593Smuzhiyun done:
2294*4882a593Smuzhiyun 	return irq_ret;
2295*4882a593Smuzhiyun }
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun irqreturn_t mpp_dev_isr_sched(int irq, void *param)
2298*4882a593Smuzhiyun {
2299*4882a593Smuzhiyun 	irqreturn_t ret = IRQ_NONE;
2300*4882a593Smuzhiyun 	struct mpp_dev *mpp = param;
2301*4882a593Smuzhiyun 	struct mpp_task *task = mpp->cur_task;
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	if (task && mpp->srv->timing_en) {
2304*4882a593Smuzhiyun 		task->on_isr = ktime_get();
2305*4882a593Smuzhiyun 		set_bit(TASK_TIMING_ISR, &task->state);
2306*4882a593Smuzhiyun 	}
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun 	if (mpp->auto_freq_en &&
2309*4882a593Smuzhiyun 	    mpp->hw_ops->reduce_freq &&
2310*4882a593Smuzhiyun 	    list_empty(&mpp->queue->pending_list))
2311*4882a593Smuzhiyun 		mpp->hw_ops->reduce_freq(mpp);
2312*4882a593Smuzhiyun 
2313*4882a593Smuzhiyun 	if (mpp->dev_ops->isr)
2314*4882a593Smuzhiyun 		ret = mpp->dev_ops->isr(mpp);
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	/* trigger current queue to run next task */
2317*4882a593Smuzhiyun 	mpp_taskqueue_trigger_work(mpp);
2318*4882a593Smuzhiyun 
2319*4882a593Smuzhiyun 	return ret;
2320*4882a593Smuzhiyun }
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun u32 mpp_get_grf(struct mpp_grf_info *grf_info)
2323*4882a593Smuzhiyun {
2324*4882a593Smuzhiyun 	u32 val = 0;
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun 	if (grf_info && grf_info->grf && grf_info->val)
2327*4882a593Smuzhiyun 		regmap_read(grf_info->grf, grf_info->offset, &val);
2328*4882a593Smuzhiyun 
2329*4882a593Smuzhiyun 	return (val & MPP_GRF_VAL_MASK);
2330*4882a593Smuzhiyun }
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun bool mpp_grf_is_changed(struct mpp_grf_info *grf_info)
2333*4882a593Smuzhiyun {
2334*4882a593Smuzhiyun 	bool changed = false;
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	if (grf_info && grf_info->grf && grf_info->val) {
2337*4882a593Smuzhiyun 		u32 grf_status = mpp_get_grf(grf_info);
2338*4882a593Smuzhiyun 		u32 grf_val = grf_info->val & MPP_GRF_VAL_MASK;
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun 		changed = (grf_status == grf_val) ? false : true;
2341*4882a593Smuzhiyun 	}
2342*4882a593Smuzhiyun 
2343*4882a593Smuzhiyun 	return changed;
2344*4882a593Smuzhiyun }
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun int mpp_set_grf(struct mpp_grf_info *grf_info)
2347*4882a593Smuzhiyun {
2348*4882a593Smuzhiyun 	if (grf_info && grf_info->grf && grf_info->val)
2349*4882a593Smuzhiyun 		regmap_write(grf_info->grf, grf_info->offset, grf_info->val);
2350*4882a593Smuzhiyun 
2351*4882a593Smuzhiyun 	return 0;
2352*4882a593Smuzhiyun }
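/*
 * Editor's note (illustrative, not part of the original source): the three
 * GRF helpers above read, compare and program a General Register Files
 * field through a regmap. grf_info->val is expected to carry write-enable
 * bits in its upper half, as is usual for Rockchip GRF registers, while
 * MPP_GRF_VAL_MASK keeps only the value bits for the comparison. A caller
 * that wants to re-apply the routing only when the hardware no longer
 * matches could combine them as follows (the helper name below is
 * hypothetical):
 */
static inline void mpp_grf_sync_sketch(struct mpp_grf_info *grf_info)
{
	if (mpp_grf_is_changed(grf_info))
		mpp_set_grf(grf_info);
}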
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun int mpp_time_record(struct mpp_task *task)
2355*4882a593Smuzhiyun {
2356*4882a593Smuzhiyun 	if (mpp_debug_unlikely(DEBUG_TIMING) && task) {
2357*4882a593Smuzhiyun 		task->start = ktime_get();
2358*4882a593Smuzhiyun 		task->part = task->start;
2359*4882a593Smuzhiyun 	}
2360*4882a593Smuzhiyun 
2361*4882a593Smuzhiyun 	return 0;
2362*4882a593Smuzhiyun }
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun int mpp_time_part_diff(struct mpp_task *task)
2365*4882a593Smuzhiyun {
2366*4882a593Smuzhiyun 	if (mpp_debug_unlikely(DEBUG_TIMING)) {
2367*4882a593Smuzhiyun 		ktime_t end;
2368*4882a593Smuzhiyun 		struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
2369*4882a593Smuzhiyun 
2370*4882a593Smuzhiyun 		end = ktime_get();
2371*4882a593Smuzhiyun 		mpp_debug(DEBUG_PART_TIMING, "%s:%d session %d:%d part time: %lld us\n",
2372*4882a593Smuzhiyun 			dev_name(mpp->dev), task->core_id, task->session->pid,
2373*4882a593Smuzhiyun 			task->session->index, ktime_us_delta(end, task->part));
2374*4882a593Smuzhiyun 		task->part = end;
2375*4882a593Smuzhiyun 	}
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 	return 0;
2378*4882a593Smuzhiyun }
2379*4882a593Smuzhiyun 
2380*4882a593Smuzhiyun int mpp_time_diff(struct mpp_task *task)
2381*4882a593Smuzhiyun {
2382*4882a593Smuzhiyun 	if (mpp_debug_unlikely(DEBUG_TIMING)) {
2383*4882a593Smuzhiyun 		ktime_t end;
2384*4882a593Smuzhiyun 		struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
2385*4882a593Smuzhiyun 
2386*4882a593Smuzhiyun 		end = ktime_get();
2387*4882a593Smuzhiyun 		mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n",
2388*4882a593Smuzhiyun 			dev_name(mpp->dev), task->core_id, task->session->pid,
2389*4882a593Smuzhiyun 			task->session->index, ktime_us_delta(end, task->start));
2390*4882a593Smuzhiyun 	}
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 	return 0;
2393*4882a593Smuzhiyun }
2394*4882a593Smuzhiyun 
2395*4882a593Smuzhiyun int mpp_time_diff_with_hw_time(struct mpp_task *task, u32 clk_hz)
2396*4882a593Smuzhiyun {
2397*4882a593Smuzhiyun 	if (mpp_debug_unlikely(DEBUG_TIMING)) {
2398*4882a593Smuzhiyun 		ktime_t end;
2399*4882a593Smuzhiyun 		struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun 		end = ktime_get();
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 		if (clk_hz)
2404*4882a593Smuzhiyun 			mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us hw %d us\n",
2405*4882a593Smuzhiyun 				dev_name(mpp->dev), task->core_id, task->session->pid,
2406*4882a593Smuzhiyun 				task->session->index, ktime_us_delta(end, task->start),
2407*4882a593Smuzhiyun 				task->hw_cycles / (clk_hz / 1000000));
2408*4882a593Smuzhiyun 		else
2409*4882a593Smuzhiyun 			mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n",
2410*4882a593Smuzhiyun 				dev_name(mpp->dev), task->core_id, task->session->pid,
2411*4882a593Smuzhiyun 				task->session->index, ktime_us_delta(end, task->start));
2412*4882a593Smuzhiyun 	}
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun 	return 0;
2415*4882a593Smuzhiyun }
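/*
 * Editor's note (not part of the original source): the four helpers above
 * only act when DEBUG_TIMING is enabled. The intended call pattern is to
 * call mpp_time_record() when a task is handed to the hardware,
 * mpp_time_part_diff() at intermediate stages to log the time elapsed
 * since the previous stage, and mpp_time_diff() or
 * mpp_time_diff_with_hw_time() once the task completes; the latter also
 * converts task->hw_cycles to microseconds using the supplied clock rate
 * in Hz.
 */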
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun #define LOG_TIMING(state, id, stage, time, base) \
2418*4882a593Smuzhiyun 	do { \
2419*4882a593Smuzhiyun 		if (test_bit(id, &state)) \
2420*4882a593Smuzhiyun 			pr_info("timing: %-14s : %lld us\n", stage, ktime_us_delta(time, base)); \
2421*4882a593Smuzhiyun 		else \
2422*4882a593Smuzhiyun 			pr_info("timing: %-14s : invalid\n", stage); \
2423*4882a593Smuzhiyun 	} while (0)
2424*4882a593Smuzhiyun 
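/*
 * Editor's note (not part of the original source): mpp_task_dump_timing()
 * prints every per-stage timestamp of a task relative to its creation
 * time; stages whose TASK_TIMING_* bit is not set in task->state are
 * reported as "invalid" by the LOG_TIMING() macro above. It is typically
 * called from a timeout or debug path, with time_diff giving the
 * caller-supplied age of the task in microseconds at the moment of the
 * dump.
 */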
2425*4882a593Smuzhiyun void mpp_task_dump_timing(struct mpp_task *task, s64 time_diff)
2426*4882a593Smuzhiyun {
2427*4882a593Smuzhiyun 	ktime_t s = task->on_create;
2428*4882a593Smuzhiyun 	unsigned long state = task->state;
2429*4882a593Smuzhiyun 
2430*4882a593Smuzhiyun 	pr_info("task %d dump timing at %lld us:\n", task->task_id, time_diff);
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun 	pr_info("timing: %-14s : %lld us\n", "create", ktime_to_us(s));
2433*4882a593Smuzhiyun 	LOG_TIMING(state, TASK_TIMING_CREATE_END, "create end",     task->on_create_end, s);
2434*4882a593Smuzhiyun 	LOG_TIMING(state, TASK_TIMING_PENDING,    "pending",        task->on_pending, s);
2435*4882a593Smuzhiyun 	LOG_TIMING(state, TASK_TIMING_RUN,        "run",            task->on_run, s);
2436*4882a593Smuzhiyun 	LOG_TIMING(state, TASK_TIMING_TO_SCHED,   "timeout start",  task->on_sched_timeout, s);
2437*4882a593Smuzhiyun 	LOG_TIMING(state, TASK_TIMING_RUN_END,    "run end",        task->on_run_end, s);
2438*4882a593Smuzhiyun 	LOG_TIMING(state, TASK_TIMING_IRQ,        "irq",            task->on_irq, s);
2439*4882a593Smuzhiyun 	LOG_TIMING(state, TASK_TIMING_TO_CANCEL,  "timeout cancel", task->on_cancel_timeout, s);
2440*4882a593Smuzhiyun 	LOG_TIMING(state, TASK_TIMING_ISR,        "isr",            task->on_isr, s);
2441*4882a593Smuzhiyun 	LOG_TIMING(state, TASK_TIMING_FINISH,     "finish",         task->on_finish, s);
2442*4882a593Smuzhiyun }
2443*4882a593Smuzhiyun 
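/*
 * Editor's note (not part of the original source): mpp_write_req() and
 * mpp_read_req() below copy a contiguous index range between a register
 * shadow array and the hardware, one 32-bit register at a time, using the
 * relaxed MMIO accessors. mpp_write_req() skips en_idx, presumably so the
 * enable/start register can be written last, after all other registers are
 * in place, e.g.:
 *
 *	mpp_write_req(mpp, regs, start_idx, end_idx, en_idx);
 *	wmb();
 *	mpp_write_relaxed(mpp, en_idx * sizeof(u32), regs[en_idx]);
 *
 * (the explicit barrier in this sketch is an assumption; individual
 * drivers may use a non-relaxed write for the enable register instead).
 */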
2444*4882a593Smuzhiyun int mpp_write_req(struct mpp_dev *mpp, u32 *regs,
2445*4882a593Smuzhiyun 		  u32 start_idx, u32 end_idx, u32 en_idx)
2446*4882a593Smuzhiyun {
2447*4882a593Smuzhiyun 	int i;
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun 	for (i = start_idx; i < end_idx; i++) {
2450*4882a593Smuzhiyun 		if (i == en_idx)
2451*4882a593Smuzhiyun 			continue;
2452*4882a593Smuzhiyun 		mpp_write_relaxed(mpp, i * sizeof(u32), regs[i]);
2453*4882a593Smuzhiyun 	}
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun 	return 0;
2456*4882a593Smuzhiyun }
2457*4882a593Smuzhiyun 
2458*4882a593Smuzhiyun int mpp_read_req(struct mpp_dev *mpp, u32 *regs,
2459*4882a593Smuzhiyun 		 u32 start_idx, u32 end_idx)
2460*4882a593Smuzhiyun {
2461*4882a593Smuzhiyun 	int i;
2462*4882a593Smuzhiyun 
2463*4882a593Smuzhiyun 	for (i = start_idx; i < end_idx; i++)
2464*4882a593Smuzhiyun 		regs[i] = mpp_read_relaxed(mpp, i * sizeof(u32));
2465*4882a593Smuzhiyun 
2466*4882a593Smuzhiyun 	return 0;
2467*4882a593Smuzhiyun }
2468*4882a593Smuzhiyun 
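/*
 * Editor's note (not part of the original source): mpp_get_clk_info()
 * looks the clock up by its "clock-names" entry and, using the same index,
 * reads optional per-clock rates from the "rockchip,normal-rates" and
 * "rockchip,advanced-rates" properties. A device-tree node would describe
 * this roughly as follows (the clock names and rate values here are
 * hypothetical):
 *
 *	clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
 *	clock-names = "aclk_vcodec", "hclk_vcodec";
 *	rockchip,normal-rates = <297000000>, <0>;
 */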
2469*4882a593Smuzhiyun int mpp_get_clk_info(struct mpp_dev *mpp,
2470*4882a593Smuzhiyun 		     struct mpp_clk_info *clk_info,
2471*4882a593Smuzhiyun 		     const char *name)
2472*4882a593Smuzhiyun {
2473*4882a593Smuzhiyun 	int index = of_property_match_string(mpp->dev->of_node,
2474*4882a593Smuzhiyun 					     "clock-names", name);
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	if (index < 0)
2477*4882a593Smuzhiyun 		return -EINVAL;
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 	clk_info->clk = devm_clk_get(mpp->dev, name);
2480*4882a593Smuzhiyun 	of_property_read_u32_index(mpp->dev->of_node,
2481*4882a593Smuzhiyun 				   "rockchip,normal-rates",
2482*4882a593Smuzhiyun 				   index,
2483*4882a593Smuzhiyun 				   &clk_info->normal_rate_hz);
2484*4882a593Smuzhiyun 	of_property_read_u32_index(mpp->dev->of_node,
2485*4882a593Smuzhiyun 				   "rockchip,advanced-rates",
2486*4882a593Smuzhiyun 				   index,
2487*4882a593Smuzhiyun 				   &clk_info->advanced_rate_hz);
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun 	return 0;
2490*4882a593Smuzhiyun }
2491*4882a593Smuzhiyun 
2492*4882a593Smuzhiyun int mpp_set_clk_info_rate_hz(struct mpp_clk_info *clk_info,
2493*4882a593Smuzhiyun 			     enum MPP_CLOCK_MODE mode,
2494*4882a593Smuzhiyun 			     unsigned long val)
2495*4882a593Smuzhiyun {
2496*4882a593Smuzhiyun 	if (!clk_info->clk || !val)
2497*4882a593Smuzhiyun 		return 0;
2498*4882a593Smuzhiyun 
2499*4882a593Smuzhiyun 	switch (mode) {
2500*4882a593Smuzhiyun 	case CLK_MODE_DEBUG:
2501*4882a593Smuzhiyun 		clk_info->debug_rate_hz = val;
2502*4882a593Smuzhiyun 	break;
2503*4882a593Smuzhiyun 	case CLK_MODE_REDUCE:
2504*4882a593Smuzhiyun 		clk_info->reduce_rate_hz = val;
2505*4882a593Smuzhiyun 	break;
2506*4882a593Smuzhiyun 	case CLK_MODE_NORMAL:
2507*4882a593Smuzhiyun 		clk_info->normal_rate_hz = val;
2508*4882a593Smuzhiyun 	break;
2509*4882a593Smuzhiyun 	case CLK_MODE_ADVANCED:
2510*4882a593Smuzhiyun 		clk_info->advanced_rate_hz = val;
2511*4882a593Smuzhiyun 	break;
2512*4882a593Smuzhiyun 	case CLK_MODE_DEFAULT:
2513*4882a593Smuzhiyun 		clk_info->default_rate_hz = val;
2514*4882a593Smuzhiyun 	break;
2515*4882a593Smuzhiyun 	default:
2516*4882a593Smuzhiyun 		mpp_err("error mode %d\n", mode);
2517*4882a593Smuzhiyun 	break;
2518*4882a593Smuzhiyun 	}
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	return 0;
2521*4882a593Smuzhiyun }
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun #define MPP_REDUCE_RATE_HZ (50 * MHZ)
2524*4882a593Smuzhiyun 
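/*
 * Editor's note (not part of the original source): rate selection below is
 * strictly prioritised. A non-zero debug_rate_hz overrides every mode;
 * otherwise CLK_MODE_REDUCE falls back to the 50 MHz MPP_REDUCE_RATE_HZ,
 * CLK_MODE_NORMAL falls back to the default rate, and CLK_MODE_ADVANCED
 * falls back first to the normal rate and then to the default rate.
 */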
2525*4882a593Smuzhiyun unsigned long mpp_get_clk_info_rate_hz(struct mpp_clk_info *clk_info,
2526*4882a593Smuzhiyun 				       enum MPP_CLOCK_MODE mode)
2527*4882a593Smuzhiyun {
2528*4882a593Smuzhiyun 	unsigned long clk_rate_hz = 0;
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 	if (!clk_info->clk)
2531*4882a593Smuzhiyun 		return 0;
2532*4882a593Smuzhiyun 
2533*4882a593Smuzhiyun 	if (clk_info->debug_rate_hz)
2534*4882a593Smuzhiyun 		return clk_info->debug_rate_hz;
2535*4882a593Smuzhiyun 
2536*4882a593Smuzhiyun 	switch (mode) {
2537*4882a593Smuzhiyun 	case CLK_MODE_REDUCE: {
2538*4882a593Smuzhiyun 		if (clk_info->reduce_rate_hz)
2539*4882a593Smuzhiyun 			clk_rate_hz = clk_info->reduce_rate_hz;
2540*4882a593Smuzhiyun 		else
2541*4882a593Smuzhiyun 			clk_rate_hz = MPP_REDUCE_RATE_HZ;
2542*4882a593Smuzhiyun 	} break;
2543*4882a593Smuzhiyun 	case CLK_MODE_NORMAL: {
2544*4882a593Smuzhiyun 		if (clk_info->normal_rate_hz)
2545*4882a593Smuzhiyun 			clk_rate_hz = clk_info->normal_rate_hz;
2546*4882a593Smuzhiyun 		else
2547*4882a593Smuzhiyun 			clk_rate_hz = clk_info->default_rate_hz;
2548*4882a593Smuzhiyun 	} break;
2549*4882a593Smuzhiyun 	case CLK_MODE_ADVANCED: {
2550*4882a593Smuzhiyun 		if (clk_info->advanced_rate_hz)
2551*4882a593Smuzhiyun 			clk_rate_hz = clk_info->advanced_rate_hz;
2552*4882a593Smuzhiyun 		else if (clk_info->normal_rate_hz)
2553*4882a593Smuzhiyun 			clk_rate_hz = clk_info->normal_rate_hz;
2554*4882a593Smuzhiyun 		else
2555*4882a593Smuzhiyun 			clk_rate_hz = clk_info->default_rate_hz;
2556*4882a593Smuzhiyun 	} break;
2557*4882a593Smuzhiyun 	case CLK_MODE_DEFAULT:
2558*4882a593Smuzhiyun 	default: {
2559*4882a593Smuzhiyun 		clk_rate_hz = clk_info->default_rate_hz;
2560*4882a593Smuzhiyun 	} break;
2561*4882a593Smuzhiyun 	}
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 	return clk_rate_hz;
2564*4882a593Smuzhiyun }
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun int mpp_clk_set_rate(struct mpp_clk_info *clk_info,
2567*4882a593Smuzhiyun 		     enum MPP_CLOCK_MODE mode)
2568*4882a593Smuzhiyun {
2569*4882a593Smuzhiyun 	unsigned long clk_rate_hz;
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 	if (!clk_info->clk)
2572*4882a593Smuzhiyun 		return -EINVAL;
2573*4882a593Smuzhiyun 
2574*4882a593Smuzhiyun 	clk_rate_hz = mpp_get_clk_info_rate_hz(clk_info, mode);
2575*4882a593Smuzhiyun 	if (clk_rate_hz) {
2576*4882a593Smuzhiyun 		clk_info->used_rate_hz = clk_rate_hz;
2577*4882a593Smuzhiyun 		clk_set_rate(clk_info->clk, clk_rate_hz);
2578*4882a593Smuzhiyun 		clk_info->real_rate_hz = clk_get_rate(clk_info->clk);
2579*4882a593Smuzhiyun 	}
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	return 0;
2582*4882a593Smuzhiyun }
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
2585*4882a593Smuzhiyun static int fops_show_u32(struct seq_file *file, void *v)
2586*4882a593Smuzhiyun {
2587*4882a593Smuzhiyun 	u32 *val = file->private;
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun 	seq_printf(file, "%d\n", *val);
2590*4882a593Smuzhiyun 
2591*4882a593Smuzhiyun 	return 0;
2592*4882a593Smuzhiyun }
2593*4882a593Smuzhiyun 
2594*4882a593Smuzhiyun static int fops_open_u32(struct inode *inode, struct file *file)
2595*4882a593Smuzhiyun {
2596*4882a593Smuzhiyun 	return single_open(file, fops_show_u32, PDE_DATA(inode));
2597*4882a593Smuzhiyun }
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun static ssize_t fops_write_u32(struct file *file, const char __user *buf,
2600*4882a593Smuzhiyun 			      size_t count, loff_t *ppos)
2601*4882a593Smuzhiyun {
2602*4882a593Smuzhiyun 	int rc;
2603*4882a593Smuzhiyun 	struct seq_file *priv = file->private_data;
2604*4882a593Smuzhiyun 
2605*4882a593Smuzhiyun 	rc = kstrtou32_from_user(buf, count, 0, priv->private);
2606*4882a593Smuzhiyun 	if (rc)
2607*4882a593Smuzhiyun 		return rc;
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun 	return count;
2610*4882a593Smuzhiyun }
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun static const struct proc_ops procfs_fops_u32 = {
2613*4882a593Smuzhiyun 	.proc_open = fops_open_u32,
2614*4882a593Smuzhiyun 	.proc_read = seq_read,
2615*4882a593Smuzhiyun 	.proc_release = single_release,
2616*4882a593Smuzhiyun 	.proc_write = fops_write_u32,
2617*4882a593Smuzhiyun };
2618*4882a593Smuzhiyun 
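/*
 * Editor's note (not part of the original source): mpp_procfs_create_u32()
 * creates a procfs entry backed by a plain u32: reads go through the
 * single_open()/seq_read() path in fops_show_u32(), and writes parse the
 * user buffer with kstrtou32_from_user() straight into the backing
 * variable. mpp_procfs_create_common() below uses it to expose
 * mpp->disable and mpp->timing_check as "disable_work" and
 * "timing_check".
 */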
2619*4882a593Smuzhiyun struct proc_dir_entry *
2620*4882a593Smuzhiyun mpp_procfs_create_u32(const char *name, umode_t mode,
2621*4882a593Smuzhiyun 		      struct proc_dir_entry *parent, void *data)
2622*4882a593Smuzhiyun {
2623*4882a593Smuzhiyun 	return proc_create_data(name, mode, parent, &procfs_fops_u32, data);
2624*4882a593Smuzhiyun }
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun void mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp)
2627*4882a593Smuzhiyun {
2628*4882a593Smuzhiyun 	mpp_procfs_create_u32("disable_work", 0644, parent, &mpp->disable);
2629*4882a593Smuzhiyun 	mpp_procfs_create_u32("timing_check", 0644, parent, &mpp->timing_check);
2630*4882a593Smuzhiyun }
2631*4882a593Smuzhiyun #endif
2632