xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/mpp/mpp_av1dec.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2021 Fuzhou Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Ding Wei, leo.ding@rock-chips.com
7  *
8  */
9 
10 #define pr_fmt(fmt) "mpp_av1dec: " fmt
11 
12 #include <asm/cacheflush.h>
13 #include <linux/clk.h>
14 #include <linux/clk/clk-conf.h>
15 #include <linux/delay.h>
16 #include <linux/iopoll.h>
17 #include <linux/interrupt.h>
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/of_platform.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/pm_domain.h>
24 #include <linux/slab.h>
25 #include <linux/uaccess.h>
26 #include <linux/regmap.h>
27 #include <linux/proc_fs.h>
28 #include <soc/rockchip/pm_domains.h>
29 
30 #include "mpp_debug.h"
31 #include "mpp_common.h"
32 #include "mpp_iommu.h"
33 
34 #define AV1DEC_DRIVER_NAME		"mpp_av1dec"
35 
36 #define	AV1DEC_SESSION_MAX_BUFFERS		40
37 
38 /* REG_DEC_INT, bits for interrupt */
39 #define	AV1DEC_INT_PIC_INF		BIT(24)
40 #define	AV1DEC_INT_TIMEOUT		BIT(18)
41 #define	AV1DEC_INT_SLICE		BIT(17)
42 #define	AV1DEC_INT_STRM_ERROR		BIT(16)
43 #define	AV1DEC_INT_ASO_ERROR		BIT(15)
44 #define	AV1DEC_INT_BUF_EMPTY		BIT(14)
45 #define	AV1DEC_INT_BUS_ERROR		BIT(13)
46 #define	AV1DEC_DEC_INT			BIT(12)
47 #define	AV1DEC_DEC_INT_RAW		BIT(8)
48 #define	AV1DEC_DEC_IRQ_DIS		BIT(4)
49 #define	AV1DEC_DEC_START		BIT(0)
50 
51 #define MPP_ALIGN(x, a)         (((x) + (a) - 1) & ~((a) - 1))
52 /* REG_DEC_EN, bit for gate */
53 #define	AV1DEC_CLOCK_GATE_EN		BIT(10)
54 
55 #define to_av1dec_info(info)		\
56 		container_of(info, struct av1dec_hw_info, hw)
57 #define to_av1dec_task(ctx)		\
58 		container_of(ctx, struct av1dec_task, mpp_task)
59 #define to_av1dec_dev(dev)		\
60 		container_of(dev, struct av1dec_dev, mpp)
61 
62 /* helper macros */
63 #define MPP_GET_BITS(v, p, b)	(((v) >> (p)) & ((1 << (b)) - 1))
64 #define MPP_BASE_TO_IDX(a)	((a) / sizeof(u32))
65 
66 enum AV1DEC_CLASS_TYPE {
67 	AV1DEC_CLASS_VCD	= 0,
68 	AV1DEC_CLASS_CACHE	= 1,
69 	AV1DEC_CLASS_AFBC	= 2,
70 	AV1DEC_CLASS_BUTT,
71 };
72 
73 enum av1dec_trans_type {
74 	AV1DEC_TRANS_BASE	= 0x0000,
75 
76 	AV1DEC_TRANS_VCD	= AV1DEC_TRANS_BASE + 0,
77 	AV1DEC_TRANS_CACHE	= AV1DEC_TRANS_BASE + 1,
78 	AV1DEC_TRANS_AFBC	= AV1DEC_TRANS_BASE + 2,
79 	AV1DEC_TRANS_BUTT,
80 };
81 
82 struct av1dec_hw_info {
83 	struct mpp_hw_info hw;
84 	/* register range by class */
85 	u32 reg_class_num;
86 	struct {
87 		u32 base_s;
88 		u32 base_e;
89 	} reg_class[AV1DEC_CLASS_BUTT];
90 	/* fd translate for class */
91 	u32 trans_class_num;
92 	struct {
93 		u32 class;
94 		u32 trans_fmt;
95 	} trans_class[AV1DEC_TRANS_BUTT];
96 
97 	/* interrupt config register */
98 	int int_base;
99 	/* enable hardware register */
100 	int en_base;
101 	/* status register */
102 	int sta_base;
103 	/* clear irq register */
104 	int clr_base;
105 	/* stream register */
106 	int strm_base;
107 
108 	u32 err_mask;
109 };
110 
111 struct av1dec_task {
112 	struct mpp_task mpp_task;
113 
114 	struct av1dec_hw_info *hw_info;
115 	/* for malloc register data buffer */
116 	u32 *reg_data;
117 	/* class register */
118 	struct {
119 		u32 valid;
120 		u32 base;
121 		u32 *data;
122 		/* offset base reg_data */
123 		u32 off;
124 		/* length for class */
125 		u32 len;
126 	} reg_class[AV1DEC_CLASS_BUTT];
127 	/* register offset info */
128 	struct reg_offset_info off_inf;
129 
130 	enum MPP_CLOCK_MODE clk_mode;
131 	u32 irq_status;
132 	/* req for current task */
133 	u32 w_req_cnt;
134 	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
135 	u32 r_req_cnt;
136 	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
137 };
138 
139 struct av1dec_dev {
140 	struct mpp_dev mpp;
141 	struct av1dec_hw_info *hw_info;
142 
143 	struct mpp_clk_info aclk_info;
144 	struct mpp_clk_info hclk_info;
145 	u32 default_max_load;
146 #ifdef CONFIG_PROC_FS
147 	struct proc_dir_entry *procfs;
148 #endif
149 	struct reset_control *rst_a;
150 	struct reset_control *rst_h;
151 
152 	void __iomem *reg_base[AV1DEC_CLASS_BUTT];
153 	int irq[AV1DEC_CLASS_BUTT];
154 };
155 
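/*
 * Register layout: the decoder exposes three register classes. The VCD
 * core sits at 0x0000..0x07fc, the L2 cache at 0x10000..0x10294 and the
 * AFBC encoder at 0x20000..0x2034c. Interrupt enable, start, status and
 * clear are all handled through VCD register 1 (offset 0x0004).
 */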
156 static struct av1dec_hw_info av1dec_hw_info = {
157 	.hw = {
158 		.reg_num = 512,
159 		.reg_id = 0,
160 		.reg_en = 1,
161 		.reg_start = 1,
162 		.reg_end = 319,
163 	},
164 	.reg_class_num = 3,
165 	.reg_class[AV1DEC_CLASS_VCD] = {
166 		.base_s = 0x0000,
167 		.base_e = 0x07fc,
168 	},
169 	.reg_class[AV1DEC_CLASS_CACHE] = {
170 		.base_s = 0x10000,
171 		.base_e = 0x10294,
172 	},
173 	.reg_class[AV1DEC_CLASS_AFBC] = {
174 		.base_s = 0x20000,
175 		.base_e = 0x2034c,
176 	},
177 	.trans_class_num = AV1DEC_TRANS_BUTT,
178 	.trans_class[AV1DEC_CLASS_VCD] = {
179 		.class = AV1DEC_CLASS_VCD,
180 		.trans_fmt = AV1DEC_TRANS_VCD,
181 	},
182 	.trans_class[AV1DEC_CLASS_CACHE] = {
183 		.class = AV1DEC_CLASS_CACHE,
184 		.trans_fmt = AV1DEC_TRANS_CACHE,
185 	},
186 	.trans_class[AV1DEC_CLASS_AFBC] = {
187 		.class = AV1DEC_CLASS_AFBC,
188 		.trans_fmt = AV1DEC_TRANS_AFBC,
189 	},
190 	.int_base = 0x0004,
191 	.en_base = 0x0004,
192 	.sta_base = 0x0004,
193 	.clr_base = 0x0004,
194 	.strm_base = 0x02a4,
195 	.err_mask = 0x7e000,
196 };
197 
198 /*
199  * file handle translate information for v2
200  */
201 static const u16 trans_tbl_av1_vcd[] = {
202 	65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91,
203 	93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115,
204 	117, 133, 135, 137, 139, 141, 143, 145, 147,
205 	167, 169, 171, 173, 175, 177, 179, 183, 190, 192, 194,
206 	196, 198, 200, 202, 204, 224, 226, 228, 230, 232, 234,
207 	236, 238, 326, 328, 339, 341, 348, 350, 505, 507
208 };
209 
210 static const u16 trans_tbl_av1_cache[] = {
211 	13, 18, 23, 28, 33, 38, 43, 48, 53, 58, 63, 68, 73, 78, 83, 88,
212 	134, 135, 138, 139, 142, 143, 146, 147,
213 };
214 
215 static const u16 trans_tbl_av1_afbc[] = {
216 	32, 33, 34, 35, 48, 49, 50, 51, 96, 97, 98, 99
217 };
218 
219 static struct mpp_trans_info trans_av1dec[] = {
220 	[AV1DEC_TRANS_VCD] = {
221 		.count = ARRAY_SIZE(trans_tbl_av1_vcd),
222 		.table = trans_tbl_av1_vcd,
223 	},
224 	[AV1DEC_TRANS_CACHE] = {
225 		.count = ARRAY_SIZE(trans_tbl_av1_cache),
226 		.table = trans_tbl_av1_cache,
227 	},
228 	[AV1DEC_TRANS_AFBC] = {
229 		.count = ARRAY_SIZE(trans_tbl_av1_afbc),
230 		.table = trans_tbl_av1_afbc,
231 	},
232 };
233 
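/*
 * Check whether the register range described by @req overlaps the
 * register window of the given hardware class.
 */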
234 static bool req_over_class(struct mpp_request *req,
235 			   struct av1dec_task *task, int class)
236 {
237 	bool ret;
238 	u32 base_s, base_e, req_e;
239 	struct av1dec_hw_info *hw = task->hw_info;
240 
241 	if (class >= hw->reg_class_num)
242 		return false;
243 
244 	base_s = hw->reg_class[class].base_s;
245 	base_e = hw->reg_class[class].base_e;
246 	req_e = req->offset + req->size - sizeof(u32);
247 
248 	ret = (req->offset <= base_e && req_e >= base_s) ? true : false;
249 
250 	return ret;
251 }
252 
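/*
 * Allocate one contiguous buffer holding a CPU copy of the registers of
 * every class and record each class's base, offset and length.
 */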
253 static int av1dec_alloc_reg_class(struct av1dec_task *task)
254 {
255 	int i;
256 	u32 data_size;
257 	struct av1dec_hw_info *hw = task->hw_info;
258 
259 	data_size = 0;
260 	for (i = 0; i < hw->reg_class_num; i++) {
261 		u32 base_s = hw->reg_class[i].base_s;
262 		u32 base_e = hw->reg_class[i].base_e;
263 
264 		task->reg_class[i].base = base_s;
265 		task->reg_class[i].off = data_size;
266 		task->reg_class[i].len = base_e - base_s + sizeof(u32);
267 		data_size += task->reg_class[i].len;
268 	}
269 
270 	task->reg_data = kzalloc(data_size, GFP_KERNEL);
271 	if (!task->reg_data)
272 		return -ENOMEM;
273 
274 	for (i = 0; i < hw->reg_class_num; i++)
275 		task->reg_class[i].data = task->reg_data + (task->reg_class[i].off / sizeof(u32));
276 
277 	return 0;
278 }
279 
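/*
 * Clip @req_in against the register window of @class and return the
 * clipped offset, size and data pointer in @req_out.
 */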
280 static int av1dec_update_req(struct av1dec_task *task, int class,
281 			     struct mpp_request *req_in,
282 			     struct mpp_request *req_out)
283 {
284 	u32 base_s, base_e, req_e, s, e;
285 	struct av1dec_hw_info *hw = task->hw_info;
286 
287 	if (class >= hw->reg_class_num)
288 		return -EINVAL;
289 
290 	base_s = hw->reg_class[class].base_s;
291 	base_e = hw->reg_class[class].base_e;
292 	req_e = req_in->offset + req_in->size - sizeof(u32);
293 	s = max(req_in->offset, base_s);
294 	e = min(req_e, base_e);
295 
296 	req_out->offset = s;
297 	req_out->size = e - s + sizeof(u32);
298 	req_out->data = (u8 *)req_in->data + (s - req_in->offset);
299 	mpp_debug(DEBUG_TASK_INFO, "req_out->offset=%08x, req_out->size=%d\n",
300 		  req_out->offset, req_out->size);
301 
302 	return 0;
303 }
304 
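/*
 * Split the user messages into per-class read/write requests and copy
 * the write payloads into the task's register buffer.
 */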
305 static int av1dec_extract_task_msg(struct av1dec_task *task,
306 				   struct mpp_task_msgs *msgs)
307 {
308 	int ret;
309 	u32 i;
310 	struct mpp_request *req;
311 	struct av1dec_hw_info *hw = task->hw_info;
312 
313 	mpp_debug_enter();
314 
315 	mpp_debug(DEBUG_TASK_INFO, "req_cnt=%d, set_cnt=%d, poll_cnt=%d, reg_class=%d\n",
316 		msgs->req_cnt, msgs->set_cnt, msgs->poll_cnt, hw->reg_class_num);
317 
318 	for (i = 0; i < msgs->req_cnt; i++) {
319 		req = &msgs->reqs[i];
320 		mpp_debug(DEBUG_TASK_INFO, "msg: cmd %08x, offset %08x, size %d\n",
321 			req->cmd, req->offset, req->size);
322 		if (!req->size)
323 			continue;
324 
325 		switch (req->cmd) {
326 		case MPP_CMD_SET_REG_WRITE: {
327 			u32 class;
328 			u32 base, *regs;
329 			struct mpp_request *wreq;
330 
331 			for (class = 0; class < hw->reg_class_num; class++) {
332 				if (!req_over_class(req, task, class))
333 					continue;
334 				mpp_debug(DEBUG_TASK_INFO, "found write_calss %d\n", class);
335 				wreq = &task->w_reqs[task->w_req_cnt];
336 				av1dec_update_req(task, class, req, wreq);
337 
338 				base = task->reg_class[class].base;
339 				regs = (u32 *)task->reg_class[class].data;
340 				regs += MPP_BASE_TO_IDX(req->offset - base);
341 				if (copy_from_user(regs, wreq->data, wreq->size)) {
342 					mpp_err("copy_from_user fail, offset %08x\n", wreq->offset);
343 					ret = -EIO;
344 					goto fail;
345 				}
346 				task->w_req_cnt++;
347 			}
348 		} break;
349 		case MPP_CMD_SET_REG_READ: {
350 			u32 class;
351 			struct mpp_request *rreq;
352 
353 			for (class = 0; class < hw->reg_class_num; class++) {
354 				if (!req_over_class(req, task, class))
355 					continue;
356 				mpp_debug(DEBUG_TASK_INFO, "found read_calss %d\n", class);
357 				rreq = &task->r_reqs[task->r_req_cnt];
358 				av1dec_update_req(task, class, req, rreq);
359 				task->r_req_cnt++;
360 			}
361 		} break;
362 		case MPP_CMD_SET_REG_ADDR_OFFSET: {
363 			mpp_extract_reg_offset_info(&task->off_inf, req);
364 		} break;
365 		default:
366 			break;
367 		}
368 	}
369 	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt=%d, r_req_cnt=%d\n",
370 		  task->w_req_cnt, task->r_req_cnt);
371 
372 	mpp_debug_leave();
373 	return 0;
374 
375 fail:
376 	mpp_debug_leave();
377 	return ret;
378 }
379 
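/*
 * Allocate a decoder task: set up the per-class register buffers,
 * extract the user requests and translate buffer fds found in the
 * registers into device addresses.
 */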
380 static void *av1dec_alloc_task(struct mpp_session *session,
381 			       struct mpp_task_msgs *msgs)
382 {
383 	int ret;
384 	u32 i, j, k;
385 	struct mpp_task *mpp_task = NULL;
386 	struct av1dec_task *task = NULL;
387 	struct mpp_dev *mpp = session->mpp;
388 
389 	mpp_debug_enter();
390 
391 	task = kzalloc(sizeof(*task), GFP_KERNEL);
392 	if (!task)
393 		return NULL;
394 
395 	mpp_task = &task->mpp_task;
396 	mpp_task_init(session, mpp_task);
397 	mpp_task->hw_info = mpp->var->hw_info;
398 	task->hw_info = to_av1dec_info(mpp_task->hw_info);
399 
400 	/* alloc reg data for task */
401 	ret = av1dec_alloc_reg_class(task);
402 	if (ret)
403 		goto free_task;
404 	mpp_task->reg = task->reg_class[0].data;
405 	/* extract reqs for current task */
406 	ret = av1dec_extract_task_msg(task, msgs);
407 	if (ret)
408 		goto free_reg_class;
409 
410 	/* process fd in register */
411 	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
412 		int cnt;
413 		const u16 *tbl;
414 		u32 offset;
415 		struct av1dec_hw_info *hw = task->hw_info;
416 
417 		for (i = 0; i < task->w_req_cnt; i++) {
418 			struct mpp_request *req = &task->w_reqs[i];
419 
420 			for (k = 0; k < hw->trans_class_num; k++) {
421 				u32 class = hw->trans_class[k].class;
422 				u32 fmt = hw->trans_class[k].trans_fmt;
423 				u32 *reg = task->reg_class[class].data;
424 				u32 base_idx = MPP_BASE_TO_IDX(task->reg_class[class].base);
425 
426 				if (!req_over_class(req, task, k))
427 					continue;
428 				mpp_debug(DEBUG_TASK_INFO, "class=%d, base_idx=%d\n",
429 					  class, base_idx);
430 				if (!reg)
431 					continue;
432 
433 				ret = mpp_translate_reg_address(session, mpp_task, fmt, reg, NULL);
434 				if (ret)
435 					goto fail;
436 
437 				cnt = mpp->var->trans_info[fmt].count;
438 				tbl = mpp->var->trans_info[fmt].table;
439 				for (j = 0; j < cnt; j++) {
440 					offset = mpp_query_reg_offset_info(&task->off_inf,
441 									tbl[j] + base_idx);
442 					mpp_debug(DEBUG_IOMMU,
443 						"reg[%d] + offset %d\n", tbl[j] + base_idx, offset);
444 					reg[tbl[j]] += offset;
445 				}
446 			}
447 		}
448 	}
449 	task->clk_mode = CLK_MODE_NORMAL;
450 
451 	mpp_debug_leave();
452 
453 	return mpp_task;
454 
455 fail:
456 	mpp_task_dump_mem_region(mpp, mpp_task);
457 	mpp_task_dump_reg(mpp, mpp_task);
458 	mpp_task_finalize(session, mpp_task);
459 free_reg_class:
460 	kfree(task->reg_data);
461 free_task:
462 	kfree(task);
463 
464 	return NULL;
465 }
466 #define AV1_PP_CONFIG_INDEX	321
467 #define AV1_PP_TILE_SIZE	GENMASK_ULL(10, 9)
468 #define AV1_PP_TILE_16X16	BIT(10)
469 
470 #define AV1_PP_OUT_LUMA_ADR_INDEX	326
471 #define AV1_PP_OUT_CHROMA_ADR_INDEX	328
472 
473 #define AV1_L2_CACHE_SHAPER_CTRL	0x20
474 #define AV1_L2_CACHE_SHAPER_EN		BIT(0)
475 #define AV1_L2_CACHE_INT_MASK		0x30
476 #define AV1_L2_CACHE_PP0_Y_CONFIG0	0x84
477 #define AV1_L2_CACHE_PP0_Y_CONFIG2	0x8c
478 #define AV1_L2_CACHE_PP0_Y_CONFIG3	0x90
479 #define AV1_L2_CACHE_PP0_U_CONFIG0	0x98
480 #define AV1_L2_CACHE_PP0_U_CONFIG2	0xa0
481 #define AV1_L2_CACHE_PP0_U_CONFIG3	0xa4
482 
483 #define AV1_L2_CACHE_RD_ONLY_CTRL	0x204
484 #define AV1_L2_CACHE_RD_ONLY_CONFIG	0x208
485 
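/*
 * Program the L2 cache shaper for the post-processor luma and chroma
 * outputs. The cache is only configured when the PP output is not AFBC
 * (16x16 tiles), and caching of all reads is skipped for multi-tile
 * streams.
 */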
486 static int av1dec_set_l2_cache(struct av1dec_dev *dec, struct av1dec_task *task)
487 {
488 	int val;
489 	u32 *regs = (u32 *)task->reg_class[0].data;
490 	u32 width = (regs[4] >> 19) * 8;
491 	u32 height = ((regs[4] >> 6) & 0x1fff) * 8;
492 	u32 pixel_width = (((regs[322]) >> 27) & 0x1F) == 1 ? 8 : 16;
493 	u32 pre_fetch_height = 136;
494 	u32 max_h;
495 	u32 line_cnt;
496 	u32 line_size;
497 	u32 line_stride;
498 
499 	/* channel 4, PPU0_Y Configuration */
500 	/* The shaper/L2 cache cannot be used with AFBC output, and AFBC
501 	 * output is always 16x16 tiles, so skip the cache setup in that case.
502 	 */
503 	if ((regs[AV1_PP_CONFIG_INDEX] & AV1_PP_TILE_SIZE) != AV1_PP_TILE_16X16) {
504 		line_size = MPP_ALIGN(MPP_ALIGN(width * pixel_width, 8) / 8, 16);
505 		line_stride = MPP_ALIGN(MPP_ALIGN(width * pixel_width, 8) / 8, 16) >> 4;
506 		line_cnt = height;
507 		max_h = pre_fetch_height;
508 
509 		writel_relaxed(regs[AV1_PP_OUT_LUMA_ADR_INDEX] + 0x1,
510 			       dec->reg_base[AV1DEC_CLASS_CACHE] + AV1_L2_CACHE_PP0_Y_CONFIG0);
511 		val = line_size | (line_stride << 16);
512 		writel_relaxed(val, dec->reg_base[AV1DEC_CLASS_CACHE] + AV1_L2_CACHE_PP0_Y_CONFIG2);
513 
514 		val = line_cnt | (max_h << 16);
515 		writel_relaxed(val, dec->reg_base[AV1DEC_CLASS_CACHE] + AV1_L2_CACHE_PP0_Y_CONFIG3);
516 
517 		/* channel 5, PPU0_U Configuration */
518 		line_size = MPP_ALIGN(MPP_ALIGN(width * pixel_width, 8) / 8, 16);
519 		line_stride = MPP_ALIGN(MPP_ALIGN(width * pixel_width, 8) / 8, 16) >> 4;
520 		line_cnt = height >> 1;
521 		max_h = pre_fetch_height >> 1;
522 
523 		writel_relaxed(regs[AV1_PP_OUT_CHROMA_ADR_INDEX] + 0x1,
524 			       dec->reg_base[AV1DEC_CLASS_CACHE] + AV1_L2_CACHE_PP0_U_CONFIG0);
525 		val = line_size | (line_stride << 16);
526 		writel_relaxed(val, dec->reg_base[AV1DEC_CLASS_CACHE] + AV1_L2_CACHE_PP0_U_CONFIG2);
527 
528 		val = line_cnt | (max_h << 16);
529 		writel_relaxed(val, dec->reg_base[AV1DEC_CLASS_CACHE] + AV1_L2_CACHE_PP0_U_CONFIG3);
530 		/* mask cache irq */
531 		writel_relaxed(0xf, dec->reg_base[AV1DEC_CLASS_CACHE] + AV1_L2_CACHE_INT_MASK);
532 
533 		/* shaper enable */
534 		writel_relaxed(AV1_L2_CACHE_SHAPER_EN,
535 			       dec->reg_base[AV1DEC_CLASS_CACHE] + AV1_L2_CACHE_SHAPER_CTRL);
536 
537 		/* do not enable the cache when multiple tiles are used */
538 		if (!(regs[10] & BIT(1)))
539 			/* cache all en */
540 			writel_relaxed(0x00000001, dec->reg_base[AV1DEC_CLASS_CACHE] +
541 				AV1_L2_CACHE_RD_ONLY_CONFIG);
542 		/* reorder_e and cache_e */
543 		writel_relaxed(0x00000081, dec->reg_base[AV1DEC_CLASS_CACHE] +
544 			       AV1_L2_CACHE_RD_ONLY_CTRL);
545 		/* make sure the cache setup is written before the decoder starts */
546 		wmb();
547 	}
548 
549 	return 0;
550 }
551 #define REG_CONTROL		0x20
552 #define REG_INTRENBL		0x34
553 #define REG_ACKNOWLEDGE		0x38
554 #define REG_FORMAT		0x100
555 #define REG_COMPRESSENABLE	0x340
556 #define REG_HEADERBASE		0x80
557 #define REG_PAYLOADBASE		0xC0
558 #define REG_INPUTBUFBASE	0x180
559 #define REG_INPUTBUFSTRIDE	0x200
560 #define REG_INPUTBUFSIZE	0x140
561 
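/*
 * Program the AFBC encoder when the post-processor output is 16x16
 * tiles: header/payload base, buffer size and stride are derived from
 * the PP output registers, then the setup is latched by toggling the
 * update bit in REG_CONTROL.
 */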
562 static int av1dec_set_afbc(struct av1dec_dev *dec, struct av1dec_task *task)
563 {
564 	u32 *regs = (u32 *)task->reg_class[0].data;
565 	u32 width = (regs[4] >> 19) * 8;
566 	u32 height = ((regs[4] >> 6) & 0x1fff) * 8;
567 	u32 pixel_width_y, pixel_width_c, pixel_width = 8;
568 	u32 vir_top  =  (((regs[503]) >> 16) & 0xf);
569 	u32 vir_left  =  (((regs[503]) >> 20) & 0xf);
570 	u32 vir_bottom = (((regs[503]) >> 24) & 0xf);
571 	u32 vir_right  =  (((regs[503]) >> 28) & 0xf);
572 	u32 fbc_format = 0;
573 	u32 fbc_stream_number = 0;
574 	u32 fbc_comp_en[2] = {0, 0};
575 	u32 pp_width_final[2] = {0, 0};
576 	u32 pp_height_final[2] = {0, 0};
577 	u32 pp_hdr_base[2] = {0, 0};
578 	u32 pp_payload_base[2] = {0, 0};
579 	u32 pp_input_base[2] = {0, 0};
580 	u32 pp_input_stride[2] = {0, 0};
581 	u32 bus_address;
582 	u32 i = 0;
583 
584 	pixel_width_y = ((regs[8] >> 6) & 0x3) + 8;
585 	pixel_width_c = ((regs[8] >> 4) & 0x3) + 8;
586 	pixel_width = (pixel_width_y == 8 && pixel_width_c == 8) ? 8 : 10;
587 
588 	if ((regs[AV1_PP_CONFIG_INDEX] & AV1_PP_TILE_SIZE) == AV1_PP_TILE_16X16) {
589 		u32 offset = MPP_ALIGN((vir_left + width + vir_right) *
590 			     (height + 28) / 16, 64);
591 
592 		bus_address = regs[505];
593 		fbc_stream_number++;
594 		if (pixel_width == 10)
595 			fbc_format = 3;
596 		else
597 			fbc_format = 9;
598 		fbc_comp_en[0] = 1;
599 		fbc_comp_en[1] = 1;
600 
601 		pp_width_final[0] = pp_width_final[1] = vir_left + width + vir_right;
602 		pp_height_final[0] = pp_height_final[1] = vir_top + height + vir_bottom;
603 
604 		if (pixel_width == 10)
605 			pp_input_stride[0] = pp_input_stride[1] = 2 * pp_width_final[0];
606 		else
607 			pp_input_stride[0] = pp_input_stride[1] = pp_width_final[0];
608 
609 		pp_hdr_base[0] = pp_hdr_base[1] = bus_address;
610 		pp_payload_base[0] = pp_payload_base[1] = bus_address + offset;
611 		pp_input_base[0] = pp_input_base[1] = bus_address;
612 
613 		writel_relaxed((fbc_stream_number << 9),
614 			       dec->reg_base[AV1DEC_CLASS_AFBC] + REG_CONTROL);
615 		writel_relaxed(0x1, dec->reg_base[AV1DEC_CLASS_AFBC] + REG_INTRENBL);
616 
617 		for (i = 0; i < 2; i++) {
618 			writel_relaxed(fbc_format,
619 				       dec->reg_base[AV1DEC_CLASS_AFBC] + REG_FORMAT + i * 4);
620 			writel_relaxed(fbc_comp_en[i], dec->reg_base[AV1DEC_CLASS_AFBC] +
621 				       REG_COMPRESSENABLE + i * 4);
622 			/* hdr base */
623 			writel_relaxed(pp_hdr_base[i],
624 				       dec->reg_base[AV1DEC_CLASS_AFBC] + REG_HEADERBASE + i * 4);
625 			/* payload */
626 			writel_relaxed(pp_payload_base[i],
627 				       dec->reg_base[AV1DEC_CLASS_AFBC] + REG_PAYLOADBASE + i * 4);
628 			/* bufsize */
629 			writel_relaxed(((pp_height_final[i] << 15) | pp_width_final[i]),
630 				       dec->reg_base[AV1DEC_CLASS_AFBC] + REG_INPUTBUFSIZE + i * 4);
631 			/* buf */
632 			writel_relaxed(pp_input_base[i],
633 				       dec->reg_base[AV1DEC_CLASS_AFBC] + REG_INPUTBUFBASE + i * 4);
634 			/* stride */
635 			writel_relaxed(pp_input_stride[i], dec->reg_base[AV1DEC_CLASS_AFBC] +
636 				       REG_INPUTBUFSTRIDE + i * 4);
637 		}
638 		/* make sure the AFBC setup is written before triggering the update */
639 		wmb();
640 		writel(((fbc_stream_number << 9) | (1 << 7)),
641 		       dec->reg_base[AV1DEC_CLASS_AFBC] + REG_CONTROL); /* update */
642 		writel((fbc_stream_number << 9), dec->reg_base[AV1DEC_CLASS_AFBC] + REG_CONTROL);
643 
644 	}
645 	return 0;
646 }
647 
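/*
 * Write the task registers of every class to the hardware and start the
 * decode. The VCD enable register is skipped in the loop and written
 * last, so the start bit is the final register write.
 */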
648 static int av1dec_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
649 {
650 	int i;
651 	u32 en_val = 0;
652 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
653 	struct av1dec_hw_info *hw = dec->hw_info;
654 	struct av1dec_task *task = to_av1dec_task(mpp_task);
655 	u32 timing_en = mpp->srv->timing_en;
656 
657 	mpp_debug_enter();
658 	mpp_iommu_flush_tlb(mpp->iommu_info);
659 	av1dec_set_l2_cache(dec, task);
660 	av1dec_set_afbc(dec, task);
661 
662 	for (i = 0; i < task->w_req_cnt; i++) {
663 		int class;
664 		struct mpp_request *req = &task->w_reqs[i];
665 
666 		for (class = 0; class < hw->reg_class_num; class++) {
667 			int j, s, e;
668 			u32 base, *regs;
669 
670 			if (!req_over_class(req, task, class))
671 				continue;
672 			base = task->reg_class[class].base;
673 			s = MPP_BASE_TO_IDX(req->offset - base);
674 			e = s + req->size / sizeof(u32);
675 			regs = (u32 *)task->reg_class[class].data;
676 
677 			mpp_debug(DEBUG_TASK_INFO, "found rd_class %d, base=%08x, s=%d, e=%d\n",
678 				  class, base, s, e);
679 			for (j = s; j < e; j++) {
680 				if (class == 0 && j == hw->hw.reg_en) {
681 					en_val = regs[j];
682 					continue;
683 				}
684 				writel_relaxed(regs[j], dec->reg_base[class] + j * sizeof(u32));
685 			}
686 		}
687 	}
688 
689 	/* init current task */
690 	mpp->cur_task = mpp_task;
691 
692 	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
693 
694 	/* Flush the registers before starting the device */
695 	wmb();
696 	mpp_write(mpp, hw->en_base, en_val);
697 
698 	mpp_task_run_end(mpp_task, timing_en);
699 
700 	mpp_debug_leave();
701 
702 	return 0;
703 }
704 
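/*
 * Hard irq handler: latch the interrupt status, clear it and wake the
 * threaded handler.
 */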
705 static int av1dec_vcd_irq(struct mpp_dev *mpp)
706 {
707 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
708 	struct av1dec_hw_info *hw = dec->hw_info;
709 
710 	mpp_debug_enter();
711 
712 	mpp->irq_status = mpp_read(mpp, hw->sta_base);
713 	if (!mpp->irq_status)
714 		return IRQ_NONE;
715 
716 	mpp_write(mpp, hw->clr_base, 0);
717 
718 	mpp_debug_leave();
719 
720 	return IRQ_WAKE_THREAD;
721 }
722 
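/*
 * Threaded irq handler: clear the L2 cache shaper and read cache
 * enables, acknowledge the AFBC encoder when AFBC output was used,
 * request a reset if an error bit is set and finish the current task.
 */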
723 static int av1dec_isr(struct mpp_dev *mpp)
724 {
725 	struct mpp_task *mpp_task = mpp->cur_task;
726 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
727 	struct av1dec_task *task = to_av1dec_task(mpp_task);
728 	u32 *regs = (u32 *)task->reg_class[0].data;
729 
730 	mpp_debug_enter();
731 
732 	/* FIXME use a spin lock here */
733 	if (!mpp_task) {
734 		dev_err(mpp->dev, "no current task\n");
735 		return IRQ_HANDLED;
736 	}
737 
738 	mpp_time_diff(mpp_task);
739 	mpp->cur_task = NULL;
740 
741 	/* clear l2 cache status */
742 	writel_relaxed(0x0, dec->reg_base[AV1DEC_CLASS_CACHE] + 0x020);
743 	writel_relaxed(0x0, dec->reg_base[AV1DEC_CLASS_CACHE] + 0x204);
744 	/* multi id enable bit */
745 	writel_relaxed(0x00000000, dec->reg_base[AV1DEC_CLASS_CACHE] + 0x208);
746 
747 	if (((regs[321] >> 9) & 0x3) == 0x2) {
748 		u32 ack_status = readl(dec->reg_base[AV1DEC_CLASS_AFBC] + REG_ACKNOWLEDGE);
749 
750 		if ((ack_status & 0x1) == 0x1) {
751 			u32 ctl_val = readl(dec->reg_base[AV1DEC_CLASS_AFBC] + REG_CONTROL);
752 
753 			ctl_val |= 1;
754 			writel_relaxed(ctl_val, dec->reg_base[AV1DEC_CLASS_AFBC] + REG_CONTROL);
755 		}
756 	}
757 	task->irq_status = mpp->irq_status;
758 	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
759 	if (task->irq_status & dec->hw_info->err_mask) {
760 		atomic_inc(&mpp->reset_request);
761 		/* dump register */
762 		if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
763 			mpp_debug(DEBUG_DUMP_ERR_REG, "irq_status: %08x\n",
764 				  task->irq_status);
765 			mpp_task_dump_hw_reg(mpp);
766 		}
767 	}
768 	mpp_task_finish(mpp_task->session, mpp_task);
769 
770 	mpp_debug_leave();
771 
772 	return IRQ_HANDLED;
773 }
774 
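/*
 * Read back the hardware registers requested by userspace into the task
 * buffer; the latched irq status replaces the live (already cleared)
 * status register value.
 */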
775 static int av1dec_finish(struct mpp_dev *mpp,
776 			 struct mpp_task *mpp_task)
777 {
778 	u32 i;
779 	struct av1dec_task *task = to_av1dec_task(mpp_task);
780 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
781 	struct av1dec_hw_info *hw = dec->hw_info;
782 
783 	mpp_debug_enter();
784 
785 	for (i = 0; i < task->r_req_cnt; i++) {
786 		int class;
787 		struct mpp_request *req = &task->r_reqs[i];
788 
789 		for (class = 0; class < hw->reg_class_num; class++) {
790 			int j, s, e;
791 			u32 base, *regs;
792 
793 			if (!req_over_class(req, task, class))
794 				continue;
795 			base = task->reg_class[class].base;
796 			s = MPP_BASE_TO_IDX(req->offset - base);
797 			e = s + req->size / sizeof(u32);
798 			regs = (u32 *)task->reg_class[class].data;
799 
800 			mpp_debug(DEBUG_TASK_INFO, "found rd_class %d, base=%08x, s=%d, e=%d\n",
801 				  class, base, s, e);
802 			for (j = s; j < e; j++) {
803 				/* return the latched irq status instead of the cleared register */
804 				if (class == 0 && j == MPP_BASE_TO_IDX(hw->sta_base)) {
805 					regs[j] = task->irq_status;
806 					continue;
807 				}
808 				regs[j] = readl_relaxed(dec->reg_base[class] + j * sizeof(u32));
809 			}
810 		}
811 	}
812 
813 	mpp_debug_leave();
814 
815 	return 0;
816 }
817 
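/* Copy the read-back registers of every read request to userspace. */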
818 static int av1dec_result(struct mpp_dev *mpp,
819 			 struct mpp_task *mpp_task,
820 			 struct mpp_task_msgs *msgs)
821 {
822 	u32 i;
823 	struct av1dec_task *task = to_av1dec_task(mpp_task);
824 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
825 	struct av1dec_hw_info *hw = dec->hw_info;
826 
827 	mpp_debug_enter();
828 
829 	for (i = 0; i < task->r_req_cnt; i++) {
830 		int class;
831 		struct mpp_request *req = &task->r_reqs[i];
832 
833 		for (class = 0; class < hw->reg_class_num; class++) {
834 			u32 base, *regs;
835 
836 			if (!req_over_class(req, task, class))
837 				continue;
838 			base = task->reg_class[class].base;
839 			regs = (u32 *)task->reg_class[class].data;
840 			regs += MPP_BASE_TO_IDX(req->offset - base);
841 
842 			if (copy_to_user(req->data, regs, req->size)) {
843 				mpp_err("copy_to_user reg fail\n");
844 				return -EIO;
845 			}
846 		}
847 	}
848 	mpp_debug_leave();
849 
850 	return 0;
851 }
852 
853 static int av1dec_free_task(struct mpp_session *session,
854 			    struct mpp_task *mpp_task)
855 {
856 	struct av1dec_task *task = to_av1dec_task(mpp_task);
857 
858 	mpp_task_finalize(session, mpp_task);
859 	kfree(task->reg_data);
860 	kfree(task);
861 
862 	return 0;
863 }
864 
865 #ifdef CONFIG_PROC_FS
866 static int av1dec_procfs_remove(struct mpp_dev *mpp)
867 {
868 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
869 
870 	if (dec->procfs) {
871 		proc_remove(dec->procfs);
872 		dec->procfs = NULL;
873 	}
874 
875 	return 0;
876 }
877 
878 static int av1dec_procfs_init(struct mpp_dev *mpp)
879 {
880 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
881 
882 	dec->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
883 	if (IS_ERR_OR_NULL(dec->procfs)) {
884 		mpp_err("failed on open procfs\n");
885 		dec->procfs = NULL;
886 		return -EIO;
887 	}
888 
889 	/* for common mpp_dev options */
890 	mpp_procfs_create_common(dec->procfs, mpp);
891 
892 	/* for debug */
893 	mpp_procfs_create_u32("aclk", 0644,
894 			      dec->procfs, &dec->aclk_info.debug_rate_hz);
895 	mpp_procfs_create_u32("session_buffers", 0644,
896 			      dec->procfs, &mpp->session_max_buffers);
897 
898 	return 0;
899 }
900 #else
901 static inline int av1dec_procfs_remove(struct mpp_dev *mpp)
902 {
903 	return 0;
904 }
905 
906 static inline int av1dec_procfs_init(struct mpp_dev *mpp)
907 {
908 	return 0;
909 }
910 #endif
911 
912 static int av1dec_init(struct mpp_dev *mpp)
913 {
914 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
915 	int ret = 0;
916 
917 	/* Get clock info from dtsi */
918 	ret = mpp_get_clk_info(mpp, &dec->aclk_info, "aclk_vcodec");
919 	if (ret)
920 		mpp_err("failed on clk_get aclk_vcodec\n");
921 	ret = mpp_get_clk_info(mpp, &dec->hclk_info, "hclk_vcodec");
922 	if (ret)
923 		mpp_err("failed on clk_get hclk_vcodec\n");
924 
925 	/* Get normal max workload from dtsi */
926 	of_property_read_u32(mpp->dev->of_node,
927 			     "rockchip,default-max-load",
928 			     &dec->default_max_load);
929 	/* Set default rates */
930 	mpp_set_clk_info_rate_hz(&dec->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
931 
932 	/* Get reset control from dtsi */
933 	dec->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
934 	if (!dec->rst_a)
935 		mpp_err("No aclk reset resource define\n");
936 	dec->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
937 	if (!dec->rst_h)
938 		mpp_err("No hclk reset resource define\n");
939 
940 	return 0;
941 }
942 
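/*
 * Reset the decoder by asserting and releasing the aclk/hclk resets
 * while the power domain is idle-requested.
 */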
943 static int av1dec_reset(struct mpp_dev *mpp)
944 {
945 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
946 
947 	mpp_debug_enter();
948 
949 	if (dec->rst_a && dec->rst_h) {
950 		rockchip_pmu_idle_request(mpp->dev, true);
951 		mpp_safe_reset(dec->rst_a);
952 		mpp_safe_reset(dec->rst_h);
953 		udelay(5);
954 		mpp_safe_unreset(dec->rst_a);
955 		mpp_safe_unreset(dec->rst_h);
956 		rockchip_pmu_idle_request(mpp->dev, false);
957 	}
958 
959 	mpp_debug_leave();
960 
961 	return 0;
962 }
963 
964 static int av1dec_clk_on(struct mpp_dev *mpp)
965 {
966 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
967 
968 	mpp_clk_safe_enable(dec->aclk_info.clk);
969 	mpp_clk_safe_enable(dec->hclk_info.clk);
970 
971 	return 0;
972 }
973 
974 static int av1dec_clk_off(struct mpp_dev *mpp)
975 {
976 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
977 
978 	clk_disable_unprepare(dec->aclk_info.clk);
979 	clk_disable_unprepare(dec->hclk_info.clk);
980 
981 	return 0;
982 }
983 
984 static int av1dec_set_freq(struct mpp_dev *mpp,
985 			   struct mpp_task *mpp_task)
986 {
987 	struct av1dec_dev *dec = to_av1dec_dev(mpp);
988 	struct av1dec_task *task = to_av1dec_task(mpp_task);
989 
990 	mpp_clk_set_rate(&dec->aclk_info, task->clk_mode);
991 
992 	return 0;
993 }
994 
995 static struct mpp_hw_ops av1dec_hw_ops = {
996 	.init = av1dec_init,
997 	.clk_on = av1dec_clk_on,
998 	.clk_off = av1dec_clk_off,
999 	.set_freq = av1dec_set_freq,
1000 	.reset = av1dec_reset,
1001 };
1002 
1003 static struct mpp_dev_ops av1dec_dev_ops = {
1004 	.alloc_task = av1dec_alloc_task,
1005 	.run = av1dec_run,
1006 	.irq = av1dec_vcd_irq,
1007 	.isr = av1dec_isr,
1008 	.finish = av1dec_finish,
1009 	.result = av1dec_result,
1010 	.free_task = av1dec_free_task,
1011 };
1012 static const struct mpp_dev_var av1dec_data = {
1013 	.device_type = MPP_DEVICE_AV1DEC,
1014 	.hw_info = &av1dec_hw_info.hw,
1015 	.trans_info = trans_av1dec,
1016 	.hw_ops = &av1dec_hw_ops,
1017 	.dev_ops = &av1dec_dev_ops,
1018 };
1019 
1020 static const struct of_device_id mpp_av1dec_dt_match[] = {
1021 	{
1022 		.compatible = "rockchip,av1-decoder",
1023 		.data = &av1dec_data,
1024 	},
1025 	{},
1026 };
1027 
1028 static int av1dec_device_match(struct device *dev, struct device_driver *drv)
1029 {
1030 	return 1;
1031 }
1032 
1033 static int av1dec_device_probe(struct device *dev)
1034 {
1035 	int ret;
1036 	const struct platform_driver *drv;
1037 	struct platform_device *pdev = to_platform_device(dev);
1038 
1039 	ret = of_clk_set_defaults(dev->of_node, false);
1040 	if (ret < 0)
1041 		return ret;
1042 
1043 	ret = dev_pm_domain_attach(dev, true);
1044 	if (ret)
1045 		return ret;
1046 
1047 	drv = to_platform_driver(dev->driver);
1048 	if (drv->probe) {
1049 		ret = drv->probe(pdev);
1050 		if (ret)
1051 			dev_pm_domain_detach(dev, true);
1052 	}
1053 
1054 	return ret;
1055 }
1056 
1057 static int av1dec_device_remove(struct device *dev)
1058 {
1059 
1060 	struct platform_device *pdev = to_platform_device(dev);
1061 	struct platform_driver *drv = to_platform_driver(dev->driver);
1062 
1063 	if (dev->driver && drv->remove)
1064 		drv->remove(pdev);
1065 
1066 	dev_pm_domain_detach(dev, true);
1067 
1068 	return 0;
1069 }
1070 
1071 static void av1dec_device_shutdown(struct device *dev)
1072 {
1073 	struct platform_device *pdev = to_platform_device(dev);
1074 	struct platform_driver *drv = to_platform_driver(dev->driver);
1075 
1076 	if (dev->driver && drv->shutdown)
1077 		drv->shutdown(pdev);
1078 }
1079 
1080 static int av1dec_dma_configure(struct device *dev)
1081 {
1082 	return of_dma_configure(dev, dev->of_node, true);
1083 }
1084 
1085 static const struct dev_pm_ops platform_dev_pm_ops = {
1086 	.runtime_suspend = pm_generic_runtime_suspend,
1087 	.runtime_resume = pm_generic_runtime_resume,
1088 };
1089 
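/*
 * Private bus used to register the AV1 decoder device and driver. It
 * mirrors platform bus behaviour: match accepts every device, probe
 * attaches the PM domain and forwards to the platform driver callbacks.
 */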
1090 struct bus_type av1dec_bus = {
1091 	.name		= "av1dec_bus",
1092 	.match		= av1dec_device_match,
1093 	.probe		= av1dec_device_probe,
1094 	.remove		= av1dec_device_remove,
1095 	.shutdown	= av1dec_device_shutdown,
1096 	.dma_configure  = av1dec_dma_configure,
1097 	.pm		= &platform_dev_pm_ops,
1098 };
1099 
1100 static int av1_of_device_add(struct platform_device *ofdev)
1101 {
1102 	WARN_ON(ofdev->dev.of_node == NULL);
1103 
1104 	/* name and id have to be set so that the platform bus doesn't get
1105 	 * confused on matching
1106 	 */
1107 	ofdev->name = dev_name(&ofdev->dev);
1108 	ofdev->id = PLATFORM_DEVID_NONE;
1109 
1110 	/*
1111 	 * If this device has no NUMA node bound in the devicetree,
1112 	 * of_node_to_nid() returns NUMA_NO_NODE and device_add() will assume
1113 	 * the device is on the same node as its parent.
1114 	 */
1115 	set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node));
1116 
1117 	return device_add(&ofdev->dev);
1118 }
1119 
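/*
 * Walk the children of the devicetree root, create a platform device
 * named "av1d-master" for the first node matching this driver and put
 * it on av1dec_bus with a 32-bit DMA mask.
 */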
1120 static struct platform_device *av1dec_device_create(void)
1121 {
1122 	int ret = -ENODEV;
1123 	struct device_node *root, *child;
1124 	struct platform_device *pdev;
1125 
1126 	root = of_find_node_by_path("/");
1127 
1128 	for_each_child_of_node(root, child) {
1129 		if (!of_match_node(mpp_av1dec_dt_match, child))
1130 			continue;
1131 
1132 		pr_info("Adding child %pOF\n", child);
1133 
1134 		pdev = of_device_alloc(child, "av1d-master", NULL);
1135 		if (!pdev)
1136 			return ERR_PTR(-ENOMEM);
1137 
1138 		pdev->dev.bus = &av1dec_bus;
1139 
1140 		dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1141 
1142 		ret = av1_of_device_add(pdev);
1143 		if (ret) {
1144 			platform_device_put(pdev);
1145 			return ERR_PTR(-EINVAL);
1146 		}
1147 
1148 		pr_info("register device %s\n", dev_name(&pdev->dev));
1149 
1150 		return pdev;
1151 	}
1152 
1153 	return ERR_PTR(ret);
1154 }
1155 
1156 static void av1dec_device_destory(void)
1157 {
1158 	struct platform_device *pdev;
1159 	struct device *dev;
1160 
1161 	dev = bus_find_device_by_name(&av1dec_bus, NULL, "av1d-master");
1162 	pdev = dev ? to_platform_device(dev) : NULL;
1163 	if (!pdev) {
1164 		pr_err("cannot find platform device\n");
1165 		return;
1166 	}
1167 
1168 	pr_info("destroy device %s\n", dev_name(&pdev->dev));
1169 	platform_device_del(pdev);
1170 	platform_device_put(pdev);
1171 }
1172 
1173 void av1dec_driver_unregister(struct platform_driver *drv)
1174 {
1175 	/* 1. unregister av1 driver */
1176 	driver_unregister(&drv->driver);
1177 	/* 2. release device */
1178 	av1dec_device_destory();
1179 	/* 3. unregister iommu driver */
1180 	platform_driver_unregister(&rockchip_av1_iommu_driver);
1181 	/* 4. unregister bus */
1182 	bus_unregister(&av1dec_bus);
1183 }
1184 
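/*
 * Bring-up order: register the private bus, register the AV1 iommu
 * driver, create the decoder device, then register the decoder driver
 * on the bus.
 */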
1185 int av1dec_driver_register(struct platform_driver *drv)
1186 {
1187 	int ret;
1188 	/* 1. register bus */
1189 	ret = bus_register(&av1dec_bus);
1190 	if (ret) {
1191 		pr_err("failed to register av1 bus: %d\n", ret);
1192 		return ret;
1193 	}
1194 	/* 2. register iommu driver */
1195 	platform_driver_register(&rockchip_av1_iommu_driver);
1196 	/* 3. create device */
1197 	av1dec_device_create();
1198 	/* 4. register av1 driver */
1199 	return driver_register(&drv->driver);
1200 }
1201 
1202 static int av1dec_cache_init(struct platform_device *pdev, struct av1dec_dev *dec)
1203 {
1204 	struct resource *res;
1205 	struct device *dev = &pdev->dev;
1206 
1207 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cache");
1208 	if (!res)
1209 		return -ENOMEM;
1210 
1211 	dec->reg_base[AV1DEC_CLASS_CACHE] = devm_ioremap(dev, res->start, resource_size(res));
1212 	if (!dec->reg_base[AV1DEC_CLASS_CACHE]) {
1213 		dev_err(dev, "ioremap failed for resource %pR\n", res);
1214 		return -EINVAL;
1215 	}
1216 	return 0;
1217 }
1218 
1219 static int av1dec_afbc_init(struct platform_device *pdev, struct av1dec_dev *dec)
1220 {
1221 	struct resource *res;
1222 	struct device *dev = &pdev->dev;
1223 
1224 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afbc");
1225 	if (!res)
1226 		return -ENOMEM;
1227 
1228 	dec->reg_base[AV1DEC_CLASS_AFBC] = devm_ioremap(dev, res->start, resource_size(res));
1229 	if (!dec->reg_base[AV1DEC_CLASS_AFBC]) {
1230 		dev_err(dev, "ioremap failed for resource %pR\n", res);
1231 		return -EINVAL;
1232 	}
1233 	dec->irq[AV1DEC_CLASS_AFBC] = platform_get_irq(pdev, 2);
1234 
1235 	return 0;
1236 }
1237 
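/*
 * Probe: map the VCD, cache and AFBC register regions, request the VCD
 * irq and register the device with the mpp service.
 */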
1238 static int av1dec_probe(struct platform_device *pdev)
1239 {
1240 	int ret = 0;
1241 	struct device *dev = &pdev->dev;
1242 	struct av1dec_dev *dec = NULL;
1243 	struct mpp_dev *mpp = NULL;
1244 	const struct of_device_id *match = NULL;
1245 
1246 	dev_info(dev, "probing start\n");
1247 
1248 	dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);
1249 	if (!dec)
1250 		return -ENOMEM;
1251 
1252 	mpp = &dec->mpp;
1253 	platform_set_drvdata(pdev, dec);
1254 
1255 	if (pdev->dev.of_node) {
1256 		match = of_match_node(mpp_av1dec_dt_match, pdev->dev.of_node);
1257 		if (match)
1258 			mpp->var = (struct mpp_dev_var *)match->data;
1259 	}
1260 	/* get vcd resource */
1261 	ret = mpp_dev_probe(mpp, pdev);
1262 	if (ret)
1263 		return ret;
1264 
1265 	/* the iommu may be disabled */
1266 	if (mpp->iommu_info)
1267 		mpp->iommu_info->av1d_iommu = 1;
1268 
1269 	dec->reg_base[AV1DEC_CLASS_VCD] = mpp->reg_base;
1270 	ret = devm_request_threaded_irq(dev, mpp->irq,
1271 					mpp_dev_irq,
1272 					mpp_dev_isr_sched,
1273 					IRQF_SHARED,
1274 					dev_name(dev), mpp);
1275 	if (ret) {
1276 		dev_err(dev, "register interrupter runtime failed\n");
1277 		goto failed_get_irq;
1278 	}
1279 	dec->irq[AV1DEC_CLASS_VCD] = mpp->irq;
1280 	/* get cache resource */
1281 	ret = av1dec_cache_init(pdev, dec);
1282 	if (ret)
1283 		goto failed_get_irq;
1284 	/* get afbc resource */
1285 	ret = av1dec_afbc_init(pdev, dec);
1286 	if (ret)
1287 		goto failed_get_irq;
1288 	mpp->session_max_buffers = AV1DEC_SESSION_MAX_BUFFERS;
1289 	dec->hw_info = to_av1dec_info(mpp->var->hw_info);
1290 	av1dec_procfs_init(mpp);
1291 	mpp_dev_register_srv(mpp, mpp->srv);
1292 	dev_info(dev, "probing finish\n");
1293 
1294 	return 0;
1295 
1296 failed_get_irq:
1297 	mpp_dev_remove(mpp);
1298 
1299 	return ret;
1300 }
1301 
1302 static int av1dec_remove(struct platform_device *pdev)
1303 {
1304 	struct device *dev = &pdev->dev;
1305 	struct av1dec_dev *dec = platform_get_drvdata(pdev);
1306 
1307 	dev_info(dev, "remove device\n");
1308 	mpp_dev_remove(&dec->mpp);
1309 	av1dec_procfs_remove(&dec->mpp);
1310 
1311 	return 0;
1312 }
1313 
1314 static void av1dec_shutdown(struct platform_device *pdev)
1315 {
1316 	int ret;
1317 	int val;
1318 	struct device *dev = &pdev->dev;
1319 	struct av1dec_dev *dec = platform_get_drvdata(pdev);
1320 	struct mpp_dev *mpp = &dec->mpp;
1321 
1322 	dev_info(dev, "shutdown device\n");
1323 
1324 	atomic_inc(&mpp->srv->shutdown_request);
1325 	ret = readx_poll_timeout(atomic_read,
1326 				 &mpp->task_count,
1327 				 val, val == 0, 1000, 200000);
1328 	if (ret == -ETIMEDOUT)
1329 		dev_err(dev, "wait total running time out\n");
1330 
1331 	dev_info(dev, "shutdown success\n");
1332 }
1333 
1334 struct platform_driver rockchip_av1dec_driver = {
1335 	.probe = av1dec_probe,
1336 	.remove = av1dec_remove,
1337 	.shutdown = av1dec_shutdown,
1338 	.driver = {
1339 		.name = AV1DEC_DRIVER_NAME,
1340 		.of_match_table = of_match_ptr(mpp_av1dec_dt_match),
1341 		.bus = &av1dec_bus,
1342 	},
1343 };
1344