xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3  * Copyright (c) 2021 Rockchip Electronics Co., Ltd
4  *
5  * author:
6  *	Herman Chen <herman.chen@rock-chips.com>
7  */
8 
9 #include <linux/delay.h>
10 #include <linux/interrupt.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/slab.h>
13 #include <soc/rockchip/pm_domains.h>
14 #include <soc/rockchip/rockchip_dmc.h>
15 #include <soc/rockchip/rockchip_iommu.h>
16 
17 #include "mpp_rkvdec2_link.h"
18 
19 #include "hack/mpp_rkvdec2_link_hack_rk3568.c"
20 
21 #define WORK_TIMEOUT_MS		(500)
22 #define WAIT_TIMEOUT_MS		(2000)
23 #define RKVDEC2_LINK_HACK_TASK_FLAG	(0xff)
24 
25 /* vdpu381 link hw info for rk3588 */
26 struct rkvdec_link_info rkvdec_link_v2_hw_info = {
27 	.tb_reg_num = 218,
28 	.tb_reg_next = 0,
29 	.tb_reg_r = 1,
30 	.tb_reg_second_en = 8,
31 
32 	.part_w_num = 6,
33 	.part_r_num = 2,
34 	.part_w[0] = {
35 		.tb_reg_off = 4,
36 		.reg_start = 8,
37 		.reg_num = 28,
38 	},
39 	.part_w[1] = {
40 		.tb_reg_off = 32,
41 		.reg_start = 64,
42 		.reg_num = 52,
43 	},
44 	.part_w[2] = {
45 		.tb_reg_off = 84,
46 		.reg_start = 128,
47 		.reg_num = 16,
48 	},
49 	.part_w[3] = {
50 		.tb_reg_off = 100,
51 		.reg_start = 160,
52 		.reg_num = 48,
53 	},
54 	.part_w[4] = {
55 		.tb_reg_off = 148,
56 		.reg_start = 224,
57 		.reg_num = 16,
58 	},
59 	.part_w[5] = {
60 		.tb_reg_off = 164,
61 		.reg_start = 256,
62 		.reg_num = 16,
63 	},
64 	.part_r[0] = {
65 		.tb_reg_off = 180,
66 		.reg_start = 224,
67 		.reg_num = 10,
68 	},
69 	.part_r[1] = {
70 		.tb_reg_off = 190,
71 		.reg_start = 258,
72 		.reg_num = 28,
73 	},
74 	.tb_reg_int = 180,
75 	.tb_reg_cycle = 195,
76 	.hack_setup = 0,
77 	.reg_status = {
78 		.dec_num_mask = 0x3fffffff,
79 		.err_flag_base = 0x010,
80 		.err_flag_bit = BIT(31),
81 	},
82 };
83 
84 /* vdpu34x link hw info for rk356x */
85 struct rkvdec_link_info rkvdec_link_rk356x_hw_info = {
86 	.tb_reg_num = 202,
87 	.tb_reg_next = 0,
88 	.tb_reg_r = 1,
89 	.tb_reg_second_en = 8,
90 
91 	.part_w_num = 6,
92 	.part_r_num = 2,
93 	.part_w[0] = {
94 		.tb_reg_off = 4,
95 		.reg_start = 8,
96 		.reg_num = 20,
97 	},
98 	.part_w[1] = {
99 		.tb_reg_off = 24,
100 		.reg_start = 64,
101 		.reg_num = 52,
102 	},
103 	.part_w[2] = {
104 		.tb_reg_off = 76,
105 		.reg_start = 128,
106 		.reg_num = 16,
107 	},
108 	.part_w[3] = {
109 		.tb_reg_off = 92,
110 		.reg_start = 160,
111 		.reg_num = 40,
112 	},
113 	.part_w[4] = {
114 		.tb_reg_off = 132,
115 		.reg_start = 224,
116 		.reg_num = 16,
117 	},
118 	.part_w[5] = {
119 		.tb_reg_off = 148,
120 		.reg_start = 256,
121 		.reg_num = 16,
122 	},
123 	.part_r[0] = {
124 		.tb_reg_off = 164,
125 		.reg_start = 224,
126 		.reg_num = 10,
127 	},
128 	.part_r[1] = {
129 		.tb_reg_off = 174,
130 		.reg_start = 258,
131 		.reg_num = 28,
132 	},
133 	.tb_reg_int = 164,
134 	.tb_reg_cycle = 179,
135 	.hack_setup = 1,
136 	.reg_status = {
137 		.dec_num_mask = 0x3fffffff,
138 		.err_flag_base = 0x010,
139 		.err_flag_bit = BIT(31),
140 	},
141 };
142 
143 /* vdpu382 link hw info */
144 struct rkvdec_link_info rkvdec_link_vdpu382_hw_info = {
145 	.tb_reg_num = 222,
146 	.tb_reg_next = 0,
147 	.tb_reg_r = 1,
148 	.tb_reg_second_en = 8,
149 
150 	.part_w_num = 6,
151 	.part_r_num = 2,
152 	.part_w[0] = {
153 		.tb_reg_off = 4,
154 		.reg_start = 8,
155 		.reg_num = 28,
156 	},
157 	.part_w[1] = {
158 		.tb_reg_off = 32,
159 		.reg_start = 64,
160 		.reg_num = 52,
161 	},
162 	.part_w[2] = {
163 		.tb_reg_off = 84,
164 		.reg_start = 128,
165 		.reg_num = 16,
166 	},
167 	.part_w[3] = {
168 		.tb_reg_off = 100,
169 		.reg_start = 160,
170 		.reg_num = 48,
171 	},
172 	.part_w[4] = {
173 		.tb_reg_off = 148,
174 		.reg_start = 224,
175 		.reg_num = 16,
176 	},
177 	.part_w[5] = {
178 		.tb_reg_off = 164,
179 		.reg_start = 256,
180 		.reg_num = 16,
181 	},
182 	.part_r[0] = {
183 		.tb_reg_off = 180,
184 		.reg_start = 224,
185 		.reg_num = 12,
186 	},
187 	.part_r[1] = {
188 		.tb_reg_off = 192,
189 		.reg_start = 258,
190 		.reg_num = 30,
191 	},
192 	.tb_reg_int = 180,
193 	.hack_setup = 0,
194 	.tb_reg_cycle = 197,
195 	.reg_status = {
196 		.dec_num_mask = 0x000fffff,
197 		.err_flag_base = 0x024,
198 		.err_flag_bit = BIT(8),
199 	},
200 };
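/*
 * Layout note (derived from the structures above and from
 * rkvdec2_link_alloc_table()/rkvdec2_link_prepare() below): each task owns
 * one link table node of tb_reg_num 32-bit words, padded to a 256-byte
 * aligned node size. The word at tb_reg_next holds the iova of the next
 * node and tb_reg_r the iova of this node's read-back area. Each part_w[]
 * entry copies a window of task registers into the node (e.g. for rk356x
 * part_w[0] copies 20 registers starting at reg 8 into table words 4..23),
 * and each part_r[] entry is copied back to the task on finish.
 */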
201 
202 static void rkvdec2_link_free_task(struct kref *ref);
203 static void rkvdec2_link_timeout_proc(struct work_struct *work_s);
204 static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
205 					   struct device *iommu_dev,
206 					   unsigned long iova,
207 					   int status, void *arg);
208 
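/*
 * Take a consistent snapshot of the link hw status: the error flag and the
 * enable bit are re-read (up to 10 times) until two consecutive reads
 * match, so that the decoded counter, error flag and enable state belong
 * together even while the hardware is still running.
 */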
209 static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
210 {
211 	void __iomem *reg_base = dev->reg_base;
212 	u32 error_ff0, error_ff1;
213 	u32 enable_ff0, enable_ff1;
214 	u32 loop_count = 10;
215 	u32 val;
216 	struct rkvdec_link_info *link_info = dev->info;
217 	u32 dec_num_mask = link_info->reg_status.dec_num_mask;
218 	u32 err_flag_base = link_info->reg_status.err_flag_base;
219 	u32 err_flag_bit = link_info->reg_status.err_flag_bit;
220 
221 	error_ff1 = (readl(reg_base + err_flag_base) & err_flag_bit) ? 1 : 0;
222 	enable_ff1 = readl(reg_base + RKVDEC_LINK_EN_BASE);
223 
224 	dev->irq_status = readl(reg_base + RKVDEC_LINK_IRQ_BASE);
225 	dev->iova_curr = readl(reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
226 	dev->link_mode = readl(reg_base + RKVDEC_LINK_MODE_BASE);
227 	dev->total = readl(reg_base + RKVDEC_LINK_TOTAL_NUM_BASE);
228 	dev->iova_next = readl(reg_base + RKVDEC_LINK_NEXT_ADDR_BASE);
229 
230 	do {
231 		val = readl(reg_base + RKVDEC_LINK_DEC_NUM_BASE);
232 		error_ff0 = (readl(reg_base + err_flag_base) & err_flag_bit) ? 1 : 0;
233 		enable_ff0 = readl(reg_base + RKVDEC_LINK_EN_BASE);
234 
235 		if (error_ff0 == error_ff1 && enable_ff0 == enable_ff1)
236 			break;
237 
238 		error_ff1 = error_ff0;
239 		enable_ff1 = enable_ff0;
240 	} while (--loop_count);
241 
242 	dev->error = error_ff0;
243 	dev->decoded_status = val;
244 	dev->decoded = val & dec_num_mask;
245 	dev->enabled = enable_ff0;
246 
247 	if (!loop_count)
248 		dev_info(dev->dev, "reach last 10 count\n");
249 }
250 
251 static void rkvdec_link_node_dump(const char *func, struct rkvdec_link_dev *dev)
252 {
253 	u32 *table_base = (u32 *)dev->table->vaddr;
254 	u32 reg_count = dev->link_reg_count;
255 	u32 iova = (u32)dev->table->iova;
256 	u32 *reg = NULL;
257 	u32 i, j;
258 
259 	for (i = 0; i < dev->task_capacity; i++) {
260 		reg = table_base + i * reg_count;
261 
262 		mpp_err("slot %d link config iova %08x:\n", i,
263 			iova + i * dev->link_node_size);
264 
265 		for (j = 0; j < reg_count; j++) {
266 			mpp_err("reg%03d 0x%08x\n", j, reg[j]);
267 			udelay(100);
268 		}
269 	}
270 }
271 
272 static void rkvdec_core_reg_dump(const char *func, struct rkvdec_link_dev *dev)
273 {
274 	struct mpp_dev *mpp = dev->mpp;
275 	u32 s = mpp->var->hw_info->reg_start;
276 	u32 e = mpp->var->hw_info->reg_end;
277 	u32 i;
278 
279 	mpp_err("--- dump hardware register ---\n");
280 
281 	for (i = s; i <= e; i++) {
282 		u32 reg = i * sizeof(u32);
283 
284 		mpp_err("reg[%03d]: %04x: 0x%08x\n",
285 			i, reg, readl_relaxed(mpp->reg_base + reg));
286 		udelay(100);
287 	}
288 }
289 
290 static void rkvdec_link_reg_dump(const char *func, struct rkvdec_link_dev *dev)
291 {
292 	mpp_err("dump link config status from %s\n", func);
293 	mpp_err("reg 0 %08x - irq status\n", dev->irq_status);
294 	mpp_err("reg 1 %08x - cfg addr\n", dev->iova_curr);
295 	mpp_err("reg 2 %08x - link mode\n", dev->link_mode);
296 	mpp_err("reg 4 %08x - decoded num\n", dev->decoded_status);
297 	mpp_err("reg 5 %08x - total num\n", dev->total);
298 	mpp_err("reg 6 %08x - link mode en\n", dev->enabled);
299 	mpp_err("reg 6 %08x - next ltb addr\n", dev->iova_next);
300 }
301 
302 static void rkvdec_link_counter(const char *func, struct rkvdec_link_dev *dev)
303 {
304 	mpp_err("dump link counter from %s\n", func);
305 
306 	mpp_err("task pending %d running %d\n",
307 		atomic_read(&dev->task_pending), dev->task_running);
308 }
309 
310 int rkvdec_link_dump(struct mpp_dev *mpp)
311 {
312 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
313 	struct rkvdec_link_dev *dev = dec->link_dec;
314 
315 	rkvdec_link_status_update(dev);
316 	rkvdec_link_reg_dump(__func__, dev);
317 	rkvdec_link_counter(__func__, dev);
318 	rkvdec_core_reg_dump(__func__, dev);
319 	rkvdec_link_node_dump(__func__, dev);
320 
321 	return 0;
322 }
323 
324 static void rkvdec2_clear_cache(struct mpp_dev *mpp)
325 {
326 	/* set cache size */
327 	u32 reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
328 		  RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
329 
330 	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
331 		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
332 
333 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
334 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
335 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
336 
337 	/* clear cache */
338 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
339 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
340 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
341 }
342 
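/*
 * Queue one link table node to the hw. If link mode is not enabled yet,
 * clear the hw counters, program the table iova and enable link mode with
 * a frame count of 1; otherwise append the node with
 * RKVDEC_LINK_BIT_ADD_MODE so the running chain is extended by one frame.
 */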
343 static int rkvdec2_link_enqueue(struct rkvdec_link_dev *link_dec,
344 				struct mpp_task *mpp_task)
345 {
346 	void __iomem *reg_base = link_dec->reg_base;
347 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
348 	struct mpp_dma_buffer *table = task->table;
349 	u32 link_en = 0;
350 	u32 frame_num = 1;
351 	u32 link_mode;
352 	u32 timing_en = link_dec->mpp->srv->timing_en;
353 
354 	link_en = readl(reg_base + RKVDEC_LINK_EN_BASE);
355 	if (!link_en) {
356 		rkvdec2_clear_cache(link_dec->mpp);
357 		/* cleanup counter in hardware */
358 		writel(0, reg_base + RKVDEC_LINK_MODE_BASE);
359 		/* start config before all registers are set */
360 		wmb();
361 		writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
362 		/* write zero count config */
363 		wmb();
364 		/* clear counter and enable link mode hardware */
365 		writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
366 		writel_relaxed(table->iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
367 		link_mode = frame_num;
368 	} else
369 		link_mode = (frame_num | RKVDEC_LINK_BIT_ADD_MODE);
370 
371 	/* set link mode */
372 	writel_relaxed(link_mode, reg_base + RKVDEC_LINK_MODE_BASE);
373 
374 	/* start config before all registers are set */
375 	wmb();
376 
377 	mpp_iommu_flush_tlb(link_dec->mpp->iommu_info);
378 	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
379 
380 	link_dec->task_running++;
381 	/* configure done */
382 	writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
383 	if (!link_en) {
384 		/* start hardware before all registers are set */
385 		wmb();
386 		/* clear counter and enable link mode hardware */
387 		writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
388 	}
389 	mpp_task_run_end(mpp_task, timing_en);
390 
391 	return 0;
392 }
393 
394 static int rkvdec2_link_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
395 {
396 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
397 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
398 	struct rkvdec_link_dev *link_dec = dec->link_dec;
399 	struct mpp_dma_buffer *table = link_dec->table;
400 	struct rkvdec_link_info *info = link_dec->info;
401 	struct rkvdec_link_part *part = info->part_r;
402 	u32 *tb_reg = (u32 *)table->vaddr;
403 	u32 off, s, n;
404 	u32 i;
405 
406 	mpp_debug_enter();
407 
408 	for (i = 0; i < info->part_r_num; i++) {
409 		off = part[i].tb_reg_off;
410 		s = part[i].reg_start;
411 		n = part[i].reg_num;
412 		memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
413 	}
414 	/* revert hack for irq status */
415 	task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
416 
417 	mpp_debug_leave();
418 
419 	return 0;
420 }
421 
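/*
 * Prepare a task for link mode: take a free table node from unused_list,
 * copy the write-register windows (part_w) from the task registers into
 * it, force the error/reset handling bits, clear the read-back area
 * (part_r) and move the node to used_list.
 */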
422 static void *rkvdec2_link_prepare(struct mpp_dev *mpp,
423 				  struct mpp_task *mpp_task)
424 {
425 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
426 	struct rkvdec_link_dev *link_dec = dec->link_dec;
427 	struct mpp_dma_buffer *table = NULL;
428 	struct rkvdec_link_part *part;
429 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
430 	struct rkvdec_link_info *info = link_dec->info;
431 	u32 i, off, s, n;
432 	u32 *tb_reg;
433 
434 	mpp_debug_enter();
435 
436 	if (test_bit(TASK_STATE_PREPARE, &mpp_task->state)) {
437 		dev_err(mpp->dev, "task %d has prepared\n", mpp_task->task_index);
438 		return mpp_task;
439 	}
440 
441 	table = list_first_entry_or_null(&link_dec->unused_list, struct mpp_dma_buffer, link);
442 
443 	if (!table)
444 		return NULL;
445 
446 	/* fill regs value */
447 	tb_reg = (u32 *)table->vaddr;
448 	part = info->part_w;
449 	for (i = 0; i < info->part_w_num; i++) {
450 		off = part[i].tb_reg_off;
451 		s = part[i].reg_start;
452 		n = part[i].reg_num;
453 		memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
454 	}
455 
456 	/* setup error mode flag */
457 	tb_reg[9] |= BIT(18) | BIT(9);
458 	tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
459 
460 	/* memset read registers */
461 	part = info->part_r;
462 	for (i = 0; i < info->part_r_num; i++) {
463 		off = part[i].tb_reg_off;
464 		n = part[i].reg_num;
465 		memset(&tb_reg[off], 0, n * sizeof(u32));
466 	}
467 
468 	list_move_tail(&table->link, &link_dec->used_list);
469 	task->table = table;
470 	set_bit(TASK_STATE_PREPARE, &mpp_task->state);
471 
472 	mpp_dbg_link("session %d task %d prepare pending %d running %d\n",
473 		     mpp_task->session->index, mpp_task->task_index,
474 		     atomic_read(&link_dec->task_pending), link_dec->task_running);
475 	mpp_debug_leave();
476 
477 	return mpp_task;
478 }
479 
480 static int rkvdec2_link_reset(struct mpp_dev *mpp)
481 {
482 
483 	dev_info(mpp->dev, "resetting...\n");
484 
485 	disable_irq(mpp->irq);
486 	mpp_iommu_disable_irq(mpp->iommu_info);
487 
488 	/* FIXME lock resource lock of the other devices in combo */
489 	mpp_iommu_down_write(mpp->iommu_info);
490 	mpp_reset_down_write(mpp->reset_group);
491 	atomic_set(&mpp->reset_request, 0);
492 
493 	rockchip_save_qos(mpp->dev);
494 
495 	if (mpp->hw_ops->reset)
496 		mpp->hw_ops->reset(mpp);
497 
498 	rockchip_restore_qos(mpp->dev);
499 
500 	/* Note: if the domain does not change, iommu attach returns as an
501 	 * empty operation. Therefore force a detach and re-attach so that the
502 	 * domain is updated and can really be attached.
503 	 */
504 	mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
505 
506 	mpp_reset_up_write(mpp->reset_group);
507 	mpp_iommu_up_write(mpp->iommu_info);
508 
509 	enable_irq(mpp->irq);
510 	mpp_iommu_enable_irq(mpp->iommu_info);
511 	dev_info(mpp->dev, "reset done\n");
512 
513 	return 0;
514 }
515 
516 static int rkvdec2_link_irq(struct mpp_dev *mpp)
517 {
518 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
519 	struct rkvdec_link_dev *link_dec = dec->link_dec;
520 	u32 irq_status = 0;
521 
522 	if (!atomic_read(&link_dec->power_enabled)) {
523 		dev_info(link_dec->dev, "irq on power off\n");
524 		return -1;
525 	}
526 
527 	irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
528 
529 	if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
530 		u32 enabled = readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE);
531 
532 		if (!enabled) {
533 			u32 bus = mpp_read_relaxed(mpp, 273 * 4);
534 
535 			if (bus & 0x7ffff)
536 				dev_info(link_dec->dev,
537 					 "invalid bus status %08x\n", bus);
538 		}
539 
540 		link_dec->irq_status = irq_status;
541 		mpp->irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
542 
543 		writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
544 	}
545 
546 	mpp_debug(DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE, "irq_status: %08x : %08x\n",
547 		  irq_status, mpp->irq_status);
548 
549 	return 0;
550 }
551 
552 int rkvdec2_link_remove(struct mpp_dev *mpp, struct rkvdec_link_dev *link_dec)
553 {
554 	mpp_debug_enter();
555 
556 	if (link_dec && link_dec->table) {
557 		mpp_dma_free(link_dec->table);
558 		link_dec->table = NULL;
559 	}
560 
561 	mpp_debug_leave();
562 
563 	return 0;
564 }
565 
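/*
 * Allocate one DMA buffer holding task_capacity link nodes and chain them
 * into a ring: each node's tb_reg_next word points at the iova of the next
 * node (the last wraps back to the first) and tb_reg_r points at the
 * node's own read-back area.
 */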
566 static int rkvdec2_link_alloc_table(struct mpp_dev *mpp,
567 				    struct rkvdec_link_dev *link_dec)
568 {
569 	int ret;
570 	struct mpp_dma_buffer *table;
571 	struct rkvdec_link_info *info = link_dec->info;
572 	/* NOTE: the link table address requires 64-byte alignment */
573 	u32 task_capacity = link_dec->task_capacity;
574 	u32 link_node_size = ALIGN(info->tb_reg_num * sizeof(u32), 256);
575 	u32 link_info_size = task_capacity * link_node_size;
576 	u32 *v_curr;
577 	u32 io_curr, io_next, io_start;
578 	u32 offset_r = info->part_r[0].tb_reg_off * sizeof(u32);
579 	u32 i;
580 
581 	table = mpp_dma_alloc(mpp->dev, link_info_size);
582 	if (!table) {
583 		ret = -ENOMEM;
584 		goto err_free_node;
585 	}
586 
587 	link_dec->link_node_size = link_node_size;
588 	link_dec->link_reg_count = link_node_size >> 2;
589 	io_start = table->iova;
590 
591 	for (i = 0; i < task_capacity; i++) {
592 		v_curr  = (u32 *)(table->vaddr + i * link_node_size);
593 		io_curr = io_start + i * link_node_size;
594 		io_next = (i == task_capacity - 1) ?
595 			  io_start : io_start + (i + 1) * link_node_size;
596 
597 		v_curr[info->tb_reg_next] = io_next;
598 		v_curr[info->tb_reg_r] = io_curr + offset_r;
599 	}
600 
601 	link_dec->table	     = table;
602 
603 	return 0;
604 err_free_node:
605 	rkvdec2_link_remove(mpp, link_dec);
606 	return ret;
607 }
608 
609 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
610 int rkvdec2_link_procfs_init(struct mpp_dev *mpp)
611 {
612 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
613 	struct rkvdec_link_dev *link_dec = dec->link_dec;
614 
615 	if (!link_dec)
616 		return 0;
617 
618 	link_dec->statistic_count = 0;
619 
620 	if (dec->procfs)
621 		mpp_procfs_create_u32("statistic_count", 0644,
622 				      dec->procfs, &link_dec->statistic_count);
623 
624 	return 0;
625 }
626 #else
627 int rkvdec2_link_procfs_init(struct mpp_dev *mpp)
628 {
629 	return 0;
630 }
631 #endif
632 
633 int rkvdec2_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
634 {
635 	int ret;
636 	struct resource *res = NULL;
637 	struct rkvdec_link_dev *link_dec = NULL;
638 	struct device *dev = &pdev->dev;
639 	struct mpp_dev *mpp = &dec->mpp;
640 	struct mpp_dma_buffer *table;
641 	int i;
642 
643 	mpp_debug_enter();
644 
645 	link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
646 	if (!link_dec) {
647 		ret = -ENOMEM;
648 		goto done;
649 	}
650 
651 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
652 	if (res)
653 		link_dec->info = mpp->var->hw_info->link_info;
654 	else {
655 		dev_err(dev, "link mode resource not found\n");
656 		ret = -ENOMEM;
657 		goto done;
658 	}
659 
660 	link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
661 	if (!link_dec->reg_base) {
662 		dev_err(dev, "ioremap failed for resource %pR\n", res);
663 		ret = -ENOMEM;
664 		goto done;
665 	}
666 
667 	link_dec->task_capacity = mpp->task_capacity;
668 	ret = rkvdec2_link_alloc_table(&dec->mpp, link_dec);
669 	if (ret)
670 		goto done;
671 
672 	/* alloc table pointer array */
673 	table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
674 				   sizeof(*table), GFP_KERNEL | __GFP_ZERO);
675 	if (!table)
676 		return -ENOMEM;
677 
678 	/* init table array */
679 	link_dec->table_array = table;
680 	INIT_LIST_HEAD(&link_dec->used_list);
681 	INIT_LIST_HEAD(&link_dec->unused_list);
682 	for (i = 0; i < mpp->task_capacity; i++) {
683 		table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
684 		table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
685 		table[i].size = link_dec->link_node_size;
686 		INIT_LIST_HEAD(&table[i].link);
687 		list_add_tail(&table[i].link, &link_dec->unused_list);
688 	}
689 
690 	if (dec->fix)
691 		rkvdec2_link_hack_data_setup(dec->fix);
692 
693 	mpp->fault_handler = rkvdec2_link_iommu_fault_handle;
694 
695 	link_dec->mpp = mpp;
696 	link_dec->dev = dev;
697 	atomic_set(&link_dec->task_timeout, 0);
698 	atomic_set(&link_dec->task_pending, 0);
699 	atomic_set(&link_dec->power_enabled, 0);
700 	link_dec->irq_enabled = 1;
701 
702 	dec->link_dec = link_dec;
703 	dev_info(dev, "link mode probe finish\n");
704 
705 done:
706 	if (ret) {
707 		if (link_dec) {
708 			if (link_dec->reg_base) {
709 				devm_iounmap(dev, link_dec->reg_base);
710 				link_dec->reg_base = NULL;
711 			}
712 			devm_kfree(dev, link_dec);
713 			link_dec = NULL;
714 		}
715 		dec->link_dec = NULL;
716 	}
717 	mpp_debug_leave();
718 
719 	return ret;
720 }
721 
722 static void rkvdec2_link_free_task(struct kref *ref)
723 {
724 	struct mpp_dev *mpp;
725 	struct mpp_session *session;
726 	struct mpp_task *task = container_of(ref, struct mpp_task, ref);
727 
728 	if (!task->session) {
729 		mpp_err("task %d task->session is null.\n", task->task_id);
730 		return;
731 	}
732 	session = task->session;
733 
734 	mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
735 		       session->index, task->task_id, task->state);
736 	if (!session->mpp) {
737 		mpp_err("session %d session->mpp is null.\n", session->index);
738 		return;
739 	}
740 	mpp = session->mpp;
741 	list_del_init(&task->queue_link);
742 
743 	rkvdec2_free_task(session, task);
744 	/* Decrease reference count */
745 	atomic_dec(&session->task_count);
746 	atomic_dec(&mpp->task_count);
747 }
748 
749 static void rkvdec2_link_trigger_work(struct mpp_dev *mpp)
750 {
751 	kthread_queue_work(&mpp->queue->worker, &mpp->work);
752 }
753 
754 static int rkvdec2_link_power_on(struct mpp_dev *mpp)
755 {
756 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
757 	struct rkvdec_link_dev *link_dec = dec->link_dec;
758 
759 	if (!atomic_xchg(&link_dec->power_enabled, 1)) {
760 		if (mpp_iommu_attach(mpp->iommu_info)) {
761 			dev_err(mpp->dev, "mpp_iommu_attach failed\n");
762 			return -ENODATA;
763 		}
764 		pm_runtime_get_sync(mpp->dev);
765 		pm_stay_awake(mpp->dev);
766 
767 		if (mpp->hw_ops->clk_on)
768 			mpp->hw_ops->clk_on(mpp);
769 
770 		if (!link_dec->irq_enabled) {
771 			enable_irq(mpp->irq);
772 			mpp_iommu_enable_irq(mpp->iommu_info);
773 			link_dec->irq_enabled = 1;
774 		}
775 
776 		mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_ADVANCED);
777 		mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_ADVANCED);
778 		mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_ADVANCED);
779 		mpp_devfreq_set_core_rate(mpp, CLK_MODE_ADVANCED);
780 		mpp_iommu_dev_activate(mpp->iommu_info, mpp);
781 	}
782 	return 0;
783 }
784 
785 static void rkvdec2_link_power_off(struct mpp_dev *mpp)
786 {
787 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
788 	struct rkvdec_link_dev *link_dec = dec->link_dec;
789 
790 	if (atomic_xchg(&link_dec->power_enabled, 0)) {
791 		disable_irq(mpp->irq);
792 		mpp_iommu_disable_irq(mpp->iommu_info);
793 		link_dec->irq_enabled = 0;
794 
795 		if (mpp->hw_ops->clk_off)
796 			mpp->hw_ops->clk_off(mpp);
797 
798 		pm_relax(mpp->dev);
799 		pm_runtime_put_sync_suspend(mpp->dev);
800 
801 		mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
802 		mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
803 		mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
804 		mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
805 		mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
806 	}
807 }
808 
809 static void rkvdec2_link_timeout_proc(struct work_struct *work_s)
810 {
811 	struct mpp_dev *mpp;
812 	struct rkvdec2_dev *dec;
813 	struct mpp_session *session;
814 	struct mpp_task *task = container_of(to_delayed_work(work_s),
815 					     struct mpp_task, timeout_work);
816 
817 	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
818 		mpp_err("task %d state %lx has been handled\n",
819 			task->task_id, task->state);
820 		return;
821 	}
822 
823 	if (!task->session) {
824 		mpp_err("task %d session is null.\n", task->task_id);
825 		return;
826 	}
827 	session = task->session;
828 
829 	if (!session->mpp) {
830 		mpp_err("task %d:%d mpp is null.\n", session->index,
831 			task->task_id);
832 		return;
833 	}
834 	mpp = session->mpp;
835 	set_bit(TASK_STATE_TIMEOUT, &task->state);
836 
837 	dec = to_rkvdec2_dev(mpp);
838 	atomic_inc(&dec->link_dec->task_timeout);
839 
840 	dev_err(mpp->dev, "session %d task %d state %#lx timeout, cnt %d\n",
841 		session->index, task->task_index, task->state,
842 		atomic_read(&dec->link_dec->task_timeout));
843 
844 	rkvdec2_link_trigger_work(mpp);
845 }
846 
847 static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
848 					    struct device *iommu_dev,
849 					    unsigned long iova,
850 					    int status, void *arg)
851 {
852 	struct mpp_dev *mpp = (struct mpp_dev *)arg;
853 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
854 	struct mpp_task *mpp_task = NULL, *n;
855 	struct mpp_taskqueue *queue;
856 
857 	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
858 		iova, status, arg);
859 
860 	if (!mpp) {
861 		dev_err(iommu_dev, "pagefault without device to handle\n");
862 		return 0;
863 	}
864 	queue = mpp->queue;
865 	list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
866 		struct rkvdec_link_info *info = dec->link_dec->info;
867 		struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
868 		u32 *tb_reg = (u32 *)task->table->vaddr;
869 		u32 irq_status = tb_reg[info->tb_reg_int];
870 
871 		if (!irq_status) {
872 			mpp_task_dump_mem_region(mpp, mpp_task);
873 			break;
874 		}
875 	}
876 
877 	mpp_task_dump_hw_reg(mpp);
878 	/*
879 	 * Mask the iommu irq so the iommu does not repeatedly trigger the
880 	 * pagefault until the faulting task is finished by the hw timeout.
881 	 */
882 	rockchip_iommu_mask_irq(mpp->dev);
883 	dec->mmu_fault = 1;
884 
885 	return 0;
886 }
887 
888 static void rkvdec2_link_resend(struct mpp_dev *mpp)
889 {
890 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
891 	struct rkvdec_link_dev *link_dec = dec->link_dec;
892 	struct mpp_taskqueue *queue = mpp->queue;
893 	struct mpp_task *mpp_task, *n;
894 
895 	link_dec->task_running = 0;
896 	list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
897 		dev_err(mpp->dev, "resend task %d\n", mpp_task->task_index);
898 		cancel_delayed_work_sync(&mpp_task->timeout_work);
899 		clear_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
900 		clear_bit(TASK_STATE_HANDLE, &mpp_task->state);
901 		rkvdec2_link_enqueue(link_dec, mpp_task);
902 	}
903 }
904 
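/*
 * Walk the running list and retire finished tasks. A task counts as done
 * when the hw has written a non-zero irq status into its table node, or
 * when it is marked timeout/abort; on an iommu fault or when link mode
 * stopped unexpectedly one task is force dequeued. Timeout/abort/forced
 * dequeue triggers a reset, and the remaining tasks are resent afterwards.
 */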
905 static void rkvdec2_link_try_dequeue(struct mpp_dev *mpp)
906 {
907 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
908 	struct rkvdec_link_dev *link_dec = dec->link_dec;
909 	struct mpp_taskqueue *queue = mpp->queue;
910 	struct mpp_task *mpp_task = NULL, *n;
911 	struct rkvdec_link_info *info = link_dec->info;
912 	u32 reset_flag = 0;
913 	u32 iommu_fault = dec->mmu_fault && (mpp->irq_status & RKVDEC_TIMEOUT_STA);
914 	u32 link_en = atomic_read(&link_dec->power_enabled) ?
915 		      readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE) : 0;
916 	u32 force_dequeue = iommu_fault || !link_en;
917 	u32 dequeue_cnt = 0;
918 
919 	list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
920 		/*
921 		 * Multiple tasks may be enqueued at the same time, so several
922 		 * soft timeouts can fire together. In reality only the first
923 		 * task has really timed out because the hardware is stuck, so
924 		 * only handle the timeout flag for the first task.
925 		 */
926 		u32 timeout_flag = dequeue_cnt ? 0 : test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
927 		struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
928 		u32 *tb_reg = (u32 *)task->table->vaddr;
929 		u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
930 		u32 irq_status = tb_reg[info->tb_reg_int];
931 		u32 task_done = irq_status || timeout_flag || abort_flag;
932 
933 		/*
934 		 * In some cases the hw cannot write the result regs back to ddr:
935 		 * 1. iommu pagefault
936 		 * 2. link stop (link_en == 0) caused by an error task, a rk356x issue.
937 		 * In these cases one task must be force dequeued.
938 		 */
939 		if (force_dequeue)
940 			task_done = 1;
941 
942 		if (!task_done)
943 			break;
944 
945 		dequeue_cnt++;
946 		/* check hack task, only for rk356x */
947 		if (task->need_hack == RKVDEC2_LINK_HACK_TASK_FLAG) {
948 			cancel_delayed_work_sync(&mpp_task->timeout_work);
949 			list_move_tail(&task->table->link, &link_dec->unused_list);
950 			list_del_init(&mpp_task->queue_link);
951 			link_dec->task_running--;
952 			link_dec->hack_task_running--;
953 			kfree(task);
954 			mpp_dbg_link("hack running %d irq_status %#08x timeout %d abort %d\n",
955 				     link_dec->hack_task_running, irq_status,
956 				     timeout_flag, abort_flag);
957 			continue;
958 		}
959 
960 		/*
961 		 * if timeout/abort/force dequeue found, reset and stop hw first.
962 		 */
963 		if ((timeout_flag || abort_flag || force_dequeue) && !reset_flag) {
964 			dev_err(mpp->dev, "session %d task %d timeout %d abort %d force_dequeue %d\n",
965 				mpp_task->session->index, mpp_task->task_index,
966 				timeout_flag, abort_flag, force_dequeue);
967 			rkvdec2_link_reset(mpp);
968 			reset_flag = 1;
969 			dec->mmu_fault = 0;
970 			mpp->irq_status = 0;
971 			force_dequeue = 0;
972 		}
973 
974 		cancel_delayed_work_sync(&mpp_task->timeout_work);
975 
976 		task->irq_status = irq_status;
977 		mpp_task->hw_cycles = tb_reg[info->tb_reg_cycle];
978 		mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
979 		rkvdec2_link_finish(mpp, mpp_task);
980 
981 		list_move_tail(&task->table->link, &link_dec->unused_list);
982 		list_del_init(&mpp_task->queue_link);
983 
984 		set_bit(TASK_STATE_HANDLE, &mpp_task->state);
985 		set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
986 		set_bit(TASK_STATE_FINISH, &mpp_task->state);
987 		set_bit(TASK_STATE_DONE, &mpp_task->state);
988 		if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
989 			set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
990 
991 		wake_up(&mpp_task->wait);
992 		kref_put(&mpp_task->ref, rkvdec2_link_free_task);
993 		link_dec->task_running--;
994 
995 		mpp_dbg_link("session %d task %d irq_status %#08x timeout %d abort %d\n",
996 			     mpp_task->session->index, mpp_task->task_index,
997 			     irq_status, timeout_flag, abort_flag);
998 		if (irq_status & RKVDEC_INT_ERROR_MASK) {
999 			dev_err(mpp->dev,
1000 				"session %d task %d irq_status %#08x timeout %u abort %u\n",
1001 				mpp_task->session->index, mpp_task->task_index,
1002 				irq_status, timeout_flag, abort_flag);
1003 			if (!reset_flag)
1004 				atomic_inc(&mpp->reset_request);
1005 		}
1006 	}
1007 
1008 	/* resend running task after reset */
1009 	if (reset_flag && !list_empty(&queue->running_list))
1010 		rkvdec2_link_resend(mpp);
1011 }
1012 
1013 static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1014 {
1015 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1016 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1017 	struct mpp_taskqueue *queue = mpp->queue;
1018 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1019 
1020 	mpp_debug_enter();
1021 
1022 	rkvdec2_link_power_on(mpp);
1023 
1024 	/* hack for rk356x */
1025 	if (task->need_hack) {
1026 		u32 *tb_reg;
1027 		struct mpp_dma_buffer *table;
1028 		struct rkvdec2_task *hack_task;
1029 		struct rkvdec_link_info *info = link_dec->info;
1030 
1031 		/* reserve 2 unused table slots for a task that needs the hack */
1032 		if (link_dec->task_running > (link_dec->task_capacity - 2))
1033 			return -EBUSY;
1034 
1035 		table = list_first_entry_or_null(&link_dec->unused_list,
1036 						 struct mpp_dma_buffer,
1037 						 link);
1038 		if (!table)
1039 			return -EBUSY;
1040 
1041 		hack_task = kzalloc(sizeof(*hack_task), GFP_KERNEL);
1042 
1043 		if (!hack_task)
1044 			return -ENOMEM;
1045 
1046 		mpp_task_init(mpp_task->session, &hack_task->mpp_task);
1047 		INIT_DELAYED_WORK(&hack_task->mpp_task.timeout_work,
1048 					rkvdec2_link_timeout_proc);
1049 
1050 		tb_reg = (u32 *)table->vaddr;
1051 		memset(tb_reg + info->part_r[0].tb_reg_off, 0, info->part_r[0].reg_num);
1052 		rkvdec2_3568_hack_fix_link(tb_reg + 4);
1053 		list_move_tail(&table->link, &link_dec->used_list);
1054 		hack_task->table = table;
1055 		hack_task->need_hack = RKVDEC2_LINK_HACK_TASK_FLAG;
1056 		rkvdec2_link_enqueue(link_dec, &hack_task->mpp_task);
1057 		mpp_taskqueue_pending_to_run(queue, &hack_task->mpp_task);
1058 		link_dec->hack_task_running++;
1059 		mpp_dbg_link("hack task send to hw, hack running %d\n",
1060 			     link_dec->hack_task_running);
1061 	}
1062 
1063 	/* process normal */
1064 	if (!rkvdec2_link_prepare(mpp, mpp_task))
1065 		return -EBUSY;
1066 
1067 	rkvdec2_link_enqueue(link_dec, mpp_task);
1068 
1069 	set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1070 	atomic_dec(&link_dec->task_pending);
1071 	mpp_taskqueue_pending_to_run(queue, mpp_task);
1072 
1073 	mpp_dbg_link("session %d task %d send to hw pending %d running %d\n",
1074 		     mpp_task->session->index, mpp_task->task_index,
1075 		     atomic_read(&link_dec->task_pending), link_dec->task_running);
1076 	mpp_debug_leave();
1077 
1078 	return 0;
1079 }
1080 
1081 irqreturn_t rkvdec2_link_irq_proc(int irq, void *param)
1082 {
1083 	struct mpp_dev *mpp = param;
1084 	int ret = rkvdec2_link_irq(mpp);
1085 
1086 	if (!ret)
1087 		rkvdec2_link_trigger_work(mpp);
1088 
1089 	return IRQ_HANDLED;
1090 }
1091 
1092 static struct mpp_task *
1093 mpp_session_get_pending_task(struct mpp_session *session)
1094 {
1095 	struct mpp_task *task = NULL;
1096 
1097 	mutex_lock(&session->pending_lock);
1098 	task = list_first_entry_or_null(&session->pending_list, struct mpp_task,
1099 					pending_link);
1100 	mutex_unlock(&session->pending_lock);
1101 
1102 	return task;
1103 }
1104 
1105 static int task_is_done(struct mpp_task *task)
1106 {
1107 	return test_bit(TASK_STATE_PROC_DONE, &task->state);
1108 }
1109 
1110 static int mpp_session_pop_pending(struct mpp_session *session,
1111 				   struct mpp_task *task)
1112 {
1113 	mutex_lock(&session->pending_lock);
1114 	list_del_init(&task->pending_link);
1115 	mutex_unlock(&session->pending_lock);
1116 	kref_put(&task->ref, rkvdec2_link_free_task);
1117 
1118 	return 0;
1119 }
1120 
1121 static int mpp_session_pop_done(struct mpp_session *session,
1122 				struct mpp_task *task)
1123 {
1124 	set_bit(TASK_STATE_DONE, &task->state);
1125 
1126 	return 0;
1127 }
1128 
1129 int rkvdec2_link_process_task(struct mpp_session *session,
1130 			      struct mpp_task_msgs *msgs)
1131 {
1132 	struct mpp_task *task = NULL;
1133 	struct mpp_dev *mpp = session->mpp;
1134 	struct rkvdec_link_info *link_info = mpp->var->hw_info->link_info;
1135 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1136 	struct rkvdec_link_dev *link_dec = dec->link_dec;
1137 
1138 	task = rkvdec2_alloc_task(session, msgs);
1139 	if (!task) {
1140 		mpp_err("alloc_task failed.\n");
1141 		return -ENOMEM;
1142 	}
1143 
1144 	if (link_info->hack_setup) {
1145 		u32 fmt;
1146 		struct rkvdec2_task *dec_task = NULL;
1147 
1148 		dec_task = to_rkvdec2_task(task);
1149 		fmt = RKVDEC_GET_FORMAT(dec_task->reg[RKVDEC_REG_FORMAT_INDEX]);
1150 		dec_task->need_hack = (fmt == RKVDEC_FMT_H264D);
1151 	}
1152 
1153 	kref_init(&task->ref);
1154 	atomic_set(&task->abort_request, 0);
1155 	task->task_index = atomic_fetch_inc(&mpp->task_index);
1156 	task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
1157 	INIT_DELAYED_WORK(&task->timeout_work, rkvdec2_link_timeout_proc);
1158 
1159 	atomic_inc(&session->task_count);
1160 
1161 	kref_get(&task->ref);
1162 	mutex_lock(&session->pending_lock);
1163 	list_add_tail(&task->pending_link, &session->pending_list);
1164 	mutex_unlock(&session->pending_lock);
1165 
1166 	kref_get(&task->ref);
1167 	mutex_lock(&mpp->queue->pending_lock);
1168 	list_add_tail(&task->queue_link, &mpp->queue->pending_list);
1169 	mutex_unlock(&mpp->queue->pending_lock);
1170 	atomic_inc(&link_dec->task_pending);
1171 
1172 	/* push current task to queue */
1173 	atomic_inc(&mpp->task_count);
1174 	set_bit(TASK_STATE_PENDING, &task->state);
1175 	/* trigger current queue to run task */
1176 	rkvdec2_link_trigger_work(mpp);
1177 	kref_put(&task->ref, rkvdec2_link_free_task);
1178 
1179 	return 0;
1180 }
1181 
1182 int rkvdec2_link_wait_result(struct mpp_session *session,
1183 			     struct mpp_task_msgs *msgs)
1184 {
1185 	struct mpp_dev *mpp = session->mpp;
1186 	struct mpp_task *mpp_task;
1187 	int ret;
1188 
1189 	mpp_task = mpp_session_get_pending_task(session);
1190 	if (!mpp_task) {
1191 		mpp_err("session %p pending list is empty!\n", session);
1192 		return -EIO;
1193 	}
1194 
1195 	ret = wait_event_timeout(mpp_task->wait, task_is_done(mpp_task),
1196 				 msecs_to_jiffies(WAIT_TIMEOUT_MS));
1197 	if (ret) {
1198 		ret = rkvdec2_result(mpp, mpp_task, msgs);
1199 
1200 		mpp_session_pop_done(session, mpp_task);
1201 	} else {
1202 		mpp_err("task %d:%d state %lx timeout -> abort\n",
1203 			session->index, mpp_task->task_id, mpp_task->state);
1204 
1205 		atomic_inc(&mpp_task->abort_request);
1206 		set_bit(TASK_STATE_ABORT, &mpp_task->state);
1207 	}
1208 
1209 	mpp_session_pop_pending(session, mpp_task);
1210 	return ret;
1211 }
1212 
1213 void rkvdec2_link_worker(struct kthread_work *work_s)
1214 {
1215 	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1216 	struct mpp_task *task;
1217 	struct mpp_taskqueue *queue = mpp->queue;
1218 	u32 all_done;
1219 
1220 	mpp_debug_enter();
1221 
1222 	/* dequeue running task */
1223 	rkvdec2_link_try_dequeue(mpp);
1224 
1225 	/* process reset */
1226 	if (atomic_read(&mpp->reset_request)) {
1227 		rkvdec2_link_reset(mpp);
1228 		/* resend running task after reset */
1229 		if (!list_empty(&queue->running_list))
1230 			rkvdec2_link_resend(mpp);
1231 	}
1232 
1233 again:
1234 	/* get pending task to process */
1235 	mutex_lock(&queue->pending_lock);
1236 	task = list_first_entry_or_null(&queue->pending_list, struct mpp_task,
1237 					queue_link);
1238 	mutex_unlock(&queue->pending_lock);
1239 	if (!task)
1240 		goto done;
1241 
1242 	/* check abort task */
1243 	if (atomic_read(&task->abort_request)) {
1244 		mutex_lock(&queue->pending_lock);
1245 		list_del_init(&task->queue_link);
1246 
1247 		set_bit(TASK_STATE_ABORT_READY, &task->state);
1248 		set_bit(TASK_STATE_PROC_DONE, &task->state);
1249 
1250 		mutex_unlock(&queue->pending_lock);
1251 		wake_up(&task->wait);
1252 		kref_put(&task->ref, rkvdec2_link_free_task);
1253 		goto again;
1254 	}
1255 
1256 	/* queue task to hw */
1257 	if (!mpp_task_queue(mpp, task))
1258 		goto again;
1259 
1260 done:
1261 
1262 	/* if no task in pending and running list, power off device */
1263 	mutex_lock(&queue->pending_lock);
1264 	all_done = list_empty(&queue->pending_list) && list_empty(&queue->running_list);
1265 	mutex_unlock(&queue->pending_lock);
1266 
1267 	if (all_done)
1268 		rkvdec2_link_power_off(mpp);
1269 
1270 	mpp_session_cleanup_detach(queue, work_s);
1271 
1272 	mpp_debug_leave();
1273 }
1274 
1275 void rkvdec2_link_session_deinit(struct mpp_session *session)
1276 {
1277 	struct mpp_dev *mpp = session->mpp;
1278 
1279 	mpp_debug_enter();
1280 
1281 	rkvdec2_free_session(session);
1282 
1283 	if (session->dma) {
1284 		mpp_dbg_session("session %d destroy dma\n", session->index);
1285 		mpp_iommu_down_write(mpp->iommu_info);
1286 		mpp_dma_session_destroy(session->dma);
1287 		mpp_iommu_up_write(mpp->iommu_info);
1288 		session->dma = NULL;
1289 	}
1290 	if (session->srv) {
1291 		struct mpp_service *srv = session->srv;
1292 
1293 		mutex_lock(&srv->session_lock);
1294 		list_del_init(&session->service_link);
1295 		mutex_unlock(&srv->session_lock);
1296 	}
1297 	list_del_init(&session->session_link);
1298 
1299 	mpp_dbg_session("session %d release\n", session->index);
1300 
1301 	mpp_debug_leave();
1302 }
1303 
1304 #define RKVDEC2_1080P_PIXELS	(1920*1080)
1305 #define RKVDEC2_4K_PIXELS	(4096*2304)
1306 #define RKVDEC2_8K_PIXELS	(7680*4320)
1307 #define RKVDEC2_CCU_TIMEOUT_20MS	(0xefffff)
1308 #define RKVDEC2_CCU_TIMEOUT_50MS	(0x2cfffff)
1309 #define RKVDEC2_CCU_TIMEOUT_100MS	(0x4ffffff)
1310 
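/*
 * Pick the hw timeout threshold from the frame size. For example a
 * 1920x1088 frame (~2.1M pixels) is above RKVDEC2_1080P_PIXELS but below
 * RKVDEC2_4K_PIXELS, so it gets the 50 ms threshold, while an 8K frame
 * falls through to 100 ms.
 */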
1311 static u32 rkvdec2_ccu_get_timeout_threshold(struct rkvdec2_task *task)
1312 {
1313 	u32 pixels = task->pixels;
1314 
1315 	if (pixels < RKVDEC2_1080P_PIXELS)
1316 		return RKVDEC2_CCU_TIMEOUT_20MS;
1317 	else if (pixels < RKVDEC2_4K_PIXELS)
1318 		return RKVDEC2_CCU_TIMEOUT_50MS;
1319 	else
1320 		return RKVDEC2_CCU_TIMEOUT_100MS;
1321 }
1322 
1323 int rkvdec2_attach_ccu(struct device *dev, struct rkvdec2_dev *dec)
1324 {
1325 	int ret;
1326 	struct device_node *np;
1327 	struct platform_device *pdev;
1328 	struct rkvdec2_ccu *ccu;
1329 
1330 	mpp_debug_enter();
1331 
1332 	np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
1333 	if (!np || !of_device_is_available(np))
1334 		return -ENODEV;
1335 
1336 	pdev = of_find_device_by_node(np);
1337 	of_node_put(np);
1338 	if (!pdev)
1339 		return -ENODEV;
1340 
1341 	ccu = platform_get_drvdata(pdev);
1342 	if (!ccu)
1343 		return -ENOMEM;
1344 
1345 	ret = of_property_read_u32(dev->of_node, "rockchip,core-mask", &dec->core_mask);
1346 	if (ret)
1347 		return ret;
1348 	dev_info(dev, "core_mask=%08x\n", dec->core_mask);
1349 
1350 	/* if not the main-core, then attach the main core domain to current */
1351 	if (dec->mpp.core_id != 0) {
1352 		struct mpp_taskqueue *queue;
1353 		struct mpp_iommu_info *ccu_info, *cur_info;
1354 
1355 		queue = dec->mpp.queue;
1356 		/* set the ccu-domain for current device */
1357 		ccu_info = queue->cores[0]->iommu_info;
1358 		cur_info = dec->mpp.iommu_info;
1359 		if (cur_info)
1360 			cur_info->domain = ccu_info->domain;
1361 		mpp_iommu_attach(cur_info);
1362 	}
1363 
1364 	dec->ccu = ccu;
1365 
1366 	dev_info(dev, "attach ccu as core %d\n", dec->mpp.core_id);
1367 	mpp_debug_leave();
1368 
1369 	return 0;
1370 }
1371 
1372 static void rkvdec2_ccu_timeout_work(struct work_struct *work_s)
1373 {
1374 	struct mpp_dev *mpp;
1375 	struct mpp_task *task = container_of(to_delayed_work(work_s),
1376 					     struct mpp_task, timeout_work);
1377 
1378 	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
1379 		mpp_err("task %d state %lx has been handled\n",
1380 			task->task_id, task->state);
1381 		return;
1382 	}
1383 
1384 	if (!task->session) {
1385 		mpp_err("task %d session is null.\n", task->task_id);
1386 		return;
1387 	}
1388 	mpp = mpp_get_task_used_device(task, task->session);
1389 	mpp_err("%s, task %d state %#lx timeout\n", dev_name(mpp->dev),
1390 		task->task_index, task->state);
1391 	set_bit(TASK_STATE_TIMEOUT, &task->state);
1392 	atomic_inc(&mpp->reset_request);
1393 	atomic_inc(&mpp->queue->reset_request);
1394 	kthread_queue_work(&mpp->queue->worker, &mpp->work);
1395 }
1396 
1397 int rkvdec2_ccu_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
1398 {
1399 	struct resource *res;
1400 	struct rkvdec_link_dev *link_dec;
1401 	struct device *dev = &pdev->dev;
1402 
1403 	mpp_debug_enter();
1404 
1405 	/* link structure */
1406 	link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
1407 	if (!link_dec)
1408 		return -ENOMEM;
1409 
1410 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
1411 	if (!res)
1412 		return -ENOMEM;
1413 
1414 	link_dec->info = dec->mpp.var->hw_info->link_info;
1415 	link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
1416 	if (!link_dec->reg_base) {
1417 		dev_err(dev, "ioremap failed for resource %pR\n", res);
1418 		return -ENOMEM;
1419 	}
1420 
1421 	dec->link_dec = link_dec;
1422 
1423 	mpp_debug_leave();
1424 
1425 	return 0;
1426 }
1427 
1428 static int rkvdec2_ccu_power_on(struct mpp_taskqueue *queue,
1429 				struct rkvdec2_ccu *ccu)
1430 {
1431 	if (!atomic_xchg(&ccu->power_enabled, 1)) {
1432 		u32 i;
1433 		struct mpp_dev *mpp;
1434 
1435 		/* ccu pd and clk on */
1436 		pm_runtime_get_sync(ccu->dev);
1437 		pm_stay_awake(ccu->dev);
1438 		mpp_clk_safe_enable(ccu->aclk_info.clk);
1439 		/* core pd and clk on */
1440 		for (i = 0; i < queue->core_count; i++) {
1441 			struct rkvdec2_dev *dec;
1442 
1443 			mpp = queue->cores[i];
1444 			dec = to_rkvdec2_dev(mpp);
1445 			pm_runtime_get_sync(mpp->dev);
1446 			pm_stay_awake(mpp->dev);
1447 			if (mpp->hw_ops->clk_on)
1448 				mpp->hw_ops->clk_on(mpp);
1449 
1450 			mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
1451 			mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
1452 			mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
1453 			mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
1454 			mpp_iommu_dev_activate(mpp->iommu_info, mpp);
1455 		}
1456 		mpp_debug(DEBUG_CCU, "power on\n");
1457 	}
1458 
1459 	return 0;
1460 }
1461 
1462 static int rkvdec2_ccu_power_off(struct mpp_taskqueue *queue,
1463 				 struct rkvdec2_ccu *ccu)
1464 {
1465 	if (atomic_xchg(&ccu->power_enabled, 0)) {
1466 		u32 i;
1467 		struct mpp_dev *mpp;
1468 
1469 		/* ccu pd and clk off */
1470 		mpp_clk_safe_disable(ccu->aclk_info.clk);
1471 		pm_relax(ccu->dev);
1472 		pm_runtime_mark_last_busy(ccu->dev);
1473 		pm_runtime_put_autosuspend(ccu->dev);
1474 		/* core pd and clk off */
1475 		for (i = 0; i < queue->core_count; i++) {
1476 			mpp = queue->cores[i];
1477 
1478 			if (mpp->hw_ops->clk_off)
1479 				mpp->hw_ops->clk_off(mpp);
1480 			pm_relax(mpp->dev);
1481 			pm_runtime_mark_last_busy(mpp->dev);
1482 			pm_runtime_put_autosuspend(mpp->dev);
1483 			mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
1484 		}
1485 		mpp_debug(DEBUG_CCU, "power off\n");
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 static int rkvdec2_soft_ccu_dequeue(struct mpp_taskqueue *queue)
1492 {
1493 	struct mpp_task *mpp_task = NULL, *n;
1494 
1495 	mpp_debug_enter();
1496 
1497 	list_for_each_entry_safe(mpp_task, n,
1498 				 &queue->running_list,
1499 				 queue_link) {
1500 		struct mpp_dev *mpp = mpp_get_task_used_device(mpp_task, mpp_task->session);
1501 		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1502 		u32 irq_status = mpp->irq_status;
1503 		u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
1504 		u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
1505 		u32 timing_en = mpp->srv->timing_en;
1506 
1507 		if (irq_status || timeout_flag || abort_flag) {
1508 			struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1509 
1510 			if (timing_en) {
1511 				mpp_task->on_irq = ktime_get();
1512 				set_bit(TASK_TIMING_IRQ, &mpp_task->state);
1513 
1514 				mpp_task->on_cancel_timeout = mpp_task->on_irq;
1515 				set_bit(TASK_TIMING_TO_CANCEL, &mpp_task->state);
1516 
1517 				mpp_task->on_isr = mpp_task->on_irq;
1518 				set_bit(TASK_TIMING_ISR, &mpp_task->state);
1519 			}
1520 
1521 			set_bit(TASK_STATE_HANDLE, &mpp_task->state);
1522 			cancel_delayed_work(&mpp_task->timeout_work);
1523 			mpp_task->hw_cycles = mpp_read(mpp, RKVDEC_PERF_WORKING_CNT);
1524 			mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
1525 			task->irq_status = irq_status;
1526 			mpp_debug(DEBUG_IRQ_CHECK, "irq_status=%08x, timeout=%u, abort=%u\n",
1527 				  irq_status, timeout_flag, abort_flag);
1528 			if (irq_status && mpp->dev_ops->finish)
1529 				mpp->dev_ops->finish(mpp, mpp_task);
1530 			else
1531 				task->reg[RKVDEC_REG_INT_EN_INDEX] = RKVDEC_TIMEOUT_STA;
1532 
1533 			set_bit(TASK_STATE_FINISH, &mpp_task->state);
1534 			set_bit(TASK_STATE_DONE, &mpp_task->state);
1535 
1536 			set_bit(mpp->core_id, &queue->core_idle);
1537 			mpp_dbg_core("set core %d idle %lx\n", mpp->core_id, queue->core_idle);
1538 			/* Wake up the GET thread */
1539 			wake_up(&mpp_task->wait);
1540 			/* free task */
1541 			list_del_init(&mpp_task->queue_link);
1542 			kref_put(&mpp_task->ref, mpp_free_task);
1543 		} else {
1544 			/* NOTE: break when meet not finish */
1545 			break;
1546 		}
1547 	}
1548 
1549 	mpp_debug_leave();
1550 	return 0;
1551 }
1552 
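/*
 * Reset every enabled core in the queue: force the core idle to disconnect
 * it from the ccu, soft reset it, reset via SIP where available (otherwise
 * rkvdec2_reset()), then reconnect the core to the ccu and refresh its
 * iommu before re-enabling the irq.
 */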
1553 static int rkvdec2_soft_ccu_reset(struct mpp_taskqueue *queue,
1554 				  struct rkvdec2_ccu *ccu)
1555 {
1556 	int i;
1557 
1558 	for (i = queue->core_count - 1; i >= 0; i--) {
1559 		u32 val;
1560 
1561 		struct mpp_dev *mpp = queue->cores[i];
1562 		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1563 
1564 		if (mpp->disable)
1565 			continue;
1566 
1567 		dev_info(mpp->dev, "resetting...\n");
1568 		disable_hardirq(mpp->irq);
1569 
1570 		/* force idle, disconnect core and ccu */
1571 		writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1572 
1573 		/* soft reset */
1574 		mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
1575 		udelay(5);
1576 		val = mpp_read(mpp, RKVDEC_REG_INT_EN);
1577 		if (!(val & RKVDEC_SOFT_RESET_READY))
1578 			mpp_err("soft reset fail, int %08x\n", val);
1579 		mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1580 
1581 		/* check bus idle */
1582 		val = mpp_read(mpp, RKVDEC_REG_DEBUG_INT_BASE);
1583 		if (!(val & RKVDEC_BIT_BUS_IDLE))
1584 			mpp_err("bus busy\n");
1585 
1586 		if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
1587 			/* sip reset */
1588 			rockchip_dmcfreq_lock();
1589 			sip_smc_vpu_reset(i, 0, 0);
1590 			rockchip_dmcfreq_unlock();
1591 		} else {
1592 			rkvdec2_reset(mpp);
1593 		}
1594 		/* clear error mask */
1595 		writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1596 		       ccu->reg_base + RKVDEC_CCU_CORE_ERR_BASE);
1597 		/* connect core and ccu */
1598 		writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
1599 		       ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
1600 		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
1601 		atomic_set(&mpp->reset_request, 0);
1602 
1603 		enable_irq(mpp->irq);
1604 		dev_info(mpp->dev, "reset done\n");
1605 	}
1606 	atomic_set(&queue->reset_request, 0);
1607 
1608 	return 0;
1609 }
1610 
1611 void *rkvdec2_ccu_alloc_task(struct mpp_session *session,
1612 			     struct mpp_task_msgs *msgs)
1613 {
1614 	int ret;
1615 	struct rkvdec2_task *task;
1616 
1617 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1618 	if (!task)
1619 		return NULL;
1620 
1621 	ret = rkvdec2_task_init(session->mpp, session, task, msgs);
1622 	if (ret) {
1623 		kfree(task);
1624 		return NULL;
1625 	}
1626 
1627 	return &task->mpp_task;
1628 }
1629 
1630 static void rkvdec2_ccu_check_pagefault_info(struct mpp_dev *mpp)
1631 {
1632 	u32 i = 0;
1633 
1634 	for (i = 0; i < mpp->queue->core_count; i++) {
1635 		struct mpp_dev *core = mpp->queue->cores[i];
1636 		struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
1637 		void __iomem *mmu_base = dec->mmu_base;
1638 		u32 mmu0_st;
1639 		u32 mmu1_st;
1640 		u32 mmu0_pta;
1641 		u32 mmu1_pta;
1642 
1643 		if (!mmu_base)
1644 			return;
1645 
1646 		#define FAULT_STATUS 0x7e2
1647 		rkvdec2_ccu_power_on(mpp->queue, dec->ccu);
1648 
1649 		mmu0_st = readl(mmu_base + 0x4);
1650 		mmu1_st = readl(mmu_base + 0x44);
1651 		mmu0_pta = readl(mmu_base + 0xc);
1652 		mmu1_pta = readl(mmu_base + 0x4c);
1653 
1654 		dec->mmu0_st = mmu0_st;
1655 		dec->mmu1_st = mmu1_st;
1656 		dec->mmu0_pta = mmu0_pta;
1657 		dec->mmu1_pta = mmu1_pta;
1658 
1659 		pr_err("core %d mmu0 %08x %08x mmu1 %08x %08x\n",
1660 			core->core_id, mmu0_st, mmu0_pta, mmu1_st, mmu1_pta);
1661 		if ((mmu0_st & FAULT_STATUS) || (mmu1_st & FAULT_STATUS) ||
1662 		    mmu0_pta || mmu1_pta) {
1663 			dec->fault_iova = readl(dec->link_dec->reg_base + 0x4);
1664 			dec->mmu_fault = 1;
1665 			pr_err("core %d fault iova %08x\n", core->core_id, dec->fault_iova);
1666 			rockchip_iommu_mask_irq(core->dev);
1667 		} else {
1668 			dec->mmu_fault = 0;
1669 			dec->fault_iova = 0;
1670 		}
1671 	}
1672 }
1673 
1674 int rkvdec2_ccu_iommu_fault_handle(struct iommu_domain *iommu,
1675 				   struct device *iommu_dev,
1676 				   unsigned long iova, int status, void *arg)
1677 {
1678 	struct mpp_dev *mpp = (struct mpp_dev *)arg;
1679 
1680 	mpp_debug_enter();
1681 
1682 	rkvdec2_ccu_check_pagefault_info(mpp);
1683 
1684 	mpp->queue->iommu_fault = 1;
1685 	atomic_inc(&mpp->queue->reset_request);
1686 	kthread_queue_work(&mpp->queue->worker, &mpp->work);
1687 
1688 	mpp_debug_leave();
1689 
1690 	return 0;
1691 }
1692 
1693 irqreturn_t rkvdec2_soft_ccu_irq(int irq, void *param)
1694 {
1695 	struct mpp_dev *mpp = param;
1696 	u32 irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);
1697 
1698 	if (irq_status & RKVDEC_IRQ_RAW) {
1699 		mpp_debug(DEBUG_IRQ_STATUS, "irq_status=%08x\n", irq_status);
1700 		if (irq_status & RKVDEC_INT_ERROR_MASK) {
1701 			atomic_inc(&mpp->reset_request);
1702 			atomic_inc(&mpp->queue->reset_request);
1703 		}
1704 		mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
1705 		mpp->irq_status = irq_status;
1706 		kthread_queue_work(&mpp->queue->worker, &mpp->work);
1707 		return IRQ_HANDLED;
1708 	}
1709 	return IRQ_NONE;
1710 }
1711 
1712 static inline int rkvdec2_set_core_info(u32 *reg, int idx)
1713 {
1714 	u32 val = (idx << 16) & RKVDEC_REG_FILM_IDX_MASK;
1715 
1716 	reg[RKVDEC_REG_CORE_CTRL_INDEX] &= ~RKVDEC_REG_FILM_IDX_MASK;
1717 
1718 	reg[RKVDEC_REG_CORE_CTRL_INDEX] |= val;
1719 
1720 	return 0;
1721 }
1722 
1723 static int rkvdec2_soft_ccu_enqueue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
1724 {
1725 	u32 i, reg_en, reg;
1726 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1727 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
1728 	u32 timing_en = mpp->srv->timing_en;
1729 
1730 	mpp_debug_enter();
1731 
1732 	/* set reg for link */
1733 	reg = RKVDEC_LINK_BIT_CORE_WORK_MODE | RKVDEC_LINK_BIT_CCU_WORK_MODE;
1734 	writel_relaxed(reg, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
1735 
1736 	/* set reg for ccu */
1737 	writel_relaxed(RKVDEC_CCU_BIT_WORK_EN, dec->ccu->reg_base + RKVDEC_CCU_WORK_BASE);
1738 	writel_relaxed(RKVDEC_CCU_BIT_WORK_MODE, dec->ccu->reg_base + RKVDEC_CCU_WORK_MODE_BASE);
1739 	writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
1740 
1741 	/* set cache size */
1742 	reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
1743 		  RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
1744 	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
1745 		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;
1746 
1747 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
1748 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
1749 	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
1750 	/* clear cache */
1751 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
1752 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
1753 	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
1754 
1755 	mpp_iommu_flush_tlb(mpp->iommu_info);
1756 	/* disable multicore pu/colmv offset req timeout reset */
1757 	task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
1758 	task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
1759 	/* set registers for hardware */
1760 	reg_en = mpp_task->hw_info->reg_en;
1761 	for (i = 0; i < task->w_req_cnt; i++) {
1762 		int s, e;
1763 		struct mpp_request *req = &task->w_reqs[i];
1764 
1765 		s = req->offset / sizeof(u32);
1766 		e = s + req->size / sizeof(u32);
1767 		mpp_write_req(mpp, task->reg, s, e, reg_en);
1768 	}
1769 	/* init current task */
1770 	mpp->cur_task = mpp_task;
1771 
1772 	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
1773 
1774 	mpp->irq_status = 0;
1775 	writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE);
1776 	/* Flush the register writes before starting the device */
1777 	wmb();
1778 	mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);
1779 
1780 	mpp_task_run_end(mpp_task, timing_en);
1781 
1782 	mpp_debug_leave();
1783 
1784 	return 0;
1785 }
1786 
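/*
 * Pick an idle, enabled core for the task, preferring the core that has been
 * assigned the fewest tasks so far, and mark it busy in the queue bitmap.
 */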
1787 static struct mpp_dev *rkvdec2_get_idle_core(struct mpp_taskqueue *queue,
1788 					     struct mpp_task *mpp_task)
1789 {
1790 	u32 i = 0;
1791 	struct rkvdec2_dev *dec = NULL;
1792 
1793 	for (i = 0; i < queue->core_count; i++) {
1794 		struct mpp_dev *mpp = queue->cores[i];
1795 		struct rkvdec2_dev *core = to_rkvdec2_dev(mpp);
1796 
1797 		if (mpp->disable)
1798 			continue;
1799 
1800 		if (test_bit(i, &queue->core_idle)) {
1801 			if (!dec) {
1802 				dec = core;
1803 				continue;
1804 			}
1805 			/* prefer the core with less queued work */
1806 			if (core->task_index < dec->task_index)
1807 				dec = core;
1808 		}
1809 	}
1810 	/* if an idle core was found */
1811 	if (dec) {
1812 		mpp_task->mpp = &dec->mpp;
1813 		mpp_task->core_id = dec->mpp.core_id;
1814 		clear_bit(mpp_task->core_id, &queue->core_idle);
1815 		dec->task_index++;
1816 		atomic_inc(&dec->mpp.task_count);
1817 		mpp_dbg_core("clear core %d idle\n", mpp_task->core_id);
1818 		return mpp_task->mpp;
1819 	}
1820 
1821 	return NULL;
1822 }
1823 
1824 static bool rkvdec2_core_working(struct mpp_taskqueue *queue)
1825 {
1826 	struct mpp_dev *mpp;
1827 	bool flag = false;
1828 	u32 i = 0;
1829 
1830 	for (i = 0; i < queue->core_count; i++) {
1831 		mpp = queue->cores[i];
1832 		if (mpp->disable)
1833 			continue;
1834 		if (!test_bit(i, &queue->core_idle)) {
1835 			flag = true;
1836 			break;
1837 		}
1838 	}
1839 
1840 	return flag;
1841 }
1842 
1843 static int rkvdec2_ccu_link_session_detach(struct mpp_dev *mpp,
1844 					   struct mpp_taskqueue *queue)
1845 {
1846 	mutex_lock(&queue->session_lock);
1847 	while (atomic_read(&queue->detach_count)) {
1848 		struct mpp_session *session = NULL;
1849 
1850 		session = list_first_entry_or_null(&queue->session_detach,
1851 						   struct mpp_session,
1852 						   session_link);
1853 		if (session) {
1854 			list_del_init(&session->session_link);
1855 			atomic_dec(&queue->detach_count);
1856 		}
1857 
1858 		mutex_unlock(&queue->session_lock);
1859 
1860 		if (session) {
1861 			mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
1862 					atomic_read(&queue->detach_count));
1863 			mpp_session_deinit(session);
1864 		}
1865 
1866 		mutex_lock(&queue->session_lock);
1867 	}
1868 	mutex_unlock(&queue->session_lock);
1869 
1870 	return 0;
1871 }
1872 
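/*
 * Soft-link worker: the numbered steps below retire finished tasks, handle
 * reset requests, dispatch pending tasks to idle cores, power off when both
 * lists are empty and finally clean up detached sessions.
 */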
1873 void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
1874 {
1875 	struct mpp_task *mpp_task;
1876 	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
1877 	struct mpp_taskqueue *queue = mpp->queue;
1878 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
1879 	u32 timing_en = mpp->srv->timing_en;
1880 
1881 	mpp_debug_enter();
1882 
1883 	/* 1. process all finished tasks in the running list */
1884 	rkvdec2_soft_ccu_dequeue(queue);
1885 
1886 	/* 2. process reset request */
1887 	if (atomic_read(&queue->reset_request)) {
1888 		if (!rkvdec2_core_working(queue)) {
1889 			rkvdec2_ccu_power_on(queue, dec->ccu);
1890 			rkvdec2_soft_ccu_reset(queue, dec->ccu);
1891 		}
1892 	}
1893 
1894 	/* 3. process pending tasks */
1895 	while (1) {
1896 		if (atomic_read(&queue->reset_request))
1897 			break;
1898 		/* get one task from the pending list */
1899 		mutex_lock(&queue->pending_lock);
1900 		mpp_task = list_first_entry_or_null(&queue->pending_list,
1901 						struct mpp_task, queue_link);
1902 		mutex_unlock(&queue->pending_lock);
1903 		if (!mpp_task)
1904 			break;
1905 
1906 		if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
1907 			mutex_lock(&queue->pending_lock);
1908 			list_del_init(&mpp_task->queue_link);
1909 
1910 			set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
1911 			set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
1912 
1913 			mutex_unlock(&queue->pending_lock);
1914 			wake_up(&mpp_task->wait);
1915 			kref_put(&mpp_task->ref, rkvdec2_link_free_task);
1916 			continue;
1917 		}
1918 		/* find an idle core */
1919 		mpp = rkvdec2_get_idle_core(queue, mpp_task);
1920 		if (!mpp)
1921 			break;
1922 
1923 		if (timing_en) {
1924 			mpp_task->on_run = ktime_get();
1925 			set_bit(TASK_TIMING_RUN, &mpp_task->state);
1926 		}
1927 
1928 		/* set session index */
1929 		rkvdec2_set_core_info(mpp_task->reg, mpp_task->session->index);
1930 		/* set rcb buffer */
1931 		mpp_set_rcbbuf(mpp, mpp_task->session, mpp_task);
1932 
1933 		INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
1934 		rkvdec2_ccu_power_on(queue, dec->ccu);
1935 		rkvdec2_soft_ccu_enqueue(mpp, mpp_task);
1936 		/* pending to running */
1937 		mpp_taskqueue_pending_to_run(queue, mpp_task);
1938 		set_bit(TASK_STATE_RUNNING, &mpp_task->state);
1939 	}
1940 
1941 	/* 4. power off when the running and pending lists are empty */
1942 	if (list_empty(&queue->running_list) &&
1943 	    list_empty(&queue->pending_list))
1944 		rkvdec2_ccu_power_off(queue, dec->ccu);
1945 
1946 	/* 5. clean up sessions detached from the queue */
1947 	rkvdec2_ccu_link_session_detach(mpp, queue);
1948 
1949 	mpp_debug_leave();
1950 }
1951 
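/*
 * Carve the link-table DMA buffer allocated by rkvdec2_link_alloc_table()
 * into task_capacity equal nodes and park them on the CCU unused list;
 * rkvdec2_hard_ccu_prepare() borrows a node per task and the dequeue path
 * returns it when the task retires.
 */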
1952 int rkvdec2_ccu_alloc_table(struct rkvdec2_dev *dec,
1953 			    struct rkvdec_link_dev *link_dec)
1954 {
1955 	int ret, i;
1956 	struct mpp_dma_buffer *table;
1957 	struct mpp_dev *mpp = &dec->mpp;
1958 
1959 	mpp_debug_enter();
1960 
1961 	/* alloc table pointer array */
1962 	table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
1963 				   sizeof(*table), GFP_KERNEL | __GFP_ZERO);
1964 	if (!table)
1965 		return -ENOMEM;
1966 
1967 	/* alloc table buffer */
1968 	ret = rkvdec2_link_alloc_table(mpp, link_dec);
1969 	if (ret)
1970 		return ret;
1971 
1972 	/* init table array */
1973 	dec->ccu->table_array = table;
1974 	for (i = 0; i < mpp->task_capacity; i++) {
1975 		table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
1976 		table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
1977 		table[i].size = link_dec->link_node_size;
1978 		INIT_LIST_HEAD(&table[i].link);
1979 		list_add_tail(&table[i].link, &dec->ccu->unused_list);
1980 	}
1981 
1982 	return 0;
1983 }
1984 
1985 static void rkvdec2_dump_ccu(struct rkvdec2_ccu *ccu)
1986 {
1987 	u32 i;
1988 
1989 	for (i = 0; i < 10; i++)
1990 		mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1991 
1992 	for (i = 16; i < 22; i++)
1993 		mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
1994 }
1995 
1996 static void rkvdec2_dump_link(struct rkvdec2_dev *dec)
1997 {
1998 	u32 i;
1999 
2000 	for (i = 0; i < 10; i++)
2001 		mpp_err("link:reg[%d]=%08x\n", i, readl(dec->link_dec->reg_base + 4 * i));
2002 }
2003 
2004 static void rkvdec2_dump_core(struct mpp_dev *mpp, struct rkvdec2_task *task)
2005 {
2006 	u32 j;
2007 
2008 	if (task) {
2009 		for (j = 0; j < 273; j++)
2010 			mpp_err("reg[%d]=%08x, %08x\n", j, mpp_read(mpp, j*4), task->reg[j]);
2011 	} else {
2012 		for (j = 0; j < 273; j++)
2013 			mpp_err("reg[%d]=%08x\n", j, mpp_read(mpp, j*4));
2014 	}
2015 }
2016 
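/*
 * Hard-link (CCU) IRQ top half: latch the link and core interrupt status,
 * snapshot the CCU core-work-mode register, write the status back to clear
 * the raw interrupt and defer processing to the queue worker.
 */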
2017 irqreturn_t rkvdec2_hard_ccu_irq(int irq, void *param)
2018 {
2019 	u32 irq_status;
2020 	struct mpp_dev *mpp = param;
2021 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2022 
2023 	irq_status = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2024 	dec->ccu->ccu_core_work_mode = readl(dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2025 	if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
2026 		dec->link_dec->irq_status = irq_status;
2027 		mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
2028 		mpp_debug(DEBUG_IRQ_STATUS, "core %d link_irq=%08x, core_irq=%08x\n",
2029 			  mpp->core_id, irq_status, mpp->irq_status);
2030 
2031 		writel(irq_status & 0xfffff0ff,
2032 		       dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2033 
2034 		kthread_queue_work(&mpp->queue->worker, &mpp->work);
2035 		return IRQ_HANDLED;
2036 	}
2037 
2038 	return IRQ_NONE;
2039 }
2040 
2041 static int rkvdec2_hard_ccu_finish(struct rkvdec_link_info *hw, struct rkvdec2_task *task)
2042 {
2043 	u32 i, off, s, n;
2044 	struct rkvdec_link_part *part = hw->part_r;
2045 	u32 *tb_reg = (u32 *)task->table->vaddr;
2046 
2047 	mpp_debug_enter();
2048 
2049 	for (i = 0; i < hw->part_r_num; i++) {
2050 		off = part[i].tb_reg_off;
2051 		s = part[i].reg_start;
2052 		n = part[i].reg_num;
2053 		memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
2054 	}
2055 	/* revert hack for irq status */
2056 	task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;
2057 
2058 	mpp_debug_leave();
2059 
2060 	return 0;
2061 }
2062 
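/*
 * Walk the running list and retire every task that has an interrupt status
 * in its link table or is flagged timed-out/aborted: copy back the read
 * registers, recycle the link-table node and request a reset on errors.
 * Tasks with no status yet are still queued in hardware order, so the scan
 * stops early after a couple of misses.
 */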
2063 static int rkvdec2_hard_ccu_dequeue(struct mpp_taskqueue *queue,
2064 				    struct rkvdec2_ccu *ccu,
2065 				    struct rkvdec_link_info *hw)
2066 {
2067 	struct mpp_task *mpp_task = NULL, *n;
2068 	u32 dump_reg = 0;
2069 	u32 dequeue_none = 0;
2070 
2071 	mpp_debug_enter();
2072 	list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
2073 		u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
2074 		u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
2075 		struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2076 		u32 *tb_reg = (u32 *)task->table->vaddr;
2077 		u32 irq_status = tb_reg[hw->tb_reg_int];
2078 		u32 ccu_decoded_num, ccu_total_dec_num;
2079 
2080 		ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE);
2081 		ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE);
2082 		mpp_debug(DEBUG_IRQ_CHECK,
2083 			  "session %d task %d w:h[%d %d] err %d irq_status %08x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
2084 			  mpp_task->session->index, mpp_task->task_index, task->width,
2085 			  task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK), irq_status,
2086 			  timeout_flag, abort_flag, (u32)task->table->iova,
2087 			  ((u32 *)task->table->vaddr)[hw->tb_reg_next],
2088 			  ccu_decoded_num, ccu_total_dec_num);
2089 
2090 		if (irq_status || timeout_flag || abort_flag) {
2091 			struct rkvdec2_dev *dec = to_rkvdec2_dev(queue->cores[0]);
2092 
2093 			set_bit(TASK_STATE_HANDLE, &mpp_task->state);
2094 			cancel_delayed_work(&mpp_task->timeout_work);
2095 			mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle];
2096 			mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
2097 			task->irq_status = irq_status;
2098 
2099 			if (irq_status)
2100 				rkvdec2_hard_ccu_finish(hw, task);
2101 
2102 			set_bit(TASK_STATE_FINISH, &mpp_task->state);
2103 			set_bit(TASK_STATE_DONE, &mpp_task->state);
2104 
2105 			if (timeout_flag && !dump_reg && mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
2106 				u32 i;
2107 
2108 				mpp_err("###### ccu #####\n");
2109 				rkvdec2_dump_ccu(ccu);
2110 				for (i = 0; i < queue->core_count; i++) {
2111 					mpp_err("###### core %d #####\n", i);
2112 					rkvdec2_dump_link(to_rkvdec2_dev(queue->cores[i]));
2113 					rkvdec2_dump_core(queue->cores[i], task);
2114 				}
2115 				dump_reg = 1;
2116 			}
2117 			list_move_tail(&task->table->link, &ccu->unused_list);
2118 			/* free task */
2119 			list_del_init(&mpp_task->queue_link);
2120 			/* Wake up the GET thread */
2121 			wake_up(&mpp_task->wait);
2122 			if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) {
2123 				pr_err("session %d task %d irq_status %08x timeout=%u abort=%u\n",
2124 					mpp_task->session->index, mpp_task->task_index,
2125 					irq_status, timeout_flag, abort_flag);
2126 				atomic_inc(&queue->reset_request);
2127 			}
2128 
2129 			kref_put(&mpp_task->ref, mpp_free_task);
2130 		} else {
2131 			dequeue_none++;
2132 			/*
2133 			 * There are only two cores, so once more than two unfinished
2134 			 * tasks have been seen here, the remaining tasks cannot have
2135 			 * been started by the hardware yet and we can break early.
2136 			 */
2137 			if (dequeue_none > 2)
2138 				break;
2139 		}
2140 	}
2141 
2142 	mpp_debug_leave();
2143 	return 0;
2144 }
2145 
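/*
 * Reset path for CCU mode: for each enabled core, force it idle through the
 * CCU, soft-reset it, then perform the SIP (or CRU) reset and refresh its
 * IOMMU, and finally reset the CCU block itself.
 */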
2146 static int rkvdec2_hard_ccu_reset(struct mpp_taskqueue *queue, struct rkvdec2_ccu *ccu)
2147 {
2148 	int i = 0;
2149 
2150 	mpp_debug_enter();
2151 
2152 	/* reset and re-activate each core */
2153 	for (i = 0; i < queue->core_count; i++) {
2154 		u32 val = 0;
2155 		struct mpp_dev *mpp = queue->cores[i];
2156 		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2157 
2158 		if (mpp->disable)
2159 			continue;
2160 		dev_info(mpp->dev, "resetting...\n");
2161 		disable_hardirq(mpp->irq);
2162 		/* force idle */
2163 		writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
2164 		writel(0, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2165 
2166 		{
2167 			/* soft reset */
2168 			u32 val;
2169 
2170 			mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
2171 			udelay(5);
2172 			val = mpp_read(mpp, RKVDEC_REG_INT_EN);
2173 			if (!(val & RKVDEC_SOFT_RESET_READY))
2174 				mpp_err("soft reset fail, int %08x\n", val);
2175 
2176 			// /* cru reset */
2177 			// dev_info(mpp->dev, "cru reset\n");
2178 			// rkvdec2_reset(mpp);
2179 		}
2180 #if IS_ENABLED(CONFIG_ROCKCHIP_SIP)
2181 		rockchip_dmcfreq_lock();
2182 		sip_smc_vpu_reset(i, 0, 0);
2183 		rockchip_dmcfreq_unlock();
2184 #else
2185 		rkvdec2_reset(mpp);
2186 #endif
2187 		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
2188 		enable_irq(mpp->irq);
2189 		atomic_set(&mpp->reset_request, 0);
2190 		val = mpp_read_relaxed(mpp, 272*4);
2191 		dev_info(mpp->dev, "reset done, idle %d\n", (val & 1));
2192 	}
2193 	/* reset ccu */
2194 	mpp_safe_reset(ccu->rst_a);
2195 	udelay(5);
2196 	mpp_safe_unreset(ccu->rst_a);
2197 
2198 	mpp_debug_leave();
2199 	return 0;
2200 }
2201 
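/*
 * Prepare a task for hard-link submission: take a free link-table node and
 * point its "next" field at a second free node so the hardware always sees a
 * valid next descriptor, copy the write parts of the task registers into the
 * node and clear the read-back parts.
 */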
2202 static struct mpp_task *
2203 rkvdec2_hard_ccu_prepare(struct mpp_task *mpp_task,
2204 			 struct rkvdec2_ccu *ccu, struct rkvdec_link_info *hw)
2205 {
2206 	u32 i, off, s, n;
2207 	u32 *tb_reg;
2208 	struct mpp_dma_buffer *table = NULL;
2209 	struct rkvdec_link_part *part;
2210 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2211 
2212 	mpp_debug_enter();
2213 
2214 	if (test_bit(TASK_STATE_PREPARE, &mpp_task->state))
2215 		return mpp_task;
2216 
2217 	/* ensure that the current table's iova points to the next link table */
2218 	{
2219 		struct mpp_dma_buffer *table0 = NULL, *table1 = NULL, *n;
2220 
2221 		list_for_each_entry_safe(table, n, &ccu->unused_list, link) {
2222 			if (!table0) {
2223 				table0 = table;
2224 				continue;
2225 			}
2226 			if (!table1)
2227 				table1 = table;
2228 			break;
2229 		}
2230 		if (!table0 || !table1)
2231 			return NULL;
2232 		((u32 *)table0->vaddr)[hw->tb_reg_next] = table1->iova;
2233 		table = table0;
2234 	}
2235 
2236 	/* set session idx */
2237 	rkvdec2_set_core_info(task->reg, mpp_task->session->index);
2238 	tb_reg = (u32 *)table->vaddr;
2239 	part = hw->part_w;
2240 
2241 	/* disable multicore pu/colmv offset req timeout reset */
2242 	task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
2243 	task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
2244 
2245 	for (i = 0; i < hw->part_w_num; i++) {
2246 		off = part[i].tb_reg_off;
2247 		s = part[i].reg_start;
2248 		n = part[i].reg_num;
2249 		memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
2250 	}
2251 
2252 	/* memset read registers */
2253 	part = hw->part_r;
2254 	for (i = 0; i < hw->part_r_num; i++) {
2255 		off = part[i].tb_reg_off;
2256 		n = part[i].reg_num;
2257 		memset(&tb_reg[off], 0, n * sizeof(u32));
2258 	}
2259 	list_move_tail(&table->link, &ccu->used_list);
2260 	task->table = table;
2261 	set_bit(TASK_STATE_PREPARE, &mpp_task->state);
2262 	mpp_dbg_ccu("session %d task %d iova %08x next %08x\n",
2263 		    mpp_task->session->index, mpp_task->task_index, (u32)task->table->iova,
2264 		    ((u32 *)task->table->vaddr)[hw->tb_reg_next]);
2265 
2266 	mpp_debug_leave();
2267 
2268 	return mpp_task;
2269 }
2270 
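/*
 * Program the RCB base addresses described by rcb_infos (register index /
 * size pairs carved out of rcb_iova) into the hardware once; a flag bit in
 * the link IRQ register records that they have already been fixed.
 */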
2271 static int rkvdec2_ccu_link_fix_rcb_regs(struct rkvdec2_dev *dec)
2272 {
2273 	int ret = 0;
2274 	u32 i, val;
2275 	u32 reg, reg_idx, rcb_size, rcb_offset;
2276 
2277 	if (!dec->rcb_iova && !dec->rcb_info_count)
2278 		goto done;
2279 	/* check whether the rcb registers are already fixed */
2280 	val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2281 	if (val & RKVDEC_CCU_BIT_FIX_RCB)
2282 		goto done;
2283 	/* set registers */
2284 	rcb_offset = 0;
2285 	for (i = 0; i < dec->rcb_info_count; i += 2) {
2286 		reg_idx = dec->rcb_infos[i];
2287 		rcb_size = dec->rcb_infos[i + 1];
2288 		mpp_debug(DEBUG_SRAM_INFO,
2289 			  "rcb: reg %u size %u offset %u sram_size %u rcb_size %u\n",
2290 			  reg_idx, rcb_size, rcb_offset, dec->sram_size, dec->rcb_size);
2291 		if ((rcb_offset + rcb_size) > dec->rcb_size) {
2292 			mpp_err("rcb: reg[%u] set failed.\n", reg_idx);
2293 			ret = -ENOMEM;
2294 			goto done;
2295 		}
2296 		reg = dec->rcb_iova + rcb_offset;
2297 		mpp_write(&dec->mpp, reg_idx * sizeof(u32), reg);
2298 		rcb_offset += rcb_size;
2299 	}
2300 
2301 	val |= RKVDEC_CCU_BIT_FIX_RCB;
2302 	writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2303 done:
2304 	return ret;
2305 }
2306 
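/*
 * Queue a prepared task to the CCU.  The first task after power-on enables
 * CCU work mode on every core, fixes the RCB registers and programs the link
 * start address; later tasks only append one more config in ADD mode.  The
 * final CFG_DONE write is what actually starts (or continues) decoding.
 */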
2307 static int rkvdec2_hard_ccu_enqueue(struct rkvdec2_ccu *ccu,
2308 				    struct mpp_task *mpp_task,
2309 				    struct mpp_taskqueue *queue,
2310 				    struct mpp_dev *mpp)
2311 {
2312 	u32 ccu_en, work_mode, link_mode;
2313 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2314 	u32 timing_en = mpp->srv->timing_en;
2315 
2316 	mpp_debug_enter();
2317 
2318 	if (test_bit(TASK_STATE_START, &mpp_task->state))
2319 		goto done;
2320 
2321 	ccu_en = readl(ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2322 	mpp_dbg_ccu("ccu_en=%d\n", ccu_en);
2323 	if (!ccu_en) {
2324 		u32 i;
2325 
2326 		/* set work mode */
2327 		work_mode = 0;
2328 		for (i = 0; i < queue->core_count; i++) {
2329 			u32 val;
2330 			struct mpp_dev *core = queue->cores[i];
2331 			struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
2332 
2333 			if (core->disable)
2334 				continue;
2335 			work_mode |= dec->core_mask;
2336 			rkvdec2_ccu_link_fix_rcb_regs(dec);
2337 			/* control by ccu */
2338 			val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2339 			val |= RKVDEC_LINK_BIT_CCU_WORK_MODE;
2340 			writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
2341 		}
2342 		writel(work_mode, ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2343 		ccu->ccu_core_work_mode = readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
2344 		mpp_dbg_ccu("ccu_work_mode=%08x, ccu_work_status=%08x\n",
2345 			    readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE),
2346 			    readl(ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE));
2347 
2348 		/* set auto gating */
2349 		writel(RKVDEC_CCU_BIT_AUTOGATE, ccu->reg_base + RKVDEC_CCU_CTRL_BASE);
2350 		/* link start base */
2351 		writel(task->table->iova, ccu->reg_base + RKVDEC_CCU_CFG_ADDR_BASE);
2352 		/* enable link */
2353 		writel(RKVDEC_CCU_BIT_WORK_EN, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
2354 	}
2355 
2356 	/* set link mode */
2357 	link_mode = ccu_en ? RKVDEC_CCU_BIT_ADD_MODE : 0;
2358 	writel(link_mode | RKVDEC_LINK_ADD_CFG_NUM, ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE);
2359 
2360 	/* flush tlb before starting hardware */
2361 	mpp_iommu_flush_tlb(mpp->iommu_info);
2362 	/* wmb */
2363 	wmb();
2364 	INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
2365 	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
2366 	/* configure done */
2367 	writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE);
2368 	mpp_task_run_end(mpp_task, timing_en);
2369 
2370 	/* pending to running */
2371 	set_bit(TASK_STATE_RUNNING, &mpp_task->state);
2372 	mpp_taskqueue_pending_to_run(queue, mpp_task);
2373 	mpp_dbg_ccu("session %d task %d iova=%08x task->state=%lx link_mode=%08x\n",
2374 		    mpp_task->session->index, mpp_task->task_index,
2375 		    (u32)task->table->iova, mpp_task->state,
2376 		    readl(ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE));
2377 done:
2378 	mpp_debug_leave();
2379 
2380 	return 0;
2381 }
2382 
2383 static void rkvdec2_hard_ccu_handle_pagefault_task(struct rkvdec2_dev *dec,
2384 						   struct mpp_task *mpp_task)
2385 {
2386 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2387 
2388 	mpp_dbg_ccu("session %d task %d w:h[%d %d] pagefault mmu0[%08x %08x] mmu1[%08x %08x] fault_iova %08x\n",
2389 		    mpp_task->session->index, mpp_task->task_index,
2390 		    task->width, task->height, dec->mmu0_st, dec->mmu0_pta,
2391 		    dec->mmu1_st, dec->mmu1_pta, dec->fault_iova);
2392 
2393 	set_bit(TASK_STATE_HANDLE, &mpp_task->state);
2394 	task->irq_status |= BIT(4);
2395 	cancel_delayed_work(&mpp_task->timeout_work);
2396 	rkvdec2_hard_ccu_finish(dec->link_dec->info, task);
2397 	set_bit(TASK_STATE_FINISH, &mpp_task->state);
2398 	set_bit(TASK_STATE_DONE, &mpp_task->state);
2399 	list_move_tail(&task->table->link, &dec->ccu->unused_list);
2400 	list_del_init(&mpp_task->queue_link);
2401 	/* Wake up the GET thread */
2402 	wake_up(&mpp_task->wait);
2403 	kref_put(&mpp_task->ref, mpp_free_task);
2404 	dec->mmu_fault = 0;
2405 	dec->fault_iova = 0;
2406 }
2407 
2408 static void rkvdec2_hard_ccu_pagefault_proc(struct mpp_taskqueue *queue)
2409 {
2410 	struct mpp_task *loop = NULL, *n;
2411 
2412 	list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2413 		struct rkvdec2_task *task = to_rkvdec2_task(loop);
2414 		u32 iova = (u32)task->table->iova;
2415 		u32 i;
2416 
2417 		for (i = 0; i < queue->core_count; i++) {
2418 			struct mpp_dev *core = queue->cores[i];
2419 			struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
2420 
2421 			if (!dec->mmu_fault || dec->fault_iova != iova)
2422 				continue;
2423 			rkvdec2_hard_ccu_handle_pagefault_task(dec, loop);
2424 		}
2425 	}
2426 }
2427 
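/*
 * After a reset, re-chain the link tables of the tasks that never produced
 * an interrupt status, terminate the chain with a free node and resubmit
 * those tasks to the hardware in their original order.
 */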
2428 static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue)
2429 {
2430 	struct rkvdec2_task *task_pre = NULL;
2431 	struct mpp_task *loop = NULL, *n;
2432 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2433 
2434 	/* re-sort the running list */
2435 	list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2436 		struct rkvdec2_task *task = to_rkvdec2_task(loop);
2437 		u32 *tb_reg = (u32 *)task->table->vaddr;
2438 		u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2439 
2440 		if (!irq_status) {
2441 			if (task_pre) {
2442 				tb_reg = (u32 *)task_pre->table->vaddr;
2443 				tb_reg[dec->link_dec->info->tb_reg_next] = task->table->iova;
2444 			}
2445 			task_pre = task;
2446 		}
2447 	}
2448 
2449 	if (task_pre) {
2450 		struct mpp_dma_buffer *tbl;
2451 		u32 *tb_reg;
2452 
2453 		tbl = list_first_entry_or_null(&dec->ccu->unused_list,
2454 				struct mpp_dma_buffer, link);
2455 		WARN_ON(!tbl);
2456 		if (tbl) {
2457 			tb_reg = (u32 *)task_pre->table->vaddr;
2458 			tb_reg[dec->link_dec->info->tb_reg_next] = tbl->iova;
2459 		}
2460 	}
2461 
2462 	/* resend */
2463 	list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2464 		struct rkvdec2_task *task = to_rkvdec2_task(loop);
2465 		u32 *tb_reg = (u32 *)task->table->vaddr;
2466 		u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];
2467 
2468 		mpp_dbg_ccu("reback: session %d task %d iova %08x next %08x irq_status 0x%08x\n",
2469 				loop->session->index, loop->task_index, (u32)task->table->iova,
2470 				tb_reg[dec->link_dec->info->tb_reg_next], irq_status);
2471 
2472 		if (!irq_status) {
2473 			cancel_delayed_work(&loop->timeout_work);
2474 			clear_bit(TASK_STATE_START, &loop->state);
2475 			rkvdec2_hard_ccu_enqueue(dec->ccu, loop, queue, mpp);
2476 		}
2477 	}
2478 }
2479 
2480 void rkvdec2_hard_ccu_worker(struct kthread_work *work_s)
2481 {
2482 	struct mpp_task *mpp_task;
2483 	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
2484 	struct mpp_taskqueue *queue = mpp->queue;
2485 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
2486 
2487 	mpp_debug_enter();
2488 
2489 	/* 1. process all finished tasks in the running list */
2490 	rkvdec2_hard_ccu_dequeue(queue, dec->ccu, dec->link_dec->info);
2491 
2492 	/* 2. process reset request */
2493 	if (atomic_read(&queue->reset_request) &&
2494 	    (list_empty(&queue->running_list) || !dec->ccu->ccu_core_work_mode)) {
2495 		/*
2496 		 * cancel the timeout work of tasks on the running list so a
2497 		 * long reset does not trigger a spurious software timeout
2498 		 */
2499 		struct mpp_task *loop = NULL, *n;
2500 
2501 		list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2502 			cancel_delayed_work(&loop->timeout_work);
2503 		}
2504 		/* reset process */
2505 		rkvdec2_hard_ccu_reset(queue, dec->ccu);
2506 		atomic_set(&queue->reset_request, 0);
2507 		/* if an iommu page fault occurred, find the faulting task and drop it */
2508 		if (queue->iommu_fault) {
2509 			rkvdec2_hard_ccu_pagefault_proc(queue);
2510 			queue->iommu_fault = 0;
2511 		}
2512 
2513 		/* re-link the iovas of tasks still on the running list and resend them to hw */
2514 		if (!list_empty(&queue->running_list))
2515 			rkvdec2_hard_ccu_resend_tasks(mpp, queue);
2516 	}
2517 
2518 	/* 3. process pending tasks */
2519 	while (1) {
2520 		if (atomic_read(&queue->reset_request))
2521 			break;
2522 
2523 		/* get one task from the pending list */
2524 		mutex_lock(&queue->pending_lock);
2525 		mpp_task = list_first_entry_or_null(&queue->pending_list,
2526 						struct mpp_task, queue_link);
2527 		mutex_unlock(&queue->pending_lock);
2528 
2529 		if (!mpp_task)
2530 			break;
2531 		if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
2532 			mutex_lock(&queue->pending_lock);
2533 			list_del_init(&mpp_task->queue_link);
2534 			mutex_unlock(&queue->pending_lock);
2535 			kref_put(&mpp_task->ref, mpp_free_task);
2536 			continue;
2537 		}
2538 
2539 		mpp_task = rkvdec2_hard_ccu_prepare(mpp_task, dec->ccu, dec->link_dec->info);
2540 		if (!mpp_task)
2541 			break;
2542 
2543 		rkvdec2_ccu_power_on(queue, dec->ccu);
2544 		rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp);
2545 	}
2546 
2547 	/* 4. power off when the running and pending lists are empty */
2548 	mutex_lock(&queue->pending_lock);
2549 	if (list_empty(&queue->running_list) &&
2550 	    list_empty(&queue->pending_list))
2551 		rkvdec2_ccu_power_off(queue, dec->ccu);
2552 	mutex_unlock(&queue->pending_lock);
2553 
2554 	/* 5. clean up sessions detached from the queue */
2555 	mpp_session_cleanup_detach(queue, work_s);
2556 
2557 	mpp_debug_leave();
2558 }
2559