// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2021 Rockchip Electronics Co., Ltd
 *
 * author:
 *	Herman Chen <herman.chen@rock-chips.com>
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rockchip_dmc.h>
#include <soc/rockchip/rockchip_iommu.h>

#include "mpp_rkvdec2_link.h"

#include "hack/mpp_rkvdec2_link_hack_rk3568.c"

#define WORK_TIMEOUT_MS			(500)
#define WAIT_TIMEOUT_MS			(2000)
#define RKVDEC2_LINK_HACK_TASK_FLAG	(0xff)

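/*
 * Layout notes (inferred from how these fields are consumed in this file,
 * not from a datasheet): each variant below describes one link table node.
 * A node is an array of tb_reg_num 32-bit words; tb_reg_next holds the iova
 * of the next node and tb_reg_r the iova of this node's read-back area.
 * part_w[] maps task registers into the node for the hardware to fetch,
 * part_r[] maps the words the hardware writes back (interrupt status at
 * tb_reg_int, hardware cycle counter at tb_reg_cycle). hack_setup marks the
 * variants that need the rk356x H264 workaround task.
 */
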
/* vdpu381 link hw info for rk3588 */
struct rkvdec_link_info rkvdec_link_v2_hw_info = {
	.tb_reg_num = 218,
	.tb_reg_next = 0,
	.tb_reg_r = 1,
	.tb_reg_second_en = 8,

	.part_w_num = 6,
	.part_r_num = 2,
	.part_w[0] = {
		.tb_reg_off = 4,
		.reg_start = 8,
		.reg_num = 28,
	},
	.part_w[1] = {
		.tb_reg_off = 32,
		.reg_start = 64,
		.reg_num = 52,
	},
	.part_w[2] = {
		.tb_reg_off = 84,
		.reg_start = 128,
		.reg_num = 16,
	},
	.part_w[3] = {
		.tb_reg_off = 100,
		.reg_start = 160,
		.reg_num = 48,
	},
	.part_w[4] = {
		.tb_reg_off = 148,
		.reg_start = 224,
		.reg_num = 16,
	},
	.part_w[5] = {
		.tb_reg_off = 164,
		.reg_start = 256,
		.reg_num = 16,
	},
	.part_r[0] = {
		.tb_reg_off = 180,
		.reg_start = 224,
		.reg_num = 10,
	},
	.part_r[1] = {
		.tb_reg_off = 190,
		.reg_start = 258,
		.reg_num = 28,
	},
	.tb_reg_int = 180,
	.tb_reg_cycle = 195,
	.hack_setup = 0,
	.reg_status = {
		.dec_num_mask = 0x3fffffff,
		.err_flag_base = 0x010,
		.err_flag_bit = BIT(31),
	},
};

/* vdpu34x link hw info for rk356x */
struct rkvdec_link_info rkvdec_link_rk356x_hw_info = {
	.tb_reg_num = 202,
	.tb_reg_next = 0,
	.tb_reg_r = 1,
	.tb_reg_second_en = 8,

	.part_w_num = 6,
	.part_r_num = 2,
	.part_w[0] = {
		.tb_reg_off = 4,
		.reg_start = 8,
		.reg_num = 20,
	},
	.part_w[1] = {
		.tb_reg_off = 24,
		.reg_start = 64,
		.reg_num = 52,
	},
	.part_w[2] = {
		.tb_reg_off = 76,
		.reg_start = 128,
		.reg_num = 16,
	},
	.part_w[3] = {
		.tb_reg_off = 92,
		.reg_start = 160,
		.reg_num = 40,
	},
	.part_w[4] = {
		.tb_reg_off = 132,
		.reg_start = 224,
		.reg_num = 16,
	},
	.part_w[5] = {
		.tb_reg_off = 148,
		.reg_start = 256,
		.reg_num = 16,
	},
	.part_r[0] = {
		.tb_reg_off = 164,
		.reg_start = 224,
		.reg_num = 10,
	},
	.part_r[1] = {
		.tb_reg_off = 174,
		.reg_start = 258,
		.reg_num = 28,
	},
	.tb_reg_int = 164,
	.tb_reg_cycle = 179,
	.hack_setup = 1,
	.reg_status = {
		.dec_num_mask = 0x3fffffff,
		.err_flag_base = 0x010,
		.err_flag_bit = BIT(31),
	},
};

/* vdpu382 link hw info */
struct rkvdec_link_info rkvdec_link_vdpu382_hw_info = {
	.tb_reg_num = 222,
	.tb_reg_next = 0,
	.tb_reg_r = 1,
	.tb_reg_second_en = 8,

	.part_w_num = 6,
	.part_r_num = 2,
	.part_w[0] = {
		.tb_reg_off = 4,
		.reg_start = 8,
		.reg_num = 28,
	},
	.part_w[1] = {
		.tb_reg_off = 32,
		.reg_start = 64,
		.reg_num = 52,
	},
	.part_w[2] = {
		.tb_reg_off = 84,
		.reg_start = 128,
		.reg_num = 16,
	},
	.part_w[3] = {
		.tb_reg_off = 100,
		.reg_start = 160,
		.reg_num = 48,
	},
	.part_w[4] = {
		.tb_reg_off = 148,
		.reg_start = 224,
		.reg_num = 16,
	},
	.part_w[5] = {
		.tb_reg_off = 164,
		.reg_start = 256,
		.reg_num = 16,
	},
	.part_r[0] = {
		.tb_reg_off = 180,
		.reg_start = 224,
		.reg_num = 12,
	},
	.part_r[1] = {
		.tb_reg_off = 192,
		.reg_start = 258,
		.reg_num = 30,
	},
	.tb_reg_int = 180,
	.hack_setup = 0,
	.tb_reg_cycle = 197,
	.reg_status = {
		.dec_num_mask = 0x000fffff,
		.err_flag_base = 0x024,
		.err_flag_bit = BIT(8),
	},
};

static void rkvdec2_link_free_task(struct kref *ref);
static void rkvdec2_link_timeout_proc(struct work_struct *work_s);
static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
					   struct device *iommu_dev,
					   unsigned long iova,
					   int status, void *arg);

static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
{
	void __iomem *reg_base = dev->reg_base;
	u32 error_ff0, error_ff1;
	u32 enable_ff0, enable_ff1;
	u32 loop_count = 10;
	u32 val;
	struct rkvdec_link_info *link_info = dev->info;
	u32 dec_num_mask = link_info->reg_status.dec_num_mask;
	u32 err_flag_base = link_info->reg_status.err_flag_base;
	u32 err_flag_bit = link_info->reg_status.err_flag_bit;

	error_ff1 = (readl(reg_base + err_flag_base) & err_flag_bit) ? 1 : 0;
	enable_ff1 = readl(reg_base + RKVDEC_LINK_EN_BASE);

	dev->irq_status = readl(reg_base + RKVDEC_LINK_IRQ_BASE);
	dev->iova_curr = readl(reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
	dev->link_mode = readl(reg_base + RKVDEC_LINK_MODE_BASE);
	dev->total = readl(reg_base + RKVDEC_LINK_TOTAL_NUM_BASE);
	dev->iova_next = readl(reg_base + RKVDEC_LINK_NEXT_ADDR_BASE);

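	/*
	 * The hardware keeps counting while we sample it, so the error and
	 * enable flags are re-read until two consecutive samples agree
	 * (bounded by loop_count) before being trusted as a snapshot.
	 */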
	do {
		val = readl(reg_base + RKVDEC_LINK_DEC_NUM_BASE);
		error_ff0 = (readl(reg_base + err_flag_base) & err_flag_bit) ? 1 : 0;
		enable_ff0 = readl(reg_base + RKVDEC_LINK_EN_BASE);

		if (error_ff0 == error_ff1 && enable_ff0 == enable_ff1)
			break;

		error_ff1 = error_ff0;
		enable_ff1 = enable_ff0;
	} while (--loop_count);

	dev->error = error_ff0;
	dev->decoded_status = val;
	dev->decoded = val & dec_num_mask;
	dev->enabled = enable_ff0;

	if (!loop_count)
		dev_info(dev->dev, "link status still unstable after 10 reads\n");
}

static void rkvdec_link_node_dump(const char *func, struct rkvdec_link_dev *dev)
{
	u32 *table_base = (u32 *)dev->table->vaddr;
	u32 reg_count = dev->link_reg_count;
	u32 iova = (u32)dev->table->iova;
	u32 *reg = NULL;
	u32 i, j;

	for (i = 0; i < dev->task_capacity; i++) {
		reg = table_base + i * reg_count;

		mpp_err("slot %d link config iova %08x:\n", i,
			iova + i * dev->link_node_size);

		for (j = 0; j < reg_count; j++) {
			mpp_err("reg%03d 0x%08x\n", j, reg[j]);
			udelay(100);
		}
	}
}

static void rkvdec_core_reg_dump(const char *func, struct rkvdec_link_dev *dev)
{
	struct mpp_dev *mpp = dev->mpp;
	u32 s = mpp->var->hw_info->reg_start;
	u32 e = mpp->var->hw_info->reg_end;
	u32 i;

	mpp_err("--- dump hardware register ---\n");

	for (i = s; i <= e; i++) {
		u32 reg = i * sizeof(u32);

		mpp_err("reg[%03d]: %04x: 0x%08x\n",
			i, reg, readl_relaxed(mpp->reg_base + reg));
		udelay(100);
	}
}

static void rkvdec_link_reg_dump(const char *func, struct rkvdec_link_dev *dev)
{
	mpp_err("dump link config status from %s\n", func);
	mpp_err("reg 0 %08x - irq status\n", dev->irq_status);
	mpp_err("reg 1 %08x - cfg addr\n", dev->iova_curr);
	mpp_err("reg 2 %08x - link mode\n", dev->link_mode);
	mpp_err("reg 4 %08x - decoded num\n", dev->decoded_status);
	mpp_err("reg 5 %08x - total num\n", dev->total);
	mpp_err("reg 6 %08x - link mode en\n", dev->enabled);
	mpp_err("reg 7 %08x - next ltb addr\n", dev->iova_next);
}

static void rkvdec_link_counter(const char *func, struct rkvdec_link_dev *dev)
{
	mpp_err("dump link counter from %s\n", func);

	mpp_err("task pending %d running %d\n",
		atomic_read(&dev->task_pending), dev->task_running);
}

int rkvdec_link_dump(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *dev = dec->link_dec;

	rkvdec_link_status_update(dev);
	rkvdec_link_reg_dump(__func__, dev);
	rkvdec_link_counter(__func__, dev);
	rkvdec_core_reg_dump(__func__, dev);
	rkvdec_link_node_dump(__func__, dev);

	return 0;
}

static void rkvdec2_clear_cache(struct mpp_dev *mpp)
{
	/* set cache size */
	u32 reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
		  RKVDEC_CACHE_PERMIT_READ_ALLOCATE;

	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;

	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);

	/* clear cache */
	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);
}

static int rkvdec2_link_enqueue(struct rkvdec_link_dev *link_dec,
				struct mpp_task *mpp_task)
{
	void __iomem *reg_base = link_dec->reg_base;
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
	struct mpp_dma_buffer *table = task->table;
	u32 link_en = 0;
	u32 frame_num = 1;
	u32 link_mode;
	u32 timing_en = link_dec->mpp->srv->timing_en;

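	/*
	 * Two start paths (inferred from the register writes below): when
	 * the link core is idle (link_en clear), the counters and the
	 * config address are programmed from scratch before enabling, while
	 * on a running core the new node is appended by setting
	 * RKVDEC_LINK_BIT_ADD_MODE so frame_num is added to the current
	 * total instead of restarting it.
	 */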
	link_en = readl(reg_base + RKVDEC_LINK_EN_BASE);
	if (!link_en) {
		rkvdec2_clear_cache(link_dec->mpp);
		/* cleanup counter in hardware */
		writel(0, reg_base + RKVDEC_LINK_MODE_BASE);
		/* start config before all registers are set */
		wmb();
		writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
		/* write zero count config */
		wmb();
		/* clear counter and enable link mode hardware */
		writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
		writel_relaxed(table->iova, reg_base + RKVDEC_LINK_CFG_ADDR_BASE);
		link_mode = frame_num;
	} else {
		link_mode = (frame_num | RKVDEC_LINK_BIT_ADD_MODE);
	}

	/* set link mode */
	writel_relaxed(link_mode, reg_base + RKVDEC_LINK_MODE_BASE);

	/* start config before all registers are set */
	wmb();

	mpp_iommu_flush_tlb(link_dec->mpp->iommu_info);
	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

	link_dec->task_running++;
	/* configure done */
	writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
	if (!link_en) {
		/* start hardware before all registers are set */
		wmb();
		/* clear counter and enable link mode hardware */
		writel(RKVDEC_LINK_BIT_EN, reg_base + RKVDEC_LINK_EN_BASE);
	}
	mpp_task_run_end(mpp_task, timing_en);

	return 0;
}

static int rkvdec2_link_finish(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
	struct rkvdec_link_dev *link_dec = dec->link_dec;
	struct mpp_dma_buffer *table = task->table;
	struct rkvdec_link_info *info = link_dec->info;
	struct rkvdec_link_part *part = info->part_r;
	u32 *tb_reg = (u32 *)table->vaddr;
	u32 off, s, n;
	u32 i;

	mpp_debug_enter();

	for (i = 0; i < info->part_r_num; i++) {
		off = part[i].tb_reg_off;
		s = part[i].reg_start;
		n = part[i].reg_num;
		memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
	}
	/* revert hack for irq status */
	task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;

	mpp_debug_leave();

	return 0;
}

static void *rkvdec2_link_prepare(struct mpp_dev *mpp,
				  struct mpp_task *mpp_task)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *link_dec = dec->link_dec;
	struct mpp_dma_buffer *table = NULL;
	struct rkvdec_link_part *part;
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
	struct rkvdec_link_info *info = link_dec->info;
	u32 i, off, s, n;
	u32 *tb_reg;

	mpp_debug_enter();

	if (test_bit(TASK_STATE_PREPARE, &mpp_task->state)) {
		dev_err(mpp->dev, "task %d is already prepared\n", mpp_task->task_index);
		return mpp_task;
	}

	table = list_first_entry_or_null(&link_dec->unused_list, struct mpp_dma_buffer, link);

	if (!table)
		return NULL;

	/* fill regs value */
	tb_reg = (u32 *)table->vaddr;
	part = info->part_w;
	for (i = 0; i < info->part_w_num; i++) {
		off = part[i].tb_reg_off;
		s = part[i].reg_start;
		n = part[i].reg_num;
		memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
	}

	/* setup error mode flag */
	tb_reg[9] |= BIT(18) | BIT(9);
	tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;

	/* memset read registers */
	part = info->part_r;
	for (i = 0; i < info->part_r_num; i++) {
		off = part[i].tb_reg_off;
		n = part[i].reg_num;
		memset(&tb_reg[off], 0, n * sizeof(u32));
	}

	list_move_tail(&table->link, &link_dec->used_list);
	task->table = table;
	set_bit(TASK_STATE_PREPARE, &mpp_task->state);

	mpp_dbg_link("session %d task %d prepare pending %d running %d\n",
		     mpp_task->session->index, mpp_task->task_index,
		     atomic_read(&link_dec->task_pending), link_dec->task_running);
	mpp_debug_leave();

	return mpp_task;
}

static int rkvdec2_link_reset(struct mpp_dev *mpp)
{
	dev_info(mpp->dev, "resetting...\n");

	disable_irq(mpp->irq);
	mpp_iommu_disable_irq(mpp->iommu_info);

	/* FIXME lock resource lock of the other devices in combo */
	mpp_iommu_down_write(mpp->iommu_info);
	mpp_reset_down_write(mpp->reset_group);
	atomic_set(&mpp->reset_request, 0);

	rockchip_save_qos(mpp->dev);

	if (mpp->hw_ops->reset)
		mpp->hw_ops->reset(mpp);

	rockchip_restore_qos(mpp->dev);

	/* Note: if the domain does not change, iommu attach returns as an
	 * empty operation. Therefore force a detach and then a re-attach so
	 * the domain is really updated and can actually be attached again.
	 */
	mpp_iommu_refresh(mpp->iommu_info, mpp->dev);

	mpp_reset_up_write(mpp->reset_group);
	mpp_iommu_up_write(mpp->iommu_info);

	enable_irq(mpp->irq);
	mpp_iommu_enable_irq(mpp->iommu_info);
	dev_info(mpp->dev, "reset done\n");

	return 0;
}

static int rkvdec2_link_irq(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *link_dec = dec->link_dec;
	u32 irq_status = 0;

	if (!atomic_read(&link_dec->power_enabled)) {
		dev_info(link_dec->dev, "irq on power off\n");
		return -1;
	}

	irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);

	if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
		u32 enabled = readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE);

		if (!enabled) {
			u32 bus = mpp_read_relaxed(mpp, 273 * 4);

			if (bus & 0x7ffff)
				dev_info(link_dec->dev,
					 "invalid bus status %08x\n", bus);
		}

		link_dec->irq_status = irq_status;
		mpp->irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);

		writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
	}

	mpp_debug(DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE, "irq_status: %08x : %08x\n",
		  irq_status, mpp->irq_status);

	return 0;
}

int rkvdec2_link_remove(struct mpp_dev *mpp, struct rkvdec_link_dev *link_dec)
{
	mpp_debug_enter();

	if (link_dec && link_dec->table) {
		mpp_dma_free(link_dec->table);
		link_dec->table = NULL;
	}

	mpp_debug_leave();

	return 0;
}

static int rkvdec2_link_alloc_table(struct mpp_dev *mpp,
				    struct rkvdec_link_dev *link_dec)
{
	int ret;
	struct mpp_dma_buffer *table;
	struct rkvdec_link_info *info = link_dec->info;
	/* NOTE: the link table address requires 64-byte alignment */
	u32 task_capacity = link_dec->task_capacity;
	u32 link_node_size = ALIGN(info->tb_reg_num * sizeof(u32), 256);
	u32 link_info_size = task_capacity * link_node_size;
	u32 *v_curr;
	u32 io_curr, io_next, io_start;
	u32 offset_r = info->part_r[0].tb_reg_off * sizeof(u32);
	u32 i;

	table = mpp_dma_alloc(mpp->dev, link_info_size);
	if (!table) {
		ret = -ENOMEM;
		goto err_free_node;
	}

	link_dec->link_node_size = link_node_size;
	link_dec->link_reg_count = link_node_size >> 2;
	io_start = table->iova;
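
	/*
	 * Chain the descriptor nodes into a ring (this is what the loop
	 * below writes): each node's next pointer holds the iova of the
	 * following node and the last node wraps back to the first, so the
	 * hardware can keep walking next pointers across wrap-around:
	 *
	 *   node[0] -> node[1] -> ... -> node[capacity - 1] -+
	 *      ^                                             |
	 *      +---------------------------------------------+
	 */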
	for (i = 0; i < task_capacity; i++) {
		v_curr = (u32 *)(table->vaddr + i * link_node_size);
		io_curr = io_start + i * link_node_size;
		io_next = (i == task_capacity - 1) ?
			  io_start : io_start + (i + 1) * link_node_size;

		v_curr[info->tb_reg_next] = io_next;
		v_curr[info->tb_reg_r] = io_curr + offset_r;
	}

	link_dec->table = table;

	return 0;
err_free_node:
	rkvdec2_link_remove(mpp, link_dec);
	return ret;
}


#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
int rkvdec2_link_procfs_init(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *link_dec = dec->link_dec;

	if (!link_dec)
		return 0;

	link_dec->statistic_count = 0;

	if (dec->procfs)
		mpp_procfs_create_u32("statistic_count", 0644,
				      dec->procfs, &link_dec->statistic_count);

	return 0;
}
#else
int rkvdec2_link_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
#endif


int rkvdec2_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
{
	int ret;
	struct resource *res = NULL;
	struct rkvdec_link_dev *link_dec = NULL;
	struct device *dev = &pdev->dev;
	struct mpp_dev *mpp = &dec->mpp;
	struct mpp_dma_buffer *table;
	int i;

	mpp_debug_enter();

	link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
	if (!link_dec) {
		ret = -ENOMEM;
		goto done;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
	if (res) {
		link_dec->info = mpp->var->hw_info->link_info;
	} else {
		dev_err(dev, "link mode resource not found\n");
		ret = -ENOMEM;
		goto done;
	}

	link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!link_dec->reg_base) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		ret = -ENOMEM;
		goto done;
	}

	link_dec->task_capacity = mpp->task_capacity;
	ret = rkvdec2_link_alloc_table(&dec->mpp, link_dec);
	if (ret)
		goto done;

	/* alloc table pointer array */
	table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
				   sizeof(*table), GFP_KERNEL | __GFP_ZERO);
	if (!table)
		return -ENOMEM;

	/* init table array */
	link_dec->table_array = table;
	INIT_LIST_HEAD(&link_dec->used_list);
	INIT_LIST_HEAD(&link_dec->unused_list);
	for (i = 0; i < mpp->task_capacity; i++) {
		table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
		table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
		table[i].size = link_dec->link_node_size;
		INIT_LIST_HEAD(&table[i].link);
		list_add_tail(&table[i].link, &link_dec->unused_list);
	}

	if (dec->fix)
		rkvdec2_link_hack_data_setup(dec->fix);

	mpp->fault_handler = rkvdec2_link_iommu_fault_handle;

	link_dec->mpp = mpp;
	link_dec->dev = dev;
	atomic_set(&link_dec->task_timeout, 0);
	atomic_set(&link_dec->task_pending, 0);
	atomic_set(&link_dec->power_enabled, 0);
	link_dec->irq_enabled = 1;

	dec->link_dec = link_dec;
	dev_info(dev, "link mode probe finish\n");

done:
	if (ret) {
		if (link_dec) {
			if (link_dec->reg_base) {
				devm_iounmap(dev, link_dec->reg_base);
				link_dec->reg_base = NULL;
			}
			devm_kfree(dev, link_dec);
			link_dec = NULL;
		}
		dec->link_dec = NULL;
	}
	mpp_debug_leave();

	return ret;
}

static void rkvdec2_link_free_task(struct kref *ref)
{
	struct mpp_dev *mpp;
	struct mpp_session *session;
	struct mpp_task *task = container_of(ref, struct mpp_task, ref);

	if (!task->session) {
		mpp_err("task %d task->session is null.\n", task->task_id);
		return;
	}
	session = task->session;

	mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
		       session->index, task->task_id, task->state);
	if (!session->mpp) {
		mpp_err("session %d session->mpp is null.\n", session->index);
		return;
	}
	mpp = session->mpp;
	list_del_init(&task->queue_link);

	rkvdec2_free_task(session, task);
	/* Decrease reference count */
	atomic_dec(&session->task_count);
	atomic_dec(&mpp->task_count);
}

static void rkvdec2_link_trigger_work(struct mpp_dev *mpp)
{
	kthread_queue_work(&mpp->queue->worker, &mpp->work);
}

static int rkvdec2_link_power_on(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *link_dec = dec->link_dec;

	if (!atomic_xchg(&link_dec->power_enabled, 1)) {
		if (mpp_iommu_attach(mpp->iommu_info)) {
			dev_err(mpp->dev, "mpp_iommu_attach failed\n");
			return -ENODATA;
		}
		pm_runtime_get_sync(mpp->dev);
		pm_stay_awake(mpp->dev);

		if (mpp->hw_ops->clk_on)
			mpp->hw_ops->clk_on(mpp);

		if (!link_dec->irq_enabled) {
			enable_irq(mpp->irq);
			mpp_iommu_enable_irq(mpp->iommu_info);
			link_dec->irq_enabled = 1;
		}

		mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_ADVANCED);
		mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_ADVANCED);
		mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_ADVANCED);
		mpp_devfreq_set_core_rate(mpp, CLK_MODE_ADVANCED);
		mpp_iommu_dev_activate(mpp->iommu_info, mpp);
	}
	return 0;
}

static void rkvdec2_link_power_off(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *link_dec = dec->link_dec;

	if (atomic_xchg(&link_dec->power_enabled, 0)) {
		disable_irq(mpp->irq);
		mpp_iommu_disable_irq(mpp->iommu_info);
		link_dec->irq_enabled = 0;

		if (mpp->hw_ops->clk_off)
			mpp->hw_ops->clk_off(mpp);

		pm_relax(mpp->dev);
		pm_runtime_put_sync_suspend(mpp->dev);

		mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
		mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
		mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
		mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
		mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
	}
}

static void rkvdec2_link_timeout_proc(struct work_struct *work_s)
{
	struct mpp_dev *mpp;
	struct rkvdec2_dev *dec;
	struct mpp_session *session;
	struct mpp_task *task = container_of(to_delayed_work(work_s),
					     struct mpp_task, timeout_work);

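	/*
	 * Claim the task first: TASK_STATE_HANDLE is set atomically so the
	 * timeout path and the normal completion path cannot both process
	 * the same task; whoever sets the bit second backs off.
	 */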
	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
		mpp_err("task %d state %lx has been handled\n",
			task->task_id, task->state);
		return;
	}

	if (!task->session) {
		mpp_err("task %d session is null.\n", task->task_id);
		return;
	}
	session = task->session;

	if (!session->mpp) {
		mpp_err("task %d:%d mpp is null.\n", session->index,
			task->task_id);
		return;
	}
	mpp = session->mpp;
	set_bit(TASK_STATE_TIMEOUT, &task->state);

	dec = to_rkvdec2_dev(mpp);
	atomic_inc(&dec->link_dec->task_timeout);

	dev_err(mpp->dev, "session %d task %d state %#lx timeout, cnt %d\n",
		session->index, task->task_index, task->state,
		atomic_read(&dec->link_dec->task_timeout));

	rkvdec2_link_trigger_work(mpp);
}

static int rkvdec2_link_iommu_fault_handle(struct iommu_domain *iommu,
					   struct device *iommu_dev,
					   unsigned long iova,
					   int status, void *arg)
{
	struct mpp_dev *mpp = (struct mpp_dev *)arg;
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct mpp_task *mpp_task = NULL, *n;
	struct mpp_taskqueue *queue;

	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
		iova, status, arg);

	if (!mpp) {
		dev_err(iommu_dev, "pagefault without device to handle\n");
		return 0;
	}
	queue = mpp->queue;
	list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
		struct rkvdec_link_info *info = dec->link_dec->info;
		struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
		u32 *tb_reg = (u32 *)task->table->vaddr;
		u32 irq_status = tb_reg[info->tb_reg_int];

		if (!irq_status) {
			mpp_task_dump_mem_region(mpp, mpp_task);
			break;
		}
	}

	mpp_task_dump_hw_reg(mpp);
	/*
	 * Mask the iommu irq so the iommu does not repeatedly trigger the
	 * pagefault, until the faulting task is finished by the hw timeout.
	 */
	rockchip_iommu_mask_irq(mpp->dev);
	dec->mmu_fault = 1;

	return 0;
}

static void rkvdec2_link_resend(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *link_dec = dec->link_dec;
	struct mpp_taskqueue *queue = mpp->queue;
	struct mpp_task *mpp_task, *n;

	link_dec->task_running = 0;
	list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
		dev_err(mpp->dev, "resend task %d\n", mpp_task->task_index);
		cancel_delayed_work_sync(&mpp_task->timeout_work);
		clear_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
		clear_bit(TASK_STATE_HANDLE, &mpp_task->state);
		rkvdec2_link_enqueue(link_dec, mpp_task);
	}
}

static void rkvdec2_link_try_dequeue(struct mpp_dev *mpp)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *link_dec = dec->link_dec;
	struct mpp_taskqueue *queue = mpp->queue;
	struct mpp_task *mpp_task = NULL, *n;
	struct rkvdec_link_info *info = link_dec->info;
	u32 reset_flag = 0;
	u32 iommu_fault = dec->mmu_fault && (mpp->irq_status & RKVDEC_TIMEOUT_STA);
	u32 link_en = atomic_read(&link_dec->power_enabled) ?
		      readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE) : 0;
	u32 force_dequeue = iommu_fault || !link_en;
	u32 dequeue_cnt = 0;

	list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
		/*
		 * Multiple tasks are enqueued at the same time, so several
		 * soft timeouts may fire together, while in reality only the
		 * first task actually timed out because the hardware is
		 * stuck; therefore only the first task is processed here.
		 */
		u32 timeout_flag = dequeue_cnt ? 0 : test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
		struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
		u32 *tb_reg = (u32 *)task->table->vaddr;
		u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
		u32 irq_status = tb_reg[info->tb_reg_int];
		u32 task_done = irq_status || timeout_flag || abort_flag;

		/*
		 * Some cases leave the hw unable to write its registers back
		 * to ddr:
		 * 1. iommu pagefault
		 * 2. link stop (link_en == 0) on an err task, a rk356x issue.
		 * In these cases one task must be dequeued by force.
		 */
		if (force_dequeue)
			task_done = 1;

		if (!task_done)
			break;

		dequeue_cnt++;
		/* check hack task, only for rk356x */
		if (task->need_hack == RKVDEC2_LINK_HACK_TASK_FLAG) {
			cancel_delayed_work_sync(&mpp_task->timeout_work);
			list_move_tail(&task->table->link, &link_dec->unused_list);
			list_del_init(&mpp_task->queue_link);
			link_dec->task_running--;
			link_dec->hack_task_running--;
			kfree(task);
			mpp_dbg_link("hack running %d irq_status %#08x timeout %d abort %d\n",
				     link_dec->hack_task_running, irq_status,
				     timeout_flag, abort_flag);
			continue;
		}

		/*
		 * if timeout/abort/force dequeue found, reset and stop hw first.
		 */
		if ((timeout_flag || abort_flag || force_dequeue) && !reset_flag) {
			dev_err(mpp->dev, "session %d task %d timeout %d abort %d force_dequeue %d\n",
				mpp_task->session->index, mpp_task->task_index,
				timeout_flag, abort_flag, force_dequeue);
			rkvdec2_link_reset(mpp);
			reset_flag = 1;
			dec->mmu_fault = 0;
			mpp->irq_status = 0;
			force_dequeue = 0;
		}

		cancel_delayed_work_sync(&mpp_task->timeout_work);

		task->irq_status = irq_status;
		mpp_task->hw_cycles = tb_reg[info->tb_reg_cycle];
		mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
		rkvdec2_link_finish(mpp, mpp_task);

		list_move_tail(&task->table->link, &link_dec->unused_list);
		list_del_init(&mpp_task->queue_link);

		set_bit(TASK_STATE_HANDLE, &mpp_task->state);
		set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
		set_bit(TASK_STATE_FINISH, &mpp_task->state);
		set_bit(TASK_STATE_DONE, &mpp_task->state);
		if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
			set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);

		wake_up(&mpp_task->wait);
		kref_put(&mpp_task->ref, rkvdec2_link_free_task);
		link_dec->task_running--;

		mpp_dbg_link("session %d task %d irq_status %#08x timeout %d abort %d\n",
			     mpp_task->session->index, mpp_task->task_index,
			     irq_status, timeout_flag, abort_flag);
		if (irq_status & RKVDEC_INT_ERROR_MASK) {
			dev_err(mpp->dev,
				"session %d task %d irq_status %#08x timeout %u abort %u\n",
				mpp_task->session->index, mpp_task->task_index,
				irq_status, timeout_flag, abort_flag);
			if (!reset_flag)
				atomic_inc(&mpp->reset_request);
		}
	}

	/* resend running task after reset */
	if (reset_flag && !list_empty(&queue->running_list))
		rkvdec2_link_resend(mpp);
}

static int mpp_task_queue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *link_dec = dec->link_dec;
	struct mpp_taskqueue *queue = mpp->queue;
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);

	mpp_debug_enter();

	rkvdec2_link_power_on(mpp);

	/* hack for rk356x */
	if (task->need_hack) {
		u32 *tb_reg;
		struct mpp_dma_buffer *table;
		struct rkvdec2_task *hack_task;
		struct rkvdec_link_info *info = link_dec->info;

		/* reserve 2 unused table slots for the extra hack task */
		if (link_dec->task_running > (link_dec->task_capacity - 2))
			return -EBUSY;

		table = list_first_entry_or_null(&link_dec->unused_list,
						 struct mpp_dma_buffer,
						 link);
		if (!table)
			return -EBUSY;

		hack_task = kzalloc(sizeof(*hack_task), GFP_KERNEL);
		if (!hack_task)
			return -ENOMEM;

		mpp_task_init(mpp_task->session, &hack_task->mpp_task);
		INIT_DELAYED_WORK(&hack_task->mpp_task.timeout_work,
				  rkvdec2_link_timeout_proc);

		tb_reg = (u32 *)table->vaddr;
		memset(tb_reg + info->part_r[0].tb_reg_off, 0,
		       info->part_r[0].reg_num * sizeof(u32));
		rkvdec2_3568_hack_fix_link(tb_reg + 4);
		list_move_tail(&table->link, &link_dec->used_list);
		hack_task->table = table;
		hack_task->need_hack = RKVDEC2_LINK_HACK_TASK_FLAG;
		rkvdec2_link_enqueue(link_dec, &hack_task->mpp_task);
		mpp_taskqueue_pending_to_run(queue, &hack_task->mpp_task);
		link_dec->hack_task_running++;
		mpp_dbg_link("hack task send to hw, hack running %d\n",
			     link_dec->hack_task_running);
	}

	/* process normal */
	if (!rkvdec2_link_prepare(mpp, mpp_task))
		return -EBUSY;

	rkvdec2_link_enqueue(link_dec, mpp_task);

	set_bit(TASK_STATE_RUNNING, &mpp_task->state);
	atomic_dec(&link_dec->task_pending);
	mpp_taskqueue_pending_to_run(queue, mpp_task);

	mpp_dbg_link("session %d task %d send to hw pending %d running %d\n",
		     mpp_task->session->index, mpp_task->task_index,
		     atomic_read(&link_dec->task_pending), link_dec->task_running);
	mpp_debug_leave();

	return 0;
}

irqreturn_t rkvdec2_link_irq_proc(int irq, void *param)
{
	struct mpp_dev *mpp = param;
	int ret = rkvdec2_link_irq(mpp);

	if (!ret)
		rkvdec2_link_trigger_work(mpp);

	return IRQ_HANDLED;
}

static struct mpp_task *
mpp_session_get_pending_task(struct mpp_session *session)
{
	struct mpp_task *task = NULL;

	mutex_lock(&session->pending_lock);
	task = list_first_entry_or_null(&session->pending_list, struct mpp_task,
					pending_link);
	mutex_unlock(&session->pending_lock);

	return task;
}

static int task_is_done(struct mpp_task *task)
{
	return test_bit(TASK_STATE_PROC_DONE, &task->state);
}

static int mpp_session_pop_pending(struct mpp_session *session,
				   struct mpp_task *task)
{
	mutex_lock(&session->pending_lock);
	list_del_init(&task->pending_link);
	mutex_unlock(&session->pending_lock);
	kref_put(&task->ref, rkvdec2_link_free_task);

	return 0;
}

static int mpp_session_pop_done(struct mpp_session *session,
				struct mpp_task *task)
{
	set_bit(TASK_STATE_DONE, &task->state);

	return 0;
}

int rkvdec2_link_process_task(struct mpp_session *session,
			      struct mpp_task_msgs *msgs)
{
	struct mpp_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;
	struct rkvdec_link_info *link_info = mpp->var->hw_info->link_info;
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec_link_dev *link_dec = dec->link_dec;

	task = rkvdec2_alloc_task(session, msgs);
	if (!task) {
		mpp_err("alloc_task failed.\n");
		return -ENOMEM;
	}

	if (link_info->hack_setup) {
		u32 fmt;
		struct rkvdec2_task *dec_task = NULL;

		dec_task = to_rkvdec2_task(task);
		fmt = RKVDEC_GET_FORMAT(dec_task->reg[RKVDEC_REG_FORMAT_INDEX]);
		dec_task->need_hack = (fmt == RKVDEC_FMT_H264D);
	}

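	/*
	 * Reference scheme (as implemented below): kref_init() provides the
	 * base reference dropped by the kref_put() at the end of this
	 * function, and one extra reference is taken for each list the task
	 * joins (the session pending list and the queue pending list), each
	 * dropped again when the task leaves that list.
	 */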
	kref_init(&task->ref);
	atomic_set(&task->abort_request, 0);
	task->task_index = atomic_fetch_inc(&mpp->task_index);
	task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
	INIT_DELAYED_WORK(&task->timeout_work, rkvdec2_link_timeout_proc);

	atomic_inc(&session->task_count);

	kref_get(&task->ref);
	mutex_lock(&session->pending_lock);
	list_add_tail(&task->pending_link, &session->pending_list);
	mutex_unlock(&session->pending_lock);

	kref_get(&task->ref);
	mutex_lock(&mpp->queue->pending_lock);
	list_add_tail(&task->queue_link, &mpp->queue->pending_list);
	mutex_unlock(&mpp->queue->pending_lock);
	atomic_inc(&link_dec->task_pending);

	/* push current task to queue */
	atomic_inc(&mpp->task_count);
	set_bit(TASK_STATE_PENDING, &task->state);
	/* trigger current queue to run task */
	rkvdec2_link_trigger_work(mpp);
	kref_put(&task->ref, rkvdec2_link_free_task);

	return 0;
}
1181*4882a593Smuzhiyun
rkvdec2_link_wait_result(struct mpp_session * session,struct mpp_task_msgs * msgs)1182*4882a593Smuzhiyun int rkvdec2_link_wait_result(struct mpp_session *session,
1183*4882a593Smuzhiyun struct mpp_task_msgs *msgs)
1184*4882a593Smuzhiyun {
1185*4882a593Smuzhiyun struct mpp_dev *mpp = session->mpp;
1186*4882a593Smuzhiyun struct mpp_task *mpp_task;
1187*4882a593Smuzhiyun int ret;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun mpp_task = mpp_session_get_pending_task(session);
1190*4882a593Smuzhiyun if (!mpp_task) {
1191*4882a593Smuzhiyun mpp_err("session %p pending list is empty!\n", session);
1192*4882a593Smuzhiyun return -EIO;
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun ret = wait_event_timeout(mpp_task->wait, task_is_done(mpp_task),
1196*4882a593Smuzhiyun msecs_to_jiffies(WAIT_TIMEOUT_MS));
1197*4882a593Smuzhiyun if (ret) {
1198*4882a593Smuzhiyun ret = rkvdec2_result(mpp, mpp_task, msgs);
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun mpp_session_pop_done(session, mpp_task);
1201*4882a593Smuzhiyun } else {
1202*4882a593Smuzhiyun mpp_err("task %d:%d state %lx timeout -> abort\n",
1203*4882a593Smuzhiyun session->index, mpp_task->task_id, mpp_task->state);
1204*4882a593Smuzhiyun
1205*4882a593Smuzhiyun atomic_inc(&mpp_task->abort_request);
1206*4882a593Smuzhiyun set_bit(TASK_STATE_ABORT, &mpp_task->state);
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun mpp_session_pop_pending(session, mpp_task);
1210*4882a593Smuzhiyun return ret;
1211*4882a593Smuzhiyun }
1212*4882a593Smuzhiyun
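/*
 * Queue worker for single-core link mode: first reap finished tasks, then
 * handle any pending reset (resending whatever was still running), then
 * feed pending tasks to the hardware until the pending list drains or a
 * task cannot be queued.
 */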
void rkvdec2_link_worker(struct kthread_work *work_s)
{
	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
	struct mpp_task *task;
	struct mpp_taskqueue *queue = mpp->queue;
	u32 all_done;

	mpp_debug_enter();

	/* dequeue running task */
	rkvdec2_link_try_dequeue(mpp);

	/* process reset */
	if (atomic_read(&mpp->reset_request)) {
		rkvdec2_link_reset(mpp);
		/* resend running task after reset */
		if (!list_empty(&queue->running_list))
			rkvdec2_link_resend(mpp);
	}

again:
	/* get pending task to process */
	mutex_lock(&queue->pending_lock);
	task = list_first_entry_or_null(&queue->pending_list, struct mpp_task,
					queue_link);
	mutex_unlock(&queue->pending_lock);
	if (!task)
		goto done;

	/* check abort task */
	if (atomic_read(&task->abort_request)) {
		mutex_lock(&queue->pending_lock);
		list_del_init(&task->queue_link);

		set_bit(TASK_STATE_ABORT_READY, &task->state);
		set_bit(TASK_STATE_PROC_DONE, &task->state);

		mutex_unlock(&queue->pending_lock);
		wake_up(&task->wait);
		kref_put(&task->ref, rkvdec2_link_free_task);
		goto again;
	}

	/* queue task to hw */
	if (!mpp_task_queue(mpp, task))
		goto again;

done:

	/* power off the device when both pending and running lists are empty */
	mutex_lock(&queue->pending_lock);
	all_done = list_empty(&queue->pending_list) && list_empty(&queue->running_list);
	mutex_unlock(&queue->pending_lock);

	if (all_done)
		rkvdec2_link_power_off(mpp);

	mpp_session_cleanup_detach(queue, work_s);

	mpp_debug_leave();
}

void rkvdec2_link_session_deinit(struct mpp_session *session)
{
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	rkvdec2_free_session(session);

	if (session->dma) {
		mpp_dbg_session("session %d destroy dma\n", session->index);
		mpp_iommu_down_write(mpp->iommu_info);
		mpp_dma_session_destroy(session->dma);
		mpp_iommu_up_write(mpp->iommu_info);
		session->dma = NULL;
	}
	if (session->srv) {
		struct mpp_service *srv = session->srv;

		mutex_lock(&srv->session_lock);
		list_del_init(&session->service_link);
		mutex_unlock(&srv->session_lock);
	}
	list_del_init(&session->session_link);

	mpp_dbg_session("session %d release\n", session->index);

	mpp_debug_leave();
}

#define RKVDEC2_1080P_PIXELS		(1920*1080)
#define RKVDEC2_4K_PIXELS		(4096*2304)
#define RKVDEC2_8K_PIXELS		(7680*4320)
#define RKVDEC2_CCU_TIMEOUT_20MS	(0xefffff)
#define RKVDEC2_CCU_TIMEOUT_50MS	(0x2cfffff)
#define RKVDEC2_CCU_TIMEOUT_100MS	(0x4ffffff)

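/*
 * Pick a hardware watchdog threshold that scales with the frame size.
 * The values above are raw counter ticks, not milliseconds; the
 * _20MS/_50MS/_100MS names presumably reflect the resulting time at the
 * decoder's nominal clock rate (an inference from the names, not from
 * documentation available here).
 */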
static u32 rkvdec2_ccu_get_timeout_threshold(struct rkvdec2_task *task)
{
	u32 pixels = task->pixels;

	if (pixels < RKVDEC2_1080P_PIXELS)
		return RKVDEC2_CCU_TIMEOUT_20MS;
	else if (pixels < RKVDEC2_4K_PIXELS)
		return RKVDEC2_CCU_TIMEOUT_50MS;
	else
		return RKVDEC2_CCU_TIMEOUT_100MS;
}

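/*
 * Bind a decoder core to its cluster control unit (CCU). The core points
 * at the CCU node via a "rockchip,ccu" phandle and advertises its slot in
 * "rockchip,core-mask". A hypothetical DT fragment matching what the code
 * below parses (property names taken from the code, node names and values
 * purely illustrative):
 *
 *	vdpu_ccu: ccu@fdc38700 { ... };
 *	vdpu@fdc38100 {
 *		rockchip,ccu = <&vdpu_ccu>;
 *		rockchip,core-mask = <0x00010001>;
 *	};
 *
 * All cores must share one IOMMU domain so link tables and RCB buffers
 * map identically everywhere; non-main cores therefore adopt core 0's
 * domain before attaching.
 */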
int rkvdec2_attach_ccu(struct device *dev, struct rkvdec2_dev *dec)
{
	int ret;
	struct device_node *np;
	struct platform_device *pdev;
	struct rkvdec2_ccu *ccu;

	mpp_debug_enter();

	np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
	if (!np || !of_device_is_available(np))
		return -ENODEV;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return -ENODEV;

	ccu = platform_get_drvdata(pdev);
	if (!ccu)
		return -ENOMEM;

	ret = of_property_read_u32(dev->of_node, "rockchip,core-mask", &dec->core_mask);
	if (ret)
		return ret;
	dev_info(dev, "core_mask=%08x\n", dec->core_mask);

	/* if not the main core, attach the main core's domain to the current one */
	if (dec->mpp.core_id != 0) {
		struct mpp_taskqueue *queue;
		struct mpp_iommu_info *ccu_info, *cur_info;

		queue = dec->mpp.queue;
		/* set the ccu-domain for current device */
		ccu_info = queue->cores[0]->iommu_info;
		cur_info = dec->mpp.iommu_info;
		if (cur_info)
			cur_info->domain = ccu_info->domain;
		mpp_iommu_attach(cur_info);
	}

	dec->ccu = ccu;

	dev_info(dev, "attach ccu as core %d\n", dec->mpp.core_id);
	mpp_debug_leave();

	return 0;
}

static void rkvdec2_ccu_timeout_work(struct work_struct *work_s)
{
	struct mpp_dev *mpp;
	struct mpp_task *task = container_of(to_delayed_work(work_s),
					     struct mpp_task, timeout_work);

	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
		mpp_err("task %d state %lx has been handled\n",
			task->task_id, task->state);
		return;
	}

	if (!task->session) {
		mpp_err("task %d session is null.\n", task->task_id);
		return;
	}
	mpp = mpp_get_task_used_device(task, task->session);
	mpp_err("%s, task %d state %#lx timeout\n", dev_name(mpp->dev),
		task->task_index, task->state);
	set_bit(TASK_STATE_TIMEOUT, &task->state);
	atomic_inc(&mpp->reset_request);
	atomic_inc(&mpp->queue->reset_request);
	kthread_queue_work(&mpp->queue->worker, &mpp->work);
}

int rkvdec2_ccu_link_init(struct platform_device *pdev, struct rkvdec2_dev *dec)
{
	struct resource *res;
	struct rkvdec_link_dev *link_dec;
	struct device *dev = &pdev->dev;

	mpp_debug_enter();

	/* link structure */
	link_dec = devm_kzalloc(dev, sizeof(*link_dec), GFP_KERNEL);
	if (!link_dec)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
	if (!res)
		return -ENOMEM;

	link_dec->info = dec->mpp.var->hw_info->link_info;
	link_dec->reg_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!link_dec->reg_base) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		return -ENOMEM;
	}

	dec->link_dec = link_dec;

	mpp_debug_leave();

	return 0;
}

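/*
 * Power for the whole cluster is toggled as one unit. The atomic_xchg()
 * on ccu->power_enabled makes both helpers idempotent, so the worker can
 * call them speculatively without any refcounting.
 */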
static int rkvdec2_ccu_power_on(struct mpp_taskqueue *queue,
				struct rkvdec2_ccu *ccu)
{
	if (!atomic_xchg(&ccu->power_enabled, 1)) {
		u32 i;
		struct mpp_dev *mpp;

		/* ccu pd and clk on */
		pm_runtime_get_sync(ccu->dev);
		pm_stay_awake(ccu->dev);
		mpp_clk_safe_enable(ccu->aclk_info.clk);
		/* core pd and clk on */
		for (i = 0; i < queue->core_count; i++) {
			struct rkvdec2_dev *dec;

			mpp = queue->cores[i];
			dec = to_rkvdec2_dev(mpp);
			pm_runtime_get_sync(mpp->dev);
			pm_stay_awake(mpp->dev);
			if (mpp->hw_ops->clk_on)
				mpp->hw_ops->clk_on(mpp);

			mpp_clk_set_rate(&dec->aclk_info, CLK_MODE_NORMAL);
			mpp_clk_set_rate(&dec->cabac_clk_info, CLK_MODE_NORMAL);
			mpp_clk_set_rate(&dec->hevc_cabac_clk_info, CLK_MODE_NORMAL);
			mpp_devfreq_set_core_rate(mpp, CLK_MODE_NORMAL);
			mpp_iommu_dev_activate(mpp->iommu_info, mpp);
		}
		mpp_debug(DEBUG_CCU, "power on\n");
	}

	return 0;
}

static int rkvdec2_ccu_power_off(struct mpp_taskqueue *queue,
				 struct rkvdec2_ccu *ccu)
{
	if (atomic_xchg(&ccu->power_enabled, 0)) {
		u32 i;
		struct mpp_dev *mpp;

		/* ccu pd and clk off */
		mpp_clk_safe_disable(ccu->aclk_info.clk);
		pm_relax(ccu->dev);
		pm_runtime_mark_last_busy(ccu->dev);
		pm_runtime_put_autosuspend(ccu->dev);
		/* core pd and clk off */
		for (i = 0; i < queue->core_count; i++) {
			mpp = queue->cores[i];

			if (mpp->hw_ops->clk_off)
				mpp->hw_ops->clk_off(mpp);
			pm_relax(mpp->dev);
			pm_runtime_mark_last_busy(mpp->dev);
			pm_runtime_put_autosuspend(mpp->dev);
			mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
		}
		mpp_debug(DEBUG_CCU, "power off\n");
	}

	return 0;
}

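/*
 * Reap finished tasks from the head of the running list. The walk stops
 * at the first entry with no IRQ status and no timeout/abort flag, so
 * completions are always retired in submission order; anything behind an
 * unfinished task waits for a later worker pass.
 */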
static int rkvdec2_soft_ccu_dequeue(struct mpp_taskqueue *queue)
{
	struct mpp_task *mpp_task = NULL, *n;

	mpp_debug_enter();

	list_for_each_entry_safe(mpp_task, n,
				 &queue->running_list,
				 queue_link) {
		struct mpp_dev *mpp = mpp_get_task_used_device(mpp_task, mpp_task->session);
		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
		u32 irq_status = mpp->irq_status;
		u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
		u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
		u32 timing_en = mpp->srv->timing_en;

		if (irq_status || timeout_flag || abort_flag) {
			struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);

			if (timing_en) {
				mpp_task->on_irq = ktime_get();
				set_bit(TASK_TIMING_IRQ, &mpp_task->state);

				mpp_task->on_cancel_timeout = mpp_task->on_irq;
				set_bit(TASK_TIMING_TO_CANCEL, &mpp_task->state);

				mpp_task->on_isr = mpp_task->on_irq;
				set_bit(TASK_TIMING_ISR, &mpp_task->state);
			}

			set_bit(TASK_STATE_HANDLE, &mpp_task->state);
			cancel_delayed_work(&mpp_task->timeout_work);
			mpp_task->hw_cycles = mpp_read(mpp, RKVDEC_PERF_WORKING_CNT);
			mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
			task->irq_status = irq_status;
			mpp_debug(DEBUG_IRQ_CHECK, "irq_status=%08x, timeout=%u, abort=%u\n",
				  irq_status, timeout_flag, abort_flag);
			if (irq_status && mpp->dev_ops->finish)
				mpp->dev_ops->finish(mpp, mpp_task);
			else
				task->reg[RKVDEC_REG_INT_EN_INDEX] = RKVDEC_TIMEOUT_STA;

			set_bit(TASK_STATE_FINISH, &mpp_task->state);
			set_bit(TASK_STATE_DONE, &mpp_task->state);

			set_bit(mpp->core_id, &queue->core_idle);
			mpp_dbg_core("set core %d idle %lx\n", mpp->core_id, queue->core_idle);
			/* Wake up the GET thread */
			wake_up(&mpp_task->wait);
			/* free task */
			list_del_init(&mpp_task->queue_link);
			kref_put(&mpp_task->ref, mpp_free_task);
		} else {
			/* NOTE: stop at the first task that has not finished */
			break;
		}
	}

	mpp_debug_leave();
	return 0;
}

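/*
 * Full reset path for soft-link mode: each core is detached from the CCU
 * (forced idle), soft-reset through its own registers, reset again via
 * the TF-A SIP call when that is reachable, then reconnected to the CCU
 * with its error state cleared and its IOMMU refreshed.
 */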
static int rkvdec2_soft_ccu_reset(struct mpp_taskqueue *queue,
				  struct rkvdec2_ccu *ccu)
{
	int i;

	for (i = queue->core_count - 1; i >= 0; i--) {
		u32 val;

		struct mpp_dev *mpp = queue->cores[i];
		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

		if (mpp->disable)
			continue;

		dev_info(mpp->dev, "resetting...\n");
		disable_hardirq(mpp->irq);

		/* force idle, disconnect core and ccu */
		writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);

		/* soft reset */
		mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
		udelay(5);
		val = mpp_read(mpp, RKVDEC_REG_INT_EN);
		if (!(val & RKVDEC_SOFT_RESET_READY))
			mpp_err("soft reset fail, int %08x\n", val);
		mpp_write(mpp, RKVDEC_REG_INT_EN, 0);

		/* check bus idle */
		val = mpp_read(mpp, RKVDEC_REG_DEBUG_INT_BASE);
		if (!(val & RKVDEC_BIT_BUS_IDLE))
			mpp_err("bus busy\n");

		if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
			/* sip reset */
			rockchip_dmcfreq_lock();
			sip_smc_vpu_reset(i, 0, 0);
			rockchip_dmcfreq_unlock();
		} else {
			rkvdec2_reset(mpp);
		}
		/* clear error mask */
		writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
		       ccu->reg_base + RKVDEC_CCU_CORE_ERR_BASE);
		/* connect core and ccu */
		writel(dec->core_mask & RKVDEC_CCU_CORE_RW_MASK,
		       ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
		atomic_set(&mpp->reset_request, 0);

		enable_irq(mpp->irq);
		dev_info(mpp->dev, "reset done\n");
	}
	atomic_set(&queue->reset_request, 0);

	return 0;
}

void *rkvdec2_ccu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct rkvdec2_task *task;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	ret = rkvdec2_task_init(session->mpp, session, task, msgs);
	if (ret) {
		kfree(task);
		return NULL;
	}

	return &task->mpp_task;
}

static void rkvdec2_ccu_check_pagefault_info(struct mpp_dev *mpp)
{
	u32 i = 0;

	for (i = 0; i < mpp->queue->core_count; i++) {
		struct mpp_dev *core = mpp->queue->cores[i];
		struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
		void __iomem *mmu_base = dec->mmu_base;
		u32 mmu0_st;
		u32 mmu1_st;
		u32 mmu0_pta;
		u32 mmu1_pta;

		if (!mmu_base)
			return;

#define FAULT_STATUS 0x7e2
		rkvdec2_ccu_power_on(mpp->queue, dec->ccu);

		mmu0_st = readl(mmu_base + 0x4);
		mmu1_st = readl(mmu_base + 0x44);
		mmu0_pta = readl(mmu_base + 0xc);
		mmu1_pta = readl(mmu_base + 0x4c);

		dec->mmu0_st = mmu0_st;
		dec->mmu1_st = mmu1_st;
		dec->mmu0_pta = mmu0_pta;
		dec->mmu1_pta = mmu1_pta;

		pr_err("core %d mmu0 %08x %08x mmu1 %08x %08x\n",
		       core->core_id, mmu0_st, mmu0_pta, mmu1_st, mmu1_pta);
		if ((mmu0_st & FAULT_STATUS) || (mmu1_st & FAULT_STATUS) ||
		    mmu0_pta || mmu1_pta) {
			dec->fault_iova = readl(dec->link_dec->reg_base + 0x4);
			dec->mmu_fault = 1;
			pr_err("core %d fault iova %08x\n", core->core_id, dec->fault_iova);
			rockchip_iommu_mask_irq(core->dev);
		} else {
			dec->mmu_fault = 0;
			dec->fault_iova = 0;
		}
	}
}

int rkvdec2_ccu_iommu_fault_handle(struct iommu_domain *iommu,
				   struct device *iommu_dev,
				   unsigned long iova, int status, void *arg)
{
	struct mpp_dev *mpp = (struct mpp_dev *)arg;

	mpp_debug_enter();

	rkvdec2_ccu_check_pagefault_info(mpp);

	mpp->queue->iommu_fault = 1;
	atomic_inc(&mpp->queue->reset_request);
	kthread_queue_work(&mpp->queue->worker, &mpp->work);

	mpp_debug_leave();

	return 0;
}

irqreturn_t rkvdec2_soft_ccu_irq(int irq, void *param)
{
	struct mpp_dev *mpp = param;
	u32 irq_status = mpp_read_relaxed(mpp, RKVDEC_REG_INT_EN);

	if (irq_status & RKVDEC_IRQ_RAW) {
		mpp_debug(DEBUG_IRQ_STATUS, "irq_status=%08x\n", irq_status);
		if (irq_status & RKVDEC_INT_ERROR_MASK) {
			atomic_inc(&mpp->reset_request);
			atomic_inc(&mpp->queue->reset_request);
		}
		mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
		mpp->irq_status = irq_status;
		kthread_queue_work(&mpp->queue->worker, &mpp->work);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

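/*
 * Stash the session index in the core control register. The value is
 * packed into RKVDEC_REG_FILM_IDX_MASK; judging by the mask name this
 * field is otherwise used for film-grain indexing, so reusing it for the
 * session id appears to be a driver-side convention rather than a
 * documented hardware feature.
 */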
static inline int rkvdec2_set_core_info(u32 *reg, int idx)
{
	u32 val = (idx << 16) & RKVDEC_REG_FILM_IDX_MASK;

	reg[RKVDEC_REG_CORE_CTRL_INDEX] &= ~RKVDEC_REG_FILM_IDX_MASK;

	reg[RKVDEC_REG_CORE_CTRL_INDEX] |= val;

	return 0;
}

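/*
 * Hand one task to a core in soft-link mode: program the link/CCU work
 * mode, configure and clear the caches, flush the IOMMU TLB, write the
 * task registers, and finally hit the start bit after a write barrier.
 */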
static int rkvdec2_soft_ccu_enqueue(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	u32 i, reg_en, reg;
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	/* set reg for link */
	reg = RKVDEC_LINK_BIT_CORE_WORK_MODE | RKVDEC_LINK_BIT_CCU_WORK_MODE;
	writel_relaxed(reg, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);

	/* set reg for ccu */
	writel_relaxed(RKVDEC_CCU_BIT_WORK_EN, dec->ccu->reg_base + RKVDEC_CCU_WORK_BASE);
	writel_relaxed(RKVDEC_CCU_BIT_WORK_MODE, dec->ccu->reg_base + RKVDEC_CCU_WORK_MODE_BASE);
	writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);

	/* set cache size */
	reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
	      RKVDEC_CACHE_PERMIT_READ_ALLOCATE;
	if (!mpp_debug_unlikely(DEBUG_CACHE_32B))
		reg |= RKVDEC_CACHE_LINE_SIZE_64_BYTES;

	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE0_SIZE_BASE, reg);
	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE1_SIZE_BASE, reg);
	mpp_write_relaxed(mpp, RKVDEC_REG_CACHE2_SIZE_BASE, reg);
	/* clear cache */
	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE0_BASE, 1);
	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE1_BASE, 1);
	mpp_write_relaxed(mpp, RKVDEC_REG_CLR_CACHE2_BASE, 1);

	mpp_iommu_flush_tlb(mpp->iommu_info);
	/* disable multicore pu/colmv offset req timeout reset */
	task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
	task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
	/* set registers for hardware */
	reg_en = mpp_task->hw_info->reg_en;
	for (i = 0; i < task->w_req_cnt; i++) {
		int s, e;
		struct mpp_request *req = &task->w_reqs[i];

		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}
	/* init current task */
	mpp->cur_task = mpp_task;

	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

	mpp->irq_status = 0;
	writel_relaxed(dec->core_mask, dec->ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE);
	/* flush the registers before starting the device */
	wmb();
	mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);

	mpp_task_run_end(mpp_task, timing_en);

	mpp_debug_leave();

	return 0;
}

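/*
 * Core selection is a simple load balancer: among the idle, enabled cores
 * pick the one that has processed the fewest tasks so far, then mark it
 * busy and account the new task against it.
 */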
static struct mpp_dev *rkvdec2_get_idle_core(struct mpp_taskqueue *queue,
					     struct mpp_task *mpp_task)
{
	u32 i = 0;
	struct rkvdec2_dev *dec = NULL;

	for (i = 0; i < queue->core_count; i++) {
		struct mpp_dev *mpp = queue->cores[i];
		struct rkvdec2_dev *core = to_rkvdec2_dev(mpp);

		if (mpp->disable)
			continue;

		if (test_bit(i, &queue->core_idle)) {
			if (!dec) {
				dec = core;
				continue;
			}
			/* prefer the core with less work done so far */
			if (core->task_index < dec->task_index)
				dec = core;
		}
	}
	/* an idle core was found */
	if (dec) {
		mpp_task->mpp = &dec->mpp;
		mpp_task->core_id = dec->mpp.core_id;
		clear_bit(mpp_task->core_id, &queue->core_idle);
		dec->task_index++;
		atomic_inc(&dec->mpp.task_count);
		mpp_dbg_core("clear core %d idle\n", mpp_task->core_id);
		return mpp_task->mpp;
	}

	return NULL;
}

static bool rkvdec2_core_working(struct mpp_taskqueue *queue)
{
	struct mpp_dev *mpp;
	bool flag = false;
	u32 i = 0;

	for (i = 0; i < queue->core_count; i++) {
		mpp = queue->cores[i];
		if (mpp->disable)
			continue;
		if (!test_bit(i, &queue->core_idle)) {
			flag = true;
			break;
		}
	}

	return flag;
}

static int rkvdec2_ccu_link_session_detach(struct mpp_dev *mpp,
					   struct mpp_taskqueue *queue)
{
	mutex_lock(&queue->session_lock);
	while (atomic_read(&queue->detach_count)) {
		struct mpp_session *session = NULL;

		session = list_first_entry_or_null(&queue->session_detach,
						   struct mpp_session,
						   session_link);
		if (session) {
			list_del_init(&session->session_link);
			atomic_dec(&queue->detach_count);
		}

		mutex_unlock(&queue->session_lock);

		if (session) {
			mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
					atomic_read(&queue->detach_count));
			mpp_session_deinit(session);
		}

		mutex_lock(&queue->session_lock);
	}
	mutex_unlock(&queue->session_lock);

	return 0;
}

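/*
 * This worker runs on the queue's single kthread, so the pending ->
 * running -> done transitions below never race with each other; the
 * pending_lock only guards against concurrent submitters.
 */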
void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
{
	struct mpp_task *mpp_task;
	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
	struct mpp_taskqueue *queue = mpp->queue;
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	/* 1. process all finished tasks in the running list */
	rkvdec2_soft_ccu_dequeue(queue);

	/* 2. process reset request */
	if (atomic_read(&queue->reset_request)) {
		if (!rkvdec2_core_working(queue)) {
			rkvdec2_ccu_power_on(queue, dec->ccu);
			rkvdec2_soft_ccu_reset(queue, dec->ccu);
		}
	}

	/* 3. process pending tasks */
	while (1) {
		if (atomic_read(&queue->reset_request))
			break;
		/* get one task from the pending list */
		mutex_lock(&queue->pending_lock);
		mpp_task = list_first_entry_or_null(&queue->pending_list,
						    struct mpp_task, queue_link);
		mutex_unlock(&queue->pending_lock);
		if (!mpp_task)
			break;

		if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
			mutex_lock(&queue->pending_lock);
			list_del_init(&mpp_task->queue_link);

			set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
			set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);

			mutex_unlock(&queue->pending_lock);
			wake_up(&mpp_task->wait);
			kref_put(&mpp_task->ref, rkvdec2_link_free_task);
			continue;
		}
		/* find an idle core */
		mpp = rkvdec2_get_idle_core(queue, mpp_task);
		if (!mpp)
			break;

		if (timing_en) {
			mpp_task->on_run = ktime_get();
			set_bit(TASK_TIMING_RUN, &mpp_task->state);
		}

		/* set session index */
		rkvdec2_set_core_info(mpp_task->reg, mpp_task->session->index);
		/* set rcb buffer */
		mpp_set_rcbbuf(mpp, mpp_task->session, mpp_task);

		INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
		rkvdec2_ccu_power_on(queue, dec->ccu);
		rkvdec2_soft_ccu_enqueue(mpp, mpp_task);
		/* pending to running */
		mpp_taskqueue_pending_to_run(queue, mpp_task);
		set_bit(TASK_STATE_RUNNING, &mpp_task->state);
	}

	/* 4. power off when both running and pending lists are empty */
	if (list_empty(&queue->running_list) &&
	    list_empty(&queue->pending_list))
		rkvdec2_ccu_power_off(queue, dec->ccu);

	/* 5. handle sessions detached from the queue */
	rkvdec2_ccu_link_session_detach(mpp, queue);

	mpp_debug_leave();
}

int rkvdec2_ccu_alloc_table(struct rkvdec2_dev *dec,
			    struct rkvdec_link_dev *link_dec)
{
	int ret, i;
	struct mpp_dma_buffer *table;
	struct mpp_dev *mpp = &dec->mpp;

	mpp_debug_enter();

	/* alloc table pointer array */
	table = devm_kmalloc_array(mpp->dev, mpp->task_capacity,
				   sizeof(*table), GFP_KERNEL | __GFP_ZERO);
	if (!table)
		return -ENOMEM;

	/* alloc table buffer */
	ret = rkvdec2_link_alloc_table(mpp, link_dec);
	if (ret)
		return ret;

	/* init table array */
	dec->ccu->table_array = table;
	for (i = 0; i < mpp->task_capacity; i++) {
		table[i].iova = link_dec->table->iova + i * link_dec->link_node_size;
		table[i].vaddr = link_dec->table->vaddr + i * link_dec->link_node_size;
		table[i].size = link_dec->link_node_size;
		INIT_LIST_HEAD(&table[i].link);
		list_add_tail(&table[i].link, &dec->ccu->unused_list);
	}

	return 0;
}

static void rkvdec2_dump_ccu(struct rkvdec2_ccu *ccu)
{
	u32 i;

	for (i = 0; i < 10; i++)
		mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));

	for (i = 16; i < 22; i++)
		mpp_err("ccu:reg[%d]=%08x\n", i, readl(ccu->reg_base + 4 * i));
}

static void rkvdec2_dump_link(struct rkvdec2_dev *dec)
{
	u32 i;

	for (i = 0; i < 10; i++)
		mpp_err("link:reg[%d]=%08x\n", i, readl(dec->link_dec->reg_base + 4 * i));
}

static void rkvdec2_dump_core(struct mpp_dev *mpp, struct rkvdec2_task *task)
{
	u32 j;

	if (task) {
		for (j = 0; j < 273; j++)
			mpp_err("reg[%d]=%08x, %08x\n", j, mpp_read(mpp, j * 4), task->reg[j]);
	} else {
		for (j = 0; j < 273; j++)
			mpp_err("reg[%d]=%08x\n", j, mpp_read(mpp, j * 4));
	}
}

irqreturn_t rkvdec2_hard_ccu_irq(int irq, void *param)
{
	u32 irq_status;
	struct mpp_dev *mpp = param;
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

	irq_status = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
	dec->ccu->ccu_core_work_mode = readl(dec->ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
	if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
		dec->link_dec->irq_status = irq_status;
		mpp->irq_status = mpp_read(mpp, RKVDEC_REG_INT_EN);
		mpp_debug(DEBUG_IRQ_STATUS, "core %d link_irq=%08x, core_irq=%08x\n",
			  mpp->core_id, irq_status, mpp->irq_status);

		writel(irq_status & 0xfffff0ff,
		       dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);

		kthread_queue_work(&mpp->queue->worker, &mpp->work);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rkvdec2_hard_ccu_finish(struct rkvdec_link_info *hw, struct rkvdec2_task *task)
{
	u32 i, off, s, n;
	struct rkvdec_link_part *part = hw->part_r;
	u32 *tb_reg = (u32 *)task->table->vaddr;

	mpp_debug_enter();

	for (i = 0; i < hw->part_r_num; i++) {
		off = part[i].tb_reg_off;
		s = part[i].reg_start;
		n = part[i].reg_num;
		memcpy(&task->reg[s], &tb_reg[off], n * sizeof(u32));
	}
	/* revert hack for irq status */
	task->reg[RKVDEC_REG_INT_EN_INDEX] = task->irq_status;

	mpp_debug_leave();

	return 0;
}

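/*
 * In hard-link mode the completion status lives in the per-task link
 * table (the tb_reg_int slot), not in the core registers, so dequeue
 * inspects the table that the hardware has been writing back to.
 */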
static int rkvdec2_hard_ccu_dequeue(struct mpp_taskqueue *queue,
				    struct rkvdec2_ccu *ccu,
				    struct rkvdec_link_info *hw)
{
	struct mpp_task *mpp_task = NULL, *n;
	u32 dump_reg = 0;
	u32 dequeue_none = 0;

	mpp_debug_enter();
	list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
		u32 timeout_flag = test_bit(TASK_STATE_TIMEOUT, &mpp_task->state);
		u32 abort_flag = test_bit(TASK_STATE_ABORT, &mpp_task->state);
		struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
		u32 *tb_reg = (u32 *)task->table->vaddr;
		u32 irq_status = tb_reg[hw->tb_reg_int];
		u32 ccu_decoded_num, ccu_total_dec_num;

		ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE);
		ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE);
		mpp_debug(DEBUG_IRQ_CHECK,
			  "session %d task %d w:h[%d %d] err %d irq_status %08x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
			  mpp_task->session->index, mpp_task->task_index, task->width,
			  task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK), irq_status,
			  timeout_flag, abort_flag, (u32)task->table->iova,
			  ((u32 *)task->table->vaddr)[hw->tb_reg_next],
			  ccu_decoded_num, ccu_total_dec_num);

		if (irq_status || timeout_flag || abort_flag) {
			struct rkvdec2_dev *dec = to_rkvdec2_dev(queue->cores[0]);

			set_bit(TASK_STATE_HANDLE, &mpp_task->state);
			cancel_delayed_work(&mpp_task->timeout_work);
			mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle];
			mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
			task->irq_status = irq_status;

			if (irq_status)
				rkvdec2_hard_ccu_finish(hw, task);

			set_bit(TASK_STATE_FINISH, &mpp_task->state);
			set_bit(TASK_STATE_DONE, &mpp_task->state);

			if (timeout_flag && !dump_reg && mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
				u32 i;

				mpp_err("###### ccu #####\n");
				rkvdec2_dump_ccu(ccu);
				for (i = 0; i < queue->core_count; i++) {
					mpp_err("###### core %d #####\n", i);
					rkvdec2_dump_link(to_rkvdec2_dev(queue->cores[i]));
					rkvdec2_dump_core(queue->cores[i], task);
				}
				dump_reg = 1;
			}
			list_move_tail(&task->table->link, &ccu->unused_list);
			/* free task */
			list_del_init(&mpp_task->queue_link);
			/* Wake up the GET thread */
			wake_up(&mpp_task->wait);
			if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) {
				pr_err("session %d task %d irq_status %08x timeout=%u abort=%u\n",
				       mpp_task->session->index, mpp_task->task_index,
				       irq_status, timeout_flag, abort_flag);
				atomic_inc(&queue->reset_request);
			}

			kref_put(&mpp_task->ref, mpp_free_task);
		} else {
			dequeue_none++;
			/*
			 * There are only two cores, so once more than two
			 * unfinished tasks have been seen the rest cannot
			 * have reached the hardware yet and the scan can
			 * stop early.
			 */
			if (dequeue_none > 2)
				break;
		}
	}

	mpp_debug_leave();
	return 0;
}

static int rkvdec2_hard_ccu_reset(struct mpp_taskqueue *queue, struct rkvdec2_ccu *ccu)
{
	int i = 0;

	mpp_debug_enter();

	/* reset and reactivate each core */
	for (i = 0; i < queue->core_count; i++) {
		u32 val = 0;
		struct mpp_dev *mpp = queue->cores[i];
		struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

		if (mpp->disable)
			continue;
		dev_info(mpp->dev, "resetting...\n");
		disable_hardirq(mpp->irq);
		/* force idle */
		writel(dec->core_mask, ccu->reg_base + RKVDEC_CCU_CORE_IDLE_BASE);
		writel(0, ccu->reg_base + RKVDEC_CCU_WORK_BASE);

		{
			/* soft reset */
			u32 val;

			mpp_write(mpp, RKVDEC_REG_IMPORTANT_BASE, RKVDEC_SOFTREST_EN);
			udelay(5);
			val = mpp_read(mpp, RKVDEC_REG_INT_EN);
			if (!(val & RKVDEC_SOFT_RESET_READY))
				mpp_err("soft reset fail, int %08x\n", val);

			// /* cru reset */
			// dev_info(mpp->dev, "cru reset\n");
			// rkvdec2_reset(mpp);
		}
#if IS_ENABLED(CONFIG_ROCKCHIP_SIP)
		rockchip_dmcfreq_lock();
		sip_smc_vpu_reset(i, 0, 0);
		rockchip_dmcfreq_unlock();
#else
		rkvdec2_reset(mpp);
#endif
		mpp_iommu_refresh(mpp->iommu_info, mpp->dev);
		enable_irq(mpp->irq);
		atomic_set(&mpp->reset_request, 0);
		val = mpp_read_relaxed(mpp, 272 * 4);
		dev_info(mpp->dev, "reset done, idle %d\n", (val & 1));
	}
	/* reset ccu */
	mpp_safe_reset(ccu->rst_a);
	udelay(5);
	mpp_safe_unreset(ccu->rst_a);

	mpp_debug_leave();
	return 0;
}

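/*
 * Preparing a task means filling one link table from the task registers
 * and pre-chaining its "next" pointer. Two unused tables are required:
 * one to hold this task and one to serve as the next-link target, so the
 * hardware never follows a dangling pointer when it finishes this node.
 */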
2202*4882a593Smuzhiyun static struct mpp_task *
rkvdec2_hard_ccu_prepare(struct mpp_task * mpp_task,struct rkvdec2_ccu * ccu,struct rkvdec_link_info * hw)2203*4882a593Smuzhiyun rkvdec2_hard_ccu_prepare(struct mpp_task *mpp_task,
2204*4882a593Smuzhiyun struct rkvdec2_ccu *ccu, struct rkvdec_link_info *hw)
2205*4882a593Smuzhiyun {
2206*4882a593Smuzhiyun u32 i, off, s, n;
2207*4882a593Smuzhiyun u32 *tb_reg;
2208*4882a593Smuzhiyun struct mpp_dma_buffer *table = NULL;
2209*4882a593Smuzhiyun struct rkvdec_link_part *part;
2210*4882a593Smuzhiyun struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2211*4882a593Smuzhiyun
2212*4882a593Smuzhiyun mpp_debug_enter();
2213*4882a593Smuzhiyun
2214*4882a593Smuzhiyun if (test_bit(TASK_STATE_PREPARE, &mpp_task->state))
2215*4882a593Smuzhiyun return mpp_task;
2216*4882a593Smuzhiyun
2217*4882a593Smuzhiyun /* ensure that cur table iova points to the next link table*/
2218*4882a593Smuzhiyun {
2219*4882a593Smuzhiyun struct mpp_dma_buffer *table0 = NULL, *table1 = NULL, *n;
2220*4882a593Smuzhiyun
2221*4882a593Smuzhiyun list_for_each_entry_safe(table, n, &ccu->unused_list, link) {
2222*4882a593Smuzhiyun if (!table0) {
2223*4882a593Smuzhiyun table0 = table;
2224*4882a593Smuzhiyun continue;
2225*4882a593Smuzhiyun }
2226*4882a593Smuzhiyun if (!table1)
2227*4882a593Smuzhiyun table1 = table;
2228*4882a593Smuzhiyun break;
2229*4882a593Smuzhiyun }
2230*4882a593Smuzhiyun if (!table0 || !table1)
2231*4882a593Smuzhiyun return NULL;
2232*4882a593Smuzhiyun ((u32 *)table0->vaddr)[hw->tb_reg_next] = table1->iova;
2233*4882a593Smuzhiyun table = table0;
2234*4882a593Smuzhiyun }
2235*4882a593Smuzhiyun
2236*4882a593Smuzhiyun /* set session idx */
2237*4882a593Smuzhiyun rkvdec2_set_core_info(task->reg, mpp_task->session->index);
2238*4882a593Smuzhiyun tb_reg = (u32 *)table->vaddr;
2239*4882a593Smuzhiyun part = hw->part_w;
2240*4882a593Smuzhiyun
2241*4882a593Smuzhiyun /* disable multicore pu/colmv offset req timeout reset */
2242*4882a593Smuzhiyun task->reg[RKVDEC_REG_EN_MODE_SET] |= BIT(1);
2243*4882a593Smuzhiyun task->reg[RKVDEC_REG_TIMEOUT_THRESHOLD] = rkvdec2_ccu_get_timeout_threshold(task);
2244*4882a593Smuzhiyun
2245*4882a593Smuzhiyun for (i = 0; i < hw->part_w_num; i++) {
2246*4882a593Smuzhiyun off = part[i].tb_reg_off;
2247*4882a593Smuzhiyun s = part[i].reg_start;
2248*4882a593Smuzhiyun n = part[i].reg_num;
2249*4882a593Smuzhiyun memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
2250*4882a593Smuzhiyun }
2251*4882a593Smuzhiyun
2252*4882a593Smuzhiyun /* memset read registers */
2253*4882a593Smuzhiyun part = hw->part_r;
2254*4882a593Smuzhiyun for (i = 0; i < hw->part_r_num; i++) {
2255*4882a593Smuzhiyun off = part[i].tb_reg_off;
2256*4882a593Smuzhiyun n = part[i].reg_num;
2257*4882a593Smuzhiyun memset(&tb_reg[off], 0, n * sizeof(u32));
2258*4882a593Smuzhiyun }
2259*4882a593Smuzhiyun list_move_tail(&table->link, &ccu->used_list);
2260*4882a593Smuzhiyun task->table = table;
2261*4882a593Smuzhiyun set_bit(TASK_STATE_PREPARE, &mpp_task->state);
2262*4882a593Smuzhiyun mpp_dbg_ccu("session %d task %d iova %08x next %08x\n",
2263*4882a593Smuzhiyun mpp_task->session->index, mpp_task->task_index, (u32)task->table->iova,
2264*4882a593Smuzhiyun ((u32 *)task->table->vaddr)[hw->tb_reg_next]);
2265*4882a593Smuzhiyun
2266*4882a593Smuzhiyun mpp_debug_leave();
2267*4882a593Smuzhiyun
2268*4882a593Smuzhiyun return mpp_task;
2269*4882a593Smuzhiyun }
2270*4882a593Smuzhiyun
static int rkvdec2_ccu_link_fix_rcb_regs(struct rkvdec2_dev *dec)
{
	int ret = 0;
	u32 i, val;
	u32 reg, reg_idx, rcb_size, rcb_offset;

	if (!dec->rcb_iova && !dec->rcb_info_count)
		goto done;
	/* skip the fixup if the registers were already programmed */
	val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
	if (val & RKVDEC_CCU_BIT_FIX_RCB)
		goto done;
	/* hand out one slice of the rcb buffer per (reg, size) pair */
	rcb_offset = 0;
	for (i = 0; i < dec->rcb_info_count; i += 2) {
		reg_idx = dec->rcb_infos[i];
		rcb_size = dec->rcb_infos[i + 1];
		mpp_debug(DEBUG_SRAM_INFO,
			  "rcb: reg %u size %u offset %u sram_size %u rcb_size %u\n",
			  reg_idx, rcb_size, rcb_offset, dec->sram_size, dec->rcb_size);
		if ((rcb_offset + rcb_size) > dec->rcb_size) {
			mpp_err("rcb: reg[%u] set failed.\n", reg_idx);
			ret = -ENOMEM;
			goto done;
		}
		reg = dec->rcb_iova + rcb_offset;
		mpp_write(&dec->mpp, reg_idx * sizeof(u32), reg);
		rcb_offset += rcb_size;
	}

	/* mark the fixup as done */
	val |= RKVDEC_CCU_BIT_FIX_RCB;
	writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
done:
	return ret;
}

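/*
 * Chain a prepared task into the hardware link list and kick the CCU.
 *
 * On the first task after power-up (the CCU work register reads back zero)
 * the whole cluster is brought up: every enabled core is switched to CCU
 * work mode and gets its rcb registers fixed up, auto gating is enabled,
 * and the first link table iova is written as the start address. Later
 * tasks only need add-mode plus the cfg-done doorbell; the hardware walks
 * from one table to the next via the tb_reg_next slot filled in during
 * prepare.
 */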
static int rkvdec2_hard_ccu_enqueue(struct rkvdec2_ccu *ccu,
				    struct mpp_task *mpp_task,
				    struct mpp_taskqueue *queue,
				    struct mpp_dev *mpp)
{
	u32 ccu_en, work_mode, link_mode;
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	if (test_bit(TASK_STATE_START, &mpp_task->state))
		goto done;

	ccu_en = readl(ccu->reg_base + RKVDEC_CCU_WORK_BASE);
	mpp_dbg_ccu("ccu_en=%d\n", ccu_en);
	if (!ccu_en) {
		u32 i;

		/* set work mode, skipping disabled cores */
		work_mode = 0;
		for (i = 0; i < queue->core_count; i++) {
			u32 val;
			struct mpp_dev *core = queue->cores[i];
			struct rkvdec2_dev *dec = to_rkvdec2_dev(core);

			if (core->disable)
				continue;
			work_mode |= dec->core_mask;
			rkvdec2_ccu_link_fix_rcb_regs(dec);
			/* hand the core over to ccu control */
			val = readl(dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
			val |= RKVDEC_LINK_BIT_CCU_WORK_MODE;
			writel(val, dec->link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
		}
		writel(work_mode, ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
		ccu->ccu_core_work_mode = readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE);
		mpp_dbg_ccu("ccu_work_mode=%08x, ccu_work_status=%08x\n",
			    readl(ccu->reg_base + RKVDEC_CCU_CORE_WORK_BASE),
			    readl(ccu->reg_base + RKVDEC_CCU_CORE_STA_BASE));

		/* set auto gating */
		writel(RKVDEC_CCU_BIT_AUTOGATE, ccu->reg_base + RKVDEC_CCU_CTRL_BASE);
		/* link start base */
		writel(task->table->iova, ccu->reg_base + RKVDEC_CCU_CFG_ADDR_BASE);
		/* enable link */
		writel(RKVDEC_CCU_BIT_WORK_EN, ccu->reg_base + RKVDEC_CCU_WORK_BASE);
	}

	/* set link mode: append when the ccu is already running */
	link_mode = ccu_en ? RKVDEC_CCU_BIT_ADD_MODE : 0;
	writel(link_mode | RKVDEC_LINK_ADD_CFG_NUM, ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE);

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);
	/* make sure the link table has hit memory before the doorbell below */
	wmb();
	INIT_DELAYED_WORK(&mpp_task->timeout_work, rkvdec2_ccu_timeout_work);
	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
	/* configure done */
	writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE);
	mpp_task_run_end(mpp_task, timing_en);

	/* pending to running */
	set_bit(TASK_STATE_RUNNING, &mpp_task->state);
	mpp_taskqueue_pending_to_run(queue, mpp_task);
	mpp_dbg_ccu("session %d task %d iova=%08x task->state=%lx link_mode=%08x\n",
		    mpp_task->session->index, mpp_task->task_index,
		    (u32)task->table->iova, mpp_task->state,
		    readl(ccu->reg_base + RKVDEC_CCU_LINK_MODE_BASE));
done:
	mpp_debug_leave();

	return 0;
}

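/*
 * Drop a task that triggered an iommu page fault.
 *
 * The task is force-finished: an error bit is folded into its irq status,
 * its timeout work is cancelled, its link table is returned to the unused
 * list, and any waiter in the GET path is woken before the task reference
 * is released. The per-core fault bookkeeping is cleared afterwards.
 */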
static void rkvdec2_hard_ccu_handle_pagefault_task(struct rkvdec2_dev *dec,
						   struct mpp_task *mpp_task)
{
	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);

	mpp_dbg_ccu("session %d task %d w:h[%d %d] pagefault mmu0[%08x %08x] mmu1[%08x %08x] fault_iova %08x\n",
		    mpp_task->session->index, mpp_task->task_index,
		    task->width, task->height, dec->mmu0_st, dec->mmu0_pta,
		    dec->mmu1_st, dec->mmu1_pta, dec->fault_iova);

	set_bit(TASK_STATE_HANDLE, &mpp_task->state);
	/* fold an error flag into the task's irq status */
	task->irq_status |= BIT(4);
	cancel_delayed_work(&mpp_task->timeout_work);
	rkvdec2_hard_ccu_finish(dec->link_dec->info, task);
	set_bit(TASK_STATE_FINISH, &mpp_task->state);
	set_bit(TASK_STATE_DONE, &mpp_task->state);
	list_move_tail(&task->table->link, &dec->ccu->unused_list);
	list_del_init(&mpp_task->queue_link);
	/* wake up the GET thread */
	wake_up(&mpp_task->wait);
	kref_put(&mpp_task->ref, mpp_free_task);
	dec->mmu_fault = 0;
	dec->fault_iova = 0;
}

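/*
 * Find which running task faulted after an iommu page fault.
 *
 * The faulting core latches the offending address in dec->fault_iova;
 * since every task owns a link table with a unique iova, comparing the
 * latched address against the table iova of each task on the running
 * list identifies the culprit, which is then dropped via
 * rkvdec2_hard_ccu_handle_pagefault_task().
 */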
static void rkvdec2_hard_ccu_pagefault_proc(struct mpp_taskqueue *queue)
{
	struct mpp_task *loop = NULL, *n;

	list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
		struct rkvdec2_task *task = to_rkvdec2_task(loop);
		u32 iova = (u32)task->table->iova;
		u32 i;

		for (i = 0; i < queue->core_count; i++) {
			struct mpp_dev *core = queue->cores[i];
			struct rkvdec2_dev *dec = to_rkvdec2_dev(core);

			if (!dec->mmu_fault || dec->fault_iova != iova)
				continue;
			rkvdec2_hard_ccu_handle_pagefault_task(dec, loop);
		}
	}
}

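/*
 * Rebuild the link chain after a reset and resend the survivors.
 *
 * A task whose tb_reg_int slot is still zero never raised an interrupt,
 * so it must run again. The first pass rewires tb_reg_next so the
 * unfinished tables point at each other in running-list order; the chain
 * is then terminated into a table taken from the unused list so the
 * hardware always has a valid prefetch target. The second pass clears
 * TASK_STATE_START on each unfinished task and enqueues it again.
 */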
static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue)
{
	struct rkvdec2_task *task_pre = NULL;
	struct mpp_task *loop = NULL, *n;
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

	/* re-link the unfinished tasks on the running list */
	list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
		struct rkvdec2_task *task = to_rkvdec2_task(loop);
		u32 *tb_reg = (u32 *)task->table->vaddr;
		u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];

		if (!irq_status) {
			if (task_pre) {
				tb_reg = (u32 *)task_pre->table->vaddr;
				tb_reg[dec->link_dec->info->tb_reg_next] = task->table->iova;
			}
			task_pre = task;
		}
	}

	/* terminate the chain with a table from the unused list */
	if (task_pre) {
		struct mpp_dma_buffer *tbl;
		u32 *tb_reg;

		tbl = list_first_entry_or_null(&dec->ccu->unused_list,
					       struct mpp_dma_buffer, link);
		WARN_ON(!tbl);
		if (tbl) {
			tb_reg = (u32 *)task_pre->table->vaddr;
			tb_reg[dec->link_dec->info->tb_reg_next] = tbl->iova;
		}
	}

	/* resend */
	list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
		struct rkvdec2_task *task = to_rkvdec2_task(loop);
		u32 *tb_reg = (u32 *)task->table->vaddr;
		u32 irq_status = tb_reg[dec->link_dec->info->tb_reg_int];

		mpp_dbg_ccu("reback: session %d task %d iova %08x next %08x irq_status 0x%08x\n",
			    loop->session->index, loop->task_index, (u32)task->table->iova,
			    tb_reg[dec->link_dec->info->tb_reg_next], irq_status);

		if (!irq_status) {
			cancel_delayed_work(&loop->timeout_work);
			clear_bit(TASK_STATE_START, &loop->state);
			rkvdec2_hard_ccu_enqueue(dec->ccu, loop, queue, mpp);
		}
	}
}

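/*
 * Main kthread worker for the hard-link CCU path.
 *
 * Every invocation walks the same sequence: dequeue finished tasks, handle
 * a pending reset (including page-fault triage and resending unfinished
 * tasks), move pending tasks into the hardware link list, power off once
 * both lists drain, and finally reap detached sessions. Roughly:
 *
 *	dequeue -> [reset? -> pagefault_proc -> resend] -> enqueue pending
 *		-> power off when idle -> session cleanup
 */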
void rkvdec2_hard_ccu_worker(struct kthread_work *work_s)
{
	struct mpp_task *mpp_task;
	struct mpp_dev *mpp = container_of(work_s, struct mpp_dev, work);
	struct mpp_taskqueue *queue = mpp->queue;
	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);

	mpp_debug_enter();

	/* 1. process all finished tasks on the running list */
	rkvdec2_hard_ccu_dequeue(queue, dec->ccu, dec->link_dec->info);

	/* 2. process reset request */
	if (atomic_read(&queue->reset_request) &&
	    (list_empty(&queue->running_list) || !dec->ccu->ccu_core_work_mode)) {
		/*
		 * cancel the timeout work of the running tasks to avoid
		 * a software timeout caused by a long reset
		 */
		struct mpp_task *loop = NULL, *n;

		list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
			cancel_delayed_work(&loop->timeout_work);
		}
		/* reset process */
		rkvdec2_hard_ccu_reset(queue, dec->ccu);
		atomic_set(&queue->reset_request, 0);
		/* on an iommu page fault, find the faulting task and drop it */
		if (queue->iommu_fault) {
			rkvdec2_hard_ccu_pagefault_proc(queue);
			queue->iommu_fault = 0;
		}

		/* relink the running tasks' iova chain and resend them to hw */
		if (!list_empty(&queue->running_list))
			rkvdec2_hard_ccu_resend_tasks(mpp, queue);
	}

	/* 3. process pending tasks */
	while (1) {
		if (atomic_read(&queue->reset_request))
			break;

		/* get one task from the pending list */
		mutex_lock(&queue->pending_lock);
		mpp_task = list_first_entry_or_null(&queue->pending_list,
						    struct mpp_task, queue_link);
		mutex_unlock(&queue->pending_lock);

		if (!mpp_task)
			break;
		if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) {
			mutex_lock(&queue->pending_lock);
			list_del_init(&mpp_task->queue_link);
			mutex_unlock(&queue->pending_lock);
			kref_put(&mpp_task->ref, mpp_free_task);
			continue;
		}

		mpp_task = rkvdec2_hard_ccu_prepare(mpp_task, dec->ccu, dec->link_dec->info);
		if (!mpp_task)
			break;

		rkvdec2_ccu_power_on(queue, dec->ccu);
		rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp);
	}

	/* 4. power off when both the running and pending lists are empty */
	mutex_lock(&queue->pending_lock);
	if (list_empty(&queue->running_list) &&
	    list_empty(&queue->pending_list))
		rkvdec2_ccu_power_off(queue, dec->ccu);
	mutex_unlock(&queue->pending_lock);

	/* 5. clean up sessions detached from the queue */
	mpp_session_cleanup_detach(queue, work_s);

	mpp_debug_leave();
}