1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3 * Copyright (c) 2020 Rockchip Electronics Co., Ltd.
4 *
5 * author:
6 * Ding Wei, leo.ding@rock-chips.com
7 * Alpha Lin, alpha.lin@rock-chips.com
8 *
9 */
10 #include <asm/cacheflush.h>
11 #include <linux/delay.h>
12 #include <linux/iopoll.h>
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/of_platform.h>
17 #include <linux/slab.h>
18 #include <linux/dma-buf.h>
19 #include <linux/uaccess.h>
20 #include <linux/regmap.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/proc_fs.h>
23 #include <soc/rockchip/pm_domains.h>
24
25 #include "rockchip_iep2_regs.h"
26 #include "mpp_debug.h"
27 #include "mpp_common.h"
28 #include "mpp_iommu.h"
29
30 #define IEP2_DRIVER_NAME "mpp-iep2"
31
32 #define IEP2_SESSION_MAX_BUFFERS 20
33
34 #define TILE_WIDTH 16
35 #define TILE_HEIGHT 4
36 #define MVL 28
37 #define MVR 27
38
/*
 * Source/destination pixel formats accepted by the hardware.
 * Values start at 2 to match the hw register encoding —
 * NOTE(review): values 0/1 are presumably other formats unused here; confirm
 * against the TRM.
 */
enum rockchip_iep2_fmt {
	ROCKCHIP_IEP2_FMT_YUV422 = 2,
	ROCKCHIP_IEP2_FMT_YUV420
};
43
/* Chroma plane layout/ordering variants (semi-planar UV/VU, planar). */
enum rockchip_iep2_yuv_swap {
	ROCKCHIP_IEP2_YUV_SWAP_SP_UV,
	ROCKCHIP_IEP2_YUV_SWAP_SP_VU,
	ROCKCHIP_IEP2_YUV_SWAP_P0,
	ROCKCHIP_IEP2_YUV_SWAP_P
};
50
/* Deinterlacer field order: top-first (TB) or bottom-first (BT). */
enum rockchip_iep2_dil_ff_order {
	ROCKCHIP_IEP2_DIL_FF_ORDER_TB,
	ROCKCHIP_IEP2_DIL_FF_ORDER_BT
};
55
/*
 * Deinterlace operating modes. IxOy naming reads "x fields in, y frames
 * out" (e.g. I5O2 = 5-field input, 2-frame output). PD is pulldown
 * (inverse telecine), DECT is detection-only.
 */
enum rockchip_iep2_dil_mode {
	ROCKCHIP_IEP2_DIL_MODE_DISABLE,
	ROCKCHIP_IEP2_DIL_MODE_I5O2,
	ROCKCHIP_IEP2_DIL_MODE_I5O1T,
	ROCKCHIP_IEP2_DIL_MODE_I5O1B,
	ROCKCHIP_IEP2_DIL_MODE_I2O2,
	ROCKCHIP_IEP2_DIL_MODE_I1O1T,
	ROCKCHIP_IEP2_DIL_MODE_I1O1B,
	ROCKCHIP_IEP2_DIL_MODE_PD,
	ROCKCHIP_IEP2_DIL_MODE_BYPASS,
	ROCKCHIP_IEP2_DIL_MODE_DECT
};
68
/*
 * Pulldown field-composition flags: which source picture the top/bottom
 * field pair is taken from. CC = cur/cur, CN = cur/nxt, NC = nxt/cur
 * (see the pd_mode switch in iep2_config()).
 */
enum ROCKCHIP_IEP2_PD_COMP_FLAG {
	ROCKCHIP_IEP2_PD_COMP_FLAG_CC,
	ROCKCHIP_IEP2_PD_COMP_FLAG_CN,
	ROCKCHIP_IEP2_PD_COMP_FLAG_NC,
	ROCKCHIP_IEP2_PD_COMP_FLAG_NON
};
75
/*
 * Default motion table, programmed into the DIL_MTN_TAB registers when the
 * task does not supply its own (params.mtn_en == 0, see iep2_mtn_tab_cfg).
 */
static u32 iep2_mtn_tab[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x01010000, 0x06050302, 0x0f0d0a08, 0x1c191512,
	0x2b282420, 0x3634312e, 0x3d3c3a38, 0x40403f3e,
	0x40404040, 0x40404040, 0x40404040, 0x40404040
};
83
/* Upcast helpers: embedded mpp_task/mpp_dev back to the driver wrappers. */
#define to_iep_task(task)	\
		container_of(task, struct iep_task, mpp_task)
#define to_iep2_dev(dev)	\
		container_of(dev, struct iep2_dev, mpp)
88
/*
 * Per-picture DMA addresses: luma, interleaved chroma (cb/cr), and the
 * separate cr plane used by fully-planar formats (mapped to the
 * *Y/*UV/*V address registers in iep2_config()).
 */
struct iep2_addr {
	u32 y;
	u32 cbcr;
	u32 cr;
};
94
/*
 * Hardware parameter set copied verbatim from user space
 * (MPP_CMD_SET_REG_WRITE) and programmed by the iep2_*_cfg() helpers.
 * Fields map 1:1 onto hardware register fields unless noted.
 */
struct iep2_params {
	u32 src_fmt;
	u32 src_yuv_swap;
	u32 dst_fmt;
	u32 dst_yuv_swap;
	/* picture geometry in tiles (TILE_WIDTH x TILE_HEIGHT pixels each) */
	u32 tile_cols;
	u32 tile_rows;
	u32 src_y_stride;
	u32 src_uv_stride;
	u32 dst_y_stride;

	/*
	 * Source pictures: [0] = current, [1] = next, [2] = previous
	 * (register writes in iep2_config() use CUR/NXT/PRE in that order).
	 */
	struct iep2_addr src[3];
	/* output fields: [0] = top, [1] = bottom */
	struct iep2_addr dst[2];
	u32 mv_addr;	/* motion-vector buffer */
	u32 md_addr;	/* motion-detect buffer */

	u32 dil_mode;
	u32 dil_out_mode;
	u32 dil_field_order;

	/* motion-detect tuning */
	u32 md_theta;
	u32 md_r;
	u32 md_lambda;

	/* OSD / scene detection thresholds */
	u32 dect_resi_thr;
	u32 osd_area_num;
	u32 osd_gradh_thr;
	u32 osd_gradv_thr;

	u32 osd_pos_limit_en;
	u32 osd_pos_limit_num;

	u32 osd_limit_area[2];

	u32 osd_line_num;
	u32 osd_pec_thr;

	/* up to 8 OSD rectangles (x/y start and end per area) */
	u32 osd_x_sta[8];
	u32 osd_x_end[8];
	u32 osd_y_sta[8];
	u32 osd_y_end[8];

	/* motion-estimation tuning */
	u32 me_pena;
	u32 mv_bonus;
	u32 mv_similar_thr;
	u32 mv_similar_num_thr0;
	s32 me_thr_offset;

	u32 mv_left_limit;
	u32 mv_right_limit;

	/* trusted motion-vector list with per-entry valid flags */
	s8 mv_tru_list[8];
	u32 mv_tru_vld[8];

	u32 eedi_thr0;

	u32 ble_backtoma_num;

	/* comb-artifact detection */
	u32 comb_cnt_thr;
	u32 comb_feature_thr;
	u32 comb_t_thr;
	u32 comb_osd_vld[8];

	/* mtn_en selects mtn_tab over the driver default table */
	u32 mtn_en;
	u32 mtn_tab[16];

	u32 pd_mode;	/* enum ROCKCHIP_IEP2_PD_COMP_FLAG */

	/* region-of-interest windows */
	u32 roi_en;
	u32 roi_layer_num;
	u32 roi_mode[8];
	u32 xsta[8];
	u32 xend[8];
	u32 ysta[8];
	u32 yend[8];
};
172
/*
 * Result counters read back from hardware in iep2_finish() and copied to
 * user space by iep2_result().
 */
struct iep2_output {
	/* motion-vector histogram, one bin per mv in [-MVL, MVR] */
	u32 mv_hist[MVL + MVR + 1];
	u32 dect_pd_tcnt;
	u32 dect_pd_bcnt;
	u32 dect_ff_cur_tcnt;
	u32 dect_ff_cur_bcnt;
	u32 dect_ff_nxt_tcnt;
	u32 dect_ff_nxt_bcnt;
	u32 dect_ff_ble_tcnt;
	u32 dect_ff_ble_bcnt;
	u32 dect_ff_nz;
	u32 dect_ff_comb_f;
	u32 dect_osd_cnt;	/* number of valid x/y_sta/end entries below */
	u32 out_comb_cnt;
	u32 out_osd_comb_cnt;
	u32 ff_gradt_tcnt;
	u32 ff_gradt_bcnt;
	/* detected OSD windows, in tile units (see iep2_osd_done()) */
	u32 x_sta[8];
	u32 x_end[8];
	u32 y_sta[8];
	u32 y_end[8];
};
195
/*
 * Per-job context. mpp_task MUST stay the first member: to_iep_task()
 * relies on a zero offset in several places.
 */
struct iep_task {
	struct mpp_task mpp_task;
	struct mpp_hw_info *hw_info;

	enum MPP_CLOCK_MODE clk_mode;
	struct iep2_params params;	/* input, from user space */
	struct iep2_output output;	/* result, read back from hw */

	struct reg_offset_info off_inf;
	u32 irq_status;		/* snapshot of INT_STS for this task */
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};
212
/*
 * Device context. mpp must stay the first member for to_iep2_dev().
 */
struct iep2_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info sclk_info;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_s;

	/* coherent scratch buffer handed to hw via IEP2_REG_ROI_ADDR */
	struct mpp_dma_buffer roi;
};
228
/*
 * Register index for each address field walked by iep2_process_reg_fd(),
 * in the exact field order of struct iep2_params (src[3], dst[2], mv, md).
 * -1 marks a slot with no backing register (the dst pictures have no
 * separate cr plane register).
 */
static int iep2_addr_rnum[] = {
	24, 27, 28, /* src cur */
	25, 29, 30, /* src nxt */
	26, 31, 32, /* src prv */
	44, 46, -1, /* dst top */
	45, 47, -1, /* dst bot */
	34, /* mv */
	33, /* md */
};
238
/*
 * iep2_process_reg_fd - translate user dma-buf fds into device addresses.
 *
 * Walks the address fields of task->params as a flat u32 array starting at
 * src[0].y — this relies on src[], dst[], mv_addr and md_addr being
 * contiguous u32 members laid out exactly as iep2_addr_rnum describes.
 * Unless the session sets MPP_FLAGS_REG_NO_OFFSET, each value packs a
 * dma-buf fd in bits [9:0] and a byte offset in bits [31:10]. Each fd is
 * attached to the task and the field is rewritten in place to
 * iova + offset.
 *
 * Returns 0 on success or a negative errno from mpp_task_attach_fd().
 */
static int iep2_process_reg_fd(struct mpp_session *session,
			       struct iep_task *task,
			       struct mpp_task_msgs *msgs)
{
	int i;
	/* see the detail at above table iep2_addr_rnum */
	int addr_num =
		ARRAY_SIZE(task->params.src) * 3 +
		ARRAY_SIZE(task->params.dst) * 3 + 2;

	u32 *paddr = &task->params.src[0].y;

	for (i = 0; i < addr_num; ++i) {
		int usr_fd;
		u32 offset;
		struct mpp_mem_region *mem_region = NULL;

		if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET) {
			usr_fd = paddr[i];
			offset = 0;
		} else {
			/* fd in low 10 bits, byte offset in the rest */
			usr_fd = paddr[i] & 0x3ff;
			offset = paddr[i] >> 10;
		}

		/* fd 0 means "unused"; -1 slots have no register at all */
		if (usr_fd == 0 || iep2_addr_rnum[i] == -1)
			continue;

		mem_region = mpp_task_attach_fd(&task->mpp_task, usr_fd);
		if (IS_ERR(mem_region)) {
			mpp_err("reg[%03d]: %08x failed\n",
				iep2_addr_rnum[i], paddr[i]);
			return PTR_ERR(mem_region);
		}

		mem_region->reg_idx = iep2_addr_rnum[i];
		mpp_debug(DEBUG_IOMMU, "reg[%3d]: %3d => %pad + offset %10d\n",
			  iep2_addr_rnum[i], usr_fd, &mem_region->iova, offset);
		/* rewrite the field so iep2_config() can program it directly */
		paddr[i] = mem_region->iova + offset;
	}

	return 0;
}
282
iep2_extract_task_msg(struct iep_task * task,struct mpp_task_msgs * msgs)283 static int iep2_extract_task_msg(struct iep_task *task,
284 struct mpp_task_msgs *msgs)
285 {
286 u32 i;
287 struct mpp_request *req;
288
289 for (i = 0; i < msgs->req_cnt; i++) {
290 req = &msgs->reqs[i];
291 if (!req->size)
292 continue;
293
294 switch (req->cmd) {
295 case MPP_CMD_SET_REG_WRITE: {
296 if (copy_from_user(&task->params,
297 req->data, req->size)) {
298 mpp_err("copy_from_user params failed\n");
299 return -EIO;
300 }
301 } break;
302 case MPP_CMD_SET_REG_READ: {
303 memcpy(&task->r_reqs[task->r_req_cnt++],
304 req, sizeof(*req));
305 } break;
306 case MPP_CMD_SET_REG_ADDR_OFFSET: {
307 mpp_extract_reg_offset_info(&task->off_inf, req);
308 } break;
309 default:
310 break;
311 }
312 }
313 mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
314 task->w_req_cnt, task->r_req_cnt);
315
316 return 0;
317 }
318
/*
 * iep2_alloc_task - allocate and populate one task from user messages.
 *
 * Extracts the request list, then (unless MPP_FLAGS_REG_FD_NO_TRANS is
 * set) translates the dma-buf fds embedded in the params into iovas.
 *
 * Returns the embedded &task->mpp_task on success, NULL on any failure
 * (the mpp core only checks for NULL; the specific errno is dropped).
 */
static void *iep2_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct iep_task *task = NULL;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task_init(session, &task->mpp_task);
	/* extract reqs for current task */
	ret = iep2_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process fd in register */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = iep2_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	task->clk_mode = CLK_MODE_NORMAL;

	mpp_debug_leave();

	return &task->mpp_task;

fail:
	/* detaches any fds already attached before freeing */
	mpp_task_finalize(session, &task->mpp_task);
	kfree(task);
	return NULL;
}
353
/*
 * iep2_config - program the main configuration registers for one task.
 *
 * Pure register programming from task->params; all writes are relaxed and
 * are only ordered against the start trigger by the wmb() in iep2_run().
 */
static void iep2_config(struct mpp_dev *mpp, struct iep_task *task)
{
	struct iep2_dev *iep = to_iep2_dev(mpp);
	struct iep2_params *cfg = &task->params;
	u32 reg;
	u32 width, height;

	/* geometry arrives in tiles; hw wants pixels */
	width = cfg->tile_cols * TILE_WIDTH;
	height = cfg->tile_rows * TILE_HEIGHT;

	reg = IEP2_REG_SRC_FMT(cfg->src_fmt)
		| IEP2_REG_SRC_YUV_SWAP(cfg->src_yuv_swap)
		| IEP2_REG_DST_FMT(cfg->dst_fmt)
		| IEP2_REG_DST_YUV_SWAP(cfg->dst_yuv_swap)
		| IEP2_REG_DEBUG_DATA_EN;
	mpp_write_relaxed(mpp, IEP2_REG_IEP_CONFIG0, reg);

	mpp_write_relaxed(mpp, IEP2_REG_WORK_MODE, IEP2_REG_IEP2_MODE);

	/* hw size fields are (size - 1) */
	reg = IEP2_REG_SRC_PIC_WIDTH(width - 1)
		| IEP2_REG_SRC_PIC_HEIGHT(height - 1);
	mpp_write_relaxed(mpp, IEP2_REG_SRC_IMG_SIZE, reg);

	reg = IEP2_REG_SRC_VIR_Y_STRIDE(cfg->src_y_stride)
		| IEP2_REG_SRC_VIR_UV_STRIDE(cfg->src_uv_stride);
	mpp_write_relaxed(mpp, IEP2_REG_VIR_SRC_IMG_WIDTH, reg);

	reg = IEP2_REG_DST_VIR_STRIDE(cfg->dst_y_stride);
	mpp_write_relaxed(mpp, IEP2_REG_VIR_DST_IMG_WIDTH, reg);

	/* all sub-engines are always enabled; only mode/order vary per task */
	reg = IEP2_REG_DIL_MV_HIST_EN
		| IEP2_REG_DIL_COMB_EN
		| IEP2_REG_DIL_BLE_EN
		| IEP2_REG_DIL_EEDI_EN
		| IEP2_REG_DIL_MEMC_EN
		| IEP2_REG_DIL_OSD_EN
		| IEP2_REG_DIL_PD_EN
		| IEP2_REG_DIL_FF_EN
		| IEP2_REG_DIL_FIELD_ORDER(cfg->dil_field_order)
		| IEP2_REG_DIL_OUT_MODE(cfg->dil_out_mode)
		| IEP2_REG_DIL_MODE(cfg->dil_mode);
	if (cfg->roi_en)
		reg |= IEP2_REG_DIL_ROI_EN;
	/* NOTE(review): md_lambda < 8 gating MD_PRE_EN looks like a hw
	 * constraint — confirm against the TRM */
	if (cfg->md_lambda < 8)
		reg |= IEP2_REG_DIL_MD_PRE_EN;
	mpp_write_relaxed(mpp, IEP2_REG_DIL_CONFIG0, reg);

	if (cfg->dil_mode != ROCKCHIP_IEP2_DIL_MODE_PD) {
		/* normal deinterlace: cur/nxt pictures straight through */
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURY,
				  cfg->src[0].y);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURUV,
				  cfg->src[0].cbcr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURV,
				  cfg->src[0].cr);

		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTY,
				  cfg->src[1].y);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTUV,
				  cfg->src[1].cbcr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTV,
				  cfg->src[1].cr);
	} else {
		/* pulldown: pick which pictures supply top/bottom fields */
		struct iep2_addr *top, *bot;

		switch (cfg->pd_mode) {
		default:
		case ROCKCHIP_IEP2_PD_COMP_FLAG_CC:
			top = &cfg->src[0];
			bot = &cfg->src[0];
			break;
		case ROCKCHIP_IEP2_PD_COMP_FLAG_CN:
			top = &cfg->src[0];
			bot = &cfg->src[1];
			break;
		case ROCKCHIP_IEP2_PD_COMP_FLAG_NC:
			top = &cfg->src[1];
			bot = &cfg->src[0];
			break;
		}

		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURY, top->y);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURUV, top->cbcr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_CURV, top->cr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTY, bot->y);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTUV, bot->cbcr);
		mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_NXTV, bot->cr);
	}

	/* hw watchdog: max timeout count with the counter enabled */
	reg = IEP2_REG_TIMEOUT_CFG_EN | 0x3ffffff;
	mpp_write_relaxed(mpp, IEP2_REG_TIMEOUT_CFG, reg);

	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_PREY, cfg->src[2].y);
	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_PREUV, cfg->src[2].cbcr);
	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_PREV, cfg->src[2].cr);

	/* md/mv buffers are both read and written by the hw */
	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_MD, cfg->md_addr);
	mpp_write_relaxed(mpp, IEP2_REG_SRC_ADDR_MV, cfg->mv_addr);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_MD, cfg->md_addr);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_MV, cfg->mv_addr);
	mpp_write_relaxed(mpp, IEP2_REG_ROI_ADDR, (u32)iep->roi.iova);

	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_TOPY, cfg->dst[0].y);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_TOPC, cfg->dst[0].cbcr);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_BOTY, cfg->dst[1].y);
	mpp_write_relaxed(mpp, IEP2_REG_DST_ADDR_BOTC, cfg->dst[1].cbcr);

	reg = IEP2_REG_MD_THETA(cfg->md_theta)
		| IEP2_REG_MD_R(cfg->md_r)
		| IEP2_REG_MD_LAMBDA(cfg->md_lambda);
	mpp_write_relaxed(mpp, IEP2_REG_MD_CONFIG0, reg);

	reg = IEP2_REG_DECT_RESI_THR(cfg->dect_resi_thr)
		| IEP2_REG_OSD_AREA_NUM(cfg->osd_area_num)
		| IEP2_REG_OSD_GRADH_THR(cfg->osd_gradh_thr)
		| IEP2_REG_OSD_GRADV_THR(cfg->osd_gradv_thr);
	mpp_write_relaxed(mpp, IEP2_REG_DECT_CONFIG0, reg);

	reg = IEP2_REG_OSD_POS_LIMIT_NUM(cfg->osd_pos_limit_num);
	if (cfg->osd_pos_limit_en)
		reg |= IEP2_REG_OSD_POS_LIMIT_EN;
	mpp_write_relaxed(mpp, IEP2_REG_OSD_LIMIT_CONFIG, reg);

	mpp_write_relaxed(mpp, IEP2_REG_OSD_LIMIT_AREA(0),
			  cfg->osd_limit_area[0]);
	mpp_write_relaxed(mpp, IEP2_REG_OSD_LIMIT_AREA(1),
			  cfg->osd_limit_area[1]);

	reg = IEP2_REG_OSD_PEC_THR(cfg->osd_pec_thr)
		| IEP2_REG_OSD_LINE_NUM(cfg->osd_line_num);
	mpp_write_relaxed(mpp, IEP2_REG_OSD_CONFIG0, reg);

	reg = IEP2_REG_ME_PENA(cfg->me_pena)
		| IEP2_REG_MV_BONUS(cfg->mv_bonus)
		| IEP2_REG_MV_SIMILAR_THR(cfg->mv_similar_thr)
		| IEP2_REG_MV_SIMILAR_NUM_THR0(cfg->mv_similar_num_thr0)
		| IEP2_REG_ME_THR_OFFSET(cfg->me_thr_offset);
	mpp_write_relaxed(mpp, IEP2_REG_ME_CONFIG0, reg);

	/* left limit is negated (two's complement) for the hw field */
	reg = IEP2_REG_MV_LEFT_LIMIT((~cfg->mv_left_limit) + 1)
		| IEP2_REG_MV_RIGHT_LIMIT(cfg->mv_right_limit);
	mpp_write_relaxed(mpp, IEP2_REG_ME_LIMIT_CONFIG, reg);

	mpp_write_relaxed(mpp, IEP2_REG_EEDI_CONFIG0,
			  IEP2_REG_EEDI_THR0(cfg->eedi_thr0));
	mpp_write_relaxed(mpp, IEP2_REG_BLE_CONFIG0,
			  IEP2_REG_BLE_BACKTOMA_NUM(cfg->ble_backtoma_num));
}
501
iep2_osd_cfg(struct mpp_dev * mpp,struct iep_task * task)502 static void iep2_osd_cfg(struct mpp_dev *mpp, struct iep_task *task)
503 {
504 struct iep2_params *hw_cfg = &task->params;
505 int i;
506 u32 reg;
507
508 for (i = 0; i < hw_cfg->osd_area_num; ++i) {
509 reg = IEP2_REG_OSD_X_STA(hw_cfg->osd_x_sta[i])
510 | IEP2_REG_OSD_X_END(hw_cfg->osd_x_end[i])
511 | IEP2_REG_OSD_Y_STA(hw_cfg->osd_y_sta[i])
512 | IEP2_REG_OSD_Y_END(hw_cfg->osd_y_end[i]);
513 mpp_write_relaxed(mpp, IEP2_REG_OSD_AREA_CONF(i), reg);
514 }
515
516 for (; i < ARRAY_SIZE(hw_cfg->osd_x_sta); ++i)
517 mpp_write_relaxed(mpp, IEP2_REG_OSD_AREA_CONF(i), 0);
518 }
519
iep2_mtn_tab_cfg(struct mpp_dev * mpp,struct iep_task * task)520 static void iep2_mtn_tab_cfg(struct mpp_dev *mpp, struct iep_task *task)
521 {
522 struct iep2_params *hw_cfg = &task->params;
523 int i;
524 u32 *mtn_tab = hw_cfg->mtn_en ? hw_cfg->mtn_tab : iep2_mtn_tab;
525
526 for (i = 0; i < ARRAY_SIZE(hw_cfg->mtn_tab); ++i)
527 mpp_write_relaxed(mpp, IEP2_REG_DIL_MTN_TAB(i), mtn_tab[i]);
528 }
529
/*
 * Valid-bit mask for each truth-list slot; the pattern repeats every four
 * entries because four list slots share one register (see
 * iep2_tru_list_cfg()).
 */
static u32 iep2_tru_list_vld_tab[] = {
	IEP2_REG_MV_TRU_LIST0_4_VLD, IEP2_REG_MV_TRU_LIST1_5_VLD,
	IEP2_REG_MV_TRU_LIST2_6_VLD, IEP2_REG_MV_TRU_LIST3_7_VLD,
	IEP2_REG_MV_TRU_LIST0_4_VLD, IEP2_REG_MV_TRU_LIST1_5_VLD,
	IEP2_REG_MV_TRU_LIST2_6_VLD, IEP2_REG_MV_TRU_LIST3_7_VLD
};
536
iep2_tru_list_cfg(struct mpp_dev * mpp,struct iep_task * task)537 static void iep2_tru_list_cfg(struct mpp_dev *mpp, struct iep_task *task)
538 {
539 struct iep2_params *cfg = &task->params;
540 int i;
541 u32 reg;
542
543 for (i = 0; i < ARRAY_SIZE(cfg->mv_tru_list); i += 4) {
544 reg = 0;
545
546 if (cfg->mv_tru_vld[i])
547 reg |= IEP2_REG_MV_TRU_LIST0_4(cfg->mv_tru_list[i])
548 | iep2_tru_list_vld_tab[i];
549
550 if (cfg->mv_tru_vld[i + 1])
551 reg |= IEP2_REG_MV_TRU_LIST1_5(cfg->mv_tru_list[i + 1])
552 | iep2_tru_list_vld_tab[i + 1];
553
554 if (cfg->mv_tru_vld[i + 2])
555 reg |= IEP2_REG_MV_TRU_LIST2_6(cfg->mv_tru_list[i + 2])
556 | iep2_tru_list_vld_tab[i + 2];
557
558 if (cfg->mv_tru_vld[i + 3])
559 reg |= IEP2_REG_MV_TRU_LIST3_7(cfg->mv_tru_list[i + 3])
560 | iep2_tru_list_vld_tab[i + 3];
561
562 mpp_write_relaxed(mpp, IEP2_REG_MV_TRU_LIST(i / 4), reg);
563 }
564 }
565
iep2_comb_cfg(struct mpp_dev * mpp,struct iep_task * task)566 static void iep2_comb_cfg(struct mpp_dev *mpp, struct iep_task *task)
567 {
568 struct iep2_params *hw_cfg = &task->params;
569 int i;
570 u32 reg = 0;
571
572 for (i = 0; i < ARRAY_SIZE(hw_cfg->comb_osd_vld); ++i) {
573 if (hw_cfg->comb_osd_vld[i])
574 reg |= IEP2_REG_COMB_OSD_VLD(i);
575 }
576
577 reg |= IEP2_REG_COMB_T_THR(hw_cfg->comb_t_thr)
578 | IEP2_REG_COMB_FEATRUE_THR(hw_cfg->comb_feature_thr)
579 | IEP2_REG_COMB_CNT_THR(hw_cfg->comb_cnt_thr);
580 mpp_write_relaxed(mpp, IEP2_REG_COMB_CONFIG0, reg);
581 }
582
/*
 * iep2_run - program and kick the hardware for one task.
 *
 * All configuration uses relaxed writes; the wmb() below is what orders
 * them before the (non-relaxed) start-bit write.
 */
static int iep2_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	struct iep_task *task = NULL;
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	task = to_iep_task(mpp_task);

	/* init current task */
	mpp->cur_task = mpp_task;

	iep2_config(mpp, task);
	iep2_osd_cfg(mpp, task);
	iep2_mtn_tab_cfg(mpp, task);
	iep2_tru_list_cfg(mpp, task);
	iep2_comb_cfg(mpp, task);

	/* set interrupt enable bits */
	mpp_write_relaxed(mpp, IEP2_REG_INT_EN,
			  IEP2_REG_FRM_DONE_EN
			  | IEP2_REG_OSD_MAX_EN
			  | IEP2_REG_BUS_ERROR_EN
			  | IEP2_REG_TIMEOUT_EN);

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

	/* Last, flush the registers */
	wmb();
	/* start iep2 */
	mpp_write(mpp, IEP2_REG_FRM_START, 1);

	mpp_task_run_end(mpp_task, timing_en);

	mpp_debug_leave();

	return 0;
}
625
/*
 * iep2_irq - hard irq handler (runs in interrupt context).
 *
 * The interrupt line may be shared: bail out with IRQ_NONE when the block
 * is not in IEP2 mode or no valid status bit is set. Status is latched
 * into mpp->irq_status and the hw bits cleared before waking the thread.
 */
static int iep2_irq(struct mpp_dev *mpp)
{
	u32 work_mode = mpp_read(mpp, IEP2_REG_WORK_MODE);

	if (work_mode && !(work_mode & IEP2_REG_IEP2_MODE))
		return IRQ_NONE;
	mpp->irq_status = mpp_read(mpp, IEP2_REG_INT_STS);
	/* ack everything; the latched copy is what the thread consumes */
	mpp_write(mpp, IEP2_REG_INT_CLR, 0xffffffff);

	if (!IEP2_REG_RO_VALID_INT_STS(mpp->irq_status))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}
640
iep2_isr(struct mpp_dev * mpp)641 static int iep2_isr(struct mpp_dev *mpp)
642 {
643 struct mpp_task *mpp_task = NULL;
644 struct iep_task *task = NULL;
645 struct iep2_dev *iep = to_iep2_dev(mpp);
646
647 mpp_task = mpp->cur_task;
648 task = to_iep_task(mpp_task);
649 if (!task) {
650 dev_err(iep->mpp.dev, "no current task\n");
651 return IRQ_HANDLED;
652 }
653
654 mpp_time_diff(mpp_task);
655 mpp->cur_task = NULL;
656 task->irq_status = mpp->irq_status;
657 mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
658 task->irq_status);
659
660 if (IEP2_REG_RO_BUS_ERROR_STS(task->irq_status) ||
661 IEP2_REG_RO_TIMEOUT_STS(task->irq_status))
662 atomic_inc(&mpp->reset_request);
663
664 mpp_task_finish(mpp_task->session, mpp_task);
665
666 mpp_debug_leave();
667
668 return IRQ_HANDLED;
669 }
670
iep2_osd_done(struct mpp_dev * mpp,struct iep_task * task)671 static void iep2_osd_done(struct mpp_dev *mpp, struct iep_task *task)
672 {
673 int i;
674 u32 reg;
675
676 for (i = 0; i < task->output.dect_osd_cnt; ++i) {
677 reg = mpp_read(mpp, IEP2_REG_RO_OSD_AREA_X(i));
678 task->output.x_sta[i] = IEP2_REG_RO_X_STA(reg) / 16;
679 task->output.x_end[i] = IEP2_REG_RO_X_END(reg) / 16;
680
681 reg = mpp_read(mpp, IEP2_REG_RO_OSD_AREA_Y(i));
682 task->output.y_sta[i] = IEP2_REG_RO_Y_STA(reg) / 4;
683 task->output.y_end[i] = IEP2_REG_RO_Y_END(reg) / 4;
684 }
685
686 for (; i < ARRAY_SIZE(task->output.x_sta); ++i) {
687 task->output.x_sta[i] = 0;
688 task->output.x_end[i] = 0;
689 task->output.y_sta[i] = 0;
690 task->output.y_end[i] = 0;
691 }
692 }
693
/*
 * iep2_finish - harvest the result counters after a completed job.
 *
 * Straight register readout into task->output; iep2_result() later copies
 * that struct to user space. The mv histogram packs two bins per register
 * (even in the low half, odd in the high half).
 */
static int iep2_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	struct iep_task *task = to_iep_task(mpp_task);
	struct iep2_output *output = &task->output;
	u32 i;
	u32 reg;

	mpp_debug_enter();

	output->dect_pd_tcnt = mpp_read(mpp, IEP2_REG_RO_PD_TCNT);
	output->dect_pd_bcnt = mpp_read(mpp, IEP2_REG_RO_PD_BCNT);
	output->dect_ff_cur_tcnt = mpp_read(mpp, IEP2_REG_RO_FF_CUR_TCNT);
	output->dect_ff_cur_bcnt = mpp_read(mpp, IEP2_REG_RO_FF_CUR_BCNT);
	output->dect_ff_nxt_tcnt = mpp_read(mpp, IEP2_REG_RO_FF_NXT_TCNT);
	output->dect_ff_nxt_bcnt = mpp_read(mpp, IEP2_REG_RO_FF_NXT_BCNT);
	output->dect_ff_ble_tcnt = mpp_read(mpp, IEP2_REG_RO_FF_BLE_TCNT);
	output->dect_ff_ble_bcnt = mpp_read(mpp, IEP2_REG_RO_FF_BLE_BCNT);
	output->dect_ff_nz = mpp_read(mpp, IEP2_REG_RO_FF_COMB_NZ);
	output->dect_ff_comb_f = mpp_read(mpp, IEP2_REG_RO_FF_COMB_F);
	output->dect_osd_cnt = mpp_read(mpp, IEP2_REG_RO_OSD_NUM);

	reg = mpp_read(mpp, IEP2_REG_RO_COMB_CNT);
	output->out_comb_cnt = IEP2_REG_RO_OUT_COMB_CNT(reg);
	output->out_osd_comb_cnt = IEP2_REG_RO_OUT_OSD_COMB_CNT(reg);
	output->ff_gradt_tcnt = mpp_read(mpp, IEP2_REG_RO_FF_GRADT_TCNT);
	output->ff_gradt_bcnt = mpp_read(mpp, IEP2_REG_RO_FF_GRADT_BCNT);

	/* uses dect_osd_cnt read above, so must come after it */
	iep2_osd_done(mpp, task);

	for (i = 0; i < ARRAY_SIZE(output->mv_hist); i += 2) {
		reg = mpp_read(mpp, IEP2_REG_RO_MV_HIST_BIN(i / 2));
		output->mv_hist[i] = IEP2_REG_RO_MV_HIST_EVEN(reg);
		output->mv_hist[i + 1] = IEP2_REG_RO_MV_HIST_ODD(reg);
	}

	mpp_debug_leave();

	return 0;
}
734
iep2_result(struct mpp_dev * mpp,struct mpp_task * mpp_task,struct mpp_task_msgs * msgs)735 static int iep2_result(struct mpp_dev *mpp,
736 struct mpp_task *mpp_task,
737 struct mpp_task_msgs *msgs)
738 {
739 u32 i;
740 struct mpp_request *req;
741 struct iep_task *task = to_iep_task(mpp_task);
742
743 /* FIXME may overflow the kernel */
744 for (i = 0; i < task->r_req_cnt; i++) {
745 req = &task->r_reqs[i];
746
747 if (copy_to_user(req->data, (u8 *)&task->output, req->size)) {
748 mpp_err("copy_to_user reg fail\n");
749 return -EIO;
750 }
751 }
752
753 return 0;
754 }
755
/*
 * iep2_free_task - release one task after completion.
 *
 * Detaches the task's buffers/session state, then frees the containing
 * iep_task allocation.
 */
static int iep2_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	mpp_task_finalize(session, mpp_task);
	kfree(to_iep_task(mpp_task));

	return 0;
}
766
767 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
iep2_procfs_remove(struct mpp_dev * mpp)768 static int iep2_procfs_remove(struct mpp_dev *mpp)
769 {
770 struct iep2_dev *iep = to_iep2_dev(mpp);
771
772 if (iep->procfs) {
773 proc_remove(iep->procfs);
774 iep->procfs = NULL;
775 }
776
777 return 0;
778 }
779
iep2_procfs_init(struct mpp_dev * mpp)780 static int iep2_procfs_init(struct mpp_dev *mpp)
781 {
782 struct iep2_dev *iep = to_iep2_dev(mpp);
783
784 iep->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
785 if (IS_ERR_OR_NULL(iep->procfs)) {
786 mpp_err("failed on mkdir\n");
787 iep->procfs = NULL;
788 return -EIO;
789 }
790
791 /* for common mpp_dev options */
792 mpp_procfs_create_common(iep->procfs, mpp);
793
794 mpp_procfs_create_u32("aclk", 0644,
795 iep->procfs, &iep->aclk_info.debug_rate_hz);
796 mpp_procfs_create_u32("session_buffers", 0644,
797 iep->procfs, &mpp->session_max_buffers);
798
799 return 0;
800 }
801 #else
/* procfs support disabled: no-op stubs keep the callers #ifdef-free */
static inline int iep2_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int iep2_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}
811 #endif
812
813 #define IEP2_TILE_W_MAX 120
814 #define IEP2_TILE_H_MAX 272
815
iep2_init(struct mpp_dev * mpp)816 static int iep2_init(struct mpp_dev *mpp)
817 {
818 int ret;
819 struct iep2_dev *iep = to_iep2_dev(mpp);
820
821 mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_IEP2];
822
823 /* Get clock info from dtsi */
824 ret = mpp_get_clk_info(mpp, &iep->aclk_info, "aclk");
825 if (ret)
826 mpp_err("failed on clk_get aclk\n");
827 ret = mpp_get_clk_info(mpp, &iep->hclk_info, "hclk");
828 if (ret)
829 mpp_err("failed on clk_get hclk\n");
830 ret = mpp_get_clk_info(mpp, &iep->sclk_info, "sclk");
831 if (ret)
832 mpp_err("failed on clk_get sclk\n");
833 /* Set default rates */
834 mpp_set_clk_info_rate_hz(&iep->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
835
836 iep->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "rst_a");
837 if (!iep->rst_a)
838 mpp_err("No aclk reset resource define\n");
839 iep->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "rst_h");
840 if (!iep->rst_h)
841 mpp_err("No hclk reset resource define\n");
842 iep->rst_s = mpp_reset_control_get(mpp, RST_TYPE_CORE, "rst_s");
843 if (!iep->rst_s)
844 mpp_err("No sclk reset resource define\n");
845
846 iep->roi.size = IEP2_TILE_W_MAX * IEP2_TILE_H_MAX;
847 iep->roi.vaddr = dma_alloc_coherent(mpp->dev, iep->roi.size,
848 &iep->roi.iova,
849 GFP_KERNEL);
850 if (iep->roi.vaddr) {
851 dev_err(mpp->dev, "allocate roi buffer failed\n");
852 //return -ENOMEM;
853 }
854
855 return 0;
856 }
857
iep2_clk_on(struct mpp_dev * mpp)858 static int iep2_clk_on(struct mpp_dev *mpp)
859 {
860 struct iep2_dev *iep = to_iep2_dev(mpp);
861
862 mpp_clk_safe_enable(iep->aclk_info.clk);
863 mpp_clk_safe_enable(iep->hclk_info.clk);
864 mpp_clk_safe_enable(iep->sclk_info.clk);
865
866 return 0;
867 }
868
iep2_clk_off(struct mpp_dev * mpp)869 static int iep2_clk_off(struct mpp_dev *mpp)
870 {
871 struct iep2_dev *iep = to_iep2_dev(mpp);
872
873 mpp_clk_safe_disable(iep->aclk_info.clk);
874 mpp_clk_safe_disable(iep->hclk_info.clk);
875 mpp_clk_safe_disable(iep->sclk_info.clk);
876
877 return 0;
878 }
879
iep2_set_freq(struct mpp_dev * mpp,struct mpp_task * mpp_task)880 static int iep2_set_freq(struct mpp_dev *mpp,
881 struct mpp_task *mpp_task)
882 {
883 struct iep2_dev *iep = to_iep2_dev(mpp);
884 struct iep_task *task = to_iep_task(mpp_task);
885
886 mpp_clk_set_rate(&iep->aclk_info, task->clk_mode);
887
888 return 0;
889 }
890
/*
 * iep2_reset - recover the hardware after an error.
 *
 * Tries the block's own soft reset first (polling up to 5us for the
 * finish flag) and falls back to the CRU resets if that times out.
 * Always returns 0: a failed reset is logged but not propagated.
 */
static int iep2_reset(struct mpp_dev *mpp)
{
	struct iep2_dev *iep = to_iep2_dev(mpp);

	int ret = 0;
	u32 rst_status = 0;

	/* soft reset first */
	mpp_write(mpp, IEP2_REG_IEP_CONFIG0, IEP2_REG_ACLK_SRESET_P);
	ret = readl_relaxed_poll_timeout(mpp->reg_base + IEP2_REG_STATUS,
					 rst_status,
					 rst_status & IEP2_REG_ARST_FINISH_DONE,
					 0, 5);
	if (ret) {
		mpp_err("soft reset timeout, use cru reset\n");
		if (iep->rst_a && iep->rst_h && iep->rst_s) {
			/* Don't skip this or iommu won't work after reset */
			mpp_pmu_idle_request(mpp, true);
			mpp_safe_reset(iep->rst_a);
			mpp_safe_reset(iep->rst_h);
			mpp_safe_reset(iep->rst_s);
			udelay(5);
			mpp_safe_unreset(iep->rst_a);
			mpp_safe_unreset(iep->rst_h);
			mpp_safe_unreset(iep->rst_s);
			mpp_pmu_idle_request(mpp, false);
		}
	}

	return 0;
}
922
/* hardware lifecycle callbacks used by the mpp core */
static struct mpp_hw_ops iep_v2_hw_ops = {
	.init = iep2_init,
	.clk_on = iep2_clk_on,
	.clk_off = iep2_clk_off,
	.set_freq = iep2_set_freq,
	.reset = iep2_reset,
};

/* per-task callbacks used by the mpp core */
static struct mpp_dev_ops iep_v2_dev_ops = {
	.alloc_task = iep2_alloc_task,
	.run = iep2_run,
	.irq = iep2_irq,
	.isr = iep2_isr,
	.finish = iep2_finish,
	.result = iep2_result,
	.free_task = iep2_free_task,
};

/* reg_id -1: no version/id register lookup for this block */
static struct mpp_hw_info iep2_hw_info = {
	.reg_id = -1,
};

static const struct mpp_dev_var iep2_v2_data = {
	.device_type = MPP_DEVICE_IEP2,
	.hw_ops = &iep_v2_hw_ops,
	.dev_ops = &iep_v2_dev_ops,
	.hw_info = &iep2_hw_info,
};

static const struct of_device_id mpp_iep2_match[] = {
	{
		.compatible = "rockchip,iep-v2",
		.data = &iep2_v2_data,
	},
#ifdef CONFIG_CPU_RV1126
	{
		.compatible = "rockchip,rv1126-iep",
		.data = &iep2_v2_data,
	},
#endif
	{},
};
965
iep2_probe(struct platform_device * pdev)966 static int iep2_probe(struct platform_device *pdev)
967 {
968 struct device *dev = &pdev->dev;
969 struct iep2_dev *iep = NULL;
970 struct mpp_dev *mpp = NULL;
971 const struct of_device_id *match = NULL;
972 int ret = 0;
973
974 dev_info(dev, "probe device\n");
975 iep = devm_kzalloc(dev, sizeof(struct iep2_dev), GFP_KERNEL);
976 if (!iep)
977 return -ENOMEM;
978
979 mpp = &iep->mpp;
980 platform_set_drvdata(pdev, mpp);
981
982 if (pdev->dev.of_node) {
983 match = of_match_node(mpp_iep2_match, pdev->dev.of_node);
984 if (match)
985 mpp->var = (struct mpp_dev_var *)match->data;
986 }
987
988 ret = mpp_dev_probe(mpp, pdev);
989 if (ret) {
990 dev_err(dev, "probe sub driver failed\n");
991 return -EINVAL;
992 }
993
994 ret = devm_request_threaded_irq(dev, mpp->irq,
995 mpp_dev_irq,
996 mpp_dev_isr_sched,
997 IRQF_SHARED,
998 dev_name(dev), mpp);
999 if (ret) {
1000 dev_err(dev, "register interrupter runtime failed\n");
1001 return -EINVAL;
1002 }
1003
1004 mpp->session_max_buffers = IEP2_SESSION_MAX_BUFFERS;
1005 iep2_procfs_init(mpp);
1006 /* register current device to mpp service */
1007 mpp_dev_register_srv(mpp, mpp->srv);
1008 dev_info(dev, "probing finish\n");
1009
1010 return 0;
1011 }
1012
iep2_remove(struct platform_device * pdev)1013 static int iep2_remove(struct platform_device *pdev)
1014 {
1015 struct device *dev = &pdev->dev;
1016 struct mpp_dev *mpp = dev_get_drvdata(dev);
1017 struct iep2_dev *iep = to_iep2_dev(mpp);
1018
1019 dma_free_coherent(dev, iep->roi.size, iep->roi.vaddr, iep->roi.iova);
1020
1021 dev_info(dev, "remove device\n");
1022 mpp_dev_remove(mpp);
1023 iep2_procfs_remove(mpp);
1024
1025 return 0;
1026 }
1027
/*
 * Exported rather than registered here — NOTE(review): presumably an
 * outer mpp service module calls platform_driver_register() on this;
 * confirm against the mpp service code.
 */
struct platform_driver rockchip_iep2_driver = {
	.probe = iep2_probe,
	.remove = iep2_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = IEP2_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_iep2_match),
	},
};
EXPORT_SYMBOL(rockchip_iep2_driver);
1038
1039