// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */

#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/proc_fs.h>
#include <linux/nospec.h>
#include <linux/workqueue.h>
#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rockchip_iommu.h>
#include <soc/rockchip/rockchip_ipa.h>
#include <soc/rockchip/rockchip_opp_select.h>
#include <soc/rockchip/rockchip_system_monitor.h>

#ifdef CONFIG_PM_DEVFREQ
#include "../../../devfreq/governor.h"
#endif

#include "mpp_debug.h"
#include "mpp_iommu.h"
#include "mpp_common.h"

#define RKVENC_DRIVER_NAME		"mpp_rkvenc"

#define IOMMU_GET_BUS_ID(x)		(((x) >> 6) & 0x1f)
#define IOMMU_PAGE_SIZE			SZ_4K

#define RKVENC_SESSION_MAX_BUFFERS	40
/* the maximum number of registers across all hardware versions */
#define RKVENC_REG_L1_NUM		780
#define RKVENC_REG_L2_NUM		320
#define RKVENC_REG_START_INDEX		0
#define RKVENC_REG_END_INDEX		131
/* rkvenc register info */
#define RKVENC_REG_NUM			112
#define RKVENC_REG_HW_ID_INDEX		0
#define RKVENC_REG_CLR_CACHE_BASE	0x884
#define RKVENC_ENC_START_INDEX		1
#define RKVENC_ENC_START_BASE		0x004
#define RKVENC_LKT_NUM(x)		((x) & 0xff)
#define RKVENC_CMD(x)			(((x) & 0x3) << 8)
#define RKVENC_CLK_GATE_EN		BIT(16)
#define RKVENC_CLR_BASE			0x008
#define RKVENC_SAFE_CLR_BIT		BIT(0)
#define RKVENC_FORCE_CLR_BIT		BIT(1)
#define RKVENC_LKT_ADDR_BASE		0x00c

#define RKVENC_INT_EN_INDEX		4
#define RKVENC_INT_EN_BASE		0x010
#define RKVENC_INT_MSK_BASE		0x014
#define RKVENC_INT_CLR_BASE		0x018
#define RKVENC_INT_STATUS_INDEX		7
#define RKVENC_INT_STATUS_BASE		0x01c
/* bits for the interrupt mask/clear/status registers */
#define RKVENC_BIT_ONE_FRAME		BIT(0)
#define RKVENC_BIT_LINK_TABLE		BIT(1)
#define RKVENC_BIT_SAFE_CLEAR		BIT(2)
#define RKVENC_BIT_ONE_SLICE		BIT(3)
#define RKVENC_BIT_STREAM_OVERFLOW	BIT(4)
#define RKVENC_BIT_AXI_WRITE_FIFO_FULL	BIT(5)
#define RKVENC_BIT_AXI_WRITE_CHANNEL	BIT(6)
#define RKVENC_BIT_AXI_READ_CHANNEL	BIT(7)
#define RKVENC_BIT_TIMEOUT		BIT(8)
#define RKVENC_INT_ERROR_BITS		((RKVENC_BIT_STREAM_OVERFLOW) |\
					(RKVENC_BIT_AXI_WRITE_FIFO_FULL) |\
					(RKVENC_BIT_AXI_WRITE_CHANNEL) |\
					(RKVENC_BIT_AXI_READ_CHANNEL) |\
					(RKVENC_BIT_TIMEOUT))
#define RKVENC_ENC_RSL_INDEX		12
#define RKVENC_ENC_PIC_INDEX		13
#define RKVENC_ENC_PIC_BASE		0x034
#define RKVENC_GET_FORMAT(x)		((x) & 0x1)
#define RKVENC_ENC_PIC_NODE_INT_EN	BIT(31)
#define RKVENC_ENC_WDG_BASE		0x038
#define RKVENC_PPLN_ENC_LMT(x)		((x) & 0xf)
#define RKVENC_OSD_CFG_BASE		0x1c0
#define RKVENC_OSD_PLT_TYPE		BIT(17)
#define RKVENC_OSD_CLK_SEL_BIT		BIT(16)
#define RKVENC_STATUS_BASE(i)		(0x210 + (4 * (i)))
#define RKVENC_BSL_STATUS_BASE		0x210
#define RKVENC_BITSTREAM_LENGTH(x)	((x) & 0x7FFFFFF)
#define RKVENC_ENC_STATUS_BASE		0x220
#define RKVENC_ENC_STATUS_ENC(x)	(((x) >> 0) & 0x3)
#define RKVENC_LKT_STATUS_BASE		0x224
#define RKVENC_LKT_STATUS_FNUM_ENC(x)	(((x) >> 0) & 0xff)
#define RKVENC_LKT_STATUS_FNUM_CFG(x)	(((x) >> 8) & 0xff)
#define RKVENC_LKT_STATUS_FNUM_INT(x)	(((x) >> 16) & 0xff)
#define RKVENC_OSD_PLT_BASE(i)		(0x400 + (4 * (i)))

#define RKVENC_L2_OFFSET		(0x10000)
#define RKVENC_L2_ADDR_BASE		(0x3f0)
#define RKVENC_L2_WRITE_BASE		(0x3f4)
#define RKVENC_L2_READ_BASE		(0x3f8)
#define RKVENC_L2_BURST_TYPE		BIT(0)

#define RKVENC_GET_WIDTH(x)		((((x) & 0x1ff) + 1) << 3)
#define RKVENC_GET_HEIGHT(x)		(((((x) >> 16) & 0x1ff) + 1) << 3)

#define to_rkvenc_task(ctx)		\
		container_of(ctx, struct rkvenc_task, mpp_task)
#define to_rkvenc_dev(dev)		\
		container_of(dev, struct rkvenc_dev, mpp)

enum rkvenc_format_type {
	RKVENC_FMT_H264E = 0,
	RKVENC_FMT_H265E = 1,
	RKVENC_FMT_BUTT,
};

enum RKVENC_MODE {
	RKVENC_MODE_NONE,
	RKVENC_MODE_ONEFRAME,
	RKVENC_MODE_LINKTABLE_FIX,
	RKVENC_MODE_LINKTABLE_UPDATE,
	RKVENC_MODE_BUTT
};

struct rkvenc_task {
	struct mpp_task mpp_task;

	int link_flags;
	int fmt;
	enum RKVENC_MODE link_mode;

	/* level 1 register setting */
	u32 reg_offset;
	u32 reg_num;
	u32 reg[RKVENC_REG_L1_NUM];
	u32 width;
	u32 height;
	u32 pixels;
	/* level 2 register setting */
	u32 reg_l2_offset;
	u32 reg_l2_num;
	u32 reg_l2[RKVENC_REG_L2_NUM];
	/* register offset info */
	struct reg_offset_info off_inf;

	enum MPP_CLOCK_MODE clk_mode;
	u32 irq_status;
	/* reqs for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
};

struct rkvenc_session_priv {
	struct rw_semaphore rw_sem;
	/* codec info from user */
	struct {
		/* show mode */
		u32 flag;
		/* item data */
		u64 val;
	} codec_info[ENC_INFO_BUTT];
};

struct rkvenc_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	struct mpp_clk_info core_clk_info;
	u32 default_max_load;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	struct reset_control *rst_core;

#ifdef CONFIG_PM_DEVFREQ
	struct regulator *vdd;
	struct devfreq *devfreq;
	unsigned long volt;
	unsigned long core_rate_hz;
	unsigned long core_last_rate_hz;
	struct ipa_power_model_data *model_data;
	struct thermal_cooling_device *devfreq_cooling;
	struct monitor_dev_info *mdev_info;
#endif
	/* for IOMMU page fault handling */
	struct work_struct iommu_work;
	struct workqueue_struct *iommu_wq;
	struct page *aux_page;
	unsigned long aux_iova;
	unsigned long fault_iova;
};

struct link_table_elem {
	dma_addr_t lkt_dma_addr;
	void *lkt_cpu_addr;
	u32 lkt_index;
	struct list_head list;
};

static struct mpp_hw_info rkvenc_hw_info = {
	.reg_num = RKVENC_REG_NUM,
	.reg_id = RKVENC_REG_HW_ID_INDEX,
	.reg_en = RKVENC_ENC_START_INDEX,
	.reg_start = RKVENC_REG_START_INDEX,
	.reg_end = RKVENC_REG_END_INDEX,
};

/*
 * file handle translation information: register indices holding fds
 */
static const u16 trans_tbl_h264e[] = {
	70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	80, 81, 82, 83, 84, 85, 86, 124, 125,
	126, 127, 128, 129, 130, 131
};

static const u16 trans_tbl_h265e[] = {
	70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	80, 81, 82, 83, 84, 85, 86, 124, 125,
	126, 127, 128, 129, 130, 131, 95, 96
};

static struct mpp_trans_info trans_rk_rkvenc[] = {
	[RKVENC_FMT_H264E] = {
		.count = ARRAY_SIZE(trans_tbl_h264e),
		.table = trans_tbl_h264e,
	},
	[RKVENC_FMT_H265E] = {
		.count = ARRAY_SIZE(trans_tbl_h265e),
		.table = trans_tbl_h265e,
	},
};

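/*
 * Copy the register read/write requests of a task from userspace.
 * Requests with an offset at or above RKVENC_L2_OFFSET target the
 * indirectly accessed level 2 register set and are staged in
 * task->reg_l2; all others target the level 1 set in task->reg.
 * Write payloads are copied in here, while read requests are only
 * recorded and served after the hardware has finished.
 */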
static int rkvenc_extract_task_msg(struct rkvenc_task *task,
				   struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;

	for (i = 0; i < msgs->req_cnt; i++) {
		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			int req_base;
			int max_size;
			u8 *dst = NULL;

			if (req->offset >= RKVENC_L2_OFFSET) {
				req_base = RKVENC_L2_OFFSET;
				max_size = sizeof(task->reg_l2);
				dst = (u8 *)task->reg_l2;
			} else {
				req_base = 0;
				max_size = sizeof(task->reg);
				dst = (u8 *)task->reg;
			}

			ret = mpp_check_req(req, req_base, max_size,
					    0, max_size);
			if (ret)
				return ret;

			dst += req->offset - req_base;
			if (copy_from_user(dst, req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			int req_base;
			int max_size;

			if (req->offset >= RKVENC_L2_OFFSET) {
				req_base = RKVENC_L2_OFFSET;
				max_size = sizeof(task->reg_l2);
			} else {
				req_base = 0;
				max_size = sizeof(task->reg);
			}

			ret = mpp_check_req(req, req_base, max_size,
					    0, max_size);
			if (ret)
				return ret;

			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt=%d, r_req_cnt=%d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

static void *rkvenc_alloc_task(struct mpp_session *session,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct rkvenc_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for current task */
	ret = rkvenc_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	task->fmt = RKVENC_GET_FORMAT(task->reg[RKVENC_ENC_PIC_INDEX]);
	/* process fds in registers */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = mpp_translate_reg_address(session,
						mpp_task, task->fmt,
						task->reg, &task->off_inf);
		if (ret)
			goto fail;
		mpp_translate_reg_offset_info(mpp_task,
					      &task->off_inf, task->reg);
	}
	task->link_mode = RKVENC_MODE_ONEFRAME;
	task->clk_mode = CLK_MODE_NORMAL;
	/* get resolution info */
	task->width = RKVENC_GET_WIDTH(task->reg[RKVENC_ENC_RSL_INDEX]);
	task->height = RKVENC_GET_HEIGHT(task->reg[RKVENC_ENC_RSL_INDEX]);
	task->pixels = task->width * task->height;
	mpp_debug(DEBUG_TASK_INFO, "width=%d, height=%d\n", task->width, task->height);

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}

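/*
 * Level 2 registers are not directly memory mapped: the target
 * register offset is first written to RKVENC_L2_ADDR_BASE, then the
 * value is transferred through the RKVENC_L2_WRITE_BASE /
 * RKVENC_L2_READ_BASE data ports, one 32-bit word at a time.
 */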
static int rkvenc_write_req_l2(struct mpp_dev *mpp,
			       u32 *regs,
			       u32 start_idx, u32 end_idx)
{
	int i;

	for (i = start_idx; i < end_idx; i++) {
		int reg = i * sizeof(u32);

		mpp_debug(DEBUG_SET_REG_L2, "reg[%03d]: %04x: 0x%08x\n", i, reg, regs[i]);
		writel_relaxed(reg, mpp->reg_base + RKVENC_L2_ADDR_BASE);
		writel_relaxed(regs[i], mpp->reg_base + RKVENC_L2_WRITE_BASE);
	}

	return 0;
}

static int rkvenc_read_req_l2(struct mpp_dev *mpp,
			      u32 *regs,
			      u32 start_idx, u32 end_idx)
{
	int i;

	for (i = start_idx; i < end_idx; i++) {
		int reg = i * sizeof(u32);

		writel_relaxed(reg, mpp->reg_base + RKVENC_L2_ADDR_BASE);
		regs[i] = readl_relaxed(mpp->reg_base + RKVENC_L2_READ_BASE);
		mpp_debug(DEBUG_GET_REG_L2, "reg[%03d]: %04x: 0x%08x\n", i, reg, regs[i]);
	}

	return 0;
}

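/*
 * Write a range of level 1 registers in descending index order (the
 * order rkvenc requires, see the note in rkvenc_run()), skipping the
 * enable register at en_idx so the encoder is only started by the
 * explicit write in rkvenc_run().
 */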
static int rkvenc_write_req_backward(struct mpp_dev *mpp, u32 *regs,
				     s32 start_idx, s32 end_idx, s32 en_idx)
{
	int i;

	for (i = end_idx - 1; i >= start_idx; i--) {
		if (i == en_idx)
			continue;
		mpp_write_relaxed(mpp, i * sizeof(u32), regs[i]);
	}

	return 0;
}

static int rkvenc_run(struct mpp_dev *mpp,
		      struct mpp_task *mpp_task)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, RKVENC_REG_CLR_CACHE_BASE, 1);
	switch (task->link_mode) {
	case RKVENC_MODE_ONEFRAME: {
		int i;
		struct mpp_request *req;
		u32 reg_en = mpp_task->hw_info->reg_en;
		u32 timing_en = mpp->srv->timing_en;

		/*
		 * Note: ensure the OSD palette clock select is 0 before
		 * setting registers, otherwise the OSD settings will not
		 * take effect.
		 */
		mpp_write_relaxed(mpp, RKVENC_OSD_CFG_BASE, 0);
		/* ensure the clear has completed */
		wmb();
		for (i = 0; i < task->w_req_cnt; i++) {
			int s, e;

			req = &task->w_reqs[i];
			/* set level 2 registers */
			if (req->offset >= RKVENC_L2_OFFSET) {
				int off = req->offset - RKVENC_L2_OFFSET;

				s = off / sizeof(u32);
				e = s + req->size / sizeof(u32);
				rkvenc_write_req_l2(mpp, task->reg_l2, s, e);
			} else {
				/* set level 1 registers */
				s = req->offset / sizeof(u32);
				e = s + req->size / sizeof(u32);
				/* NOTE: rkvenc registers must be written in reverse order */
				rkvenc_write_req_backward(mpp, task->reg, s, e, reg_en);
			}
		}

		/* flush tlb before starting hardware */
		mpp_iommu_flush_tlb(mpp->iommu_info);

		/* init current task */
		mpp->cur_task = mpp_task;

		mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

		/* flush the registers before starting the device */
		wmb();
		mpp_write(mpp, RKVENC_ENC_START_BASE, task->reg[reg_en]);

		mpp_task_run_end(mpp_task, timing_en);
	} break;
	case RKVENC_MODE_LINKTABLE_FIX:
	case RKVENC_MODE_LINKTABLE_UPDATE:
	default: {
		mpp_err("unsupported link_mode %d\n", task->link_mode);
	} break;
	}

	mpp_debug_leave();

	return 0;
}

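/*
 * Hard IRQ handler: latch the interrupt status, mask the timeout
 * interrupt and clear all pending bits, then return IRQ_WAKE_THREAD
 * so the threaded handler (rkvenc_isr) finishes the task outside of
 * hard-IRQ context.
 */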
static int rkvenc_irq(struct mpp_dev *mpp)
{
	mpp_debug_enter();

	mpp->irq_status = mpp_read(mpp, RKVENC_INT_STATUS_BASE);
	if (!mpp->irq_status)
		return IRQ_NONE;

	mpp_write(mpp, RKVENC_INT_MSK_BASE, 0x100);
	mpp_write(mpp, RKVENC_INT_CLR_BASE, 0xffffffff);
	mpp_write(mpp, RKVENC_INT_STATUS_BASE, 0);

	mpp_debug_leave();

	return IRQ_WAKE_THREAD;
}

static int rkvenc_isr(struct mpp_dev *mpp)
{
	struct rkvenc_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_debug_enter();

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}

	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_rkvenc_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);

	if (task->irq_status & RKVENC_INT_ERROR_BITS) {
		atomic_inc(&mpp->reset_request);
		if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
			/* dump error registers */
			mpp_debug(DEBUG_DUMP_ERR_REG, "irq_status: %08x\n", task->irq_status);
			mpp_task_dump_hw_reg(mpp);
		}
	}

	/* unmap the reserved auxiliary buffer */
	if (enc->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, enc->aux_iova, IOMMU_PAGE_SIZE);
		enc->aux_iova = -1;
	}

	mpp_task_finish(mpp_task->session, mpp_task);

	mpp_debug_leave();

	return IRQ_HANDLED;
}

static int rkvenc_finish(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	switch (task->link_mode) {
	case RKVENC_MODE_ONEFRAME: {
		u32 i;
		struct mpp_request *req;

		for (i = 0; i < task->r_req_cnt; i++) {
			int s, e;

			req = &task->r_reqs[i];
			if (req->offset >= RKVENC_L2_OFFSET) {
				int off = req->offset - RKVENC_L2_OFFSET;

				s = off / sizeof(u32);
				e = s + req->size / sizeof(u32);
				rkvenc_read_req_l2(mpp, task->reg_l2, s, e);
			} else {
				s = req->offset / sizeof(u32);
				e = s + req->size / sizeof(u32);
				mpp_read_req(mpp, task->reg, s, e);
			}
		}
		task->reg[RKVENC_INT_STATUS_INDEX] = task->irq_status;
	} break;
	case RKVENC_MODE_LINKTABLE_FIX:
	case RKVENC_MODE_LINKTABLE_UPDATE:
	default: {
		mpp_err("unsupported link_mode %d\n", task->link_mode);
	} break;
	}

	mpp_debug_leave();

	return 0;
}

static int rkvenc_result(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task,
			 struct mpp_task_msgs *msgs)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_debug_enter();

	switch (task->link_mode) {
	case RKVENC_MODE_ONEFRAME: {
		u32 i;
		struct mpp_request *req;

		for (i = 0; i < task->r_req_cnt; i++) {
			req = &task->r_reqs[i];
			/* copy level 2 registers back to userspace */
			if (req->offset >= RKVENC_L2_OFFSET) {
				int off = req->offset - RKVENC_L2_OFFSET;

				if (copy_to_user(req->data,
						 (u8 *)task->reg_l2 + off,
						 req->size)) {
					mpp_err("copy_to_user reg_l2 fail\n");
					return -EIO;
				}
			} else {
				if (copy_to_user(req->data,
						 (u8 *)task->reg + req->offset,
						 req->size)) {
					mpp_err("copy_to_user reg fail\n");
					return -EIO;
				}
			}
		}
	} break;
	case RKVENC_MODE_LINKTABLE_FIX:
	case RKVENC_MODE_LINKTABLE_UPDATE:
	default: {
		mpp_err("unsupported link_mode %d\n", task->link_mode);
	} break;
	}

	return 0;
}

static int rkvenc_free_task(struct mpp_session *session,
			    struct mpp_task *mpp_task)
{
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

static int rkvenc_control(struct mpp_session *session, struct mpp_request *req)
{
	switch (req->cmd) {
	case MPP_CMD_SEND_CODEC_INFO: {
		int i;
		int cnt;
		struct codec_info_elem elem;
		struct rkvenc_session_priv *priv;

		if (!session || !session->priv) {
			mpp_err("session info null\n");
			return -EINVAL;
		}
		priv = session->priv;

		cnt = req->size / sizeof(elem);
		cnt = (cnt > ENC_INFO_BUTT) ? ENC_INFO_BUTT : cnt;
		mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
		for (i = 0; i < cnt; i++) {
			if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
				mpp_err("copy_from_user failed\n");
				continue;
			}
			if (elem.type > ENC_INFO_BASE && elem.type < ENC_INFO_BUTT &&
			    elem.flag > CODEC_INFO_FLAG_NULL && elem.flag < CODEC_INFO_FLAG_BUTT) {
				elem.type = array_index_nospec(elem.type, ENC_INFO_BUTT);
				priv->codec_info[elem.type].flag = elem.flag;
				priv->codec_info[elem.type].val = elem.data;
			} else {
				mpp_err("codec info invalid, type %d, flag %d\n",
					elem.type, elem.flag);
			}
		}
	} break;
	default: {
		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
	} break;
	}

	return 0;
}

static int rkvenc_free_session(struct mpp_session *session)
{
	if (session && session->priv) {
		kfree(session->priv);
		session->priv = NULL;
	}

	return 0;
}

static int rkvenc_init_session(struct mpp_session *session)
{
	struct rkvenc_session_priv *priv;

	if (!session) {
		mpp_err("session is null\n");
		return -EINVAL;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	init_rwsem(&priv->rw_sem);
	session->priv = priv;

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int rkvenc_procfs_remove(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	if (enc->procfs) {
		proc_remove(enc->procfs);
		enc->procfs = NULL;
	}

	return 0;
}

static int rkvenc_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	int i;
	struct rkvenc_session_priv *priv = session->priv;

	down_read(&priv->rw_sem);
	/* item name */
	seq_puts(seq, "------------------------------------------------------");
	seq_puts(seq, "------------------------------------------------------\n");
	seq_printf(seq, "|%8s|", (const char *)"session");
	seq_printf(seq, "%8s|", (const char *)"device");
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		bool show = priv->codec_info[i].flag;

		if (show)
			seq_printf(seq, "%8s|", enc_info_item_name[i]);
	}
	seq_puts(seq, "\n");
	/* item data */
	seq_printf(seq, "|%8d|", session->index);
	seq_printf(seq, "%8s|", mpp_device_name[session->device_type]);
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		u32 flag = priv->codec_info[i].flag;

		if (!flag)
			continue;
		if (flag == CODEC_INFO_FLAG_NUMBER) {
			u32 data = priv->codec_info[i].val;

			seq_printf(seq, "%8d|", data);
		} else if (flag == CODEC_INFO_FLAG_STRING) {
			const char *name = (const char *)&priv->codec_info[i].val;

			seq_printf(seq, "%8s|", name);
		} else {
			seq_printf(seq, "%8s|", (const char *)"null");
		}
	}
	seq_puts(seq, "\n");
	up_read(&priv->rw_sem);

	return 0;
}

static int rkvenc_show_session_info(struct seq_file *seq, void *offset)
{
	struct mpp_session *session = NULL, *n;
	struct mpp_dev *mpp = seq->private;

	mutex_lock(&mpp->srv->session_lock);
	list_for_each_entry_safe(session, n,
				 &mpp->srv->session_list,
				 service_link) {
		if (session->device_type != MPP_DEVICE_RKVENC)
			continue;
		if (!session->priv)
			continue;
		if (mpp->dev_ops->dump_session)
			mpp->dev_ops->dump_session(session, seq);
	}
	mutex_unlock(&mpp->srv->session_lock);

	return 0;
}

static int rkvenc_procfs_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	enc->procfs = proc_mkdir(mpp->dev->of_node->name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(enc->procfs)) {
		mpp_err("failed to create procfs dir\n");
		enc->procfs = NULL;
		return -EIO;
	}

	/* for common mpp_dev options */
	mpp_procfs_create_common(enc->procfs, mpp);

	/* for debug */
	mpp_procfs_create_u32("aclk", 0644,
			      enc->procfs, &enc->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("clk_core", 0644,
			      enc->procfs, &enc->core_clk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      enc->procfs, &mpp->session_max_buffers);
	/* for showing session info */
	proc_create_single_data("sessions-info", 0444,
				enc->procfs, rkvenc_show_session_info, mpp);

	return 0;
}
#else
static inline int rkvenc_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvenc_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}

static inline int rkvenc_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	return 0;
}
#endif

#ifdef CONFIG_PM_DEVFREQ
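/*
 * devfreq target callback: pick the recommended OPP and follow the
 * usual DVFS ordering - raise the voltage before increasing the clock
 * rate, and lower it only after decreasing the rate, so the rail
 * always satisfies the requirement of the current frequency.
 */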
static int rkvenc_devfreq_target(struct device *dev,
				 unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;
	unsigned long target_volt, target_freq;
	int ret = 0;

	struct rkvenc_dev *enc = dev_get_drvdata(dev);
	struct devfreq *devfreq = enc->devfreq;
	struct devfreq_dev_status *stat = &devfreq->last_status;
	unsigned long old_clk_rate = stat->current_frequency;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}
	target_freq = dev_pm_opp_get_freq(opp);
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	if (old_clk_rate == target_freq) {
		enc->core_last_rate_hz = target_freq;
		if (enc->volt == target_volt)
			return ret;
		ret = regulator_set_voltage(enc->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "Cannot set voltage %lu uV\n",
				target_volt);
			return ret;
		}
		enc->volt = target_volt;
		return 0;
	}

	if (old_clk_rate < target_freq) {
		ret = regulator_set_voltage(enc->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
			return ret;
		}
	}

	dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_freq);
	clk_set_rate(enc->core_clk_info.clk, target_freq);
	stat->current_frequency = target_freq;
	enc->core_last_rate_hz = target_freq;

	if (old_clk_rate > target_freq) {
		ret = regulator_set_voltage(enc->vdd, target_volt, INT_MAX);
		if (ret) {
			dev_err(dev, "failed to set voltage %lu uV\n", target_volt);
			return ret;
		}
	}
	enc->volt = target_volt;

	return ret;
}

static int rkvenc_devfreq_get_dev_status(struct device *dev,
					 struct devfreq_dev_status *stat)
{
	return 0;
}

static int rkvenc_devfreq_get_cur_freq(struct device *dev,
				       unsigned long *freq)
{
	struct rkvenc_dev *enc = dev_get_drvdata(dev);

	*freq = enc->core_last_rate_hz;

	return 0;
}

static struct devfreq_dev_profile rkvenc_devfreq_profile = {
	.target = rkvenc_devfreq_target,
	.get_dev_status = rkvenc_devfreq_get_dev_status,
	.get_cur_freq = rkvenc_devfreq_get_cur_freq,
};

static int devfreq_venc_ondemand_func(struct devfreq *df, unsigned long *freq)
{
	struct rkvenc_dev *enc = df->data;

	if (enc)
		*freq = enc->core_rate_hz;
	else
		*freq = df->previous_freq;

	return 0;
}

static int devfreq_venc_ondemand_handler(struct devfreq *devfreq,
					 unsigned int event, void *data)
{
	return 0;
}

static struct devfreq_governor devfreq_venc_ondemand = {
	.name = "venc_ondemand",
	.get_target_freq = devfreq_venc_ondemand_func,
	.event_handler = devfreq_venc_ondemand_handler,
};

static unsigned long rkvenc_get_static_power(struct devfreq *devfreq,
					     unsigned long voltage)
{
	struct rkvenc_dev *enc = devfreq->data;

	if (!enc->model_data)
		return 0;
	else
		return rockchip_ipa_get_static_power(enc->model_data,
						     voltage);
}

static struct devfreq_cooling_power venc_cooling_power_data = {
	.get_static_power = rkvenc_get_static_power,
};

static struct monitor_dev_profile enc_mdevp = {
	.type = MONITOR_TYPE_DEV,
	.low_temp_adjust = rockchip_monitor_dev_low_temp_adjust,
	.high_temp_adjust = rockchip_monitor_dev_high_temp_adjust,
};

static int __maybe_unused rv1126_get_soc_info(struct device *dev,
					      struct device_node *np,
					      int *bin, int *process)
{
	int ret = 0;
	u8 value = 0;

	if (of_property_match_string(np, "nvmem-cell-names", "performance") >= 0) {
		ret = rockchip_nvmem_cell_read_u8(np, "performance", &value);
		if (ret) {
			dev_err(dev, "Failed to get soc performance value\n");
			return ret;
		}
		if (value == 0x1)
			*bin = 1;
		else
			*bin = 0;
	}
	if (*bin >= 0)
		dev_info(dev, "bin=%d\n", *bin);

	return ret;
}

static const struct rockchip_opp_data __maybe_unused rv1126_rkvenc_opp_data = {
	.get_soc_info = rv1126_get_soc_info,
};

static const struct of_device_id rockchip_rkvenc_of_match[] = {
#ifdef CONFIG_CPU_RV1126
	{
		.compatible = "rockchip,rv1109",
		.data = (void *)&rv1126_rkvenc_opp_data,
	},
	{
		.compatible = "rockchip,rv1126",
		.data = (void *)&rv1126_rkvenc_opp_data,
	},
#endif
	{},
};

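/*
 * Set up devfreq for the core clock: an optional "venc" regulator is
 * required, the OPP table is initialized from DT, the custom
 * "venc_ondemand" governor is registered, and the device is hooked up
 * to devfreq cooling and the rockchip system monitor.
 */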
static int rkvenc_devfreq_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct clk *clk_core = enc->core_clk_info.clk;
	struct devfreq_cooling_power *venc_dcp = &venc_cooling_power_data;
	struct rockchip_opp_info opp_info = {0};
	int ret = 0;

	if (!clk_core)
		return 0;

	enc->vdd = devm_regulator_get_optional(mpp->dev, "venc");
	if (IS_ERR_OR_NULL(enc->vdd)) {
		if (PTR_ERR(enc->vdd) == -EPROBE_DEFER) {
			dev_warn(mpp->dev, "venc regulator not ready, retry\n");

			return -EPROBE_DEFER;
		}
		dev_info(mpp->dev, "no regulator, devfreq is disabled\n");

		return 0;
	}

	rockchip_get_opp_data(rockchip_rkvenc_of_match, &opp_info);
	ret = rockchip_init_opp_table(mpp->dev, &opp_info, "leakage", "venc");
	if (ret) {
		dev_err(mpp->dev, "failed to init_opp_table\n");
		return ret;
	}

	ret = devfreq_add_governor(&devfreq_venc_ondemand);
	if (ret) {
		dev_err(mpp->dev, "failed to add venc_ondemand governor\n");
		goto governor_err;
	}

	rkvenc_devfreq_profile.initial_freq = clk_get_rate(clk_core);

	enc->devfreq = devm_devfreq_add_device(mpp->dev,
					       &rkvenc_devfreq_profile,
					       "venc_ondemand", (void *)enc);
	if (IS_ERR(enc->devfreq)) {
		ret = PTR_ERR(enc->devfreq);
		enc->devfreq = NULL;
		goto devfreq_err;
	}
	enc->devfreq->last_status.total_time = 1;
	enc->devfreq->last_status.busy_time = 1;

	devfreq_register_opp_notifier(mpp->dev, enc->devfreq);

	of_property_read_u32(mpp->dev->of_node, "dynamic-power-coefficient",
			     (u32 *)&venc_dcp->dyn_power_coeff);
	enc->model_data = rockchip_ipa_power_model_init(mpp->dev,
							"venc_leakage");
	if (IS_ERR_OR_NULL(enc->model_data)) {
		enc->model_data = NULL;
		dev_err(mpp->dev, "failed to initialize power model\n");
	} else if (enc->model_data->dynamic_coefficient) {
		venc_dcp->dyn_power_coeff =
			enc->model_data->dynamic_coefficient;
	}
	if (!venc_dcp->dyn_power_coeff) {
		dev_err(mpp->dev, "failed to get dynamic-coefficient\n");
		goto out;
	}

	enc->devfreq_cooling =
		of_devfreq_cooling_register_power(mpp->dev->of_node,
						  enc->devfreq, venc_dcp);
	if (IS_ERR_OR_NULL(enc->devfreq_cooling))
		dev_err(mpp->dev, "failed to register cooling device\n");

	enc_mdevp.data = enc->devfreq;
	enc->mdev_info = rockchip_system_monitor_register(mpp->dev, &enc_mdevp);
	if (IS_ERR(enc->mdev_info)) {
		dev_dbg(mpp->dev, "without system monitor\n");
		enc->mdev_info = NULL;
	}

out:
	return 0;

devfreq_err:
	devfreq_remove_governor(&devfreq_venc_ondemand);
governor_err:
	dev_pm_opp_of_remove_table(mpp->dev);

	return ret;
}

static int rkvenc_devfreq_remove(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	if (enc->mdev_info)
		rockchip_system_monitor_unregister(enc->mdev_info);
	if (enc->devfreq) {
		devfreq_unregister_opp_notifier(mpp->dev, enc->devfreq);
		dev_pm_opp_of_remove_table(mpp->dev);
		devfreq_remove_governor(&devfreq_venc_ondemand);
	}

	return 0;
}
#endif

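/*
 * IOMMU page-fault worker: map the spare page (aux_page) at the
 * faulting IOVA, rounded down to a page boundary, so the hardware
 * access no longer faults, then unmask the IOMMU interrupt. The
 * mapping is torn down again when the task completes (rkvenc_isr) or
 * at driver exit.
 */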
static void rkvenc_iommu_handle_work(struct work_struct *work_s)
{
	int ret = 0;
	struct rkvenc_dev *enc = container_of(work_s, struct rkvenc_dev, iommu_work);
	struct mpp_dev *mpp = &enc->mpp;
	unsigned long page_iova = 0;

	mpp_debug_enter();

	/* avoid another page fault occurring while this one is handled */
	mpp_iommu_down_write(mpp->iommu_info);

	if (enc->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, enc->aux_iova, IOMMU_PAGE_SIZE);
		enc->aux_iova = -1;
	}

	page_iova = round_down(enc->fault_iova, SZ_4K);
	ret = iommu_map(mpp->iommu_info->domain, page_iova,
			page_to_phys(enc->aux_page), IOMMU_PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		mpp_err("iommu_map iova %lx error.\n", page_iova);
	else
		enc->aux_iova = page_iova;

	rockchip_iommu_unmask_irq(mpp->dev);
	mpp_iommu_up_write(mpp->iommu_info);

	mpp_debug_leave();
}

static int rkvenc_iommu_fault_handle(struct iommu_domain *iommu,
				     struct device *iommu_dev,
				     unsigned long iova, int status, void *arg)
{
	struct mpp_dev *mpp = (struct mpp_dev *)arg;
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_debug_enter();
	mpp_debug(DEBUG_IOMMU, "IOMMU_GET_BUS_ID(status)=%d\n", IOMMU_GET_BUS_ID(status));
	if (IOMMU_GET_BUS_ID(status)) {
		enc->fault_iova = iova;
		rockchip_iommu_mask_irq(mpp->dev);
		queue_work(enc->iommu_wq, &enc->iommu_work);
	}
	mpp_debug_leave();

	return 0;
}

static int rkvenc_init(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	int ret = 0;

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_RKVENC];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &enc->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &enc->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &enc->core_clk_info, "clk_core");
	if (ret)
		mpp_err("failed on clk_get clk_core\n");
	/* Get normal max workload from dtsi */
	of_property_read_u32(mpp->dev->of_node,
			     "rockchip,default-max-load",
			     &enc->default_max_load);
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&enc->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);
	mpp_set_clk_info_rate_hz(&enc->core_clk_info, CLK_MODE_DEFAULT, 600 * MHZ);

	/* Get reset control from dtsi */
	enc->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!enc->rst_a)
		mpp_err("No aclk reset resource defined\n");
	enc->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!enc->rst_h)
		mpp_err("No hclk reset resource defined\n");
	enc->rst_core = mpp_reset_control_get(mpp, RST_TYPE_CORE, "video_core");
	if (!enc->rst_core)
		mpp_err("No core reset resource defined\n");

#ifdef CONFIG_PM_DEVFREQ
	ret = rkvenc_devfreq_init(mpp);
	if (ret)
		mpp_err("failed to add venc devfreq\n");
#endif

	/* for IOMMU page fault handling */
	enc->aux_page = alloc_page(GFP_KERNEL);
	if (!enc->aux_page) {
		dev_err(mpp->dev, "failed to allocate auxiliary page\n");
		return -ENOMEM;
	}
	enc->aux_iova = -1;

	enc->iommu_wq = create_singlethread_workqueue("iommu_wq");
	if (!enc->iommu_wq) {
		mpp_err("failed to create workqueue\n");
		return -ENOMEM;
	}
	INIT_WORK(&enc->iommu_work, rkvenc_iommu_handle_work);

	mpp->iommu_info->hdl = rkvenc_iommu_fault_handle;

	return ret;
}

static int rkvenc_exit(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

#ifdef CONFIG_PM_DEVFREQ
	rkvenc_devfreq_remove(mpp);
#endif

	if (enc->aux_page)
		__free_page(enc->aux_page);

	if (enc->aux_iova != -1) {
		iommu_unmap(mpp->iommu_info->domain, enc->aux_iova, IOMMU_PAGE_SIZE);
		enc->aux_iova = -1;
	}

	if (enc->iommu_wq) {
		destroy_workqueue(enc->iommu_wq);
		enc->iommu_wq = NULL;
	}

	return 0;
}

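/*
 * Reset the encoder after an error or timeout: drop the clocks to the
 * reduced rate, try the hardware's own "safe clear" first, then pulse
 * the CRU reset lines (aclk, hclk, core) with the power domain idled.
 */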
static int rkvenc_reset(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_debug_enter();

#ifdef CONFIG_PM_DEVFREQ
	if (enc->devfreq)
		mutex_lock(&enc->devfreq->lock);
#endif
	mpp_clk_set_rate(&enc->aclk_info, CLK_MODE_REDUCE);
	mpp_clk_set_rate(&enc->core_clk_info, CLK_MODE_REDUCE);
	/* safe reset */
	mpp_write(mpp, RKVENC_INT_MSK_BASE, 0x1FF);
	mpp_write(mpp, RKVENC_CLR_BASE, RKVENC_SAFE_CLR_BIT);
	udelay(5);
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", mpp_read(mpp, RKVENC_INT_STATUS_BASE));
	mpp_write(mpp, RKVENC_INT_CLR_BASE, 0xffffffff);
	mpp_write(mpp, RKVENC_INT_STATUS_BASE, 0);
	/* cru reset */
	if (enc->rst_a && enc->rst_h && enc->rst_core) {
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(enc->rst_a);
		mpp_safe_reset(enc->rst_h);
		mpp_safe_reset(enc->rst_core);
		udelay(5);
		mpp_safe_unreset(enc->rst_a);
		mpp_safe_unreset(enc->rst_h);
		mpp_safe_unreset(enc->rst_core);
		mpp_pmu_idle_request(mpp, false);
	}
#ifdef CONFIG_PM_DEVFREQ
	if (enc->devfreq)
		mutex_unlock(&enc->devfreq->lock);
#endif

	mpp_debug_leave();

	return 0;
}

static int rkvenc_clk_on(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	mpp_clk_safe_enable(enc->aclk_info.clk);
	mpp_clk_safe_enable(enc->hclk_info.clk);
	mpp_clk_safe_enable(enc->core_clk_info.clk);

	return 0;
}

static int rkvenc_clk_off(struct mpp_dev *mpp)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);

	clk_disable_unprepare(enc->aclk_info.clk);
	clk_disable_unprepare(enc->hclk_info.clk);
	clk_disable_unprepare(enc->core_clk_info.clk);

	return 0;
}

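/*
 * Estimate the pending workload (in pixels) across the current task
 * and everything queued behind it; if it exceeds the DT-provided
 * "rockchip,default-max-load", bump this task to the advanced clock
 * mode so rkvenc_set_freq() picks a higher rate.
 */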
static int rkvenc_get_freq(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	u32 task_cnt;
	u32 workload;
	struct mpp_task *loop = NULL, *n;
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	/* if no max load is set, assume there is no advanced mode */
	if (!enc->default_max_load)
		return 0;

	task_cnt = 1;
	workload = task->pixels;
	/* calc workload in pending list */
	mutex_lock(&mpp->queue->pending_lock);
	list_for_each_entry_safe(loop, n,
				 &mpp->queue->pending_list,
				 queue_link) {
		struct rkvenc_task *loop_task = to_rkvenc_task(loop);

		task_cnt++;
		workload += loop_task->pixels;
	}
	mutex_unlock(&mpp->queue->pending_lock);

	if (workload > enc->default_max_load)
		task->clk_mode = CLK_MODE_ADVANCED;

	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
		  task_cnt, workload, task->clk_mode);

	return 0;
}

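/*
 * Apply the clock mode chosen in rkvenc_get_freq(). With devfreq
 * enabled, the core clock change goes through update_devfreq() so the
 * voltage is adjusted together with the rate; otherwise the core rate
 * is set directly.
 */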
static int rkvenc_set_freq(struct mpp_dev *mpp,
			   struct mpp_task *mpp_task)
{
	struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
	struct rkvenc_task *task = to_rkvenc_task(mpp_task);

	mpp_clk_set_rate(&enc->aclk_info, task->clk_mode);

#ifdef CONFIG_PM_DEVFREQ
	if (enc->devfreq) {
		unsigned long core_rate_hz;

		mutex_lock(&enc->devfreq->lock);
		core_rate_hz = mpp_get_clk_info_rate_hz(&enc->core_clk_info, task->clk_mode);
		if (enc->core_rate_hz != core_rate_hz) {
			enc->core_rate_hz = core_rate_hz;
			update_devfreq(enc->devfreq);
		} else {
			/*
			 * Restore frequency when frequency is changed by
			 * rkvenc_reduce_freq()
			 */
			clk_set_rate(enc->core_clk_info.clk, enc->core_last_rate_hz);
		}
		mutex_unlock(&enc->devfreq->lock);
		return 0;
	}
#endif
	mpp_clk_set_rate(&enc->core_clk_info, task->clk_mode);

	return 0;
}

static struct mpp_hw_ops rkvenc_hw_ops = {
	.init = rkvenc_init,
	.exit = rkvenc_exit,
	.clk_on = rkvenc_clk_on,
	.clk_off = rkvenc_clk_off,
	.get_freq = rkvenc_get_freq,
	.set_freq = rkvenc_set_freq,
	.reset = rkvenc_reset,
};

static struct mpp_dev_ops rkvenc_dev_ops = {
	.alloc_task = rkvenc_alloc_task,
	.run = rkvenc_run,
	.irq = rkvenc_irq,
	.isr = rkvenc_isr,
	.finish = rkvenc_finish,
	.result = rkvenc_result,
	.free_task = rkvenc_free_task,
	.ioctl = rkvenc_control,
	.init_session = rkvenc_init_session,
	.free_session = rkvenc_free_session,
	.dump_session = rkvenc_dump_session,
};

static const struct mpp_dev_var rkvenc_v1_data = {
	.device_type = MPP_DEVICE_RKVENC,
	.hw_info = &rkvenc_hw_info,
	.trans_info = trans_rk_rkvenc,
	.hw_ops = &rkvenc_hw_ops,
	.dev_ops = &rkvenc_dev_ops,
};

static const struct of_device_id mpp_rkvenc_dt_match[] = {
	{
		.compatible = "rockchip,rkv-encoder-v1",
		.data = &rkvenc_v1_data,
	},
	{},
};

static int rkvenc_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct device *dev = &pdev->dev;
	struct rkvenc_dev *enc = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;

	dev_info(dev, "probing start\n");

	enc = devm_kzalloc(dev, sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;
	mpp = &enc->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_rkvenc_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret)
		return ret;

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "failed to request threaded irq\n");
		goto failed_get_irq;
	}

	mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
	rkvenc_procfs_init(mpp);
	/* register current device to mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);
	dev_info(dev, "probing finish\n");

	return 0;

failed_get_irq:
	mpp_dev_remove(mpp);

	return ret;
}

static int rkvenc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mpp_dev *mpp = dev_get_drvdata(dev);

	dev_info(dev, "remove device\n");
	mpp_dev_remove(mpp);
	rkvenc_procfs_remove(mpp);

	return 0;
}

struct platform_driver rockchip_rkvenc_driver = {
	.probe = rkvenc_probe,
	.remove = rkvenc_remove,
	.shutdown = mpp_dev_shutdown,
	.driver = {
		.name = RKVENC_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_rkvenc_dt_match),
	},
};