// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/regmap.h>
#include <linux/proc_fs.h>
#include <linux/nospec.h>
#include <soc/rockchip/pm_domains.h>

#include "mpp_debug.h"
#include "mpp_common.h"
#include "mpp_iommu.h"
#include "hack/mpp_hack_px30.h"

#define VEPU2_DRIVER_NAME		"mpp_vepu2"

#define VEPU2_SESSION_MAX_BUFFERS	20
/* The maximum number of registers across all hardware versions */
#define VEPU2_REG_NUM			184
#define VEPU2_REG_HW_ID_INDEX		-1 /* INVALID */
#define VEPU2_REG_START_INDEX		0
#define VEPU2_REG_END_INDEX		183
#define VEPU2_REG_OUT_INDEX		(77)
#define VEPU2_REG_STRM_INDEX		(53)

#define VEPU2_REG_ENC_EN		0x19c
#define VEPU2_REG_ENC_EN_INDEX		(103)
#define VEPU2_ENC_START			BIT(0)

#define VEPU2_GET_FORMAT(x)		(((x) >> 4) & 0x3)
#define VEPU2_FORMAT_MASK		(0x30)
#define VEPU2_GET_WIDTH(x)		((((x) >> 8) & 0x1ff) << 4)
#define VEPU2_GET_HEIGHT(x)		((((x) >> 20) & 0x1ff) << 4)

#define VEPU2_FMT_RESERVED		(0)
#define VEPU2_FMT_VP8E			(1)
#define VEPU2_FMT_JPEGE			(2)
#define VEPU2_FMT_H264E			(3)

#define VEPU2_REG_MB_CTRL		0x1a0
#define VEPU2_REG_MB_CTRL_INDEX		(104)

#define VEPU2_REG_INT			0x1b4
#define VEPU2_REG_INT_INDEX		(109)
#define VEPU2_MV_SAD_WR_EN		BIT(24)
#define VEPU2_ROCON_WRITE_DIS		BIT(20)
#define VEPU2_INT_SLICE_EN		BIT(16)
#define VEPU2_CLOCK_GATE_EN		BIT(12)
#define VEPU2_INT_TIMEOUT_EN		BIT(10)
#define VEPU2_INT_CLEAR			BIT(9)
#define VEPU2_IRQ_DIS			BIT(8)
#define VEPU2_INT_TIMEOUT		BIT(6)
#define VEPU2_INT_BUF_FULL		BIT(5)
#define VEPU2_INT_BUS_ERROR		BIT(4)
#define VEPU2_INT_SLICE			BIT(2)
#define VEPU2_INT_RDY			BIT(1)
#define VEPU2_INT_RAW			BIT(0)

#define RKVPUE2_REG_DMV_4P_1P(i)	(0x1e0 + ((i) << 4))
#define RKVPUE2_REG_DMV_4P_1P_INDEX(i)	(120 + (i))

#define VEPU2_REG_CLR_CACHE_BASE	0xc10

#define to_vepu_task(task)	\
		container_of(task, struct vepu_task, mpp_task)
#define to_vepu_dev(dev)	\
		container_of(dev, struct vepu_dev, mpp)

struct vepu_task {
	struct mpp_task mpp_task;

	enum MPP_CLOCK_MODE clk_mode;
	u32 reg[VEPU2_REG_NUM];

	struct reg_offset_info off_inf;
	u32 irq_status;
	/* req for current task */
	u32 w_req_cnt;
	struct mpp_request w_reqs[MPP_MAX_MSG_NUM];
	u32 r_req_cnt;
	struct mpp_request r_reqs[MPP_MAX_MSG_NUM];
	/* image info */
	u32 width;
	u32 height;
	u32 pixels;
	struct mpp_dma_buffer *bs_buf;
	u32 offset_bs;
};

struct vepu_session_priv {
	struct rw_semaphore rw_sem;
	/* codec info from user */
	struct {
		/* show mode */
		u32 flag;
		/* item data */
		u64 val;
	} codec_info[ENC_INFO_BUTT];
};

struct vepu_dev {
	struct mpp_dev mpp;

	struct mpp_clk_info aclk_info;
	struct mpp_clk_info hclk_info;
	u32 default_max_load;
#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
	struct proc_dir_entry *procfs;
#endif
	struct reset_control *rst_a;
	struct reset_control *rst_h;
	/* for ccu (central control unit) */
	struct vepu_ccu *ccu;
	bool disable_work;
};

struct vepu_ccu {
	u32 core_num;
	/* lock for core attach */
	spinlock_t lock;
	struct mpp_dev *main_core;
	struct mpp_dev *cores[MPP_MAX_CORE_NUM];
	unsigned long core_idle;
};

static struct mpp_hw_info vepu_v2_hw_info = {
	.reg_num = VEPU2_REG_NUM,
	.reg_id = VEPU2_REG_HW_ID_INDEX,
	.reg_start = VEPU2_REG_START_INDEX,
	.reg_end = VEPU2_REG_END_INDEX,
	.reg_en = VEPU2_REG_ENC_EN_INDEX,
};

/*
 * File handle translation information: each entry is the index of a
 * register that carries a dma-buf file descriptor to be translated
 * into a device address before the task is run.
 */
static const u16 trans_tbl_default[] = {
	48, 49, 50, 56, 57, 63, 64, 77, 78, 81
};

static const u16 trans_tbl_vp8e[] = {
	27, 44, 45, 48, 49, 50, 56, 57, 63, 64,
	76, 77, 78, 80, 81, 106, 108,
};

static struct mpp_trans_info trans_rk_vepu2[] = {
	[VEPU2_FMT_RESERVED] = {
		.count = 0,
		.table = NULL,
	},
	[VEPU2_FMT_VP8E] = {
		.count = ARRAY_SIZE(trans_tbl_vp8e),
		.table = trans_tbl_vp8e,
	},
	[VEPU2_FMT_JPEGE] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
	[VEPU2_FMT_H264E] = {
		.count = ARRAY_SIZE(trans_tbl_default),
		.table = trans_tbl_default,
	},
};

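/*
 * Translate the dma-buf fds carried in the task registers into device
 * addresses and, for JPEG encoding, look up the bitstream buffer so it
 * can be cache-synced around the hardware run.
 */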
static int vepu_process_reg_fd(struct mpp_session *session,
			       struct vepu_task *task,
			       struct mpp_task_msgs *msgs)
{
	int ret;
	int fd_bs;
	int fmt = VEPU2_GET_FORMAT(task->reg[VEPU2_REG_ENC_EN_INDEX]);

	if (session->msg_flags & MPP_FLAGS_REG_NO_OFFSET)
		fd_bs = task->reg[VEPU2_REG_OUT_INDEX];
	else
		fd_bs = task->reg[VEPU2_REG_OUT_INDEX] & 0x3ff;

	ret = mpp_translate_reg_address(session, &task->mpp_task,
					fmt, task->reg, &task->off_inf);
	if (ret)
		return ret;

	mpp_translate_reg_offset_info(&task->mpp_task,
				      &task->off_inf, task->reg);

	if (fmt == VEPU2_FMT_JPEGE) {
		struct mpp_dma_buffer *bs_buf = mpp_dma_find_buffer_fd(session->dma, fd_bs);

		task->offset_bs = mpp_query_reg_offset_info(&task->off_inf, VEPU2_REG_OUT_INDEX);
		if (bs_buf && task->offset_bs > 0)
			mpp_dma_buf_sync(bs_buf, 0, task->offset_bs, DMA_TO_DEVICE, false);
		task->bs_buf = bs_buf;
	}

	return 0;
}

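/*
 * Split the ioctl message array into per-task lists: register write
 * payloads are copied into task->reg, read-back requests are queued in
 * r_reqs, and address offset info is extracted separately.
 */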
static int vepu_extract_task_msg(struct vepu_task *task,
				 struct mpp_task_msgs *msgs)
{
	u32 i;
	int ret;
	struct mpp_request *req;
	struct mpp_hw_info *hw_info = task->mpp_task.hw_info;

	for (i = 0; i < msgs->req_cnt; i++) {
		u32 off_s, off_e;

		req = &msgs->reqs[i];
		if (!req->size)
			continue;

		switch (req->cmd) {
		case MPP_CMD_SET_REG_WRITE: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			if (copy_from_user((u8 *)task->reg + req->offset,
					   req->data, req->size)) {
				mpp_err("copy_from_user reg failed\n");
				return -EIO;
			}
			memcpy(&task->w_reqs[task->w_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_READ: {
			off_s = hw_info->reg_start * sizeof(u32);
			off_e = hw_info->reg_end * sizeof(u32);
			ret = mpp_check_req(req, 0, sizeof(task->reg),
					    off_s, off_e);
			if (ret)
				continue;
			memcpy(&task->r_reqs[task->r_req_cnt++],
			       req, sizeof(*req));
		} break;
		case MPP_CMD_SET_REG_ADDR_OFFSET: {
			mpp_extract_reg_offset_info(&task->off_inf, req);
		} break;
		default:
			break;
		}
	}
	mpp_debug(DEBUG_TASK_INFO, "w_req_cnt %d, r_req_cnt %d\n",
		  task->w_req_cnt, task->r_req_cnt);

	return 0;
}

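/*
 * Allocate and fill a vepu_task from the userspace messages: copy the
 * register payload, translate the buffer fds and parse the frame
 * resolution from the configuration register.
 */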
static void *vepu_alloc_task(struct mpp_session *session,
			     struct mpp_task_msgs *msgs)
{
	int ret;
	struct mpp_task *mpp_task = NULL;
	struct vepu_task *task = NULL;
	struct mpp_dev *mpp = session->mpp;

	mpp_debug_enter();

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	mpp_task = &task->mpp_task;
	mpp_task_init(session, mpp_task);
	mpp_task->hw_info = mpp->var->hw_info;
	mpp_task->reg = task->reg;
	/* extract reqs for the current task */
	ret = vepu_extract_task_msg(task, msgs);
	if (ret)
		goto fail;
	/* process the fds carried in the registers */
	if (!(msgs->flags & MPP_FLAGS_REG_FD_NO_TRANS)) {
		ret = vepu_process_reg_fd(session, task, msgs);
		if (ret)
			goto fail;
	}
	task->clk_mode = CLK_MODE_NORMAL;
	/* get resolution info */
	task->width = VEPU2_GET_WIDTH(task->reg[VEPU2_REG_ENC_EN_INDEX]);
	task->height = VEPU2_GET_HEIGHT(task->reg[VEPU2_REG_ENC_EN_INDEX]);
	task->pixels = task->width * task->height;
	mpp_debug(DEBUG_TASK_INFO, "width=%d, height=%d\n", task->width, task->height);

	mpp_debug_leave();

	return mpp_task;

fail:
	mpp_task_dump_mem_region(mpp, mpp_task);
	mpp_task_dump_reg(mpp, mpp_task);
	mpp_task_finalize(session, mpp_task);
	kfree(task);
	return NULL;
}

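/*
 * Pick an idle encoder core from the ccu for this task. Disabled cores
 * are masked out of the idle bitmap; if no core is free, NULL is
 * returned and the task stays unassigned.
 */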
static void *vepu_prepare(struct mpp_dev *mpp, struct mpp_task *mpp_task)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);
	struct vepu_ccu *ccu = enc->ccu;
	unsigned long core_idle;
	unsigned long flags;
	s32 core_id;
	u32 i;

	spin_lock_irqsave(&ccu->lock, flags);

	core_idle = ccu->core_idle;

	for (i = 0; i < ccu->core_num; i++) {
		struct mpp_dev *mpp = ccu->cores[i];

		if (mpp && mpp->disable)
			clear_bit(mpp->core_id, &core_idle);
	}

	core_id = find_first_bit(&core_idle, ccu->core_num);
	if (core_id >= ARRAY_SIZE(ccu->cores)) {
		mpp_task = NULL;
		mpp_dbg_core("core %d all busy %lx\n", core_id, ccu->core_idle);
		goto done;
	}

	core_id = array_index_nospec(core_id, MPP_MAX_CORE_NUM);
	clear_bit(core_id, &ccu->core_idle);
	mpp_task->mpp = ccu->cores[core_id];
	mpp_task->core_id = core_id;

	mpp_dbg_core("core cnt %d core %d set idle %lx -> %lx\n",
		     ccu->core_num, core_id, core_idle, ccu->core_idle);

done:
	spin_unlock_irqrestore(&ccu->lock, flags);

	return mpp_task;
}

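/*
 * Program the hardware for one task: clear the cache, write the format
 * bits first, then flush the remaining registers from the queued write
 * requests, and finally set the start bit to kick off encoding.
 */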
static int vepu_run(struct mpp_dev *mpp,
		    struct mpp_task *mpp_task)
{
	u32 i;
	u32 reg_en;
	struct vepu_task *task = to_vepu_task(mpp_task);
	u32 timing_en = mpp->srv->timing_en;

	mpp_debug_enter();

	/* clear cache */
	mpp_write_relaxed(mpp, VEPU2_REG_CLR_CACHE_BASE, 1);

	reg_en = mpp_task->hw_info->reg_en;
	/* First, write the correct encoder format */
	mpp_write_relaxed(mpp, VEPU2_REG_ENC_EN,
			  task->reg[reg_en] & VEPU2_FORMAT_MASK);
	/* Second, flush the other registers */
	for (i = 0; i < task->w_req_cnt; i++) {
		struct mpp_request *req = &task->w_reqs[i];
		int s = req->offset / sizeof(u32);
		int e = s + req->size / sizeof(u32);

		mpp_write_req(mpp, task->reg, s, e, reg_en);
	}

	/* flush tlb before starting hardware */
	mpp_iommu_flush_tlb(mpp->iommu_info);

	/* init current task */
	mpp->cur_task = mpp_task;

	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);

	/* Last, flush the enable register and start the hardware */
	wmb();
	mpp_write(mpp, VEPU2_REG_ENC_EN,
		  task->reg[reg_en] | VEPU2_ENC_START);

	mpp_task_run_end(mpp_task, timing_en);

	mpp_debug_leave();

	return 0;
}

static int vepu_px30_run(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	mpp_iommu_flush_tlb(mpp->iommu_info);
	return vepu_run(mpp, mpp_task);
}

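/*
 * Hard IRQ handler: latch the interrupt status, bail out if the raw
 * interrupt bit is not set, otherwise clear the register and wake the
 * threaded handler.
 */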
static int vepu_irq(struct mpp_dev *mpp)
{
	mpp->irq_status = mpp_read(mpp, VEPU2_REG_INT);
	if (!(mpp->irq_status & VEPU2_INT_RAW))
		return IRQ_NONE;

	mpp_write(mpp, VEPU2_REG_INT, 0);

	return IRQ_WAKE_THREAD;
}

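/*
 * Threaded interrupt handler: record the irq status in the task,
 * request a reset on error conditions, finish the task and mark the
 * core idle again when running under a ccu.
 */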
static int vepu_isr(struct mpp_dev *mpp)
{
	u32 err_mask;
	struct vepu_task *task = NULL;
	struct mpp_task *mpp_task = mpp->cur_task;
	unsigned long core_idle;
	struct vepu_dev *enc = to_vepu_dev(mpp);
	struct vepu_ccu *ccu = enc->ccu;

	/* FIXME use a spin lock here */
	if (!mpp_task) {
		dev_err(mpp->dev, "no current task\n");
		return IRQ_HANDLED;
	}
	mpp_time_diff(mpp_task);
	mpp->cur_task = NULL;
	task = to_vepu_task(mpp_task);
	task->irq_status = mpp->irq_status;
	mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n",
		  task->irq_status);

	err_mask = VEPU2_INT_TIMEOUT
		| VEPU2_INT_BUF_FULL
		| VEPU2_INT_BUS_ERROR;

	if (err_mask & task->irq_status)
		atomic_inc(&mpp->reset_request);

	mpp_task_finish(mpp_task->session, mpp_task);
	/* a standalone vepu has no ccu managing multiple cores */
	if (ccu) {
		core_idle = ccu->core_idle;
		set_bit(mpp->core_id, &ccu->core_idle);

		mpp_dbg_core("core %d isr idle %lx -> %lx\n", mpp->core_id, core_idle,
			     ccu->core_idle);
	}

	mpp_debug_leave();

	return IRQ_HANDLED;
}

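/*
 * Post-run handling: read back the requested registers, restore the
 * irq status register from the saved value and sync the JPEG bitstream
 * buffer for CPU access.
 */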
static int vepu_finish(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task)
{
	u32 i;
	u32 s, e;
	struct mpp_request *req;
	struct vepu_task *task = to_vepu_task(mpp_task);

	mpp_debug_enter();

	/* read register after running */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];
		s = req->offset / sizeof(u32);
		e = s + req->size / sizeof(u32);
		mpp_read_req(mpp, task->reg, s, e);
	}
	/* revert hack for irq status */
	task->reg[VEPU2_REG_INT_INDEX] = task->irq_status;

	if (task->bs_buf)
		mpp_dma_buf_sync(task->bs_buf, 0,
				 task->reg[VEPU2_REG_STRM_INDEX] / 8 +
				 task->offset_bs,
				 DMA_FROM_DEVICE, true);
	mpp_debug_leave();

	return 0;
}

static int vepu_result(struct mpp_dev *mpp,
		       struct mpp_task *mpp_task,
		       struct mpp_task_msgs *msgs)
{
	u32 i;
	struct mpp_request *req;
	struct vepu_task *task = to_vepu_task(mpp_task);

	/* FIXME may overflow the kernel */
	for (i = 0; i < task->r_req_cnt; i++) {
		req = &task->r_reqs[i];

		if (copy_to_user(req->data,
				 (u8 *)task->reg + req->offset,
				 req->size)) {
			mpp_err("copy_to_user reg fail\n");
			return -EIO;
		}
	}

	return 0;
}

static int vepu_free_task(struct mpp_session *session,
			  struct mpp_task *mpp_task)
{
	struct vepu_task *task = to_vepu_task(mpp_task);

	mpp_task_finalize(session, mpp_task);
	kfree(task);

	return 0;
}

static int vepu_control(struct mpp_session *session, struct mpp_request *req)
{
	switch (req->cmd) {
	case MPP_CMD_SEND_CODEC_INFO: {
		int i;
		int cnt;
		struct codec_info_elem elem;
		struct vepu_session_priv *priv;

		if (!session || !session->priv) {
			mpp_err("session info null\n");
			return -EINVAL;
		}
		priv = session->priv;

		cnt = req->size / sizeof(elem);
		cnt = (cnt > ENC_INFO_BUTT) ? ENC_INFO_BUTT : cnt;
		mpp_debug(DEBUG_IOCTL, "codec info count %d\n", cnt);
		for (i = 0; i < cnt; i++) {
			if (copy_from_user(&elem, req->data + i * sizeof(elem), sizeof(elem))) {
				mpp_err("copy_from_user failed\n");
				continue;
			}
			if (elem.type > ENC_INFO_BASE && elem.type < ENC_INFO_BUTT &&
			    elem.flag > CODEC_INFO_FLAG_NULL && elem.flag < CODEC_INFO_FLAG_BUTT) {
				elem.type = array_index_nospec(elem.type, ENC_INFO_BUTT);
				priv->codec_info[elem.type].flag = elem.flag;
				priv->codec_info[elem.type].val = elem.data;
			} else {
				mpp_err("codec info invalid, type %d, flag %d\n",
					elem.type, elem.flag);
			}
		}
	} break;
	default: {
		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
	} break;
	}

	return 0;
}

static int vepu_free_session(struct mpp_session *session)
{
	if (session && session->priv) {
		kfree(session->priv);
		session->priv = NULL;
	}

	return 0;
}

static int vepu_init_session(struct mpp_session *session)
{
	struct vepu_session_priv *priv;

	if (!session) {
		mpp_err("session is null\n");
		return -EINVAL;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	init_rwsem(&priv->rw_sem);
	session->priv = priv;

	return 0;
}

#ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
static int vepu_procfs_remove(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	if (enc->procfs) {
		proc_remove(enc->procfs);
		enc->procfs = NULL;
	}

	return 0;
}

static int vepu_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	int i;
	struct vepu_session_priv *priv = session->priv;

	down_read(&priv->rw_sem);
	/* item name */
	seq_puts(seq, "------------------------------------------------------");
	seq_puts(seq, "------------------------------------------------------\n");
	seq_printf(seq, "|%8s|", (const char *)"session");
	seq_printf(seq, "%8s|", (const char *)"device");
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		bool show = priv->codec_info[i].flag;

		if (show)
			seq_printf(seq, "%8s|", enc_info_item_name[i]);
	}
	seq_puts(seq, "\n");
	/* item data */
	seq_printf(seq, "|%8d|", session->index);
	seq_printf(seq, "%8s|", mpp_device_name[session->device_type]);
	for (i = ENC_INFO_BASE; i < ENC_INFO_BUTT; i++) {
		u32 flag = priv->codec_info[i].flag;

		if (!flag)
			continue;
		if (flag == CODEC_INFO_FLAG_NUMBER) {
			u32 data = priv->codec_info[i].val;

			seq_printf(seq, "%8d|", data);
		} else if (flag == CODEC_INFO_FLAG_STRING) {
			const char *name = (const char *)&priv->codec_info[i].val;

			seq_printf(seq, "%8s|", name);
		} else {
			seq_printf(seq, "%8s|", (const char *)"null");
		}
	}
	seq_puts(seq, "\n");
	up_read(&priv->rw_sem);

	return 0;
}

static int vepu_show_session_info(struct seq_file *seq, void *offset)
{
	struct mpp_session *session = NULL, *n;
	struct mpp_dev *mpp = seq->private;

	mutex_lock(&mpp->srv->session_lock);
	list_for_each_entry_safe(session, n,
				 &mpp->srv->session_list,
				 service_link) {
		if (session->device_type != MPP_DEVICE_VEPU2 &&
		    session->device_type != MPP_DEVICE_VEPU2_JPEG)
			continue;
		if (!session->priv)
			continue;
		if (mpp->dev_ops->dump_session)
			mpp->dev_ops->dump_session(session, seq);
	}
	mutex_unlock(&mpp->srv->session_lock);

	return 0;
}

static int vepu_procfs_init(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);
	char name[32];

	if (!mpp->dev || !mpp->dev->of_node || !mpp->dev->of_node->name ||
	    !mpp->srv || !mpp->srv->procfs)
		return -EINVAL;
	if (enc->ccu)
		snprintf(name, sizeof(name) - 1, "%s%d",
			 mpp->dev->of_node->name, mpp->core_id);
	else
		snprintf(name, sizeof(name) - 1, "%s",
			 mpp->dev->of_node->name);

	enc->procfs = proc_mkdir(name, mpp->srv->procfs);
	if (IS_ERR_OR_NULL(enc->procfs)) {
		mpp_err("failed on open procfs\n");
		enc->procfs = NULL;
		return -EIO;
	}

	/* for common mpp_dev options */
	mpp_procfs_create_common(enc->procfs, mpp);

	mpp_procfs_create_u32("aclk", 0644,
			      enc->procfs, &enc->aclk_info.debug_rate_hz);
	mpp_procfs_create_u32("session_buffers", 0644,
			      enc->procfs, &mpp->session_max_buffers);
	/* for showing session info */
	proc_create_single_data("sessions-info", 0444,
				enc->procfs, vepu_show_session_info, mpp);

	return 0;
}

static int vepu_procfs_ccu_init(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	if (!enc->procfs)
		goto done;

done:
	return 0;
}
#else
static inline int vepu_procfs_remove(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vepu_procfs_init(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vepu_procfs_ccu_init(struct mpp_dev *mpp)
{
	return 0;
}

static inline int vepu_dump_session(struct mpp_session *session, struct seq_file *seq)
{
	return 0;
}
#endif

static int vepu_init(struct mpp_dev *mpp)
{
	int ret;
	struct vepu_dev *enc = to_vepu_dev(mpp);

	mpp->grf_info = &mpp->srv->grf_infos[MPP_DRIVER_VEPU2];

	/* Get clock info from dtsi */
	ret = mpp_get_clk_info(mpp, &enc->aclk_info, "aclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get aclk_vcodec\n");
	ret = mpp_get_clk_info(mpp, &enc->hclk_info, "hclk_vcodec");
	if (ret)
		mpp_err("failed on clk_get hclk_vcodec\n");
	/* Get normal max workload from dtsi */
	of_property_read_u32(mpp->dev->of_node,
			     "rockchip,default-max-load", &enc->default_max_load);
	/* Set default rates */
	mpp_set_clk_info_rate_hz(&enc->aclk_info, CLK_MODE_DEFAULT, 300 * MHZ);

	/* Get reset control from dtsi */
	enc->rst_a = mpp_reset_control_get(mpp, RST_TYPE_A, "video_a");
	if (!enc->rst_a)
		mpp_err("No aclk reset resource define\n");
	enc->rst_h = mpp_reset_control_get(mpp, RST_TYPE_H, "video_h");
	if (!enc->rst_h)
		mpp_err("No hclk reset resource define\n");

	return 0;
}

static int vepu_px30_init(struct mpp_dev *mpp)
{
	vepu_init(mpp);
	return px30_workaround_combo_init(mpp);
}

static int vepu_clk_on(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	mpp_clk_safe_enable(enc->aclk_info.clk);
	mpp_clk_safe_enable(enc->hclk_info.clk);

	return 0;
}

static int vepu_clk_off(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	mpp_clk_safe_disable(enc->aclk_info.clk);
	mpp_clk_safe_disable(enc->hclk_info.clk);

	return 0;
}

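/*
 * Estimate the pending workload (in pixels) across the queue and
 * switch this task to the advanced clock mode when it exceeds the
 * default-max-load threshold read from the devicetree.
 */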
static int vepu_get_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	u32 task_cnt;
	u32 workload;
	struct mpp_task *loop = NULL, *n;
	struct vepu_dev *enc = to_vepu_dev(mpp);
	struct vepu_task *task = to_vepu_task(mpp_task);

	/* if no max load is set, assume there is no advanced mode */
	if (!enc->default_max_load)
		return 0;

	task_cnt = 1;
	workload = task->pixels;
	/* calc workload in pending list */
	mutex_lock(&mpp->queue->pending_lock);
	list_for_each_entry_safe(loop, n,
				 &mpp->queue->pending_list,
				 queue_link) {
		struct vepu_task *loop_task = to_vepu_task(loop);

		task_cnt++;
		workload += loop_task->pixels;
	}
	mutex_unlock(&mpp->queue->pending_lock);

	if (workload > enc->default_max_load)
		task->clk_mode = CLK_MODE_ADVANCED;

	mpp_debug(DEBUG_TASK_INFO, "pending task %d, workload %d, clk_mode=%d\n",
		  task_cnt, workload, task->clk_mode);

	return 0;
}

static int vepu_set_freq(struct mpp_dev *mpp,
			 struct mpp_task *mpp_task)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);
	struct vepu_task *task = to_vepu_task(mpp_task);

	mpp_clk_set_rate(&enc->aclk_info, task->clk_mode);

	return 0;
}

static int vepu_reduce_freq(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);

	mpp_clk_set_rate(&enc->aclk_info, CLK_MODE_REDUCE);

	return 0;
}

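/*
 * Stop the encoder, assert and release the aclk/hclk resets with the
 * power domain idled, then clear any pending interrupt and mark the
 * core idle again when running under a ccu.
 */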
static int vepu_reset(struct mpp_dev *mpp)
{
	struct vepu_dev *enc = to_vepu_dev(mpp);
	struct vepu_ccu *ccu = enc->ccu;

	mpp_write(mpp, VEPU2_REG_ENC_EN, 0);
	udelay(5);
	if (enc->rst_a && enc->rst_h) {
		/* Don't skip this or iommu won't work after reset */
		mpp_pmu_idle_request(mpp, true);
		mpp_safe_reset(enc->rst_a);
		mpp_safe_reset(enc->rst_h);
		udelay(5);
		mpp_safe_unreset(enc->rst_a);
		mpp_safe_unreset(enc->rst_h);
		mpp_pmu_idle_request(mpp, false);
	}
	mpp_write(mpp, VEPU2_REG_INT, VEPU2_INT_CLEAR);

	if (ccu) {
		set_bit(mpp->core_id, &ccu->core_idle);
		mpp_dbg_core("core %d reset idle %lx\n", mpp->core_id, ccu->core_idle);
	}

	return 0;
}

static struct mpp_hw_ops vepu_v2_hw_ops = {
	.init = vepu_init,
	.clk_on = vepu_clk_on,
	.clk_off = vepu_clk_off,
	.get_freq = vepu_get_freq,
	.set_freq = vepu_set_freq,
	.reduce_freq = vepu_reduce_freq,
	.reset = vepu_reset,
};

static struct mpp_hw_ops vepu_px30_hw_ops = {
	.init = vepu_px30_init,
	.clk_on = vepu_clk_on,
	.clk_off = vepu_clk_off,
	.set_freq = vepu_set_freq,
	.reduce_freq = vepu_reduce_freq,
	.reset = vepu_reset,
	.set_grf = px30_workaround_combo_switch_grf,
};

static struct mpp_dev_ops vepu_v2_dev_ops = {
	.alloc_task = vepu_alloc_task,
	.run = vepu_run,
	.irq = vepu_irq,
	.isr = vepu_isr,
	.finish = vepu_finish,
	.result = vepu_result,
	.free_task = vepu_free_task,
	.ioctl = vepu_control,
	.init_session = vepu_init_session,
	.free_session = vepu_free_session,
	.dump_session = vepu_dump_session,
};

static struct mpp_dev_ops vepu_px30_dev_ops = {
	.alloc_task = vepu_alloc_task,
	.run = vepu_px30_run,
	.irq = vepu_irq,
	.isr = vepu_isr,
	.finish = vepu_finish,
	.result = vepu_result,
	.free_task = vepu_free_task,
	.ioctl = vepu_control,
	.init_session = vepu_init_session,
	.free_session = vepu_free_session,
	.dump_session = vepu_dump_session,
};

static struct mpp_dev_ops vepu_ccu_dev_ops = {
	.alloc_task = vepu_alloc_task,
	.prepare = vepu_prepare,
	.run = vepu_run,
	.irq = vepu_irq,
	.isr = vepu_isr,
	.finish = vepu_finish,
	.result = vepu_result,
	.free_task = vepu_free_task,
	.ioctl = vepu_control,
	.init_session = vepu_init_session,
	.free_session = vepu_free_session,
	.dump_session = vepu_dump_session,
};

static const struct mpp_dev_var vepu_v2_data = {
	.device_type = MPP_DEVICE_VEPU2,
	.hw_info = &vepu_v2_hw_info,
	.trans_info = trans_rk_vepu2,
	.hw_ops = &vepu_v2_hw_ops,
	.dev_ops = &vepu_v2_dev_ops,
};

static const struct mpp_dev_var vepu_px30_data = {
	.device_type = MPP_DEVICE_VEPU2,
	.hw_info = &vepu_v2_hw_info,
	.trans_info = trans_rk_vepu2,
	.hw_ops = &vepu_px30_hw_ops,
	.dev_ops = &vepu_px30_dev_ops,
};

static const struct mpp_dev_var vepu_ccu_data = {
	.device_type = MPP_DEVICE_VEPU2_JPEG,
	.hw_info = &vepu_v2_hw_info,
	.trans_info = trans_rk_vepu2,
	.hw_ops = &vepu_v2_hw_ops,
	.dev_ops = &vepu_ccu_dev_ops,
};

static const struct of_device_id mpp_vepu2_dt_match[] = {
	{
		.compatible = "rockchip,vpu-encoder-v2",
		.data = &vepu_v2_data,
	},
#ifdef CONFIG_CPU_PX30
	{
		.compatible = "rockchip,vpu-encoder-px30",
		.data = &vepu_px30_data,
	},
#endif
#ifdef CONFIG_CPU_RK3588
	{
		.compatible = "rockchip,vpu-jpege-core",
		.data = &vepu_ccu_data,
	},
	{
		.compatible = "rockchip,vpu-jpege-ccu",
	},
#endif
	{},
};

static int vepu_ccu_probe(struct platform_device *pdev)
{
	struct vepu_ccu *ccu;
	struct device *dev = &pdev->dev;

	ccu = devm_kzalloc(dev, sizeof(*ccu), GFP_KERNEL);
	if (!ccu)
		return -ENOMEM;

	platform_set_drvdata(pdev, ccu);
	spin_lock_init(&ccu->lock);
	return 0;
}

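/*
 * Attach an encoder core to its ccu: register the core in the ccu core
 * table and idle bitmap, and make every core after the first share the
 * iommu domain of the main core (the ccu domain).
 */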
static int vepu_attach_ccu(struct device *dev, struct vepu_dev *enc)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct vepu_ccu *ccu;
	unsigned long flags;

	np = of_parse_phandle(dev->of_node, "rockchip,ccu", 0);
	if (!np || !of_device_is_available(np))
		return -ENODEV;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return -ENODEV;

	ccu = platform_get_drvdata(pdev);
	if (!ccu)
		return -ENOMEM;

	spin_lock_irqsave(&ccu->lock, flags);
	ccu->core_num++;
	ccu->cores[enc->mpp.core_id] = &enc->mpp;
	set_bit(enc->mpp.core_id, &ccu->core_idle);
	spin_unlock_irqrestore(&ccu->lock, flags);

	/* attach the ccu domain to the current core */
	if (!ccu->main_core) {
		/*
		 * The first attached device becomes the main core;
		 * its iommu domain is then shared as the ccu domain.
		 */
		ccu->main_core = &enc->mpp;
	} else {
		struct mpp_iommu_info *ccu_info, *cur_info;

		/* set the ccu domain for the current device */
		ccu_info = ccu->main_core->iommu_info;
		cur_info = enc->mpp.iommu_info;

		if (cur_info)
			cur_info->domain = ccu_info->domain;
		mpp_iommu_attach(cur_info);
	}
	enc->ccu = ccu;

	dev_info(dev, "attach ccu success\n");
	return 0;
}

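/*
 * Probe one jpeg encoder core: match the device data, probe it in the
 * mpp framework, attach it to the ccu and request its interrupt. Only
 * the main core is registered with the mpp service.
 */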
static int vepu_core_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vepu_dev *enc = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;

	enc = devm_kzalloc(dev, sizeof(struct vepu_dev), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	mpp = &enc->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vepu2_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;

		mpp->core_id = of_alias_get_id(pdev->dev.of_node, "jpege");
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}
	/* attach the current device to the ccu */
	ret = vepu_attach_ccu(dev, enc);
	if (ret)
		return ret;

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "register interrupter runtime failed\n");
		return -EINVAL;
	}

	mpp->session_max_buffers = VEPU2_SESSION_MAX_BUFFERS;
	vepu_procfs_init(mpp);
	vepu_procfs_ccu_init(mpp);
	/* if this is the main core, register the device with the mpp service */
	if (mpp == enc->ccu->main_core)
		mpp_dev_register_srv(mpp, mpp->srv);

	return 0;
}

static int vepu_probe_default(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vepu_dev *enc = NULL;
	struct mpp_dev *mpp = NULL;
	const struct of_device_id *match = NULL;
	int ret = 0;

	enc = devm_kzalloc(dev, sizeof(struct vepu_dev), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	mpp = &enc->mpp;
	platform_set_drvdata(pdev, mpp);

	if (pdev->dev.of_node) {
		match = of_match_node(mpp_vepu2_dt_match, pdev->dev.of_node);
		if (match)
			mpp->var = (struct mpp_dev_var *)match->data;

		mpp->core_id = of_alias_get_id(pdev->dev.of_node, "vepu");
	}

	ret = mpp_dev_probe(mpp, pdev);
	if (ret) {
		dev_err(dev, "probe sub driver failed\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(dev, mpp->irq,
					mpp_dev_irq,
					mpp_dev_isr_sched,
					IRQF_SHARED,
					dev_name(dev), mpp);
	if (ret) {
		dev_err(dev, "register interrupter runtime failed\n");
		return -EINVAL;
	}

	mpp->session_max_buffers = VEPU2_SESSION_MAX_BUFFERS;
	vepu_procfs_init(mpp);
	/* register the current device to the mpp service */
	mpp_dev_register_srv(mpp, mpp->srv);

	return 0;
}

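/*
 * Top-level probe: dispatch on the devicetree node name, so "ccu"
 * nodes get the central control unit, "core" nodes get a ccu-managed
 * encoder core and everything else is a standalone encoder.
 */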
static int vepu_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	dev_info(dev, "probing start\n");

	if (strstr(np->name, "ccu"))
		ret = vepu_ccu_probe(pdev);
	else if (strstr(np->name, "core"))
		ret = vepu_core_probe(pdev);
	else
		ret = vepu_probe_default(pdev);

	dev_info(dev, "probing finish\n");

	return ret;
}

static int vepu_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	if (strstr(np->name, "ccu")) {
		dev_info(dev, "remove ccu device\n");
	} else if (strstr(np->name, "core")) {
		struct mpp_dev *mpp = dev_get_drvdata(dev);
		struct vepu_dev *enc = to_vepu_dev(mpp);

		dev_info(dev, "remove core\n");
		if (enc->ccu) {
			s32 core_id = mpp->core_id;
			struct vepu_ccu *ccu = enc->ccu;
			unsigned long flags;

			spin_lock_irqsave(&ccu->lock, flags);
			ccu->core_num--;
			ccu->cores[core_id] = NULL;
			clear_bit(core_id, &ccu->core_idle);
			spin_unlock_irqrestore(&ccu->lock, flags);
		}
		mpp_dev_remove(&enc->mpp);
		vepu_procfs_remove(&enc->mpp);
	} else {
		struct mpp_dev *mpp = dev_get_drvdata(dev);

		dev_info(dev, "remove device\n");
		mpp_dev_remove(mpp);
		vepu_procfs_remove(mpp);
	}

	return 0;
}

static void vepu_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (!strstr(dev_name(dev), "ccu"))
		mpp_dev_shutdown(pdev);
}

struct platform_driver rockchip_vepu2_driver = {
	.probe = vepu_probe,
	.remove = vepu_remove,
	.shutdown = vepu_shutdown,
	.driver = {
		.name = VEPU2_DRIVER_NAME,
		.of_match_table = of_match_ptr(mpp_vepu2_dt_match),
	},
};
EXPORT_SYMBOL(rockchip_vepu2_driver);