1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Hantro VPU codec driver
4 *
5 * Copyright (C) 2018 Collabora, Ltd.
6 * Copyright 2018 Google LLC.
7 * Tomasz Figa <tfiga@chromium.org>
8 *
9 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
10 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
11 */
12
13 #include <linux/clk.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <linux/videodev2.h>
21 #include <linux/workqueue.h>
22 #include <media/v4l2-event.h>
23 #include <media/v4l2-mem2mem.h>
24 #include <media/videobuf2-core.h>
25 #include <media/videobuf2-vmalloc.h>
26
27 #include "hantro_v4l2.h"
28 #include "hantro.h"
29 #include "hantro_hw.h"
30
#define DRIVER_NAME "hantro-vpu"

/* Module-wide debug verbosity; higher values produce more vpu_debug() output. */
int hantro_debug;
module_param_named(debug, hantro_debug, int, 0644);
MODULE_PARM_DESC(debug,
		 "Debug level - higher value produces more verbose messages");
37
/*
 * Look up control @id on the context's control handler and return a
 * pointer to its current payload, or NULL if no such control exists.
 */
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
{
	struct v4l2_ctrl *found = v4l2_ctrl_find(&ctx->ctrl_handler, id);

	if (!found)
		return NULL;

	return found->p_cur.p;
}
45
hantro_get_ref(struct hantro_ctx * ctx,u64 ts)46 dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
47 {
48 struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
49 struct vb2_buffer *buf;
50 int index;
51
52 index = vb2_find_timestamp(q, ts, 0);
53 if (index < 0)
54 return 0;
55 buf = vb2_get_buffer(q, index);
56 return hantro_get_dec_buf_addr(ctx, buf);
57 }
58
hantro_job_finish_no_pm(struct hantro_dev * vpu,struct hantro_ctx * ctx,enum vb2_buffer_state result)59 static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
60 struct hantro_ctx *ctx,
61 enum vb2_buffer_state result)
62 {
63 struct vb2_v4l2_buffer *src, *dst;
64
65 src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
66 dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
67
68 if (WARN_ON(!src))
69 return;
70 if (WARN_ON(!dst))
71 return;
72
73 src->sequence = ctx->sequence_out++;
74 dst->sequence = ctx->sequence_cap++;
75
76 v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
77 result);
78 }
79
/*
 * Complete the current job: drop the runtime-PM reference taken in
 * device_run() (arming autosuspend), gate the VPU clocks, then finish
 * the m2m job with @result.
 */
static void hantro_job_finish(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      enum vb2_buffer_state result)
{
	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);

	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	hantro_job_finish_no_pm(vpu, ctx, result);
}
91
hantro_irq_done(struct hantro_dev * vpu,enum vb2_buffer_state result)92 void hantro_irq_done(struct hantro_dev *vpu,
93 enum vb2_buffer_state result)
94 {
95 struct hantro_ctx *ctx =
96 v4l2_m2m_get_curr_priv(vpu->m2m_dev);
97
98 /*
99 * If cancel_delayed_work returns false
100 * the timeout expired. The watchdog is running,
101 * and will take care of finishing the job.
102 */
103 if (cancel_delayed_work(&vpu->watchdog_work)) {
104 if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
105 ctx->codec_ops->done(ctx);
106 hantro_job_finish(vpu, ctx, result);
107 }
108 }
109
hantro_watchdog(struct work_struct * work)110 void hantro_watchdog(struct work_struct *work)
111 {
112 struct hantro_dev *vpu;
113 struct hantro_ctx *ctx;
114
115 vpu = container_of(to_delayed_work(work),
116 struct hantro_dev, watchdog_work);
117 ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
118 if (ctx) {
119 vpu_err("frame processing timed out!\n");
120 ctx->codec_ops->reset(ctx);
121 hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
122 }
123 }
124
/*
 * Common pre-run setup: apply the controls attached to the source
 * buffer's media request and, for decoders, switch the post-processor
 * on or off according to the selected capture format.
 */
void hantro_start_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
				&ctx->ctrl_handler);

	if (!ctx->is_encoder) {
		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
			hantro_postproc_enable(ctx);
		else
			hantro_postproc_disable(ctx);
	}
}
140
/*
 * Common post-setup step right before the hardware is started:
 * complete the source buffer's media request and arm the watchdog
 * that catches a hardware hang (2 s timeout).
 */
void hantro_end_prepare_run(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;

	src_buf = hantro_get_src_buf(ctx);
	v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
				   &ctx->ctrl_handler);

	/* Kick the watchdog. */
	schedule_delayed_work(&ctx->dev->watchdog_work,
			      msecs_to_jiffies(2000));
}
153
device_run(void * priv)154 static void device_run(void *priv)
155 {
156 struct hantro_ctx *ctx = priv;
157 struct vb2_v4l2_buffer *src, *dst;
158 int ret;
159
160 src = hantro_get_src_buf(ctx);
161 dst = hantro_get_dst_buf(ctx);
162
163 ret = pm_runtime_get_sync(ctx->dev->dev);
164 if (ret < 0) {
165 pm_runtime_put_noidle(ctx->dev->dev);
166 goto err_cancel_job;
167 }
168
169 ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
170 if (ret)
171 goto err_cancel_job;
172
173 v4l2_m2m_buf_copy_metadata(src, dst, true);
174
175 ctx->codec_ops->run(ctx);
176 return;
177
178 err_cancel_job:
179 hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
180 }
181
/* mem2mem framework hooks; jobs are finished from IRQ/watchdog paths. */
static struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = device_run,
};
185
/*
 * Set up the OUTPUT (bitstream/raw source) and CAPTURE (destination)
 * vb2 queues for a freshly opened context. Called back from
 * v4l2_m2m_ctx_init().
 */
static int
queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct hantro_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &hantro_queue_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;

	/*
	 * Driver does mostly sequential access, so sacrifice TLB efficiency
	 * for faster allocation. Also, no CPU access on the source queue,
	 * so no kernel mapping needed.
	 */
	src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->dev->vpu_mutex;
	src_vq->dev = ctx->dev->v4l2_dev.dev;
	/* Source buffers arrive through media requests (stateless API). */
	src_vq->supports_requests = true;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	/*
	 * When encoding, the CAPTURE queue doesn't need dma memory,
	 * as the CPU needs to create the JPEG frames, from the
	 * hardware-produced JPEG payload.
	 *
	 * For the DMA destination buffer, we use a bounce buffer.
	 */
	if (ctx->is_encoder) {
		dst_vq->mem_ops = &vb2_vmalloc_memops;
	} else {
		/* Decoders may read back reference frames from CAPTURE. */
		dst_vq->bidirectional = true;
		dst_vq->mem_ops = &vb2_dma_contig_memops;
		dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
				    DMA_ATTR_NO_KERNEL_MAPPING;
	}

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &hantro_queue_ops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->dev->vpu_mutex;
	dst_vq->dev = ctx->dev->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}
242
hantro_try_ctrl(struct v4l2_ctrl * ctrl)243 static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
244 {
245 if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_SPS) {
246 const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
247
248 if (sps->chroma_format_idc > 1)
249 /* Only 4:0:0 and 4:2:0 are supported */
250 return -EINVAL;
251 if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
252 /* Luma and chroma bit depth mismatch */
253 return -EINVAL;
254 if (sps->bit_depth_luma_minus8 != 0)
255 /* Only 8-bit is supported */
256 return -EINVAL;
257 }
258 return 0;
259 }
260
hantro_jpeg_s_ctrl(struct v4l2_ctrl * ctrl)261 static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
262 {
263 struct hantro_ctx *ctx;
264
265 ctx = container_of(ctrl->handler,
266 struct hantro_ctx, ctrl_handler);
267
268 vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
269
270 switch (ctrl->id) {
271 case V4L2_CID_JPEG_COMPRESSION_QUALITY:
272 ctx->jpeg_quality = ctrl->val;
273 break;
274 default:
275 return -EINVAL;
276 }
277
278 return 0;
279 }
280
/* Ops for stateless decoder controls: validation only, no s_ctrl. */
static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
};

/* Ops for the JPEG encoder's quality control. */
static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
	.s_ctrl = hantro_jpeg_s_ctrl,
};
288
/*
 * Table of per-codec V4L2 controls. hantro_ctrls_setup() registers an
 * entry only when its .codec bit is present in the context's allowed
 * codec mask; the trailing empty entry has codec == 0 and is therefore
 * always skipped.
 */
static const struct hantro_ctrl controls[] = {
	{
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
			.min = 5,
			.max = 100,
			.step = 1,
			.def = 50,
			.ops = &hantro_jpeg_ctrl_ops,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
		},
	}, {
		.codec = HANTRO_VP8_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_SPS,
			/* try_ctrl rejects SPS values the HW can't decode. */
			.ops = &hantro_ctrl_ops,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_PPS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		/* Only frame-based decoding is supported. */
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE,
			.min = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
			.def = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
			.max = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		/* Bitstream must use Annex-B start codes. */
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_START_CODE,
			.min = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
			.def = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
			.max = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
			.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
			.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
			.menu_skip_mask =
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
			.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
		}
	}, {
	},
};
365
hantro_ctrls_setup(struct hantro_dev * vpu,struct hantro_ctx * ctx,int allowed_codecs)366 static int hantro_ctrls_setup(struct hantro_dev *vpu,
367 struct hantro_ctx *ctx,
368 int allowed_codecs)
369 {
370 int i, num_ctrls = ARRAY_SIZE(controls);
371
372 v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
373
374 for (i = 0; i < num_ctrls; i++) {
375 if (!(allowed_codecs & controls[i].codec))
376 continue;
377
378 v4l2_ctrl_new_custom(&ctx->ctrl_handler,
379 &controls[i].cfg, NULL);
380 if (ctx->ctrl_handler.error) {
381 vpu_err("Adding control (%d) failed %d\n",
382 controls[i].cfg.id,
383 ctx->ctrl_handler.error);
384 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
385 return ctx->ctrl_handler.error;
386 }
387 }
388 return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
389 }
390
391 /*
392 * V4L2 file operations.
393 */
394
hantro_open(struct file * filp)395 static int hantro_open(struct file *filp)
396 {
397 struct hantro_dev *vpu = video_drvdata(filp);
398 struct video_device *vdev = video_devdata(filp);
399 struct hantro_func *func = hantro_vdev_to_func(vdev);
400 struct hantro_ctx *ctx;
401 int allowed_codecs, ret;
402
403 /*
404 * We do not need any extra locking here, because we operate only
405 * on local data here, except reading few fields from dev, which
406 * do not change through device's lifetime (which is guaranteed by
407 * reference on module from open()) and V4L2 internal objects (such
408 * as vdev and ctx->fh), which have proper locking done in respective
409 * helper functions used here.
410 */
411
412 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
413 if (!ctx)
414 return -ENOMEM;
415
416 ctx->dev = vpu;
417 if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
418 allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
419 ctx->is_encoder = true;
420 } else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
421 allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
422 ctx->is_encoder = false;
423 } else {
424 ret = -ENODEV;
425 goto err_ctx_free;
426 }
427
428 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
429 if (IS_ERR(ctx->fh.m2m_ctx)) {
430 ret = PTR_ERR(ctx->fh.m2m_ctx);
431 goto err_ctx_free;
432 }
433
434 v4l2_fh_init(&ctx->fh, vdev);
435 filp->private_data = &ctx->fh;
436 v4l2_fh_add(&ctx->fh);
437
438 hantro_reset_fmts(ctx);
439
440 ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
441 if (ret) {
442 vpu_err("Failed to set up controls\n");
443 goto err_fh_free;
444 }
445 ctx->fh.ctrl_handler = &ctx->ctrl_handler;
446
447 return 0;
448
449 err_fh_free:
450 v4l2_fh_del(&ctx->fh);
451 v4l2_fh_exit(&ctx->fh);
452 err_ctx_free:
453 kfree(ctx);
454 return ret;
455 }
456
/*
 * Release a context: tear down the m2m context, file handle and
 * control handler, then free the context itself.
 */
static int hantro_release(struct file *filp)
{
	struct hantro_ctx *ctx =
		container_of(filp->private_data, struct hantro_ctx, fh);

	/*
	 * No need for extra locking because this was the last reference
	 * to this file.
	 */
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	kfree(ctx);

	return 0;
}
474
/* File operations shared by the encoder and decoder video nodes. */
static const struct v4l2_file_operations hantro_fops = {
	.owner = THIS_MODULE,
	.open = hantro_open,
	.release = hantro_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};
483
/* DT match table; .data selects the per-SoC hantro_variant. */
static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
	{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
	{ .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
	{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
	{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_hantro_match);
496
hantro_register_entity(struct media_device * mdev,struct media_entity * entity,const char * entity_name,struct media_pad * pads,int num_pads,int function,struct video_device * vdev)497 static int hantro_register_entity(struct media_device *mdev,
498 struct media_entity *entity,
499 const char *entity_name,
500 struct media_pad *pads, int num_pads,
501 int function, struct video_device *vdev)
502 {
503 char *name;
504 int ret;
505
506 entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
507 if (function == MEDIA_ENT_F_IO_V4L) {
508 entity->info.dev.major = VIDEO_MAJOR;
509 entity->info.dev.minor = vdev->minor;
510 }
511
512 name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
513 entity_name);
514 if (!name)
515 return -ENOMEM;
516
517 entity->name = name;
518 entity->function = function;
519
520 ret = media_entity_pads_init(entity, num_pads, pads);
521 if (ret)
522 return ret;
523
524 ret = media_device_register_entity(mdev, entity);
525 if (ret)
526 return ret;
527
528 return 0;
529 }
530
/*
 * Build the media-controller topology for one codec function:
 * a "source" I/O entity -> a processing entity -> a "sink" I/O entity,
 * plus a video interface devnode linked to both I/O entities.
 *
 * On failure everything created so far is unwound in reverse order
 * through the cascading error labels below.
 */
static int hantro_attach_func(struct hantro_dev *vpu,
			      struct hantro_func *func)
{
	struct media_device *mdev = &vpu->mdev;
	struct media_link *link;
	int ret;

	/* Create the three encoder entities with their pads */
	func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
				     &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		return ret;

	func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
	func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->proc, "proc",
				     func->proc_pads, 2, func->id,
				     &func->vdev);
	if (ret)
		goto err_rel_entity0;

	func->sink_pad.flags = MEDIA_PAD_FL_SINK;
	ret = hantro_register_entity(mdev, &func->sink, "sink",
				     &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
						  0, VIDEO_MAJOR,
						  func->vdev.minor);
	if (!func->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(&func->vdev.entity,
				      &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}
	return 0;

err_rm_devnode:
	media_devnode_remove(func->intf_devnode);

err_rm_links1:
	media_entity_remove_links(&func->sink);

err_rm_links0:
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);

err_rel_entity2:
	media_device_unregister_entity(&func->sink);

err_rel_entity1:
	media_device_unregister_entity(&func->proc);

err_rel_entity0:
	media_device_unregister_entity(&func->vdev.entity);
	return ret;
}
622
/*
 * Undo hantro_attach_func(): remove the interface devnode, all links
 * and the three entities, in reverse order of creation.
 */
static void hantro_detach_func(struct hantro_func *func)
{
	media_devnode_remove(func->intf_devnode);
	media_entity_remove_links(&func->sink);
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);
	media_device_unregister_entity(&func->sink);
	media_device_unregister_entity(&func->proc);
	media_device_unregister_entity(&func->vdev.entity);
}
633
/*
 * Create, register and attach one codec function (encoder or decoder):
 * set up its video_device, register the video node and wire it into
 * the media controller graph.
 */
static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
{
	const struct of_device_id *match;
	struct hantro_func *func;
	struct video_device *vfd;
	int ret;

	match = of_match_node(of_hantro_match, vpu->dev->of_node);
	func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
	if (!func) {
		v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
		return -ENOMEM;
	}

	func->id = funcid;

	vfd = &func->vdev;
	vfd->fops = &hantro_fops;
	vfd->release = video_device_release_empty;
	vfd->lock = &vpu->vpu_mutex;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
	vfd->ioctl_ops = &hantro_ioctl_ops;
	snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
		 funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");

	/*
	 * NOTE(review): vpu->encoder/decoder is populated before
	 * registration succeeds; on a failure below the pointer remains
	 * set, so the probe error path may run the remove helpers on a
	 * partially initialized function — verify against callers.
	 */
	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
		vpu->encoder = func;
	else
		vpu->decoder = func;

	video_set_drvdata(vfd, vpu);

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
		return ret;
	}

	ret = hantro_attach_func(vpu, func);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev,
			 "Failed to attach functionality to the media device\n");
		goto err_unreg_dev;
	}

	v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
		  vfd->num);

	return 0;

err_unreg_dev:
	video_unregister_device(vfd);
	return ret;
}
690
hantro_add_enc_func(struct hantro_dev * vpu)691 static int hantro_add_enc_func(struct hantro_dev *vpu)
692 {
693 if (!vpu->variant->enc_fmts)
694 return 0;
695
696 return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
697 }
698
hantro_add_dec_func(struct hantro_dev * vpu)699 static int hantro_add_dec_func(struct hantro_dev *vpu)
700 {
701 if (!vpu->variant->dec_fmts)
702 return 0;
703
704 return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
705 }
706
hantro_remove_func(struct hantro_dev * vpu,unsigned int funcid)707 static void hantro_remove_func(struct hantro_dev *vpu,
708 unsigned int funcid)
709 {
710 struct hantro_func *func;
711
712 if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
713 func = vpu->encoder;
714 else
715 func = vpu->decoder;
716
717 if (!func)
718 return;
719
720 hantro_detach_func(func);
721 video_unregister_device(&func->vdev);
722 }
723
/* Convenience wrapper: tear down the encoder function, if registered. */
static void hantro_remove_enc_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}

/* Convenience wrapper: tear down the decoder function, if registered. */
static void hantro_remove_dec_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}
733
/* Media request ops wired to the generic vb2/m2m request helpers. */
static const struct media_device_ops hantro_m2m_media_ops = {
	.req_validate = vb2_request_validate,
	.req_queue = v4l2_m2m_request_queue,
};
738
hantro_probe(struct platform_device * pdev)739 static int hantro_probe(struct platform_device *pdev)
740 {
741 const struct of_device_id *match;
742 struct hantro_dev *vpu;
743 struct resource *res;
744 int num_bases;
745 int i, ret;
746
747 vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
748 if (!vpu)
749 return -ENOMEM;
750
751 vpu->dev = &pdev->dev;
752 vpu->pdev = pdev;
753 mutex_init(&vpu->vpu_mutex);
754 spin_lock_init(&vpu->irqlock);
755
756 match = of_match_node(of_hantro_match, pdev->dev.of_node);
757 vpu->variant = match->data;
758
759 INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);
760
761 vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
762 sizeof(*vpu->clocks), GFP_KERNEL);
763 if (!vpu->clocks)
764 return -ENOMEM;
765
766 for (i = 0; i < vpu->variant->num_clocks; i++)
767 vpu->clocks[i].id = vpu->variant->clk_names[i];
768 ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
769 vpu->clocks);
770 if (ret)
771 return ret;
772
773 num_bases = vpu->variant->num_regs ?: 1;
774 vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
775 sizeof(*vpu->reg_bases), GFP_KERNEL);
776 if (!vpu->reg_bases)
777 return -ENOMEM;
778
779 for (i = 0; i < num_bases; i++) {
780 res = vpu->variant->reg_names ?
781 platform_get_resource_byname(vpu->pdev, IORESOURCE_MEM,
782 vpu->variant->reg_names[i]) :
783 platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
784 vpu->reg_bases[i] = devm_ioremap_resource(vpu->dev, res);
785 if (IS_ERR(vpu->reg_bases[i]))
786 return PTR_ERR(vpu->reg_bases[i]);
787 }
788 vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
789 vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;
790
791 ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
792 if (ret) {
793 dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
794 return ret;
795 }
796 vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
797
798 for (i = 0; i < vpu->variant->num_irqs; i++) {
799 const char *irq_name = vpu->variant->irqs[i].name;
800 int irq;
801
802 if (!vpu->variant->irqs[i].handler)
803 continue;
804
805 irq = platform_get_irq_byname(vpu->pdev, irq_name);
806 if (irq <= 0)
807 return -ENXIO;
808
809 ret = devm_request_irq(vpu->dev, irq,
810 vpu->variant->irqs[i].handler, 0,
811 dev_name(vpu->dev), vpu);
812 if (ret) {
813 dev_err(vpu->dev, "Could not request %s IRQ.\n",
814 irq_name);
815 return ret;
816 }
817 }
818
819 ret = vpu->variant->init(vpu);
820 if (ret) {
821 dev_err(&pdev->dev, "Failed to init VPU hardware\n");
822 return ret;
823 }
824
825 pm_runtime_set_autosuspend_delay(vpu->dev, 100);
826 pm_runtime_use_autosuspend(vpu->dev);
827 pm_runtime_enable(vpu->dev);
828
829 ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
830 if (ret) {
831 dev_err(&pdev->dev, "Failed to prepare clocks\n");
832 goto err_pm_disable;
833 }
834
835 ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
836 if (ret) {
837 dev_err(&pdev->dev, "Failed to register v4l2 device\n");
838 goto err_clk_unprepare;
839 }
840 platform_set_drvdata(pdev, vpu);
841
842 vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
843 if (IS_ERR(vpu->m2m_dev)) {
844 v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
845 ret = PTR_ERR(vpu->m2m_dev);
846 goto err_v4l2_unreg;
847 }
848
849 vpu->mdev.dev = vpu->dev;
850 strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
851 strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
852 sizeof(vpu->mdev.model));
853 media_device_init(&vpu->mdev);
854 vpu->mdev.ops = &hantro_m2m_media_ops;
855 vpu->v4l2_dev.mdev = &vpu->mdev;
856
857 ret = hantro_add_enc_func(vpu);
858 if (ret) {
859 dev_err(&pdev->dev, "Failed to register encoder\n");
860 goto err_m2m_rel;
861 }
862
863 ret = hantro_add_dec_func(vpu);
864 if (ret) {
865 dev_err(&pdev->dev, "Failed to register decoder\n");
866 goto err_rm_enc_func;
867 }
868
869 ret = media_device_register(&vpu->mdev);
870 if (ret) {
871 v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
872 goto err_rm_dec_func;
873 }
874
875 return 0;
876
877 err_rm_dec_func:
878 hantro_remove_dec_func(vpu);
879 err_rm_enc_func:
880 hantro_remove_enc_func(vpu);
881 err_m2m_rel:
882 media_device_cleanup(&vpu->mdev);
883 v4l2_m2m_release(vpu->m2m_dev);
884 err_v4l2_unreg:
885 v4l2_device_unregister(&vpu->v4l2_dev);
886 err_clk_unprepare:
887 clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
888 err_pm_disable:
889 pm_runtime_dont_use_autosuspend(vpu->dev);
890 pm_runtime_disable(vpu->dev);
891 return ret;
892 }
893
/*
 * Remove the device, unwinding in exact reverse order of hantro_probe():
 * media device, codec functions, m2m/v4l2 devices, clocks, runtime PM.
 */
static int hantro_remove(struct platform_device *pdev)
{
	struct hantro_dev *vpu = platform_get_drvdata(pdev);

	v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);

	media_device_unregister(&vpu->mdev);
	hantro_remove_dec_func(vpu);
	hantro_remove_enc_func(vpu);
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
	v4l2_device_unregister(&vpu->v4l2_dev);
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
	return 0;
}
911
912 #ifdef CONFIG_PM
hantro_runtime_resume(struct device * dev)913 static int hantro_runtime_resume(struct device *dev)
914 {
915 struct hantro_dev *vpu = dev_get_drvdata(dev);
916
917 if (vpu->variant->runtime_resume)
918 return vpu->variant->runtime_resume(vpu);
919
920 return 0;
921 }
922 #endif
923
/* System sleep is routed through runtime PM force-suspend/resume. */
static const struct dev_pm_ops hantro_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
};
929
/* Platform driver glue; matched against of_hantro_match via DT. */
static struct platform_driver hantro_driver = {
	.probe = hantro_probe,
	.remove = hantro_remove,
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = of_match_ptr(of_hantro_match),
		   .pm = &hantro_pm_ops,
	},
};
module_platform_driver(hantro_driver);
940
941 MODULE_LICENSE("GPL v2");
942 MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
943 MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
944 MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
945 MODULE_DESCRIPTION("Hantro VPU codec driver");
946