1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
4 * Author:Mark Yao <mark.yao@rock-chips.com>
5 *
6 * based on exynos_drm_drv.c
7 */
8
9 #include <linux/dma-buf-cache.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dma-iommu.h>
12 #include <linux/genalloc.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/module.h>
15 #include <linux/of_address.h>
16 #include <linux/of_graph.h>
17 #include <linux/of_platform.h>
18 #include <linux/clk.h>
19 #include <linux/component.h>
20 #include <linux/console.h>
21 #include <linux/iommu.h>
22 #include <linux/of_reserved_mem.h>
23
24 #include <drm/drm_debugfs.h>
25 #include <drm/drm_drv.h>
26 #include <drm/drm_displayid.h>
27 #include <drm/drm_fb_helper.h>
28 #include <drm/drm_gem_cma_helper.h>
29 #include <drm/drm_of.h>
30 #include <drm/drm_probe_helper.h>
31 #include <drm/drm_vblank.h>
32
33 #include "rockchip_drm_drv.h"
34 #include "rockchip_drm_fb.h"
35 #include "rockchip_drm_fbdev.h"
36 #include "rockchip_drm_gem.h"
37 #include "rockchip_drm_logo.h"
38
39 #include "../drm_crtc_internal.h"
40 #include "../drivers/clk/rockchip/clk.h"
41
42 #define DRIVER_NAME "rockchip"
43 #define DRIVER_DESC "RockChip Soc DRM"
44 #define DRIVER_DATE "20140818"
45 #define DRIVER_MAJOR 3
46 #define DRIVER_MINOR 0
47
48 #if IS_ENABLED(CONFIG_DRM_ROCKCHIP_VVOP)
49 static bool is_support_iommu = false;
50 #else
51 static bool is_support_iommu = true;
52 #endif
53 static bool iommu_reserve_map;
54
55 static struct drm_driver rockchip_drm_driver;
56
57 static unsigned int drm_debug;
58 module_param_named(debug, drm_debug, int, 0600);
59
60 static inline bool rockchip_drm_debug_enabled(enum rockchip_drm_debug_category category)
61 {
62 return unlikely(drm_debug & category);
63 }
64
65 __printf(3, 4)
66 void rockchip_drm_dbg(const struct device *dev, enum rockchip_drm_debug_category category,
67 const char *format, ...)
68 {
69 struct va_format vaf;
70 va_list args;
71
72 if (!rockchip_drm_debug_enabled(category))
73 return;
74
75 va_start(args, format);
76 vaf.fmt = format;
77 vaf.va = &args;
78
79 if (dev)
80 dev_printk(KERN_DEBUG, dev, "%pV", &vaf);
81 else
82 printk(KERN_DEBUG "%pV", &vaf);
83
84 va_end(args);
85 }
86
87 /**
88 * rockchip_drm_wait_vact_end - wait for the vact_end line flag
89 * @crtc: CRTC to enable line flag
90 * @mstimeout: timeout in milliseconds
91 *
92 * Wait for vact_end line flag irq or timeout.
93 *
94 * Returns:
95 * Zero on success, negative errno on failure.
96 */
97 int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
98 {
99 struct rockchip_drm_private *priv;
100 int pipe, ret = 0;
101
102 if (!crtc)
103 return -ENODEV;
104
105 if (mstimeout == 0)
106 return -EINVAL;
107
108 priv = crtc->dev->dev_private;
109 pipe = drm_crtc_index(crtc);
110
111 if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->wait_vact_end)
112 ret = priv->crtc_funcs[pipe]->wait_vact_end(crtc, mstimeout);
113
114 return ret;
115 }
116 EXPORT_SYMBOL(rockchip_drm_wait_vact_end);
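/*
 * Usage sketch (illustrative, not part of this driver): a connector or
 * bridge driver holding a CRTC pointer can serialize work against the end
 * of vertical active, e.g. with a 25 ms budget:
 *
 *	int err = rockchip_drm_wait_vact_end(crtc, 25);
 *
 * A zero return means the vact_end line flag fired in time; a negative
 * errno means the CRTC is missing, the timeout was zero, or the wait failed.
 */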
117
118 void drm_mode_convert_to_split_mode(struct drm_display_mode *mode)
119 {
120 u16 hactive, hfp, hsync, hbp;
121
122 hactive = mode->hdisplay;
123 hfp = mode->hsync_start - mode->hdisplay;
124 hsync = mode->hsync_end - mode->hsync_start;
125 hbp = mode->htotal - mode->hsync_end;
126
127 mode->clock *= 2;
128 mode->crtc_clock *= 2;
129 mode->hdisplay = hactive * 2;
130 mode->hsync_start = mode->hdisplay + hfp * 2;
131 mode->hsync_end = mode->hsync_start + hsync * 2;
132 mode->htotal = mode->hsync_end + hbp * 2;
133 drm_mode_set_name(mode);
134 }
135 EXPORT_SYMBOL(drm_mode_convert_to_split_mode);
136
137 void drm_mode_convert_to_origin_mode(struct drm_display_mode *mode)
138 {
139 u16 hactive, hfp, hsync, hbp;
140
141 hactive = mode->hdisplay;
142 hfp = mode->hsync_start - mode->hdisplay;
143 hsync = mode->hsync_end - mode->hsync_start;
144 hbp = mode->htotal - mode->hsync_end;
145
146 mode->clock /= 2;
147 mode->crtc_clock /= 2;
148 mode->hdisplay = hactive / 2;
149 mode->hsync_start = mode->hdisplay + hfp / 2;
150 mode->hsync_end = mode->hsync_start + hsync / 2;
151 mode->htotal = mode->hsync_end + hbp / 2;
152 }
153 EXPORT_SYMBOL(drm_mode_convert_to_origin_mode);
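/*
 * Worked example (illustrative, assuming a dual-channel split panel):
 * converting CEA mode 16 (1920x1080@60, 148500 kHz) with
 * drm_mode_convert_to_split_mode() doubles the clock and every horizontal
 * timing, giving 3840x1080 with hsync_start 4016, hsync_end 4104,
 * htotal 4400 at 297000 kHz; drm_mode_convert_to_origin_mode() undoes
 * exactly that scaling.
 */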
154
155 /**
156 * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
157 * @connector: connector to report the event on
158 *
159 * On some hardware a hotplug event notification may come from outside the display
160 * driver / device. An example of this is some USB Type-C setups where the hardware
161 * muxes the DisplayPort data and aux-lines but does not pass the altmode HPD
162 * status bit to the GPU's DP HPD pin.
163 *
164 * This function can be used to report these out-of-band events after obtaining
165 * a drm_connector reference through calling drm_connector_find_by_fwnode().
166 */
167 void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode)
168 {
169 struct rockchip_drm_sub_dev *sub_dev;
170
171 if (!connector_fwnode || !connector_fwnode->dev)
172 return;
173
174 sub_dev = rockchip_drm_get_sub_dev(dev_of_node(connector_fwnode->dev));
175
176 if (sub_dev && sub_dev->connector && sub_dev->oob_hotplug_event)
177 sub_dev->oob_hotplug_event(sub_dev->connector);
178 }
179 EXPORT_SYMBOL(drm_connector_oob_hotplug_event);
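/*
 * Illustrative call site (assumption, not taken from this driver): a USB
 * Type-C altmode driver that owns the fwnode of the DP connector can
 * forward its HPD status with
 *
 *	drm_connector_oob_hotplug_event(dp_con_fwnode);
 *
 * which resolves the fwnode's of_node to a registered rockchip_drm_sub_dev
 * and invokes its oob_hotplug_event() hook; dp_con_fwnode is a hypothetical
 * handle owned by that driver.
 */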
180
181 uint32_t rockchip_drm_get_bpp(const struct drm_format_info *info)
182 {
183 /* use whatever a driver has set */
184 if (info->cpp[0])
185 return info->cpp[0] * 8;
186
187 switch (info->format) {
188 case DRM_FORMAT_YUV420_8BIT:
189 return 12;
190 case DRM_FORMAT_YUV420_10BIT:
191 return 15;
192 case DRM_FORMAT_VUY101010:
193 return 30;
194 default:
195 break;
196 }
197
198 /* all attempts failed */
199 return 0;
200 }
201 EXPORT_SYMBOL(rockchip_drm_get_bpp);
202
203 /**
204 * rockchip_drm_of_find_possible_crtcs - find the possible CRTCs for an active
205 * encoder port
206 * @dev: DRM device
207 * @port: encoder port to scan for endpoints
208 *
209 * Scan all active endpoints attached to a port, locate their attached CRTCs,
210 * and generate the DRM mask of CRTCs which may be attached to this
211 * encoder.
212 *
213 * See Documentation/devicetree/bindings/graph.txt for the bindings.
214 */
215 uint32_t rockchip_drm_of_find_possible_crtcs(struct drm_device *dev,
216 struct device_node *port)
217 {
218 struct device_node *remote_port, *ep;
219 uint32_t possible_crtcs = 0;
220
221 for_each_endpoint_of_node(port, ep) {
222 if (!of_device_is_available(ep))
223 continue;
224
225 remote_port = of_graph_get_remote_port(ep);
226 if (!remote_port) {
227 of_node_put(ep);
228 continue;
229 }
230
231 possible_crtcs |= drm_of_crtc_port_mask(dev, remote_port);
232
233 of_node_put(remote_port);
234 }
235
236 return possible_crtcs;
237 }
238 EXPORT_SYMBOL(rockchip_drm_of_find_possible_crtcs);
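/*
 * Usage sketch (assumption): an encoder driver can call this from its
 * component bind callback to fill drm_encoder.possible_crtcs, e.g.
 *
 *	encoder->possible_crtcs =
 *		rockchip_drm_of_find_possible_crtcs(drm_dev, port);
 *
 * where port is the encoder's input port node, for instance looked up with
 * of_graph_get_port_by_id(dev->of_node, 0).
 */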
239
240 static DEFINE_MUTEX(rockchip_drm_sub_dev_lock);
241 static LIST_HEAD(rockchip_drm_sub_dev_list);
242
243 void rockchip_connector_update_vfp_for_vrr(struct drm_crtc *crtc, struct drm_display_mode *mode,
244 int vfp)
245 {
246 struct rockchip_drm_sub_dev *sub_dev;
247
248 mutex_lock(&rockchip_drm_sub_dev_lock);
249 list_for_each_entry(sub_dev, &rockchip_drm_sub_dev_list, list) {
250 if (sub_dev->connector->state->crtc == crtc) {
251 if (sub_dev->update_vfp_for_vrr)
252 sub_dev->update_vfp_for_vrr(sub_dev->connector, mode, vfp);
253 }
254 }
255 mutex_unlock(&rockchip_drm_sub_dev_lock);
256 }
257 EXPORT_SYMBOL(rockchip_connector_update_vfp_for_vrr);
258
259 void rockchip_drm_register_sub_dev(struct rockchip_drm_sub_dev *sub_dev)
260 {
261 mutex_lock(&rockchip_drm_sub_dev_lock);
262 list_add_tail(&sub_dev->list, &rockchip_drm_sub_dev_list);
263 mutex_unlock(&rockchip_drm_sub_dev_lock);
264 }
265 EXPORT_SYMBOL(rockchip_drm_register_sub_dev);
266
267 void rockchip_drm_unregister_sub_dev(struct rockchip_drm_sub_dev *sub_dev)
268 {
269 mutex_lock(&rockchip_drm_sub_dev_lock);
270 list_del(&sub_dev->list);
271 mutex_unlock(&rockchip_drm_sub_dev_lock);
272 }
273 EXPORT_SYMBOL(rockchip_drm_unregister_sub_dev);
274
275 struct rockchip_drm_sub_dev *rockchip_drm_get_sub_dev(struct device_node *node)
276 {
277 struct rockchip_drm_sub_dev *sub_dev = NULL;
278 bool found = false;
279
280 mutex_lock(&rockchip_drm_sub_dev_lock);
281 list_for_each_entry(sub_dev, &rockchip_drm_sub_dev_list, list) {
282 if (sub_dev->of_node == node) {
283 found = true;
284 break;
285 }
286 }
287 mutex_unlock(&rockchip_drm_sub_dev_lock);
288
289 return found ? sub_dev : NULL;
290 }
291 EXPORT_SYMBOL(rockchip_drm_get_sub_dev);
292
293 int rockchip_drm_get_sub_dev_type(void)
294 {
295 int connector_type = DRM_MODE_CONNECTOR_Unknown;
296 struct rockchip_drm_sub_dev *sub_dev = NULL;
297
298 mutex_lock(&rockchip_drm_sub_dev_lock);
299 list_for_each_entry(sub_dev, &rockchip_drm_sub_dev_list, list) {
300 if (sub_dev->connector->encoder) {
301 connector_type = sub_dev->connector->connector_type;
302 break;
303 }
304 }
305 mutex_unlock(&rockchip_drm_sub_dev_lock);
306
307 return connector_type;
308 }
309 EXPORT_SYMBOL(rockchip_drm_get_sub_dev_type);
310
311 u32 rockchip_drm_get_scan_line_time_ns(void)
312 {
313 struct rockchip_drm_sub_dev *sub_dev = NULL;
314 struct drm_display_mode *mode;
315 int linedur_ns = 0;
316
317 mutex_lock(&rockchip_drm_sub_dev_lock);
318 list_for_each_entry(sub_dev, &rockchip_drm_sub_dev_list, list) {
319 if (sub_dev->connector->encoder && sub_dev->connector->state->crtc) {
320 mode = &sub_dev->connector->state->crtc->state->adjusted_mode;
321 linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, mode->crtc_clock);
322 break;
323 }
324 }
325 mutex_unlock(&rockchip_drm_sub_dev_lock);
326
327 return linedur_ns;
328 }
329 EXPORT_SYMBOL(rockchip_drm_get_scan_line_time_ns);
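/*
 * Worked example (illustrative): for a 1080p60 CEA timing with
 * crtc_htotal = 2200 and crtc_clock = 148500 kHz, the scan line time is
 * 2200 * 1000000 / 148500 ~= 14814 ns.
 */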
330
331 void rockchip_drm_te_handle(struct drm_crtc *crtc)
332 {
333 struct rockchip_drm_private *priv = crtc->dev->dev_private;
334 int pipe = drm_crtc_index(crtc);
335
336 if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->te_handler)
337 priv->crtc_funcs[pipe]->te_handler(crtc);
338 }
339 EXPORT_SYMBOL(rockchip_drm_te_handle);
340
341 static const struct drm_display_mode rockchip_drm_default_modes[] = {
342 /* 4 - 1280x720@60Hz 16:9 */
343 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
344 1430, 1650, 0, 720, 725, 730, 750, 0,
345 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
346 .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
347 /* 16 - 1920x1080@60Hz 16:9 */
348 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
349 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
350 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
351 .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
352 /* 31 - 1920x1080@50Hz 16:9 */
353 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
354 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
355 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
356 .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
357 /* 19 - 1280x720@50Hz 16:9 */
358 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
359 1760, 1980, 0, 720, 725, 730, 750, 0,
360 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
361 .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
362 /* 0x10 - 1024x768@60Hz */
363 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
364 1184, 1344, 0, 768, 771, 777, 806, 0,
365 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
366 /* 17 - 720x576@50Hz 4:3 */
367 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
368 796, 864, 0, 576, 581, 586, 625, 0,
369 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
370 .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
371 /* 2 - 720x480@60Hz 4:3 */
372 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
373 798, 858, 0, 480, 489, 495, 525, 0,
374 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
375 .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
376 };
377
378 int rockchip_drm_add_modes_noedid(struct drm_connector *connector)
379 {
380 struct drm_device *dev = connector->dev;
381 struct drm_display_mode *mode;
382 int i, count, num_modes = 0;
383
384 mutex_lock(&rockchip_drm_sub_dev_lock);
385 count = ARRAY_SIZE(rockchip_drm_default_modes);
386
387 for (i = 0; i < count; i++) {
388 const struct drm_display_mode *ptr = &rockchip_drm_default_modes[i];
389
390 mode = drm_mode_duplicate(dev, ptr);
391 if (mode) {
392 if (!i)
393 mode->type = DRM_MODE_TYPE_PREFERRED;
394 drm_mode_probed_add(connector, mode);
395 num_modes++;
396 }
397 }
398 mutex_unlock(&rockchip_drm_sub_dev_lock);
399
400 return num_modes;
401 }
402 EXPORT_SYMBOL(rockchip_drm_add_modes_noedid);
403
404 static const struct rockchip_drm_width_dclk {
405 int width;
406 u32 dclk_khz;
407 } rockchip_drm_dclk[] = {
408 {1920, 148500},
409 {2048, 200000},
410 {2560, 280000},
411 {3840, 594000},
412 {4096, 594000},
413 {7680, 2376000},
414 };
415
416 u32 rockchip_drm_get_dclk_by_width(int width)
417 {
418 int i = 0;
419 u32 dclk_khz;
420
421 for (i = 0; i < ARRAY_SIZE(rockchip_drm_dclk); i++) {
422 if (width == rockchip_drm_dclk[i].width) {
423 dclk_khz = rockchip_drm_dclk[i].dclk_khz;
424 break;
425 }
426 }
427
428 if (i == ARRAY_SIZE(rockchip_drm_dclk)) {
429 DRM_ERROR("Can't find a dclk for width %d, using 148500 kHz as max dclk\n", width);
430
431 dclk_khz = 148500;
432 }
433
434 return dclk_khz;
435 }
436 EXPORT_SYMBOL(rockchip_drm_get_dclk_by_width);
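/*
 * Example (illustrative): rockchip_drm_get_dclk_by_width(3840) returns
 * 594000 kHz per the table above, while an unlisted width such as 1280
 * logs an error and falls back to 148500 kHz.
 */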
437
438 static int
439 cea_db_tag(const u8 *db)
440 {
441 return db[0] >> 5;
442 }
443
444 static int
445 cea_db_payload_len(const u8 *db)
446 {
447 return db[0] & 0x1f;
448 }
449
450 #define for_each_cea_db(cea, i, start, end) \
451 for ((i) = (start); \
452 (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); \
453 (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
454
455 #define HDMI_NEXT_HDR_VSDB_OUI 0xd04601
456
457 static bool cea_db_is_hdmi_next_hdr_block(const u8 *db)
458 {
459 unsigned int oui;
460
461 if (cea_db_tag(db) != 0x07)
462 return false;
463
464 if (cea_db_payload_len(db) < 11)
465 return false;
466
467 oui = db[3] << 16 | db[2] << 8 | db[1];
468
469 return oui == HDMI_NEXT_HDR_VSDB_OUI;
470 }
471
472 static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
473 {
474 unsigned int oui;
475
476 if (cea_db_tag(db) != 0x03)
477 return false;
478
479 if (cea_db_payload_len(db) < 7)
480 return false;
481
482 oui = db[3] << 16 | db[2] << 8 | db[1];
483
484 return oui == HDMI_FORUM_IEEE_OUI;
485 }
486
487 static int
488 cea_db_offsets(const u8 *cea, int *start, int *end)
489 {
490 /* DisplayID CTA extension blocks and top-level CEA EDID
491 * block header definitions differ in the following bytes:
492 * 1) Byte 2 of the header specifies length differently,
493 * 2) Byte 3 is only present in the CEA top level block.
494 *
495 * The different definitions for byte 2 follow.
496 *
497 * DisplayID CTA extension block defines byte 2 as:
498 * Number of payload bytes
499 *
500 * CEA EDID block defines byte 2 as:
501 * Byte number (decimal) within this block where the 18-byte
502 * DTDs begin. If no non-DTD data is present in this extension
503 * block, the value should be set to 04h (the byte after next).
504 * If set to 00h, there are no DTDs present in this block and
505 * no non-DTD data.
506 */
507 if (cea[0] == 0x81) {
508 /*
509 * for_each_displayid_db() has already verified
510 * that these stay within expected bounds.
511 */
512 *start = 3;
513 *end = *start + cea[2];
514 } else if (cea[0] == 0x02) {
515 /* Data block offset in CEA extension block */
516 *start = 4;
517 *end = cea[2];
518 if (*end == 0)
519 *end = 127;
520 if (*end < 4 || *end > 127)
521 return -ERANGE;
522 } else {
523 return -EOPNOTSUPP;
524 }
525
526 return 0;
527 }
528
529 static u8 *find_edid_extension(const struct edid *edid,
530 int ext_id, int *ext_index)
531 {
532 u8 *edid_ext = NULL;
533 int i;
534
535 /* No EDID or EDID extensions */
536 if (edid == NULL || edid->extensions == 0)
537 return NULL;
538
539 /* Find CEA extension */
540 for (i = *ext_index; i < edid->extensions; i++) {
541 edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
542 if (edid_ext[0] == ext_id)
543 break;
544 }
545
546 if (i >= edid->extensions)
547 return NULL;
548
549 *ext_index = i + 1;
550
551 return edid_ext;
552 }
553
554 static int validate_displayid(u8 *displayid, int length, int idx)
555 {
556 int i, dispid_length;
557 u8 csum = 0;
558 struct displayid_hdr *base;
559
560 base = (struct displayid_hdr *)&displayid[idx];
561
562 DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
563 base->rev, base->bytes, base->prod_id, base->ext_count);
564
565 /* +1 for DispID checksum */
566 dispid_length = sizeof(*base) + base->bytes + 1;
567 if (dispid_length > length - idx)
568 return -EINVAL;
569
570 for (i = 0; i < dispid_length; i++)
571 csum += displayid[idx + i];
572 if (csum) {
573 DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
574 return -EINVAL;
575 }
576
577 return 0;
578 }
579
580 static u8 *find_displayid_extension(const struct edid *edid,
581 int *length, int *idx,
582 int *ext_index)
583 {
584 u8 *displayid = find_edid_extension(edid, 0x70, ext_index);
585 struct displayid_hdr *base;
586 int ret;
587
588 if (!displayid)
589 return NULL;
590
591 /* EDID extensions block checksum isn't for us */
592 *length = EDID_LENGTH - 1;
593 *idx = 1;
594
595 ret = validate_displayid(displayid, *length, *idx);
596 if (ret)
597 return NULL;
598
599 base = (struct displayid_hdr *)&displayid[*idx];
600 *length = *idx + sizeof(*base) + base->bytes;
601
602 return displayid;
603 }
604
605 static u8 *find_cea_extension(const struct edid *edid)
606 {
607 int length, idx;
608 struct displayid_block *block;
609 u8 *cea;
610 u8 *displayid;
611 int ext_index;
612
613 /* Look for a top level CEA extension block */
614 /* FIXME: make callers iterate through multiple CEA ext blocks? */
615 ext_index = 0;
616 cea = find_edid_extension(edid, 0x02, &ext_index);
617 if (cea)
618 return cea;
619
620 /* CEA blocks can also be found embedded in a DisplayID block */
621 ext_index = 0;
622 for (;;) {
623 displayid = find_displayid_extension(edid, &length, &idx,
624 &ext_index);
625 if (!displayid)
626 return NULL;
627
628 idx += sizeof(struct displayid_hdr);
629 for_each_displayid_db(displayid, block, idx, length) {
630 if (block->tag == 0x81)
631 return (u8 *)block;
632 }
633 }
634
635 return NULL;
636 }
637
638 #define EDID_CEA_YCRCB422 (1 << 4)
639
640 int rockchip_drm_get_yuv422_format(struct drm_connector *connector,
641 struct edid *edid)
642 {
643 struct drm_display_info *info;
644 const u8 *edid_ext;
645
646 if (!connector || !edid)
647 return -EINVAL;
648
649 info = &connector->display_info;
650
651 edid_ext = find_cea_extension(edid);
652 if (!edid_ext)
653 return -EINVAL;
654
655 if (edid_ext[3] & EDID_CEA_YCRCB422)
656 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
657
658 return 0;
659 }
660 EXPORT_SYMBOL(rockchip_drm_get_yuv422_format);
661
662 static
663 void get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane)
664 {
665 switch (max_frl_rate) {
666 case 1:
667 *max_lanes = 3;
668 *max_rate_per_lane = 3;
669 break;
670 case 2:
671 *max_lanes = 3;
672 *max_rate_per_lane = 6;
673 break;
674 case 3:
675 *max_lanes = 4;
676 *max_rate_per_lane = 6;
677 break;
678 case 4:
679 *max_lanes = 4;
680 *max_rate_per_lane = 8;
681 break;
682 case 5:
683 *max_lanes = 4;
684 *max_rate_per_lane = 10;
685 break;
686 case 6:
687 *max_lanes = 4;
688 *max_rate_per_lane = 12;
689 break;
690 case 0:
691 default:
692 *max_lanes = 0;
693 *max_rate_per_lane = 0;
694 }
695 }
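/*
 * Example (illustrative): a Max_FRL_Rate field of 5 decodes to 4 FRL lanes
 * at 10 Gbps per lane (a 40 Gbps link), while 0 or any unknown value
 * decodes to no FRL support.
 */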
696
697 #define EDID_DSC_10BPC (1 << 0)
698 #define EDID_DSC_12BPC (1 << 1)
699 #define EDID_DSC_16BPC (1 << 2)
700 #define EDID_DSC_ALL_BPP (1 << 3)
701 #define EDID_DSC_NATIVE_420 (1 << 6)
702 #define EDID_DSC_1P2 (1 << 7)
703 #define EDID_DSC_MAX_FRL_RATE_MASK 0xf0
704 #define EDID_DSC_MAX_SLICES 0xf
705 #define EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
706 #define EDID_MAX_FRL_RATE_MASK 0xf0
707
708 static
709 void parse_edid_forum_vsdb(struct rockchip_drm_dsc_cap *dsc_cap,
710 u8 *max_frl_rate_per_lane, u8 *max_lanes, u8 *add_func,
711 const u8 *hf_vsdb)
712 {
713 u8 max_frl_rate;
714 u8 dsc_max_frl_rate;
715 u8 dsc_max_slices;
716
717 if (!hf_vsdb[7])
718 return;
719
720 DRM_DEBUG_KMS("HDMI 2.1 sink detected, parsing EDID\n");
721 max_frl_rate = (hf_vsdb[7] & EDID_MAX_FRL_RATE_MASK) >> 4;
722 get_max_frl_rate(max_frl_rate, max_lanes,
723 max_frl_rate_per_lane);
724
725 *add_func = hf_vsdb[8];
726
727 if (cea_db_payload_len(hf_vsdb) < 13)
728 return;
729
730 dsc_cap->v_1p2 = hf_vsdb[11] & EDID_DSC_1P2;
731
732 if (!dsc_cap->v_1p2)
733 return;
734
735 dsc_cap->native_420 = hf_vsdb[11] & EDID_DSC_NATIVE_420;
736 dsc_cap->all_bpp = hf_vsdb[11] & EDID_DSC_ALL_BPP;
737
738 if (hf_vsdb[11] & EDID_DSC_16BPC)
739 dsc_cap->bpc_supported = 16;
740 else if (hf_vsdb[11] & EDID_DSC_12BPC)
741 dsc_cap->bpc_supported = 12;
742 else if (hf_vsdb[11] & EDID_DSC_10BPC)
743 dsc_cap->bpc_supported = 10;
744 else
745 dsc_cap->bpc_supported = 0;
746
747 dsc_max_frl_rate = (hf_vsdb[12] & EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
748 get_max_frl_rate(dsc_max_frl_rate, &dsc_cap->max_lanes,
749 &dsc_cap->max_frl_rate_per_lane);
750 dsc_cap->total_chunk_kbytes = hf_vsdb[13] & EDID_DSC_TOTAL_CHUNK_KBYTES;
751
752 dsc_max_slices = hf_vsdb[12] & EDID_DSC_MAX_SLICES;
753 switch (dsc_max_slices) {
754 case 1:
755 dsc_cap->max_slices = 1;
756 dsc_cap->clk_per_slice = 340;
757 break;
758 case 2:
759 dsc_cap->max_slices = 2;
760 dsc_cap->clk_per_slice = 340;
761 break;
762 case 3:
763 dsc_cap->max_slices = 4;
764 dsc_cap->clk_per_slice = 340;
765 break;
766 case 4:
767 dsc_cap->max_slices = 8;
768 dsc_cap->clk_per_slice = 340;
769 break;
770 case 5:
771 dsc_cap->max_slices = 8;
772 dsc_cap->clk_per_slice = 400;
773 break;
774 case 6:
775 dsc_cap->max_slices = 12;
776 dsc_cap->clk_per_slice = 400;
777 break;
778 case 7:
779 dsc_cap->max_slices = 16;
780 dsc_cap->clk_per_slice = 400;
781 break;
782 case 0:
783 default:
784 dsc_cap->max_slices = 0;
785 dsc_cap->clk_per_slice = 0;
786 }
787 }
788
789 enum {
790 VER_26_BYTE_V0,
791 VER_15_BYTE_V1,
792 VER_12_BYTE_V1,
793 VER_12_BYTE_V2,
794 };
795
796 static int check_next_hdr_version(const u8 *next_hdr_db)
797 {
798 u16 ver;
799
800 ver = (next_hdr_db[5] & 0xf0) << 8 | next_hdr_db[0];
801
802 switch (ver) {
803 case 0x00f9:
804 return VER_26_BYTE_V0;
805 case 0x20ee:
806 return VER_15_BYTE_V1;
807 case 0x20eb:
808 return VER_12_BYTE_V1;
809 case 0x40eb:
810 return VER_12_BYTE_V2;
811 default:
812 return -ENOENT;
813 }
814 }
815
816 static void parse_ver_26_v0_data(struct ver_26_v0 *hdr, const u8 *data)
817 {
818 hdr->yuv422_12bit = data[5] & BIT(0);
819 hdr->support_2160p_60 = (data[5] & BIT(1)) >> 1;
820 hdr->global_dimming = (data[5] & BIT(2)) >> 2;
821
822 hdr->dm_major_ver = (data[21] & 0xf0) >> 4;
823 hdr->dm_minor_ver = data[21] & 0xf;
824
825 hdr->t_min_pq = (data[19] << 4) | ((data[18] & 0xf0) >> 4);
826 hdr->t_max_pq = (data[20] << 4) | (data[18] & 0xf);
827
828 hdr->rx = (data[7] << 4) | ((data[6] & 0xf0) >> 4);
829 hdr->ry = (data[8] << 4) | (data[6] & 0xf);
830 hdr->gx = (data[10] << 4) | ((data[9] & 0xf0) >> 4);
831 hdr->gy = (data[11] << 4) | (data[9] & 0xf);
832 hdr->bx = (data[13] << 4) | ((data[12] & 0xf0) >> 4);
833 hdr->by = (data[14] << 4) | (data[12] & 0xf);
834 hdr->wx = (data[16] << 4) | ((data[15] & 0xf0) >> 4);
835 hdr->wy = (data[17] << 4) | (data[15] & 0xf);
836 }
837
838 static void parse_ver_15_v1_data(struct ver_15_v1 *hdr, const u8 *data)
839 {
840 hdr->yuv422_12bit = data[5] & BIT(0);
841 hdr->support_2160p_60 = (data[5] & BIT(1)) >> 1;
842 hdr->global_dimming = data[6] & BIT(0);
843
844 hdr->dm_version = (data[5] & 0x1c) >> 2;
845
846 hdr->colorimetry = data[7] & BIT(0);
847
848 hdr->t_max_lum = (data[6] & 0xfe) >> 1;
849 hdr->t_min_lum = (data[7] & 0xfe) >> 1;
850
851 hdr->rx = data[9];
852 hdr->ry = data[10];
853 hdr->gx = data[11];
854 hdr->gy = data[12];
855 hdr->bx = data[13];
856 hdr->by = data[14];
857 }
858
859 static void parse_ver_12_v1_data(struct ver_12_v1 *hdr, const u8 *data)
860 {
861 hdr->yuv422_12bit = data[5] & BIT(0);
862 hdr->support_2160p_60 = (data[5] & BIT(1)) >> 1;
863 hdr->global_dimming = data[6] & BIT(0);
864
865 hdr->dm_version = (data[5] & 0x1c) >> 2;
866
867 hdr->colorimetry = data[7] & BIT(0);
868
869 hdr->t_max_lum = (data[6] & 0xfe) >> 1;
870 hdr->t_min_lum = (data[7] & 0xfe) >> 1;
871
872 hdr->low_latency = data[8] & 0x3;
873
874 hdr->unique_rx = (data[11] & 0xf8) >> 3;
875 hdr->unique_ry = (data[11] & 0x7) << 2 | (data[10] & BIT(0)) << 1 |
876 (data[9] & BIT(0));
877 hdr->unique_gx = (data[9] & 0xfe) >> 1;
878 hdr->unique_gy = (data[10] & 0xfe) >> 1;
879 hdr->unique_bx = (data[8] & 0xe0) >> 5;
880 hdr->unique_by = (data[8] & 0x1c) >> 2;
881 }
882
883 static void parse_ver_12_v2_data(struct ver_12_v2 *hdr, const u8 *data)
884 {
885 hdr->yuv422_12bit = data[5] & BIT(0);
886 hdr->backlt_ctrl = (data[5] & BIT(1)) >> 1;
887 hdr->global_dimming = (data[6] & BIT(2)) >> 2;
888
889 hdr->dm_version = (data[5] & 0x1c) >> 2;
890 hdr->backlt_min_luma = data[6] & 0x3;
891 hdr->interface = data[7] & 0x3;
892 hdr->yuv444_10b_12b = (data[8] & BIT(0)) << 1 | (data[9] & BIT(0));
893
894 hdr->t_min_pq_v2 = (data[6] & 0xf8) >> 3;
895 hdr->t_max_pq_v2 = (data[7] & 0xf8) >> 3;
896
897 hdr->unique_rx = (data[10] & 0xf8) >> 3;
898 hdr->unique_ry = (data[11] & 0xf8) >> 3;
899 hdr->unique_gx = (data[8] & 0xfe) >> 1;
900 hdr->unique_gy = (data[9] & 0xfe) >> 1;
901 hdr->unique_bx = data[10] & 0x7;
902 hdr->unique_by = data[11] & 0x7;
903 }
904
905 static
906 void parse_next_hdr_block(struct next_hdr_sink_data *sink_data,
907 const u8 *next_hdr_db)
908 {
909 int version;
910
911 version = check_next_hdr_version(next_hdr_db);
912 if (version < 0)
913 return;
914
915 sink_data->version = version;
916
917 switch (version) {
918 case VER_26_BYTE_V0:
919 parse_ver_26_v0_data(&sink_data->ver_26_v0, next_hdr_db);
920 break;
921 case VER_15_BYTE_V1:
922 parse_ver_15_v1_data(&sink_data->ver_15_v1, next_hdr_db);
923 break;
924 case VER_12_BYTE_V1:
925 parse_ver_12_v1_data(&sink_data->ver_12_v1, next_hdr_db);
926 break;
927 case VER_12_BYTE_V2:
928 parse_ver_12_v2_data(&sink_data->ver_12_v2, next_hdr_db);
929 break;
930 default:
931 break;
932 }
933 }
934
935 int rockchip_drm_parse_cea_ext(struct rockchip_drm_dsc_cap *dsc_cap,
936 u8 *max_frl_rate_per_lane, u8 *max_lanes, u8 *add_func,
937 const struct edid *edid)
938 {
939 const u8 *edid_ext;
940 int i, start, end;
941
942 if (!dsc_cap || !max_frl_rate_per_lane || !max_lanes || !edid || !add_func)
943 return -EINVAL;
944
945 edid_ext = find_cea_extension(edid);
946 if (!edid_ext)
947 return -EINVAL;
948
949 if (cea_db_offsets(edid_ext, &start, &end))
950 return -EINVAL;
951
952 for_each_cea_db(edid_ext, i, start, end) {
953 const u8 *db = &edid_ext[i];
954
955 if (cea_db_is_hdmi_forum_vsdb(db))
956 parse_edid_forum_vsdb(dsc_cap, max_frl_rate_per_lane,
957 max_lanes, add_func, db);
958 }
959
960 return 0;
961 }
962 EXPORT_SYMBOL(rockchip_drm_parse_cea_ext);
963
964 int rockchip_drm_parse_next_hdr(struct next_hdr_sink_data *sink_data,
965 const struct edid *edid)
966 {
967 const u8 *edid_ext;
968 int i, start, end;
969
970 if (!sink_data || !edid)
971 return -EINVAL;
972
973 memset(sink_data, 0, sizeof(struct next_hdr_sink_data));
974
975 edid_ext = find_cea_extension(edid);
976 if (!edid_ext)
977 return -EINVAL;
978
979 if (cea_db_offsets(edid_ext, &start, &end))
980 return -EINVAL;
981
982 for_each_cea_db(edid_ext, i, start, end) {
983 const u8 *db = &edid_ext[i];
984
985 if (cea_db_is_hdmi_next_hdr_block(db))
986 parse_next_hdr_block(sink_data, db);
987 }
988
989 return 0;
990 }
991 EXPORT_SYMBOL(rockchip_drm_parse_next_hdr);
992
993 #define COLORIMETRY_DATA_BLOCK 0x5
994 #define USE_EXTENDED_TAG 0x07
995
996 static bool cea_db_is_hdmi_colorimetry_data_block(const u8 *db)
997 {
998 if (cea_db_tag(db) != USE_EXTENDED_TAG)
999 return false;
1000
1001 if (db[1] != COLORIMETRY_DATA_BLOCK)
1002 return false;
1003
1004 return true;
1005 }
1006
1007 int
1008 rockchip_drm_parse_colorimetry_data_block(u8 *colorimetry, const struct edid *edid)
1009 {
1010 const u8 *edid_ext;
1011 int i, start, end;
1012
1013 if (!colorimetry || !edid)
1014 return -EINVAL;
1015
1016 *colorimetry = 0;
1017
1018 edid_ext = find_cea_extension(edid);
1019 if (!edid_ext)
1020 return -EINVAL;
1021
1022 if (cea_db_offsets(edid_ext, &start, &end))
1023 return -EINVAL;
1024
1025 for_each_cea_db(edid_ext, i, start, end) {
1026 const u8 *db = &edid_ext[i];
1027
1028 if (cea_db_is_hdmi_colorimetry_data_block(db))
1029 /* As per CEA 861-G spec */
1030 *colorimetry = ((db[3] & (0x1 << 7)) << 1) | db[2];
1031 }
1032
1033 return 0;
1034 }
1035 EXPORT_SYMBOL(rockchip_drm_parse_colorimetry_data_block);
1036
1037 /*
1038 * Attach a (component) device to the shared drm dma mapping from master drm
1039 * device. This is used by the VOPs to map GEM buffers to a common DMA
1040 * mapping.
1041 */
1042 int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
1043 struct device *dev)
1044 {
1045 struct rockchip_drm_private *private = drm_dev->dev_private;
1046 int ret;
1047
1048 if (!is_support_iommu)
1049 return 0;
1050
1051 ret = iommu_attach_device(private->domain, dev);
1052 if (ret) {
1053 DRM_DEV_ERROR(dev, "Failed to attach iommu device\n");
1054 return ret;
1055 }
1056
1057 return 0;
1058 }
1059
1060 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
1061 struct device *dev)
1062 {
1063 struct rockchip_drm_private *private = drm_dev->dev_private;
1064 struct iommu_domain *domain = private->domain;
1065
1066 if (!is_support_iommu)
1067 return;
1068
1069 iommu_detach_device(domain, dev);
1070 }
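/*
 * Usage sketch (assumption): a VOP component pairs these calls over its
 * lifetime so its GEM buffers resolve through the shared IOMMU mapping,
 * e.g. rockchip_drm_dma_attach_device(drm_dev, dev) from its bind callback
 * and rockchip_drm_dma_detach_device(drm_dev, dev) from unbind; both are
 * no-ops when is_support_iommu is false.
 */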
1071
1072 void rockchip_drm_crtc_standby(struct drm_crtc *crtc, bool standby)
1073 {
1074 struct rockchip_drm_private *priv = crtc->dev->dev_private;
1075 int pipe = drm_crtc_index(crtc);
1076
1077 if (pipe < ROCKCHIP_MAX_CRTC &&
1078 priv->crtc_funcs[pipe] &&
1079 priv->crtc_funcs[pipe]->crtc_standby)
1080 priv->crtc_funcs[pipe]->crtc_standby(crtc, standby);
1081 }
1082
1083 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
1084 const struct rockchip_crtc_funcs *crtc_funcs)
1085 {
1086 int pipe = drm_crtc_index(crtc);
1087 struct rockchip_drm_private *priv = crtc->dev->dev_private;
1088
1089 if (pipe >= ROCKCHIP_MAX_CRTC)
1090 return -EINVAL;
1091
1092 priv->crtc_funcs[pipe] = crtc_funcs;
1093
1094 return 0;
1095 }
1096
1097 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
1098 {
1099 int pipe = drm_crtc_index(crtc);
1100 struct rockchip_drm_private *priv = crtc->dev->dev_private;
1101
1102 if (pipe >= ROCKCHIP_MAX_CRTC)
1103 return;
1104
1105 priv->crtc_funcs[pipe] = NULL;
1106 }
1107
1108 static int rockchip_drm_fault_handler(struct iommu_domain *iommu,
1109 struct device *dev,
1110 unsigned long iova, int flags, void *arg)
1111 {
1112 struct drm_device *drm_dev = arg;
1113 struct rockchip_drm_private *priv = drm_dev->dev_private;
1114 struct drm_crtc *crtc;
1115
1116 DRM_ERROR("iommu fault handler flags: 0x%x\n", flags);
1117 drm_for_each_crtc(crtc, drm_dev) {
1118 int pipe = drm_crtc_index(crtc);
1119
1120 if (priv->crtc_funcs[pipe] &&
1121 priv->crtc_funcs[pipe]->regs_dump)
1122 priv->crtc_funcs[pipe]->regs_dump(crtc, NULL);
1123
1124 if (priv->crtc_funcs[pipe] &&
1125 priv->crtc_funcs[pipe]->debugfs_dump)
1126 priv->crtc_funcs[pipe]->debugfs_dump(crtc, NULL);
1127 }
1128
1129 return 0;
1130 }
1131
1132 static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
1133 {
1134 struct rockchip_drm_private *private = drm_dev->dev_private;
1135 struct iommu_domain_geometry *geometry;
1136 u64 start, end;
1137 int ret = 0;
1138
1139 if (!is_support_iommu)
1140 return 0;
1141
1142 private->domain = iommu_domain_alloc(&platform_bus_type);
1143 if (!private->domain)
1144 return -ENOMEM;
1145
1146 geometry = &private->domain->geometry;
1147 start = geometry->aperture_start;
1148 end = geometry->aperture_end;
1149
1150 DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
1151 start, end);
1152 drm_mm_init(&private->mm, start, end - start + 1);
1153 mutex_init(&private->mm_lock);
1154
1155 iommu_set_fault_handler(private->domain, rockchip_drm_fault_handler,
1156 drm_dev);
1157
1158 if (iommu_reserve_map) {
1159 /*
1160 * On a 32-bit platform the maximum size_t value is 0xffffffff, so SZ_4G
1161 * (0x100000000) would be clipped to 0; split the range into two 2G mappings instead.
1162 */
1163 ret = iommu_map(private->domain, 0, 0, (size_t)SZ_2G,
1164 IOMMU_WRITE | IOMMU_READ | IOMMU_PRIV);
1165 if (ret)
1166 dev_err(drm_dev->dev, "failed to create 0-2G pre mapping\n");
1167
1168 ret = iommu_map(private->domain, SZ_2G, SZ_2G, (size_t)SZ_2G,
1169 IOMMU_WRITE | IOMMU_READ | IOMMU_PRIV);
1170 if (ret)
1171 dev_err(drm_dev->dev, "failed to create 2G-4G pre mapping\n");
1172 }
1173
1174 return ret;
1175 }
1176
1177 static void rockchip_iommu_cleanup(struct drm_device *drm_dev)
1178 {
1179 struct rockchip_drm_private *private = drm_dev->dev_private;
1180
1181 if (!is_support_iommu)
1182 return;
1183
1184 if (iommu_reserve_map) {
1185 iommu_unmap(private->domain, 0, (size_t)SZ_2G);
1186 iommu_unmap(private->domain, SZ_2G, (size_t)SZ_2G);
1187 }
1188 drm_mm_takedown(&private->mm);
1189 iommu_domain_free(private->domain);
1190 }
1191
1192 #ifdef CONFIG_DEBUG_FS
1193 static int rockchip_drm_mm_dump(struct seq_file *s, void *data)
1194 {
1195 struct drm_info_node *node = s->private;
1196 struct drm_minor *minor = node->minor;
1197 struct drm_device *drm_dev = minor->dev;
1198 struct rockchip_drm_private *priv = drm_dev->dev_private;
1199 struct drm_printer p = drm_seq_file_printer(s);
1200
1201 if (!priv->domain)
1202 return 0;
1203 mutex_lock(&priv->mm_lock);
1204 drm_mm_print(&priv->mm, &p);
1205 mutex_unlock(&priv->mm_lock);
1206
1207 return 0;
1208 }
1209
1210 static int rockchip_drm_summary_show(struct seq_file *s, void *data)
1211 {
1212 struct drm_info_node *node = s->private;
1213 struct drm_minor *minor = node->minor;
1214 struct drm_device *drm_dev = minor->dev;
1215 struct rockchip_drm_private *priv = drm_dev->dev_private;
1216 struct drm_crtc *crtc;
1217
1218 drm_for_each_crtc(crtc, drm_dev) {
1219 int pipe = drm_crtc_index(crtc);
1220
1221 if (priv->crtc_funcs[pipe] &&
1222 priv->crtc_funcs[pipe]->debugfs_dump)
1223 priv->crtc_funcs[pipe]->debugfs_dump(crtc, s);
1224 }
1225
1226 return 0;
1227 }
1228
1229 static int rockchip_drm_regs_dump(struct seq_file *s, void *data)
1230 {
1231 struct drm_info_node *node = s->private;
1232 struct drm_minor *minor = node->minor;
1233 struct drm_device *drm_dev = minor->dev;
1234 struct rockchip_drm_private *priv = drm_dev->dev_private;
1235 struct drm_crtc *crtc;
1236
1237 drm_for_each_crtc(crtc, drm_dev) {
1238 int pipe = drm_crtc_index(crtc);
1239
1240 if (priv->crtc_funcs[pipe] &&
1241 priv->crtc_funcs[pipe]->regs_dump)
1242 priv->crtc_funcs[pipe]->regs_dump(crtc, s);
1243 }
1244
1245 return 0;
1246 }
1247
1248 static int rockchip_drm_active_regs_dump(struct seq_file *s, void *data)
1249 {
1250 struct drm_info_node *node = s->private;
1251 struct drm_minor *minor = node->minor;
1252 struct drm_device *drm_dev = minor->dev;
1253 struct rockchip_drm_private *priv = drm_dev->dev_private;
1254 struct drm_crtc *crtc;
1255
1256 drm_for_each_crtc(crtc, drm_dev) {
1257 int pipe = drm_crtc_index(crtc);
1258
1259 if (priv->crtc_funcs[pipe] &&
1260 priv->crtc_funcs[pipe]->active_regs_dump)
1261 priv->crtc_funcs[pipe]->active_regs_dump(crtc, s);
1262 }
1263
1264 return 0;
1265 }
1266
1267 static struct drm_info_list rockchip_debugfs_files[] = {
1268 { "active_regs", rockchip_drm_active_regs_dump, 0, NULL },
1269 { "regs", rockchip_drm_regs_dump, 0, NULL },
1270 { "summary", rockchip_drm_summary_show, 0, NULL },
1271 { "mm_dump", rockchip_drm_mm_dump, 0, NULL },
1272 };
1273
1274 static void rockchip_drm_debugfs_init(struct drm_minor *minor)
1275 {
1276 struct drm_device *dev = minor->dev;
1277 struct rockchip_drm_private *priv = dev->dev_private;
1278 struct drm_crtc *crtc;
1279
1280 drm_debugfs_create_files(rockchip_debugfs_files,
1281 ARRAY_SIZE(rockchip_debugfs_files),
1282 minor->debugfs_root, minor);
1283
1284 drm_for_each_crtc(crtc, dev) {
1285 int pipe = drm_crtc_index(crtc);
1286
1287 if (priv->crtc_funcs[pipe] &&
1288 priv->crtc_funcs[pipe]->debugfs_init)
1289 priv->crtc_funcs[pipe]->debugfs_init(minor, crtc);
1290 }
1291 }
1292 #endif
1293
1294 static const struct drm_prop_enum_list split_area[] = {
1295 { ROCKCHIP_DRM_SPLIT_UNSET, "UNSET" },
1296 { ROCKCHIP_DRM_SPLIT_LEFT_SIDE, "LEFT" },
1297 { ROCKCHIP_DRM_SPLIT_RIGHT_SIDE, "RIGHT" },
1298 };
1299
1300 static int rockchip_drm_create_properties(struct drm_device *dev)
1301 {
1302 struct drm_property *prop;
1303 struct rockchip_drm_private *private = dev->dev_private;
1304
1305 prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
1306 "EOTF", 0, 5);
1307 if (!prop)
1308 return -ENOMEM;
1309 private->eotf_prop = prop;
1310
1311 prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
1312 "COLOR_SPACE", 0, 12);
1313 if (!prop)
1314 return -ENOMEM;
1315 private->color_space_prop = prop;
1316
1317 prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
1318 "ASYNC_COMMIT", 0, 1);
1319 if (!prop)
1320 return -ENOMEM;
1321 private->async_commit_prop = prop;
1322
1323 prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
1324 "SHARE_ID", 0, UINT_MAX);
1325 if (!prop)
1326 return -ENOMEM;
1327 private->share_id_prop = prop;
1328
1329 prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_IMMUTABLE,
1330 "CONNECTOR_ID", 0, 0xf);
1331 if (!prop)
1332 return -ENOMEM;
1333 private->connector_id_prop = prop;
1334
1335 prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "SPLIT_AREA",
1336 split_area,
1337 ARRAY_SIZE(split_area));
1338 private->split_area_prop = prop;
1339
1340 prop = drm_property_create_object(dev,
1341 DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_IMMUTABLE,
1342 "SOC_ID", DRM_MODE_OBJECT_CRTC);
1343 private->soc_id_prop = prop;
1344
1345 prop = drm_property_create_object(dev,
1346 DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_IMMUTABLE,
1347 "PORT_ID", DRM_MODE_OBJECT_CRTC);
1348 private->port_id_prop = prop;
1349
1350 private->aclk_prop = drm_property_create_range(dev, 0, "ACLK", 0, UINT_MAX);
1351 private->bg_prop = drm_property_create_range(dev, 0, "BACKGROUND", 0, UINT_MAX);
1352 private->line_flag_prop = drm_property_create_range(dev, 0, "LINE_FLAG1", 0, UINT_MAX);
1353 private->cubic_lut_prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "CUBIC_LUT", 0);
1354 private->cubic_lut_size_prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE,
1355 "CUBIC_LUT_SIZE", 0, UINT_MAX);
1356
1357 return drm_mode_create_tv_properties(dev, 0, NULL);
1358 }
1359
1360 static void rockchip_attach_connector_property(struct drm_device *drm)
1361 {
1362 struct drm_connector *connector;
1363 struct drm_mode_config *conf = &drm->mode_config;
1364 struct drm_connector_list_iter conn_iter;
1365
1366 mutex_lock(&drm->mode_config.mutex);
1367
1368 #define ROCKCHIP_PROP_ATTACH(prop, v) \
1369 drm_object_attach_property(&connector->base, prop, v)
1370
1371 drm_connector_list_iter_begin(drm, &conn_iter);
1372 drm_for_each_connector_iter(connector, &conn_iter) {
1373 ROCKCHIP_PROP_ATTACH(conf->tv_brightness_property, 50);
1374 ROCKCHIP_PROP_ATTACH(conf->tv_contrast_property, 50);
1375 ROCKCHIP_PROP_ATTACH(conf->tv_saturation_property, 50);
1376 ROCKCHIP_PROP_ATTACH(conf->tv_hue_property, 50);
1377 }
1378 drm_connector_list_iter_end(&conn_iter);
1379 #undef ROCKCHIP_PROP_ATTACH
1380
1381 mutex_unlock(&drm->mode_config.mutex);
1382 }
1383
1384 static void rockchip_drm_set_property_default(struct drm_device *drm)
1385 {
1386 struct drm_connector *connector;
1387 struct drm_mode_config *conf = &drm->mode_config;
1388 struct drm_atomic_state *state;
1389 int ret;
1390 struct drm_connector_list_iter conn_iter;
1391
1392 drm_modeset_lock_all(drm);
1393
1394 state = drm_atomic_helper_duplicate_state(drm, conf->acquire_ctx);
1395 if (IS_ERR(state)) {
1396 DRM_ERROR("failed to alloc atomic state\n");
1397 goto err_unlock;
1398 }
1399 state->acquire_ctx = conf->acquire_ctx;
1400
1401 drm_connector_list_iter_begin(drm, &conn_iter);
1402 drm_for_each_connector_iter(connector, &conn_iter) {
1403 struct drm_connector_state *connector_state;
1404
1405 connector_state = drm_atomic_get_connector_state(state,
1406 connector);
1407 if (IS_ERR(connector_state)) {
1408 DRM_ERROR("Connector[%d]: Failed to get state\n", connector->base.id);
1409 continue;
1410 }
1411
1412 connector_state->tv.brightness = 50;
1413 connector_state->tv.contrast = 50;
1414 connector_state->tv.saturation = 50;
1415 connector_state->tv.hue = 50;
1416 }
1417 drm_connector_list_iter_end(&conn_iter);
1418
1419 ret = drm_atomic_commit(state);
1420 WARN_ON(ret == -EDEADLK);
1421 if (ret)
1422 DRM_ERROR("Failed to update properties\n");
1423 drm_atomic_state_put(state);
1424
1425 err_unlock:
1426 drm_modeset_unlock_all(drm);
1427 }
1428
1429 static int rockchip_gem_pool_init(struct drm_device *drm)
1430 {
1431 struct rockchip_drm_private *private = drm->dev_private;
1432 struct device_node *np = drm->dev->of_node;
1433 struct device_node *node;
1434 phys_addr_t start, size;
1435 struct resource res;
1436 int ret;
1437
1438 node = of_parse_phandle(np, "secure-memory-region", 0);
1439 if (!node)
1440 return -ENXIO;
1441
1442 ret = of_address_to_resource(node, 0, &res);
1443 if (ret)
1444 return ret;
1445 start = res.start;
1446 size = resource_size(&res);
1447 if (!size)
1448 return -ENOMEM;
1449
1450 private->secure_buffer_pool = gen_pool_create(PAGE_SHIFT, -1);
1451 if (!private->secure_buffer_pool)
1452 return -ENOMEM;
1453
1454 gen_pool_add(private->secure_buffer_pool, start, size, -1);
1455
1456 return 0;
1457 }
1458
1459 static void rockchip_gem_pool_destroy(struct drm_device *drm)
1460 {
1461 struct rockchip_drm_private *private = drm->dev_private;
1462
1463 if (!private->secure_buffer_pool)
1464 return;
1465
1466 gen_pool_destroy(private->secure_buffer_pool);
1467 }
1468
1469 static int rockchip_drm_bind(struct device *dev)
1470 {
1471 struct drm_device *drm_dev;
1472 struct rockchip_drm_private *private;
1473 int ret;
1474
1475 drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
1476 if (IS_ERR(drm_dev))
1477 return PTR_ERR(drm_dev);
1478
1479 dev_set_drvdata(dev, drm_dev);
1480
1481 private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
1482 if (!private) {
1483 ret = -ENOMEM;
1484 goto err_free;
1485 }
1486
1487 mutex_init(&private->ovl_lock);
1488
1489 drm_dev->dev_private = private;
1490
1491 INIT_LIST_HEAD(&private->psr_list);
1492 mutex_init(&private->psr_list_lock);
1493 mutex_init(&private->commit_lock);
1494
1495 private->hdmi_pll.pll = devm_clk_get_optional(dev, "hdmi-tmds-pll");
1496 if (PTR_ERR(private->hdmi_pll.pll) == -EPROBE_DEFER) {
1497 ret = -EPROBE_DEFER;
1498 goto err_free;
1499 } else if (IS_ERR(private->hdmi_pll.pll)) {
1500 dev_err(dev, "failed to get hdmi-tmds-pll\n");
1501 ret = PTR_ERR(private->hdmi_pll.pll);
1502 goto err_free;
1503 }
1504 private->default_pll.pll = devm_clk_get_optional(dev, "default-vop-pll");
1505 if (PTR_ERR(private->default_pll.pll) == -EPROBE_DEFER) {
1506 ret = -EPROBE_DEFER;
1507 goto err_free;
1508 } else if (IS_ERR(private->default_pll.pll)) {
1509 dev_err(dev, "failed to get default vop pll\n");
1510 ret = PTR_ERR(private->default_pll.pll);
1511 goto err_free;
1512 }
1513
1514 ret = drmm_mode_config_init(drm_dev);
1515 if (ret)
1516 goto err_free;
1517
1518 rockchip_drm_mode_config_init(drm_dev);
1519 rockchip_drm_create_properties(drm_dev);
1520 /* Try to bind all sub drivers. */
1521 ret = component_bind_all(dev, drm_dev);
1522 if (ret)
1523 goto err_mode_config_cleanup;
1524
1525 rockchip_attach_connector_property(drm_dev);
1526 ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
1527 if (ret)
1528 goto err_unbind_all;
1529
1530 drm_mode_config_reset(drm_dev);
1531 rockchip_drm_set_property_default(drm_dev);
1532
1533 /*
1534 * enable drm irq mode.
1535 * - with irq_enabled = true, we can use the vblank feature.
1536 */
1537 drm_dev->irq_enabled = true;
1538
1539 /* init kms poll for handling hpd */
1540 drm_kms_helper_poll_init(drm_dev);
1541
1542 ret = rockchip_drm_init_iommu(drm_dev);
1543 if (ret)
1544 goto err_unbind_all;
1545
1546 rockchip_gem_pool_init(drm_dev);
1547 ret = of_reserved_mem_device_init(drm_dev->dev);
1548 if (ret)
1549 DRM_DEBUG_KMS("No reserved memory region assigned to drm\n");
1550
1551 rockchip_drm_show_logo(drm_dev);
1552
1553 ret = rockchip_drm_fbdev_init(drm_dev);
1554 if (ret)
1555 goto err_iommu_cleanup;
1556
1557 drm_dev->mode_config.allow_fb_modifiers = true;
1558
1559 ret = drm_dev_register(drm_dev, 0);
1560 if (ret)
1561 goto err_kms_helper_poll_fini;
1562
1563 rockchip_clk_unprotect();
1564
1565 return 0;
1566 err_kms_helper_poll_fini:
1567 rockchip_gem_pool_destroy(drm_dev);
1568 drm_kms_helper_poll_fini(drm_dev);
1569 rockchip_drm_fbdev_fini(drm_dev);
1570 err_iommu_cleanup:
1571 rockchip_iommu_cleanup(drm_dev);
1572 err_unbind_all:
1573 component_unbind_all(dev, drm_dev);
1574 err_mode_config_cleanup:
1575 drm_mode_config_cleanup(drm_dev);
1576 err_free:
1577 drm_dev->dev_private = NULL;
1578 dev_set_drvdata(dev, NULL);
1579 drm_dev_put(drm_dev);
1580 return ret;
1581 }
1582
1583 static void rockchip_drm_unbind(struct device *dev)
1584 {
1585 struct drm_device *drm_dev = dev_get_drvdata(dev);
1586
1587 drm_dev_unregister(drm_dev);
1588
1589 rockchip_drm_fbdev_fini(drm_dev);
1590 rockchip_gem_pool_destroy(drm_dev);
1591 drm_kms_helper_poll_fini(drm_dev);
1592
1593 drm_atomic_helper_shutdown(drm_dev);
1594 component_unbind_all(dev, drm_dev);
1595 drm_mode_config_cleanup(drm_dev);
1596 rockchip_iommu_cleanup(drm_dev);
1597
1598 drm_dev->dev_private = NULL;
1599 dev_set_drvdata(dev, NULL);
1600 drm_dev_put(drm_dev);
1601 }
1602
1603 static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
1604 struct drm_file *file_priv)
1605 {
1606 struct rockchip_drm_private *priv = crtc->dev->dev_private;
1607 int pipe = drm_crtc_index(crtc);
1608
1609 if (pipe < ROCKCHIP_MAX_CRTC &&
1610 priv->crtc_funcs[pipe] &&
1611 priv->crtc_funcs[pipe]->cancel_pending_vblank)
1612 priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
1613 }
1614
1615 static int rockchip_drm_open(struct drm_device *dev, struct drm_file *file)
1616 {
1617 struct drm_crtc *crtc;
1618
1619 drm_for_each_crtc(crtc, dev)
1620 crtc->primary->fb = NULL;
1621
1622 return 0;
1623 }
1624
1625 static void rockchip_drm_postclose(struct drm_device *dev,
1626 struct drm_file *file_priv)
1627 {
1628 struct drm_crtc *crtc;
1629
1630 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
1631 rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
1632 }
1633
1634 static void rockchip_drm_lastclose(struct drm_device *dev)
1635 {
1636 struct rockchip_drm_private *priv = dev->dev_private;
1637
1638 if (!priv->logo)
1639 drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev_helper);
1640 }
1641
1642 static struct drm_pending_vblank_event *
1643 rockchip_drm_add_vcnt_event(struct drm_crtc *crtc, union drm_wait_vblank *vblwait,
1644 struct drm_file *file_priv)
1645 {
1646 struct drm_pending_vblank_event *e;
1647 struct drm_device *dev = crtc->dev;
1648 unsigned long flags;
1649
1650 e = kzalloc(sizeof(*e), GFP_KERNEL);
1651 if (!e)
1652 return NULL;
1653
1654 e->pipe = drm_crtc_index(crtc);
1655 e->event.base.type = DRM_EVENT_ROCKCHIP_CRTC_VCNT;
1656 e->event.base.length = sizeof(e->event.vbl);
1657 e->event.vbl.crtc_id = crtc->base.id;
1658 e->event.vbl.user_data = vblwait->request.signal;
1659
1660 spin_lock_irqsave(&dev->event_lock, flags);
1661 drm_event_reserve_init_locked(dev, file_priv, &e->base, &e->event.base);
1662 spin_unlock_irqrestore(&dev->event_lock, flags);
1663
1664 return e;
1665 }
1666
1667 static int rockchip_drm_get_vcnt_event_ioctl(struct drm_device *dev, void *data,
1668 struct drm_file *file_priv)
1669 {
1670 struct rockchip_drm_private *priv = dev->dev_private;
1671 union drm_wait_vblank *vblwait = data;
1672 struct drm_pending_vblank_event *e;
1673 struct drm_crtc *crtc;
1674 unsigned int flags, pipe;
1675
1676 flags = vblwait->request.type & (_DRM_VBLANK_FLAGS_MASK | _DRM_ROCKCHIP_VCNT_EVENT);
1677 pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
1678 if (pipe)
1679 pipe = pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
1680 else
1681 pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
1682
1683 crtc = drm_crtc_from_index(dev, pipe);
1684
1685 if (flags & _DRM_ROCKCHIP_VCNT_EVENT) {
1686 e = rockchip_drm_add_vcnt_event(crtc, vblwait, file_priv);
1687 priv->vcnt[pipe].event = e;
1688 }
1689
1690 return 0;
1691 }
1692
1693 static const struct drm_ioctl_desc rockchip_ioctls[] = {
1694 DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_CREATE, rockchip_gem_create_ioctl,
1695 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
1696 DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_MAP_OFFSET,
1697 rockchip_gem_map_offset_ioctl,
1698 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
1699 DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_GET_PHYS, rockchip_gem_get_phys_ioctl,
1700 DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
1701 DRM_IOCTL_DEF_DRV(ROCKCHIP_GET_VCNT_EVENT, rockchip_drm_get_vcnt_event_ioctl,
1702 DRM_UNLOCKED),
1703 };
1704
1705 static const struct file_operations rockchip_drm_driver_fops = {
1706 .owner = THIS_MODULE,
1707 .open = drm_open,
1708 .mmap = rockchip_gem_mmap,
1709 .poll = drm_poll,
1710 .read = drm_read,
1711 .unlocked_ioctl = drm_ioctl,
1712 .compat_ioctl = drm_compat_ioctl,
1713 .release = drm_release,
1714 };
1715
1716 static int rockchip_drm_gem_dmabuf_begin_cpu_access(struct dma_buf *dma_buf,
1717 enum dma_data_direction dir)
1718 {
1719 struct drm_gem_object *obj = dma_buf->priv;
1720
1721 return rockchip_gem_prime_begin_cpu_access(obj, dir);
1722 }
1723
1724 static int rockchip_drm_gem_dmabuf_end_cpu_access(struct dma_buf *dma_buf,
1725 enum dma_data_direction dir)
1726 {
1727 struct drm_gem_object *obj = dma_buf->priv;
1728
1729 return rockchip_gem_prime_end_cpu_access(obj, dir);
1730 }
1731
1732 static const struct dma_buf_ops rockchip_drm_gem_prime_dmabuf_ops = {
1733 .cache_sgt_mapping = true,
1734 .attach = drm_gem_map_attach,
1735 .detach = drm_gem_map_detach,
1736 .map_dma_buf = drm_gem_map_dma_buf,
1737 .unmap_dma_buf = drm_gem_unmap_dma_buf,
1738 .release = drm_gem_dmabuf_release,
1739 .mmap = drm_gem_dmabuf_mmap,
1740 .vmap = drm_gem_dmabuf_vmap,
1741 .vunmap = drm_gem_dmabuf_vunmap,
1742 .get_uuid = drm_gem_dmabuf_get_uuid,
1743 .begin_cpu_access = rockchip_drm_gem_dmabuf_begin_cpu_access,
1744 .end_cpu_access = rockchip_drm_gem_dmabuf_end_cpu_access,
1745 };
1746
1747 static struct drm_gem_object *rockchip_drm_gem_prime_import_dev(struct drm_device *dev,
1748 struct dma_buf *dma_buf,
1749 struct device *attach_dev)
1750 {
1751 struct dma_buf_attachment *attach;
1752 struct sg_table *sgt;
1753 struct drm_gem_object *obj;
1754 int ret;
1755
1756 if (dma_buf->ops == &rockchip_drm_gem_prime_dmabuf_ops) {
1757 obj = dma_buf->priv;
1758 if (obj->dev == dev) {
1759 /*
1760 * Importing a dmabuf exported from our own GEM increases the
1761 * refcount on the GEM object itself instead of the f_count of the dmabuf.
1762 */
1763 drm_gem_object_get(obj);
1764 return obj;
1765 }
1766 }
1767
1768 if (!dev->driver->gem_prime_import_sg_table)
1769 return ERR_PTR(-EINVAL);
1770
1771 attach = dma_buf_attach(dma_buf, attach_dev);
1772 if (IS_ERR(attach))
1773 return ERR_CAST(attach);
1774
1775 get_dma_buf(dma_buf);
1776
1777 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
1778 if (IS_ERR(sgt)) {
1779 ret = PTR_ERR(sgt);
1780 goto fail_detach;
1781 }
1782
1783 obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
1784 if (IS_ERR(obj)) {
1785 ret = PTR_ERR(obj);
1786 goto fail_unmap;
1787 }
1788
1789 obj->import_attach = attach;
1790 obj->resv = dma_buf->resv;
1791
1792 return obj;
1793
1794 fail_unmap:
1795 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
1796 fail_detach:
1797 dma_buf_detach(dma_buf, attach);
1798 dma_buf_put(dma_buf);
1799
1800 return ERR_PTR(ret);
1801 }
1802
1803 static struct drm_gem_object *rockchip_drm_gem_prime_import(struct drm_device *dev,
1804 struct dma_buf *dma_buf)
1805 {
1806 return rockchip_drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
1807 }
1808
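/*
 * Export a GEM object as a dma-buf backed by the driver-specific
 * rockchip_drm_gem_prime_dmabuf_ops, so that re-imports of our own buffers
 * can be detected and CPU access requests reach the driver's cache helpers.
 */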
static struct dma_buf *rockchip_drm_gem_prime_export(struct drm_gem_object *obj,
                                                     int flags)
{
        struct drm_device *dev = obj->dev;
        struct dma_buf_export_info exp_info = {
                .exp_name = KBUILD_MODNAME, /* white lie for debug */
                .owner = dev->driver->fops->owner,
                .ops = &rockchip_drm_gem_prime_dmabuf_ops,
                .size = obj->size,
                .flags = flags,
                .priv = obj,
                .resv = obj->resv,
        };

        return drm_gem_dmabuf_export(dev, &exp_info);
}

static struct drm_driver rockchip_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER,
        .postclose = rockchip_drm_postclose,
        .lastclose = rockchip_drm_lastclose,
        .open = rockchip_drm_open,
        .gem_vm_ops = &drm_gem_cma_vm_ops,
        .gem_free_object_unlocked = rockchip_gem_free_object,
        .dumb_create = rockchip_gem_dumb_create,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import = rockchip_drm_gem_prime_import,
        .gem_prime_export = rockchip_drm_gem_prime_export,
        .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
        .gem_prime_vmap = rockchip_gem_prime_vmap,
        .gem_prime_vunmap = rockchip_gem_prime_vunmap,
        .gem_prime_mmap = rockchip_gem_mmap_buf,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init = rockchip_drm_debugfs_init,
#endif
        .ioctls = rockchip_ioctls,
        .num_ioctls = ARRAY_SIZE(rockchip_ioctls),
        .fops = &rockchip_drm_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
};

#ifdef CONFIG_PM_SLEEP
static int rockchip_drm_sys_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        return drm_mode_config_helper_suspend(drm);
}

static int rockchip_drm_sys_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        return drm_mode_config_helper_resume(drm);
}
#endif

static const struct dev_pm_ops rockchip_drm_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend,
                                rockchip_drm_sys_resume)
};

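/*
 * Registry of component sub-drivers (VOPs, connectors, bridges). It is filled
 * by rockchip_drm_init() via ADD_ROCKCHIP_SUB_DRIVER() and consumed by
 * rockchip_drm_match_add() and rockchip_drm_endpoint_is_subdriver().
 */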
#define MAX_ROCKCHIP_SUB_DRIVERS 16
static struct platform_driver *rockchip_sub_drivers[MAX_ROCKCHIP_SUB_DRIVERS];
static int num_rockchip_sub_drivers;

/*
 * Check whether a VOP endpoint leads to a Rockchip sub-driver or to an
 * external bridge. Should be called from the component bind stage of the
 * drivers to ensure that all sub-drivers have been probed.
 *
 * @ep: endpoint of a Rockchip VOP
 *
 * Returns true for a sub-driver, false for an external bridge, and -ENODEV
 * if the remote port does not contain a device.
 */
int rockchip_drm_endpoint_is_subdriver(struct device_node *ep)
{
        struct device_node *node = of_graph_get_remote_port_parent(ep);
        struct platform_device *pdev;
        struct device_driver *drv;
        int i;

        if (!node)
                return -ENODEV;

        /* A disabled status property prevents creation of the platform device. */
        pdev = of_find_device_by_node(node);
        of_node_put(node);
        if (!pdev)
                return -ENODEV;

        /*
         * All Rockchip sub-drivers have probed at this point, so any device
         * without a driver now is an external bridge.
         */
        drv = pdev->dev.driver;
        if (!drv) {
                platform_device_put(pdev);
                return false;
        }

        for (i = 0; i < num_rockchip_sub_drivers; i++) {
                if (rockchip_sub_drivers[i] == to_platform_driver(drv)) {
                        platform_device_put(pdev);
                        return true;
                }
        }

        platform_device_put(pdev);
        return false;
}

static int compare_dev(struct device *dev, void *data)
{
        return dev == (struct device *)data;
}

static void rockchip_drm_match_remove(struct device *dev)
{
        struct device_link *link;

        list_for_each_entry(link, &dev->links.consumers, s_node)
                device_link_del(link);
}

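/*
 * Build the component match list: for each registered sub-driver walk every
 * platform device currently bound to it, add a stateless device link (undone
 * again by rockchip_drm_match_remove()) and register the device as a
 * component match entry.
 */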
static struct component_match *rockchip_drm_match_add(struct device *dev)
{
        struct component_match *match = NULL;
        int i;

        for (i = 0; i < num_rockchip_sub_drivers; i++) {
                struct platform_driver *drv = rockchip_sub_drivers[i];
                struct device *p = NULL, *d;

                do {
                        d = platform_find_device_by_driver(p, &drv->driver);
                        put_device(p);
                        p = d;

                        if (!d)
                                break;

                        device_link_add(dev, d, DL_FLAG_STATELESS);
                        component_match_add(dev, &match, compare_dev, d);
                } while (true);
        }

        if (IS_ERR(match))
                rockchip_drm_match_remove(dev);

        return match ?: ERR_PTR(-ENODEV);
}

static const struct component_master_ops rockchip_drm_ops = {
        .bind = rockchip_drm_bind,
        .unbind = rockchip_drm_unbind,
};

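/*
 * Walk the "ports" phandle list of the display-subsystem node. If any
 * available VOP lacks an available "iommus" phandle, fall back to non-IOMMU
 * buffers for all CRTCs; also pick up the "rockchip,reserve-map" property of
 * the attached IOMMUs. Fails with -ENODEV when no port or no available VOP
 * is found.
 */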
static int rockchip_drm_platform_of_probe(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct device_node *port;
        bool found = false;
        int i;

        if (!np)
                return -ENODEV;

        for (i = 0;; i++) {
                struct device_node *iommu;

                port = of_parse_phandle(np, "ports", i);
                if (!port)
                        break;

                if (!of_device_is_available(port->parent)) {
                        of_node_put(port);
                        continue;
                }

                iommu = of_parse_phandle(port->parent, "iommus", 0);
                if (!iommu || !of_device_is_available(iommu)) {
                        DRM_DEV_DEBUG(dev,
                                      "no iommu attached for %pOF, using non-iommu buffers\n",
                                      port->parent);
                        /*
                         * If any CRTC does not support an IOMMU, force all
                         * CRTCs to use non-IOMMU buffers.
                         */
                        is_support_iommu = false;
                }

                found = true;

                iommu_reserve_map |= of_property_read_bool(iommu, "rockchip,reserve-map");
                of_node_put(iommu);
                of_node_put(port);
        }

        if (i == 0) {
                DRM_DEV_ERROR(dev, "missing 'ports' property\n");
                return -ENODEV;
        }

        if (!found) {
                DRM_DEV_ERROR(dev,
                              "No available vop found for display-subsystem.\n");
                return -ENODEV;
        }

        return 0;
}

static int rockchip_drm_platform_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct component_match *match = NULL;
        int ret;

        ret = rockchip_drm_platform_of_probe(dev);
#if !IS_ENABLED(CONFIG_DRM_ROCKCHIP_VVOP)
        if (ret)
                return ret;
#endif

        match = rockchip_drm_match_add(dev);
        if (IS_ERR(match))
                return PTR_ERR(match);

        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)
                goto err;

        ret = component_master_add_with_match(dev, &rockchip_drm_ops, match);
        if (ret < 0)
                goto err;

        return 0;
err:
        rockchip_drm_match_remove(dev);

        return ret;
}

static int rockchip_drm_platform_remove(struct platform_device *pdev)
{
        component_master_del(&pdev->dev, &rockchip_drm_ops);

        rockchip_drm_match_remove(&pdev->dev);

        return 0;
}

static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
{
        struct drm_device *drm = platform_get_drvdata(pdev);

        if (drm)
                drm_atomic_helper_shutdown(drm);
}

static const struct of_device_id rockchip_drm_dt_ids[] = {
        { .compatible = "rockchip,display-subsystem", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);

static struct platform_driver rockchip_drm_platform_driver = {
        .probe = rockchip_drm_platform_probe,
        .remove = rockchip_drm_platform_remove,
        .shutdown = rockchip_drm_platform_shutdown,
        .driver = {
                .name = "rockchip-drm",
                .of_match_table = rockchip_drm_dt_ids,
                .pm = &rockchip_drm_pm_ops,
        },
};

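/*
 * Append @drv to the sub-driver registry, but only when its Kconfig option
 * @cond is enabled and the registry still has room.
 */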
#define ADD_ROCKCHIP_SUB_DRIVER(drv, cond) { \
        if (IS_ENABLED(cond) && \
            !WARN_ON(num_rockchip_sub_drivers >= MAX_ROCKCHIP_SUB_DRIVERS)) \
                rockchip_sub_drivers[num_rockchip_sub_drivers++] = &drv; \
}

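/*
 * All component sub-drivers are registered before the master platform driver,
 * so that rockchip_drm_match_add() and rockchip_drm_endpoint_is_subdriver()
 * can find them once the master device probes.
 */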
static int __init rockchip_drm_init(void)
{
        int ret;

        num_rockchip_sub_drivers = 0;
#if IS_ENABLED(CONFIG_DRM_ROCKCHIP_VVOP)
        ADD_ROCKCHIP_SUB_DRIVER(vvop_platform_driver, CONFIG_DRM_ROCKCHIP_VVOP);
#else
        ADD_ROCKCHIP_SUB_DRIVER(vop_platform_driver, CONFIG_ROCKCHIP_VOP);
        ADD_ROCKCHIP_SUB_DRIVER(vop2_platform_driver, CONFIG_ROCKCHIP_VOP2);
        ADD_ROCKCHIP_SUB_DRIVER(vconn_platform_driver, CONFIG_ROCKCHIP_VCONN);
        ADD_ROCKCHIP_SUB_DRIVER(rockchip_lvds_driver, CONFIG_ROCKCHIP_LVDS);
        ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver, CONFIG_ROCKCHIP_ANALOGIX_DP);
        ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
        ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver, CONFIG_ROCKCHIP_DW_HDMI);
        ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver, CONFIG_ROCKCHIP_DW_MIPI_DSI);
        ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi2_rockchip_driver, CONFIG_ROCKCHIP_DW_MIPI_DSI);
        ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
        ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver, CONFIG_ROCKCHIP_RK3066_HDMI);
        ADD_ROCKCHIP_SUB_DRIVER(rockchip_rgb_driver, CONFIG_ROCKCHIP_RGB);
        ADD_ROCKCHIP_SUB_DRIVER(rockchip_tve_driver, CONFIG_ROCKCHIP_DRM_TVE);
        ADD_ROCKCHIP_SUB_DRIVER(dw_dp_driver, CONFIG_ROCKCHIP_DW_DP);
#endif
        ret = platform_register_drivers(rockchip_sub_drivers,
                                        num_rockchip_sub_drivers);
        if (ret)
                return ret;

        ret = platform_driver_register(&rockchip_drm_platform_driver);
        if (ret)
                goto err_unreg_drivers;

        rockchip_gem_get_ddr_info();

        return 0;

err_unreg_drivers:
        platform_unregister_drivers(rockchip_sub_drivers,
                                    num_rockchip_sub_drivers);
        return ret;
}

static void __exit rockchip_drm_fini(void)
{
        platform_driver_unregister(&rockchip_drm_platform_driver);

        platform_unregister_drivers(rockchip_sub_drivers,
                                    num_rockchip_sub_drivers);
}

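/*
 * With CONFIG_VIDEO_REVERSE_IMAGE enabled the driver is registered at
 * fs_initcall level, which runs earlier than module_init() for built-in code,
 * presumably so the display path (e.g. reverse-image/backup camera) is
 * available as early as possible during boot.
 */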
#ifdef CONFIG_VIDEO_REVERSE_IMAGE
fs_initcall(rockchip_drm_init);
#else
module_init(rockchip_drm_init);
#endif
module_exit(rockchip_drm_fini);

MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP DRM Driver");
MODULE_LICENSE("GPL v2");