1 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2 /*
3 * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
4 * Author: Sandy Huang <hjc@rock-chips.com>
5 */
6 #include <linux/memblock.h>
7 #include <linux/of_address.h>
8 #include <linux/of_platform.h>
9 #include <linux/clk.h>
10 #include <linux/clk-provider.h>
11 #include <linux/iommu.h>
12
13 #include <drm/drm_atomic_uapi.h>
14 #include <drm/drm_drv.h>
15 #include <drm/drm_gem_cma_helper.h>
16 #include <drm/drm_of.h>
17 #include <drm/drm_probe_helper.h>
18
19 #include "rockchip_drm_drv.h"
20 #include "rockchip_drm_fb.h"
21 #include "rockchip_drm_logo.h"
22
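/*
 * Connector types that support hotplug detection; used after the logo
 * commit to keep an extra reference on the fbdev framebuffer for such
 * outputs.
 */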
23 static bool is_support_hotplug(uint32_t output_type)
24 {
25 switch (output_type) {
26 case DRM_MODE_CONNECTOR_DVII:
27 case DRM_MODE_CONNECTOR_DVID:
28 case DRM_MODE_CONNECTOR_DVIA:
29 case DRM_MODE_CONNECTOR_DisplayPort:
30 case DRM_MODE_CONNECTOR_HDMIA:
31 case DRM_MODE_CONNECTOR_HDMIB:
32 case DRM_MODE_CONNECTOR_TV:
33 return true;
34 default:
35 return false;
36 }
37 }
38
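/*
 * Resolve the CRTC behind a "connect" endpoint: the endpoint's parent
 * port node is compared against each registered CRTC's ->port.
 */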
39 static struct drm_crtc *
40 find_crtc_by_node(struct drm_device *drm_dev, struct device_node *node)
41 {
42 struct device_node *np_crtc;
43 struct drm_crtc *crtc;
44
45 np_crtc = of_get_parent(node);
46 if (!np_crtc || !of_device_is_available(np_crtc))
47 return NULL;
48
49 drm_for_each_crtc(crtc, drm_dev) {
50 if (crtc->port == np_crtc)
51 return crtc;
52 }
53
54 return NULL;
55 }
56
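/*
 * Look up the rockchip_drm_sub_dev of a connector attached directly to
 * the remote end of the given endpoint.
 */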
57 static struct rockchip_drm_sub_dev *
58 find_sub_dev_by_node(struct drm_device *drm_dev, struct device_node *node)
59 {
60 struct device_node *np_connector;
61 struct rockchip_drm_sub_dev *sub_dev;
62
63 np_connector = of_graph_get_remote_port_parent(node);
64 if (!np_connector || !of_device_is_available(np_connector))
65 return NULL;
66
67 sub_dev = rockchip_drm_get_sub_dev(np_connector);
68 if (!sub_dev)
69 return NULL;
70
71 return sub_dev;
72 }
73
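/*
 * If the endpoint leads to an encoder/bridge rather than a connector,
 * walk the encoder's output port (port@1) and return the sub_dev of the
 * first enabled connector found behind it.
 */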
74 static struct rockchip_drm_sub_dev *
75 find_sub_dev_by_bridge(struct drm_device *drm_dev, struct device_node *node)
76 {
77 struct device_node *np_encoder, *np_connector = NULL;
78 struct rockchip_drm_sub_dev *sub_dev = NULL;
79 struct device_node *port, *endpoint;
80
81 np_encoder = of_graph_get_remote_port_parent(node);
82 if (!np_encoder || !of_device_is_available(np_encoder))
83 goto err_put_encoder;
84
85 port = of_graph_get_port_by_id(np_encoder, 1);
86 if (!port) {
87 dev_err(drm_dev->dev, "can't found port point!\n");
88 goto err_put_encoder;
89 }
90
91 for_each_child_of_node(port, endpoint) {
92 np_connector = of_graph_get_remote_port_parent(endpoint);
93 if (!np_connector) {
94 dev_err(drm_dev->dev,
95 "can't found connector node, please init!\n");
96 goto err_put_port;
97 }
98 if (!of_device_is_available(np_connector)) {
99 of_node_put(np_connector);
100 np_connector = NULL;
101 continue;
102 } else {
103 break;
104 }
105 }
106 if (!np_connector) {
107 dev_err(drm_dev->dev, "can't found available connector node!\n");
108 goto err_put_port;
109 }
110
111 sub_dev = rockchip_drm_get_sub_dev(np_connector);
112 if (!sub_dev)
113 goto err_put_port;
114
115 of_node_put(np_connector);
116 err_put_port:
117 of_node_put(port);
118 err_put_encoder:
119 of_node_put(np_encoder);
120
121 return sub_dev;
122 }
123
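/*
 * Helpers to pin/unpin a fixed range in the driver's IOVA manager
 * (private->mm) so later GEM allocations cannot land on top of the
 * loader's 1:1 mappings.
 */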
124 static void rockchip_drm_release_reserve_vm(struct drm_device *drm, struct drm_mm_node *node)
125 {
126 struct rockchip_drm_private *private = drm->dev_private;
127
128 mutex_lock(&private->mm_lock);
129 if (drm_mm_node_allocated(node))
130 drm_mm_remove_node(node);
131 mutex_unlock(&private->mm_lock);
132 }
133
134 static int rockchip_drm_reserve_vm(struct drm_device *drm, struct drm_mm *mm,
135 struct drm_mm_node *node, u64 size, u64 offset)
136 {
137 struct rockchip_drm_private *private = drm->dev_private;
138 int ret;
139
140 node->size = size;
141 node->start = offset;
142 node->color = 0;
143 mutex_lock(&private->mm_lock);
144 ret = drm_mm_reserve_node(mm, node);
145 mutex_unlock(&private->mm_lock);
146
147 return ret;
148 }
149
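/*
 * Give a reserved physical range back to the page allocator page by page,
 * optionally poisoning each page first; this mirrors the core
 * free_reserved_area() helper but takes physical addresses.
 */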
150 static unsigned long
151 rockchip_drm_free_reserved_area(phys_addr_t start, phys_addr_t end, int poison, const char *s)
152 {
153 unsigned long pages = 0;
154
155 start = ALIGN_DOWN(start, PAGE_SIZE);
156 end = PAGE_ALIGN(end);
157 for (; start < end; start += PAGE_SIZE) {
158 struct page *page = phys_to_page(start);
159 void *direct_map_addr;
160
161 if (!pfn_valid(__phys_to_pfn(start)))
162 continue;
163
164 /*
165 * 'direct_map_addr' might be different from 'start'
166 * because some architectures' virt_to_page()
167 * works with aliases. Getting the direct map
168 * address ensures that we get a _writeable_
169 * alias for the memset().
170 */
171 direct_map_addr = page_address(page);
172 /*
173 * Perform a kasan-unchecked memset() since this memory
174 * has not been initialized.
175 */
176 direct_map_addr = kasan_reset_tag(direct_map_addr);
177 if ((unsigned int)poison <= 0xFF)
178 memset(direct_map_addr, poison, PAGE_SIZE);
179
180 free_reserved_page(page);
181 pages++;
182 }
183
184 if (pages && s)
185 pr_info("Freeing %s memory: %ldK\n", s, pages << (PAGE_SHIFT - 10));
186
187 return pages;
188 }
189
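/*
 * Drop the reference taken on the logo in init_loader_memory(). When the
 * count reaches zero, tear down the IOMMU mapping and reserved IOVA and
 * return the loader's logo memory to the buddy allocator.
 */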
190 void rockchip_free_loader_memory(struct drm_device *drm)
191 {
192 struct rockchip_drm_private *private = drm->dev_private;
193 struct rockchip_logo *logo;
194
195 if (!private || !private->logo || --private->logo->count)
196 return;
197
198 logo = private->logo;
199
200 if (private->domain) {
201 u32 pg_size = 1UL << __ffs(private->domain->pgsize_bitmap);
202
203 iommu_unmap(private->domain, logo->dma_addr, ALIGN(logo->size, pg_size));
204 rockchip_drm_release_reserve_vm(drm, &logo->logo_reserved_node);
205 }
206
207 memblock_free(logo->start, logo->size);
208 rockchip_drm_free_reserved_area(logo->start, logo->start + logo->size,
209 -1, "drm_logo");
210 kfree(logo);
211 private->logo = NULL;
212 private->loader_protect = false;
213 }
214
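/*
 * Take over the "drm-logo" (and optional "drm-cubic-lut") reserved-memory
 * regions left by the loader: reserve matching IOVA ranges and create 1:1
 * IOMMU mappings so the display controller can keep scanning out the
 * loader's framebuffer during the handover.
 */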
215 static int init_loader_memory(struct drm_device *drm_dev)
216 {
217 struct rockchip_drm_private *private = drm_dev->dev_private;
218 struct rockchip_logo *logo;
219 struct device_node *np = drm_dev->dev->of_node;
220 struct device_node *node;
221 phys_addr_t start, size;
222 u32 pg_size = PAGE_SIZE;
223 struct resource res;
224 int ret, idx;
225
226 idx = of_property_match_string(np, "memory-region-names", "drm-logo");
227 if (idx >= 0)
228 node = of_parse_phandle(np, "memory-region", idx);
229 else
230 node = of_parse_phandle(np, "logo-memory-region", 0);
231 if (!node)
232 return -ENOMEM;
233
234 ret = of_address_to_resource(node, 0, &res);
235 if (ret)
236 return ret;
237 if (private->domain)
238 pg_size = 1UL << __ffs(private->domain->pgsize_bitmap);
239 start = ALIGN_DOWN(res.start, pg_size);
240 size = resource_size(&res);
241 if (!size)
242 return -ENOMEM;
243 if (!IS_ALIGNED(res.start, PAGE_SIZE) || !IS_ALIGNED(size, PAGE_SIZE))
244 DRM_ERROR("Reserved logo memory should be aligned as:0x%lx, cureent is:start[%pad] size[%pad]\n",
245 PAGE_SIZE, &res.start, &size);
246 if (pg_size != PAGE_SIZE)
247 DRM_WARN("iommu page size[0x%x] isn't equal to OS page size[0x%lx]\n", pg_size, PAGE_SIZE);
248
249 logo = kmalloc(sizeof(*logo), GFP_KERNEL);
250 if (!logo)
251 return -ENOMEM;
252
253 logo->kvaddr = phys_to_virt(start);
254
255 if (private->domain) {
256 ret = rockchip_drm_reserve_vm(drm_dev, &private->mm, &logo->logo_reserved_node, size, start);
257 if (ret)
258 dev_err(drm_dev->dev, "failed to reserve vm for logo memory\n");
259 ret = iommu_map(private->domain, start, start, ALIGN(size, pg_size),
260 IOMMU_WRITE | IOMMU_READ);
261 if (ret) {
262 dev_err(drm_dev->dev, "failed to create 1v1 mapping\n");
263 goto err_free_logo;
264 }
265 }
266
267 logo->dma_addr = start;
268 logo->start = res.start;
269 logo->size = size;
270 logo->count = 1;
271 private->logo = logo;
272
273 idx = of_property_match_string(np, "memory-region-names", "drm-cubic-lut");
274 if (idx < 0)
275 return 0;
276
277 node = of_parse_phandle(np, "memory-region", idx);
278 if (!node)
279 return -ENOMEM;
280
281 ret = of_address_to_resource(node, 0, &res);
282 if (ret)
283 return ret;
284 start = ALIGN_DOWN(res.start, pg_size);
285 size = resource_size(&res);
286 if (!size)
287 return 0;
288 if (!IS_ALIGNED(res.start, PAGE_SIZE) || !IS_ALIGNED(size, PAGE_SIZE))
289 DRM_ERROR("Reserved drm cubic memory should be aligned as:0x%lx, cureent is:start[%pad] size[%pad]\n",
290 PAGE_SIZE, &res.start, &size);
291
292 private->cubic_lut_kvaddr = phys_to_virt(start);
293 if (private->domain) {
294 private->clut_reserved_node = kmalloc(sizeof(struct drm_mm_node), GFP_KERNEL);
295 if (!private->clut_reserved_node)
296 return -ENOMEM;
297
298 ret = rockchip_drm_reserve_vm(drm_dev, &private->mm, private->clut_reserved_node, size, start);
299 if (ret)
300 dev_err(drm_dev->dev, "failed to reserve vm for clut memory\n");
301
302 ret = iommu_map(private->domain, start, start, ALIGN(size, pg_size),
303 IOMMU_WRITE | IOMMU_READ);
304 if (ret) {
305 dev_err(drm_dev->dev, "failed to create 1v1 mapping for cubic lut\n");
306 goto err_free_clut;
307 }
308 }
309 private->cubic_lut_dma_addr = start;
310
311 return 0;
312
313 err_free_clut:
314 rockchip_drm_release_reserve_vm(drm_dev, private->clut_reserved_node);
315 kfree(private->clut_reserved_node);
316 private->clut_reserved_node = NULL;
317 err_free_logo:
318 rockchip_drm_release_reserve_vm(drm_dev, &logo->logo_reserved_node);
319 kfree(logo);
320
321 return ret;
322 }
323
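/*
 * Wrap the loader logo memory in a drm_framebuffer described by the
 * "logo,offset/width/height/bpp" properties of a route node.
 */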
324 static struct drm_framebuffer *
325 get_framebuffer_by_node(struct drm_device *drm_dev, struct device_node *node)
326 {
327 struct rockchip_drm_private *private = drm_dev->dev_private;
328 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
329 u32 val;
330 int bpp;
331
332 if (WARN_ON(!private->logo))
333 return NULL;
334
335 if (of_property_read_u32(node, "logo,offset", &val)) {
336 dev_err(drm_dev->dev, "%s: failed to get logo,offset\n", node->full_name);
337 return NULL;
338 }
339 mode_cmd.offsets[0] = val;
340
341 if (of_property_read_u32(node, "logo,width", &val)) {
342 dev_err(drm_dev->dev, "%s: failed to get logo,width\n", node->full_name);
343 return NULL;
344 }
345 mode_cmd.width = val;
346
347 if (of_property_read_u32(node, "logo,height", &val)) {
348 dev_err(drm_dev->dev, "%s: failed to get logo,height\n", node->full_name);
349 return NULL;
350 }
351 mode_cmd.height = val;
352
353 if (of_property_read_u32(node, "logo,bpp", &val)) {
354 dev_err(drm_dev->dev, "%s: failed to get logo,bpp\n", node->full_name);
355 return NULL;
356 }
357 bpp = val;
358
359 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * bpp, 32) / 8;
360
361 switch (bpp) {
362 case 16:
363 mode_cmd.pixel_format = DRM_FORMAT_RGB565;
364 break;
365 case 24:
366 mode_cmd.pixel_format = DRM_FORMAT_RGB888;
367 break;
368 case 32:
369 mode_cmd.pixel_format = DRM_FORMAT_XRGB8888;
370 break;
371 default:
372 dev_err(drm_dev->dev, "%s: unsupported to logo bpp %d\n", node->full_name, bpp);
373 return NULL;
374 }
375
376 return rockchip_drm_logo_fb_alloc(drm_dev, &mode_cmd, private->logo);
377 }
378
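/*
 * Parse the optional "post-csc,*" properties of a route node; any value
 * that is missing falls back to 256.
 */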
379 static void of_parse_post_csc_info(struct device_node *route, struct rockchip_drm_mode_set *set)
380 {
381 int val;
382
383 if (!of_property_read_u32(route, "post-csc,enable", &val))
384 set->csc.csc_enable = val;
385 else
386 set->csc.csc_enable = 0;
387
388 if (!set->csc.csc_enable)
389 return;
390
391 if (!of_property_read_u32(route, "post-csc,hue", &val))
392 set->csc.hue = val;
393 else
394 set->csc.hue = 256;
395
396 if (!of_property_read_u32(route, "post-csc,saturation", &val))
397 set->csc.saturation = val;
398 else
399 set->csc.saturation = 256;
400
401 if (!of_property_read_u32(route, "post-csc,contrast", &val))
402 set->csc.contrast = val;
403 else
404 set->csc.contrast = 256;
405
406 if (!of_property_read_u32(route, "post-csc,brightness", &val))
407 set->csc.brightness = val;
408 else
409 set->csc.brightness = 256;
410
411 if (!of_property_read_u32(route, "post-csc,r-gain", &val))
412 set->csc.r_gain = val;
413 else
414 set->csc.r_gain = 256;
415
416 if (!of_property_read_u32(route, "post-csc,g-gain", &val))
417 set->csc.g_gain = val;
418 else
419 set->csc.g_gain = 256;
420
421 if (!of_property_read_u32(route, "post-csc,b-gain", &val))
422 set->csc.b_gain = val;
423 else
424 set->csc.b_gain = 256;
425
426 if (!of_property_read_u32(route, "post-csc,r-offset", &val))
427 set->csc.r_offset = val;
428 else
429 set->csc.r_offset = 256;
430
431 if (!of_property_read_u32(route, "post-csc,g-offset", &val))
432 set->csc.g_offset = val;
433 else
434 set->csc.g_offset = 256;
435
436 if (!of_property_read_u32(route, "post-csc,b-offset", &val))
437 set->csc.b_offset = val;
438 else
439 set->csc.b_offset = 256;
440 }
441
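/*
 * Translate one "route" child node (describing what the loader enabled)
 * into a rockchip_drm_mode_set: resolve the connected CRTC and connector,
 * wrap the logo memory in a framebuffer and read the video timing,
 * overscan, BCSH and post-CSC properties.
 */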
442 static struct rockchip_drm_mode_set *
443 of_parse_display_resource(struct drm_device *drm_dev, struct device_node *route)
444 {
445 struct rockchip_drm_private *private = drm_dev->dev_private;
446 struct rockchip_drm_mode_set *set;
447 struct device_node *connect;
448 struct drm_framebuffer *fb;
449 struct rockchip_drm_sub_dev *sub_dev;
450 struct drm_crtc *crtc;
451 const char *string;
452 u32 val;
453
454 connect = of_parse_phandle(route, "connect", 0);
455 if (!connect)
456 return NULL;
457
458 fb = get_framebuffer_by_node(drm_dev, route);
459 if (IS_ERR_OR_NULL(fb))
460 return NULL;
461
462 crtc = find_crtc_by_node(drm_dev, connect);
463
464 sub_dev = find_sub_dev_by_node(drm_dev, connect);
465
466 if (!sub_dev)
467 sub_dev = find_sub_dev_by_bridge(drm_dev, connect);
468
469 if (!crtc || !sub_dev) {
470 dev_warn(drm_dev->dev,
471 "No available crtc or connector for display");
472 drm_framebuffer_put(fb);
473 return NULL;
474 }
475
476 set = kzalloc(sizeof(*set), GFP_KERNEL);
477 if (!set)
478 return NULL;
479
480 if (!of_property_read_u32(route, "video,clock", &val))
481 set->clock = val;
482
483 if (!of_property_read_u32(route, "video,hdisplay", &val))
484 set->hdisplay = val;
485
486 if (!of_property_read_u32(route, "video,vdisplay", &val))
487 set->vdisplay = val;
488
489 if (!of_property_read_u32(route, "video,crtc_hsync_end", &val))
490 set->crtc_hsync_end = val;
491
492 if (!of_property_read_u32(route, "video,crtc_vsync_end", &val))
493 set->crtc_vsync_end = val;
494
495 if (!of_property_read_u32(route, "video,vrefresh", &val))
496 set->vrefresh = val;
497
498 if (!of_property_read_u32(route, "video,flags", &val))
499 set->flags = val;
500
501 if (!of_property_read_u32(route, "video,aspect_ratio", &val))
502 set->picture_aspect_ratio = val;
503
504 if (!of_property_read_u32(route, "overscan,left_margin", &val))
505 set->left_margin = val;
506
507 if (!of_property_read_u32(route, "overscan,right_margin", &val))
508 set->right_margin = val;
509
510 if (!of_property_read_u32(route, "overscan,top_margin", &val))
511 set->top_margin = val;
512
513 if (!of_property_read_u32(route, "overscan,bottom_margin", &val))
514 set->bottom_margin = val;
515
516 if (!of_property_read_u32(route, "bcsh,brightness", &val))
517 set->brightness = val;
518 else
519 set->brightness = 50;
520
521 if (!of_property_read_u32(route, "bcsh,contrast", &val))
522 set->contrast = val;
523 else
524 set->contrast = 50;
525
526 if (!of_property_read_u32(route, "bcsh,saturation", &val))
527 set->saturation = val;
528 else
529 set->saturation = 50;
530
531 if (!of_property_read_u32(route, "bcsh,hue", &val))
532 set->hue = val;
533 else
534 set->hue = 50;
535
536 of_parse_post_csc_info(route, set);
537
538 set->force_output = of_property_read_bool(route, "force-output");
539
540 if (!of_property_read_u32(route, "cubic_lut,offset", &val)) {
541 private->cubic_lut[crtc->index].enable = true;
542 private->cubic_lut[crtc->index].offset = val;
543 }
544
545 set->ratio = 1;
546 if (!of_property_read_string(route, "logo,mode", &string) &&
547 !strcmp(string, "fullscreen"))
548 set->ratio = 0;
549
550 set->fb = fb;
551 set->crtc = crtc;
552 set->sub_dev = sub_dev;
553
554 return set;
555 }
556
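/*
 * Probe the connector's modes, similar to the generic single-connector
 * probe helper, but with a force_output path that reports the connector
 * as connected and adds the driver's fallback modes.
 */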
557 static int rockchip_drm_fill_connector_modes(struct drm_connector *connector,
558 uint32_t maxX, uint32_t maxY,
559 bool force_output)
560 {
561 struct drm_device *dev = connector->dev;
562 struct drm_display_mode *mode;
563 const struct drm_connector_helper_funcs *connector_funcs =
564 connector->helper_private;
565 int count = 0;
566 bool verbose_prune = true;
567 enum drm_connector_status old_status;
568
569 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
570
571 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
572 connector->name);
573 /* set all modes to the unverified state */
574 list_for_each_entry(mode, &connector->modes, head)
575 mode->status = MODE_STALE;
576
577 if (force_output)
578 connector->force = DRM_FORCE_ON;
579 if (connector->force) {
580 if (connector->force == DRM_FORCE_ON ||
581 connector->force == DRM_FORCE_ON_DIGITAL)
582 connector->status = connector_status_connected;
583 else
584 connector->status = connector_status_disconnected;
585 if (connector->funcs->force)
586 connector->funcs->force(connector);
587 } else {
588 old_status = connector->status;
589
590 if (connector->funcs->detect)
591 connector->status = connector->funcs->detect(connector, true);
592 else
593 connector->status = connector_status_connected;
594 /*
595 * Normally either the driver's hpd code or the poll loop should
596 * pick up any changes and fire the hotplug event. But if
597 * userspace sneaks in a probe, we might miss a change. Hence
598 * check here, and if anything changed start the hotplug code.
599 */
600 if (old_status != connector->status) {
601 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
602 connector->base.id,
603 connector->name,
604 old_status, connector->status);
605
606 /*
607 * The hotplug event code might call into the fb
608 * helpers, and so expects that we do not hold any
609 * locks. Fire up the poll struct instead, it will
610 * disable itself again.
611 */
612 dev->mode_config.delayed_event = true;
613 if (dev->mode_config.poll_enabled)
614 schedule_delayed_work(&dev->mode_config.output_poll_work,
615 0);
616 }
617 }
618
619 /* Re-enable polling in case the global poll config changed. */
620 if (!dev->mode_config.poll_running)
621 drm_kms_helper_poll_enable(dev);
622
623 dev->mode_config.poll_running = true;
624
625 if (connector->status == connector_status_disconnected) {
626 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
627 connector->base.id, connector->name);
628 drm_connector_update_edid_property(connector, NULL);
629 verbose_prune = false;
630 goto prune;
631 }
632
633 if (!force_output)
634 count = (*connector_funcs->get_modes)(connector);
635
636 if (count == 0 && connector->status == connector_status_connected)
637 count = drm_add_modes_noedid(connector, 4096, 4096);
638 if (force_output)
639 count += rockchip_drm_add_modes_noedid(connector);
640 if (count == 0)
641 goto prune;
642
643 drm_connector_list_update(connector);
644
645 list_for_each_entry(mode, &connector->modes, head) {
646 if (mode->status == MODE_OK)
647 mode->status = drm_mode_validate_driver(dev, mode);
648
649 if (mode->status == MODE_OK)
650 mode->status = drm_mode_validate_size(mode, maxX, maxY);
651
652 /*
653 * if (mode->status == MODE_OK)
654 * mode->status = drm_mode_validate_flag(mode, mode_flags);
655 */
656 if (mode->status == MODE_OK && connector_funcs->mode_valid)
657 mode->status = connector_funcs->mode_valid(connector,
658 mode);
659 if (mode->status == MODE_OK)
660 mode->status = drm_mode_validate_ycbcr420(mode,
661 connector);
662 }
663
664 prune:
665 drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
666
667 if (list_empty(&connector->modes))
668 return 0;
669
670 drm_mode_sort(&connector->modes);
671
672 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
673 connector->name);
674 list_for_each_entry(mode, &connector->modes, head) {
675 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
676 drm_mode_debug_printmodeline(mode);
677 }
678
679 return count;
680 }
681
682 /*
683 * For connectors that support multiple encoders, either the
684 * .atomic_best_encoder() or .best_encoder() operation must be implemented.
685 */
686 static struct drm_encoder *
687 rockchip_drm_connector_get_single_encoder(struct drm_connector *connector)
688 {
689 struct drm_encoder *encoder;
690
691 WARN_ON(hweight32(connector->possible_encoders) > 1);
692 drm_connector_for_each_possible_encoder(connector, encoder)
693 return encoder;
694
695 return NULL;
696 }
697
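/*
 * Match the mode configured by the loader against the connector's probed
 * modes and, on success, populate the atomic state (connector, CRTC and
 * primary plane) so the first commit reproduces the loader's setup.
 */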
698 static int setup_initial_state(struct drm_device *drm_dev,
699 struct drm_atomic_state *state,
700 struct rockchip_drm_mode_set *set)
701 {
702 struct rockchip_drm_private *priv = drm_dev->dev_private;
703 struct drm_connector *connector = set->sub_dev->connector;
704 struct drm_crtc *crtc = set->crtc;
705 struct drm_crtc_state *crtc_state;
706 struct drm_connector_state *conn_state;
707 struct drm_plane_state *primary_state;
708 struct drm_display_mode *mode = NULL;
709 const struct drm_connector_helper_funcs *funcs;
710 int pipe = drm_crtc_index(crtc);
711 bool is_crtc_enabled = true;
712 int hdisplay, vdisplay;
713 int fb_width, fb_height;
714 int found = 0, match = 0;
715 int num_modes;
716 int ret = 0;
717 struct rockchip_crtc_state *s = NULL;
718
719 if (!set->hdisplay || !set->vdisplay || !set->vrefresh)
720 is_crtc_enabled = false;
721
722 crtc->state->state = state;
723
724 conn_state = drm_atomic_get_connector_state(state, connector);
725 if (IS_ERR(conn_state))
726 return PTR_ERR(conn_state);
727
728 funcs = connector->helper_private;
729
730 if (funcs->best_encoder)
731 conn_state->best_encoder = funcs->best_encoder(connector);
732 else
733 conn_state->best_encoder = rockchip_drm_connector_get_single_encoder(connector);
734
735 if (set->sub_dev->loader_protect) {
736 ret = set->sub_dev->loader_protect(conn_state->best_encoder, true);
737 if (ret) {
738 dev_err(drm_dev->dev,
739 "connector[%s] loader protect failed\n",
740 connector->name);
741 return ret;
742 }
743 }
744
745 num_modes = rockchip_drm_fill_connector_modes(connector, 7680, 7680, set->force_output);
746 if (!num_modes) {
747 dev_err(drm_dev->dev, "connector[%s] can't found any modes\n",
748 connector->name);
749 ret = -EINVAL;
750 goto error_conn;
751 }
752
753 list_for_each_entry(mode, &connector->modes, head) {
754 if (mode->clock == set->clock &&
755 mode->hdisplay == set->hdisplay &&
756 mode->vdisplay == set->vdisplay &&
757 mode->crtc_hsync_end == set->crtc_hsync_end &&
758 mode->crtc_vsync_end == set->crtc_vsync_end &&
759 drm_mode_vrefresh(mode) == set->vrefresh &&
760 /* we only care about the mode flags covered by DRM_MODE_FLAG_ALL,
761 * so compare mode->flags against set->flags & DRM_MODE_FLAG_ALL.
762 */
763 mode->flags == (set->flags & DRM_MODE_FLAG_ALL) &&
764 mode->picture_aspect_ratio == set->picture_aspect_ratio) {
765 found = 1;
766 match = 1;
767 break;
768 }
769 }
770
771 if (!found) {
772 ret = -EINVAL;
773 connector->status = connector_status_disconnected;
774 dev_err(drm_dev->dev, "connector[%s] can't found any match mode\n",
775 connector->name);
776 DRM_INFO("%s support modes:\n\n", connector->name);
777 list_for_each_entry(mode, &connector->modes, head) {
778 DRM_INFO(DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
779 }
780 DRM_INFO("uboot set mode: h/v display[%d,%d] h/v sync_end[%d,%d] vfresh[%d], flags[0x%x], aspect_ratio[%d]\n",
781 set->hdisplay, set->vdisplay, set->crtc_hsync_end, set->crtc_vsync_end,
782 set->vrefresh, set->flags, set->picture_aspect_ratio);
783 goto error_conn;
784 }
785
786 conn_state->tv.brightness = set->brightness;
787 conn_state->tv.contrast = set->contrast;
788 conn_state->tv.saturation = set->saturation;
789 conn_state->tv.hue = set->hue;
790 set->mode = mode;
791 crtc_state = drm_atomic_get_crtc_state(state, crtc);
792 if (IS_ERR(crtc_state)) {
793 ret = PTR_ERR(crtc_state);
794 goto error_conn;
795 }
796
797 drm_mode_copy(&crtc_state->adjusted_mode, mode);
798 if (!match || !is_crtc_enabled) {
799 set->mode_changed = true;
800 } else {
801 ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
802 if (ret)
803 goto error_conn;
804
805 mode->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
806 ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
807 if (ret)
808 goto error_conn;
809
810 crtc_state->active = true;
811
812 if (priv->crtc_funcs[pipe] &&
813 priv->crtc_funcs[pipe]->loader_protect)
814 priv->crtc_funcs[pipe]->loader_protect(crtc, true, &set->csc);
815 }
816
817 if (!set->fb) {
818 ret = 0;
819 goto error_crtc;
820 }
821 primary_state = drm_atomic_get_plane_state(state, crtc->primary);
822 if (IS_ERR(primary_state)) {
823 ret = PTR_ERR(primary_state);
824 goto error_crtc;
825 }
826
827 hdisplay = mode->hdisplay;
828 vdisplay = mode->vdisplay;
829 fb_width = set->fb->width;
830 fb_height = set->fb->height;
831
832 primary_state->crtc = crtc;
833 primary_state->src_x = 0;
834 primary_state->src_y = 0;
835 primary_state->src_w = fb_width << 16;
836 primary_state->src_h = fb_height << 16;
837 if (set->ratio) {
838 if (set->fb->width >= hdisplay) {
839 primary_state->crtc_x = 0;
840 primary_state->crtc_w = hdisplay;
841 } else {
842 primary_state->crtc_x = (hdisplay - fb_width) / 2;
843 primary_state->crtc_w = set->fb->width;
844 }
845
846 if (set->fb->height >= vdisplay) {
847 primary_state->crtc_y = 0;
848 primary_state->crtc_h = vdisplay;
849 } else {
850 primary_state->crtc_y = (vdisplay - fb_height) / 2;
851 primary_state->crtc_h = fb_height;
852 }
853 } else {
854 primary_state->crtc_x = 0;
855 primary_state->crtc_y = 0;
856 primary_state->crtc_w = hdisplay;
857 primary_state->crtc_h = vdisplay;
858 }
859 s = to_rockchip_crtc_state(crtc->state);
860 s->output_type = connector->connector_type;
861
862 return 0;
863
864 error_crtc:
865 if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->loader_protect)
866 priv->crtc_funcs[pipe]->loader_protect(crtc, false, NULL);
867 error_conn:
868 if (set->sub_dev->loader_protect)
869 set->sub_dev->loader_protect(conn_state->best_encoder, false);
870
871 return ret;
872 }
873
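/*
 * Fill the duplicated atomic state for one mode_set: attach the connector
 * and mode to the CRTC (or, if the mode is unchanged, re-run the encoder
 * and bridge mode_set hooks) and put the logo framebuffer on the primary
 * plane.
 */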
874 static int update_state(struct drm_device *drm_dev,
875 struct drm_atomic_state *state,
876 struct rockchip_drm_mode_set *set,
877 unsigned int *plane_mask)
878 {
879 struct drm_crtc *crtc = set->crtc;
880 struct drm_connector *connector = set->sub_dev->connector;
881 struct drm_display_mode *mode = set->mode;
882 struct drm_plane_state *primary_state;
883 struct drm_crtc_state *crtc_state;
884 struct drm_connector_state *conn_state;
885 int ret;
886 struct rockchip_crtc_state *s;
887
888 crtc_state = drm_atomic_get_crtc_state(state, crtc);
889 if (IS_ERR(crtc_state))
890 return PTR_ERR(crtc_state);
891 conn_state = drm_atomic_get_connector_state(state, connector);
892 if (IS_ERR(conn_state))
893 return PTR_ERR(conn_state);
894 s = to_rockchip_crtc_state(crtc_state);
895 s->left_margin = set->left_margin;
896 s->right_margin = set->right_margin;
897 s->top_margin = set->top_margin;
898 s->bottom_margin = set->bottom_margin;
899
900 if (set->mode_changed) {
901 ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
902 if (ret)
903 return ret;
904
905 ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
906 if (ret)
907 return ret;
908
909 crtc_state->active = true;
910 } else {
911 const struct drm_encoder_helper_funcs *encoder_helper_funcs;
912 const struct drm_connector_helper_funcs *connector_helper_funcs;
913 struct drm_encoder *encoder;
914 struct drm_bridge *bridge;
915
916 connector_helper_funcs = connector->helper_private;
917 if (!connector_helper_funcs)
918 return -ENXIO;
919 if (connector_helper_funcs->best_encoder)
920 encoder = connector_helper_funcs->best_encoder(connector);
921 else
922 encoder = rockchip_drm_connector_get_single_encoder(connector);
923 if (!encoder)
924 return -ENXIO;
925 encoder_helper_funcs = encoder->helper_private;
926 if (!encoder_helper_funcs->atomic_check)
927 return -ENXIO;
928 ret = encoder_helper_funcs->atomic_check(encoder, crtc->state,
929 conn_state);
930 if (ret)
931 return ret;
932
933 if (encoder_helper_funcs->atomic_mode_set)
934 encoder_helper_funcs->atomic_mode_set(encoder,
935 crtc_state,
936 conn_state);
937 else if (encoder_helper_funcs->mode_set)
938 encoder_helper_funcs->mode_set(encoder, mode, mode);
939
940 bridge = drm_bridge_chain_get_first_bridge(encoder);
941 drm_bridge_chain_mode_set(bridge, mode, mode);
942 }
943
944 primary_state = drm_atomic_get_plane_state(state, crtc->primary);
945 if (IS_ERR(primary_state))
946 return PTR_ERR(primary_state);
947
948 crtc_state->plane_mask = 1 << drm_plane_index(crtc->primary);
949 *plane_mask |= crtc_state->plane_mask;
950
951
952 drm_atomic_set_fb_for_plane(primary_state, set->fb);
953 drm_framebuffer_put(set->fb);
954 ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
955
956 return ret;
957 }
958
959 static void rockchip_drm_copy_mode_from_mode_set(struct drm_display_mode *mode,
960 struct rockchip_drm_mode_set *set)
961 {
962 mode->clock = set->clock;
963 mode->hdisplay = set->hdisplay;
964 mode->vdisplay = set->vdisplay;
965 mode->crtc_hsync_end = set->crtc_hsync_end;
966 mode->crtc_vsync_end = set->crtc_vsync_end;
967 mode->flags = set->flags & DRM_MODE_FLAG_ALL;
968 mode->picture_aspect_ratio = set->picture_aspect_ratio;
969 }
970
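/*
 * Entry point of the logo handover: parse the children of the "route"
 * node, build an atomic state mirroring what the loader programmed,
 * commit it, and free the loader memory once the commit has succeeded.
 */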
971 void rockchip_drm_show_logo(struct drm_device *drm_dev)
972 {
973 struct drm_atomic_state *state, *old_state;
974 struct device_node *np = drm_dev->dev->of_node;
975 struct drm_mode_config *mode_config = &drm_dev->mode_config;
976 struct rockchip_drm_private *private = drm_dev->dev_private;
977 struct device_node *root, *route;
978 struct rockchip_drm_mode_set *set, *tmp, *unset;
979 struct list_head mode_set_list;
980 struct list_head mode_unset_list;
981 unsigned int plane_mask = 0;
982 struct drm_crtc *crtc;
983 int ret, i;
984
985 root = of_get_child_by_name(np, "route");
986 if (!root) {
987 dev_warn(drm_dev->dev, "failed to parse resources for logo display\n");
988 return;
989 }
990
991 if (init_loader_memory(drm_dev)) {
992 dev_warn(drm_dev->dev, "failed to parse loader memory\n");
993 return;
994 }
995
996 INIT_LIST_HEAD(&mode_set_list);
997 INIT_LIST_HEAD(&mode_unset_list);
998 drm_modeset_lock_all(drm_dev);
999 state = drm_atomic_state_alloc(drm_dev);
1000 if (!state) {
1001 dev_err(drm_dev->dev, "failed to alloc atomic state for logo display\n");
1002 ret = -ENOMEM;
1003 goto err_unlock;
1004 }
1005
1006 state->acquire_ctx = mode_config->acquire_ctx;
1007
1008 for_each_child_of_node(root, route) {
1009 if (!of_device_is_available(route))
1010 continue;
1011
1012 set = of_parse_display_resource(drm_dev, route);
1013 if (!set)
1014 continue;
1015
1016 if (setup_initial_state(drm_dev, state, set)) {
1017 drm_framebuffer_put(set->fb);
1018 INIT_LIST_HEAD(&set->head);
1019 list_add_tail(&set->head, &mode_unset_list);
1020 continue;
1021 }
1022
1023 INIT_LIST_HEAD(&set->head);
1024 list_add_tail(&set->head, &mode_set_list);
1025 }
1026
1027 /*
1028 * mode_unset_list stores the unconnected routes; if a route's crtc
1029 * isn't used by any connected route, we should close it.
1030 */
1031 list_for_each_entry_safe(unset, tmp, &mode_unset_list, head) {
1032 struct rockchip_drm_mode_set *tmp_set;
1033 int find_used_crtc = 0;
1034
1035 list_for_each_entry_safe(set, tmp_set, &mode_set_list, head) {
1036 if (set->crtc == unset->crtc) {
1037 find_used_crtc = 1;
1038 continue;
1039 }
1040 }
1041
1042 if (!find_used_crtc) {
1043 struct drm_crtc *crtc = unset->crtc;
1044 struct drm_crtc_state *crtc_state;
1045 int pipe = drm_crtc_index(crtc);
1046 struct rockchip_drm_private *priv =
1047 drm_dev->dev_private;
1048
1049 /*
1050 * The display timing information in mode_set is parsed from the dts,
1051 * which is written by U-Boot. If the mode_set is added to
1052 * mode_unset_list, it should be copied into crtc_state->adjusted_mode,
1053 * so that loader_protect() can check the splice_mode flag.
1054 */
1055 if (unset->hdisplay && unset->vdisplay) {
1056 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1057 if (!IS_ERR(crtc_state))
1058 rockchip_drm_copy_mode_from_mode_set(&crtc_state->adjusted_mode,
1059 unset);
1060 if (priv->crtc_funcs[pipe] &&
1061 priv->crtc_funcs[pipe]->loader_protect)
1062 priv->crtc_funcs[pipe]->loader_protect(crtc, true,
1063 &set->csc);
1064 priv->crtc_funcs[pipe]->crtc_close(crtc);
1065 if (priv->crtc_funcs[pipe] &&
1066 priv->crtc_funcs[pipe]->loader_protect)
1067 priv->crtc_funcs[pipe]->loader_protect(crtc, false, NULL);
1068 }
1069 }
1070
1071 list_del(&unset->head);
1072 kfree(unset);
1073 }
1074
1075 if (list_empty(&mode_set_list)) {
1076 dev_warn(drm_dev->dev, "can't not find any logo display\n");
1077 ret = -ENXIO;
1078 goto err_free_state;
1079 }
1080
1081 /*
1082 * This state saves the initial device status. Swap it into the drm
1083 * device as the old state, so that when a new state comes in it can
1084 * be compared against this one to decide what needs to be updated.
1085 */
1086 WARN_ON(drm_atomic_helper_swap_state(state, false));
1087 drm_atomic_state_put(state);
1088 old_state = drm_atomic_helper_duplicate_state(drm_dev,
1089 mode_config->acquire_ctx);
1090 if (IS_ERR(old_state)) {
1091 dev_err(drm_dev->dev, "failed to duplicate atomic state for logo display\n");
1092 ret = PTR_ERR_OR_ZERO(old_state);
1093 goto err_free_state;
1094 }
1095
1096 state = drm_atomic_helper_duplicate_state(drm_dev,
1097 mode_config->acquire_ctx);
1098 if (IS_ERR(state)) {
1099 dev_err(drm_dev->dev, "failed to duplicate atomic state for logo display\n");
1100 ret = PTR_ERR_OR_ZERO(state);
1101 goto err_free_old_state;
1102 }
1103 state->acquire_ctx = mode_config->acquire_ctx;
1104
1105 list_for_each_entry(set, &mode_set_list, head)
1106 /*
1107 * We don't expect update_state() to fail.
1108 */
1109 WARN_ON(update_state(drm_dev, state, set, &plane_mask));
1110
1111 for (i = 0; i < state->num_connector; i++) {
1112 if (state->connectors[i].new_state->connector->status !=
1113 connector_status_connected)
1114 state->connectors[i].new_state->best_encoder = NULL;
1115 }
1116
1117 ret = drm_atomic_commit(state);
1118 /*
1119 * TODO:
1120 * drm_atomic_clean_old_fb(drm_dev, plane_mask, ret);
1121 */
1122
1123 list_for_each_entry_safe(set, tmp, &mode_set_list, head) {
1124 if (set->force_output)
1125 set->sub_dev->connector->force = DRM_FORCE_UNSPECIFIED;
1126 list_del(&set->head);
1127 kfree(set);
1128 }
1129
1130 /*
1131 * Is it possible to get a deadlock here?
1132 */
1133 WARN_ON(ret == -EDEADLK);
1134
1135 if (ret) {
1136 /*
1137 * restore display status if atomic commit failed.
1138 */
1139 WARN_ON(drm_atomic_helper_swap_state(old_state, false));
1140 goto err_free_state;
1141 }
1142
1143 rockchip_free_loader_memory(drm_dev);
1144 drm_atomic_state_put(old_state);
1145 drm_atomic_state_put(state);
1146
1147 private->loader_protect = true;
1148 drm_modeset_unlock_all(drm_dev);
1149
1150 if (private->fbdev_helper && private->fbdev_helper->fb) {
1151 drm_for_each_crtc(crtc, drm_dev) {
1152 struct rockchip_crtc_state *s = NULL;
1153
1154 s = to_rockchip_crtc_state(crtc->state);
1155 if (is_support_hotplug(s->output_type))
1156 drm_framebuffer_get(private->fbdev_helper->fb);
1157 }
1158 }
1159
1160 return;
1161 err_free_old_state:
1162 drm_atomic_state_put(old_state);
1163 err_free_state:
1164 drm_atomic_state_put(state);
1165 err_unlock:
1166 drm_modeset_unlock_all(drm_dev);
1167 if (ret)
1168 dev_err(drm_dev->dev, "failed to show kernel logo\n");
1169 }
1170
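/*
 * For built-in kernels, grab the loader-protected display clocks at
 * arch_initcall time and release them at late_initcall, so the clocks
 * (and the boot logo) stay on while the DRM driver probes.
 */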
1171 #ifndef MODULE
1172 static const char *const loader_protect_clocks[] __initconst = {
1173 "hclk_vio",
1174 "hclk_vop",
1175 "hclk_vopb",
1176 "hclk_vopl",
1177 "aclk_vio",
1178 "aclk_vio0",
1179 "aclk_vio1",
1180 "aclk_vop",
1181 "aclk_vopb",
1182 "aclk_vopl",
1183 "aclk_vo_pre",
1184 "aclk_vio_pre",
1185 "dclk_vop",
1186 "dclk_vop0",
1187 "dclk_vop1",
1188 "dclk_vopb",
1189 "dclk_vopl",
1190 };
1191
1192 static struct clk **loader_clocks __initdata;
1193 static int __init rockchip_clocks_loader_protect(void)
1194 {
1195 int nclocks = ARRAY_SIZE(loader_protect_clocks);
1196 struct clk *clk;
1197 int i;
1198
1199 loader_clocks = kcalloc(nclocks, sizeof(void *), GFP_KERNEL);
1200 if (!loader_clocks)
1201 return -ENOMEM;
1202
1203 for (i = 0; i < nclocks; i++) {
1204 clk = __clk_lookup(loader_protect_clocks[i]);
1205
1206 if (clk) {
1207 loader_clocks[i] = clk;
1208 clk_prepare_enable(clk);
1209 }
1210 }
1211
1212 return 0;
1213 }
1214 arch_initcall_sync(rockchip_clocks_loader_protect);
1215
1216 static int __init rockchip_clocks_loader_unprotect(void)
1217 {
1218 int i;
1219
1220 if (!loader_clocks)
1221 return -ENODEV;
1222
1223 for (i = 0; i < ARRAY_SIZE(loader_protect_clocks); i++) {
1224 struct clk *clk = loader_clocks[i];
1225
1226 if (clk)
1227 clk_disable_unprepare(clk);
1228 }
1229 kfree(loader_clocks);
1230
1231 return 0;
1232 }
1233 late_initcall_sync(rockchip_clocks_loader_unprotect);
1234 #endif
1235