xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105 
106 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
107 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
108 
109 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
111 
112 /* Number of bytes in PSP header for firmware. */
113 #define PSP_HEADER_BYTES 0x100
114 
115 /* Number of bytes in PSP footer for firmware. */
116 #define PSP_FOOTER_BYTES 0x100
117 
118 /**
119  * DOC: overview
120  *
121  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
122  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
123  * requests into DC requests, and DC responses into DRM responses.
124  *
125  * The root control structure is &struct amdgpu_display_manager.
126  */
127 
128 /* basic init/fini API */
129 static int amdgpu_dm_init(struct amdgpu_device *adev);
130 static void amdgpu_dm_fini(struct amdgpu_device *adev);
131 
get_subconnector_type(struct dc_link * link)132 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
133 {
134 	switch (link->dpcd_caps.dongle_type) {
135 	case DISPLAY_DONGLE_NONE:
136 		return DRM_MODE_SUBCONNECTOR_Native;
137 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
138 		return DRM_MODE_SUBCONNECTOR_VGA;
139 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
140 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
141 		return DRM_MODE_SUBCONNECTOR_DVID;
142 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
143 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
144 		return DRM_MODE_SUBCONNECTOR_HDMIA;
145 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
146 	default:
147 		return DRM_MODE_SUBCONNECTOR_Unknown;
148 	}
149 }
150 
update_subconnector_property(struct amdgpu_dm_connector * aconnector)151 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
152 {
153 	struct dc_link *link = aconnector->dc_link;
154 	struct drm_connector *connector = &aconnector->base;
155 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
156 
157 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
158 		return;
159 
160 	if (aconnector->dc_sink)
161 		subconnector = get_subconnector_type(link);
162 
163 	drm_object_property_set_value(&connector->base,
164 			connector->dev->mode_config.dp_subconnector_property,
165 			subconnector);
166 }
167 
168 /*
169  * initializes drm_device display related structures, based on the information
170  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
171  * drm_encoder, drm_mode_config
172  *
173  * Returns 0 on success
174  */
175 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
176 /* removes and deallocates the drm structures, created by the above function */
177 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
178 
179 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
180 				struct drm_plane *plane,
181 				unsigned long possible_crtcs,
182 				const struct dc_plane_cap *plane_cap);
183 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
184 			       struct drm_plane *plane,
185 			       uint32_t link_index);
186 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
187 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
188 				    uint32_t link_index,
189 				    struct amdgpu_encoder *amdgpu_encoder);
190 static int amdgpu_dm_encoder_init(struct drm_device *dev,
191 				  struct amdgpu_encoder *aencoder,
192 				  uint32_t link_index);
193 
194 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
195 
196 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
197 				   struct drm_atomic_state *state,
198 				   bool nonblock);
199 
200 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
201 
202 static int amdgpu_dm_atomic_check(struct drm_device *dev,
203 				  struct drm_atomic_state *state);
204 
205 static void handle_cursor_update(struct drm_plane *plane,
206 				 struct drm_plane_state *old_plane_state);
207 
208 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
209 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
212 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213 
214 /*
215  * dm_vblank_get_counter
216  *
217  * @brief
218  * Get counter for number of vertical blanks
219  *
220  * @param
221  * struct amdgpu_device *adev - [in] desired amdgpu device
222  * int disp_idx - [in] which CRTC to get the counter from
223  *
224  * @return
225  * Counter for vertical blanks
226  */
dm_vblank_get_counter(struct amdgpu_device * adev,int crtc)227 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
228 {
229 	if (crtc >= adev->mode_info.num_crtc)
230 		return 0;
231 	else {
232 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
233 
234 		if (acrtc->dm_irq_params.stream == NULL) {
235 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
236 				  crtc);
237 			return 0;
238 		}
239 
240 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
241 	}
242 }
243 
dm_crtc_get_scanoutpos(struct amdgpu_device * adev,int crtc,u32 * vbl,u32 * position)244 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
245 				  u32 *vbl, u32 *position)
246 {
247 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
248 
249 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
250 		return -EINVAL;
251 	else {
252 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
253 
254 		if (acrtc->dm_irq_params.stream ==  NULL) {
255 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
256 				  crtc);
257 			return 0;
258 		}
259 
260 		/*
261 		 * TODO rework base driver to use values directly.
262 		 * for now parse it back into reg-format
263 		 */
264 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
265 					 &v_blank_start,
266 					 &v_blank_end,
267 					 &h_position,
268 					 &v_position);
269 
270 		*position = v_position | (h_position << 16);
271 		*vbl = v_blank_start | (v_blank_end << 16);
272 	}
273 
274 	return 0;
275 }
276 
dm_is_idle(void * handle)277 static bool dm_is_idle(void *handle)
278 {
279 	/* XXX todo */
280 	return true;
281 }
282 
dm_wait_for_idle(void * handle)283 static int dm_wait_for_idle(void *handle)
284 {
285 	/* XXX todo */
286 	return 0;
287 }
288 
dm_check_soft_reset(void * handle)289 static bool dm_check_soft_reset(void *handle)
290 {
291 	return false;
292 }
293 
dm_soft_reset(void * handle)294 static int dm_soft_reset(void *handle)
295 {
296 	/* XXX todo */
297 	return 0;
298 }
299 
300 static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device * adev,int otg_inst)301 get_crtc_by_otg_inst(struct amdgpu_device *adev,
302 		     int otg_inst)
303 {
304 	struct drm_device *dev = adev_to_drm(adev);
305 	struct drm_crtc *crtc;
306 	struct amdgpu_crtc *amdgpu_crtc;
307 
308 	if (otg_inst == -1) {
309 		WARN_ON(1);
310 		return adev->mode_info.crtcs[0];
311 	}
312 
313 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
314 		amdgpu_crtc = to_amdgpu_crtc(crtc);
315 
316 		if (amdgpu_crtc->otg_inst == otg_inst)
317 			return amdgpu_crtc;
318 	}
319 
320 	return NULL;
321 }
322 
amdgpu_dm_vrr_active_irq(struct amdgpu_crtc * acrtc)323 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
324 {
325 	return acrtc->dm_irq_params.freesync_config.state ==
326 		       VRR_STATE_ACTIVE_VARIABLE ||
327 	       acrtc->dm_irq_params.freesync_config.state ==
328 		       VRR_STATE_ACTIVE_FIXED;
329 }
330 
amdgpu_dm_vrr_active(struct dm_crtc_state * dm_state)331 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
332 {
333 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
334 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
335 }
336 
337 /**
338  * dm_pflip_high_irq() - Handle pageflip interrupt
339  * @interrupt_params: ignored
340  *
341  * Handles the pageflip interrupt by notifying all interested parties
342  * that the pageflip has been completed.
343  */
dm_pflip_high_irq(void * interrupt_params)344 static void dm_pflip_high_irq(void *interrupt_params)
345 {
346 	struct amdgpu_crtc *amdgpu_crtc;
347 	struct common_irq_params *irq_params = interrupt_params;
348 	struct amdgpu_device *adev = irq_params->adev;
349 	unsigned long flags;
350 	struct drm_pending_vblank_event *e;
351 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
352 	bool vrr_active;
353 
354 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
355 
356 	/* IRQ could occur when in initial stage */
357 	/* TODO work and BO cleanup */
358 	if (amdgpu_crtc == NULL) {
359 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
360 		return;
361 	}
362 
363 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
364 
365 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
366 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
367 						 amdgpu_crtc->pflip_status,
368 						 AMDGPU_FLIP_SUBMITTED,
369 						 amdgpu_crtc->crtc_id,
370 						 amdgpu_crtc);
371 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
372 		return;
373 	}
374 
375 	/* page flip completed. */
376 	e = amdgpu_crtc->event;
377 	amdgpu_crtc->event = NULL;
378 
379 	if (!e)
380 		WARN_ON(1);
381 
382 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
383 
384 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
385 	if (!vrr_active ||
386 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
387 				      &v_blank_end, &hpos, &vpos) ||
388 	    (vpos < v_blank_start)) {
389 		/* Update to correct count and vblank timestamp if racing with
390 		 * vblank irq. This also updates to the correct vblank timestamp
391 		 * even in VRR mode, as scanout is past the front-porch atm.
392 		 */
393 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
394 
395 		/* Wake up userspace by sending the pageflip event with proper
396 		 * count and timestamp of vblank of flip completion.
397 		 */
398 		if (e) {
399 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
400 
401 			/* Event sent, so done with vblank for this flip */
402 			drm_crtc_vblank_put(&amdgpu_crtc->base);
403 		}
404 	} else if (e) {
405 		/* VRR active and inside front-porch: vblank count and
406 		 * timestamp for pageflip event will only be up to date after
407 		 * drm_crtc_handle_vblank() has been executed from late vblank
408 		 * irq handler after start of back-porch (vline 0). We queue the
409 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
410 		 * updated timestamp and count, once it runs after us.
411 		 *
412 		 * We need to open-code this instead of using the helper
413 		 * drm_crtc_arm_vblank_event(), as that helper would
414 		 * call drm_crtc_accurate_vblank_count(), which we must
415 		 * not call in VRR mode while we are in front-porch!
416 		 */
417 
418 		/* sequence will be replaced by real count during send-out. */
419 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
420 		e->pipe = amdgpu_crtc->crtc_id;
421 
422 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
423 		e = NULL;
424 	}
425 
426 	/* Keep track of vblank of this flip for flip throttling. We use the
427 	 * cooked hw counter, as that one incremented at start of this vblank
428 	 * of pageflip completion, so last_flip_vblank is the forbidden count
429 	 * for queueing new pageflips if vsync + VRR is enabled.
430 	 */
431 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
432 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
433 
434 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
435 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
436 
437 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
438 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
439 			 vrr_active, (int) !e);
440 }
441 
dm_vupdate_high_irq(void * interrupt_params)442 static void dm_vupdate_high_irq(void *interrupt_params)
443 {
444 	struct common_irq_params *irq_params = interrupt_params;
445 	struct amdgpu_device *adev = irq_params->adev;
446 	struct amdgpu_crtc *acrtc;
447 	unsigned long flags;
448 	int vrr_active;
449 
450 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
451 
452 	if (acrtc) {
453 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
454 
455 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
456 			      acrtc->crtc_id,
457 			      vrr_active);
458 
459 		/* Core vblank handling is done here after end of front-porch in
460 		 * vrr mode, as vblank timestamping will give valid results
461 		 * while now done after front-porch. This will also deliver
462 		 * page-flip completion events that have been queued to us
463 		 * if a pageflip happened inside front-porch.
464 		 */
465 		if (vrr_active) {
466 			drm_crtc_handle_vblank(&acrtc->base);
467 
468 			/* BTR processing for pre-DCE12 ASICs */
469 			if (acrtc->dm_irq_params.stream &&
470 			    adev->family < AMDGPU_FAMILY_AI) {
471 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
472 				mod_freesync_handle_v_update(
473 				    adev->dm.freesync_module,
474 				    acrtc->dm_irq_params.stream,
475 				    &acrtc->dm_irq_params.vrr_params);
476 
477 				dc_stream_adjust_vmin_vmax(
478 				    adev->dm.dc,
479 				    acrtc->dm_irq_params.stream,
480 				    &acrtc->dm_irq_params.vrr_params.adjust);
481 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
482 			}
483 		}
484 	}
485 }
486 
487 /**
488  * dm_crtc_high_irq() - Handles CRTC interrupt
489  * @interrupt_params: used for determining the CRTC instance
490  *
491  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
492  * event handler.
493  */
dm_crtc_high_irq(void * interrupt_params)494 static void dm_crtc_high_irq(void *interrupt_params)
495 {
496 	struct common_irq_params *irq_params = interrupt_params;
497 	struct amdgpu_device *adev = irq_params->adev;
498 	struct amdgpu_crtc *acrtc;
499 	unsigned long flags;
500 	int vrr_active;
501 
502 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
503 	if (!acrtc)
504 		return;
505 
506 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
507 
508 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
509 		      vrr_active, acrtc->dm_irq_params.active_planes);
510 
511 	/**
512 	 * Core vblank handling at start of front-porch is only possible
513 	 * in non-vrr mode, as only there vblank timestamping will give
514 	 * valid results while done in front-porch. Otherwise defer it
515 	 * to dm_vupdate_high_irq after end of front-porch.
516 	 */
517 	if (!vrr_active)
518 		drm_crtc_handle_vblank(&acrtc->base);
519 
520 	/**
521 	 * Following stuff must happen at start of vblank, for crc
522 	 * computation and below-the-range btr support in vrr mode.
523 	 */
524 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
525 
526 	/* BTR updates need to happen before VUPDATE on Vega and above. */
527 	if (adev->family < AMDGPU_FAMILY_AI)
528 		return;
529 
530 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
531 
532 	if (acrtc->dm_irq_params.stream &&
533 	    acrtc->dm_irq_params.vrr_params.supported &&
534 	    acrtc->dm_irq_params.freesync_config.state ==
535 		    VRR_STATE_ACTIVE_VARIABLE) {
536 		mod_freesync_handle_v_update(adev->dm.freesync_module,
537 					     acrtc->dm_irq_params.stream,
538 					     &acrtc->dm_irq_params.vrr_params);
539 
540 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
541 					   &acrtc->dm_irq_params.vrr_params.adjust);
542 	}
543 
544 	/*
545 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
546 	 * In that case, pageflip completion interrupts won't fire and pageflip
547 	 * completion events won't get delivered. Prevent this by sending
548 	 * pending pageflip events from here if a flip is still pending.
549 	 *
550 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
551 	 * avoid race conditions between flip programming and completion,
552 	 * which could cause too early flip completion events.
553 	 */
554 	if (adev->family >= AMDGPU_FAMILY_RV &&
555 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
556 	    acrtc->dm_irq_params.active_planes == 0) {
557 		if (acrtc->event) {
558 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
559 			acrtc->event = NULL;
560 			drm_crtc_vblank_put(&acrtc->base);
561 		}
562 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
563 	}
564 
565 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
566 }
567 
dm_set_clockgating_state(void * handle,enum amd_clockgating_state state)568 static int dm_set_clockgating_state(void *handle,
569 		  enum amd_clockgating_state state)
570 {
571 	return 0;
572 }
573 
dm_set_powergating_state(void * handle,enum amd_powergating_state state)574 static int dm_set_powergating_state(void *handle,
575 		  enum amd_powergating_state state)
576 {
577 	return 0;
578 }
579 
580 /* Prototypes of private functions */
581 static int dm_early_init(void* handle);
582 
583 /* Allocate memory for FBC compressed data  */
amdgpu_dm_fbc_init(struct drm_connector * connector)584 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
585 {
586 	struct drm_device *dev = connector->dev;
587 	struct amdgpu_device *adev = drm_to_adev(dev);
588 	struct dm_compressor_info *compressor = &adev->dm.compressor;
589 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
590 	struct drm_display_mode *mode;
591 	unsigned long max_size = 0;
592 
593 	if (adev->dm.dc->fbc_compressor == NULL)
594 		return;
595 
596 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
597 		return;
598 
599 	if (compressor->bo_ptr)
600 		return;
601 
602 
603 	list_for_each_entry(mode, &connector->modes, head) {
604 		if (max_size < mode->htotal * mode->vtotal)
605 			max_size = mode->htotal * mode->vtotal;
606 	}
607 
608 	if (max_size) {
609 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
610 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
611 			    &compressor->gpu_addr, &compressor->cpu_addr);
612 
613 		if (r)
614 			DRM_ERROR("DM: Failed to initialize FBC\n");
615 		else {
616 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
617 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
618 		}
619 
620 	}
621 
622 }
623 
amdgpu_dm_audio_component_get_eld(struct device * kdev,int port,int pipe,bool * enabled,unsigned char * buf,int max_bytes)624 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
625 					  int pipe, bool *enabled,
626 					  unsigned char *buf, int max_bytes)
627 {
628 	struct drm_device *dev = dev_get_drvdata(kdev);
629 	struct amdgpu_device *adev = drm_to_adev(dev);
630 	struct drm_connector *connector;
631 	struct drm_connector_list_iter conn_iter;
632 	struct amdgpu_dm_connector *aconnector;
633 	int ret = 0;
634 
635 	*enabled = false;
636 
637 	mutex_lock(&adev->dm.audio_lock);
638 
639 	drm_connector_list_iter_begin(dev, &conn_iter);
640 	drm_for_each_connector_iter(connector, &conn_iter) {
641 		aconnector = to_amdgpu_dm_connector(connector);
642 		if (aconnector->audio_inst != port)
643 			continue;
644 
645 		*enabled = true;
646 		ret = drm_eld_size(connector->eld);
647 		memcpy(buf, connector->eld, min(max_bytes, ret));
648 
649 		break;
650 	}
651 	drm_connector_list_iter_end(&conn_iter);
652 
653 	mutex_unlock(&adev->dm.audio_lock);
654 
655 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
656 
657 	return ret;
658 }
659 
660 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
661 	.get_eld = amdgpu_dm_audio_component_get_eld,
662 };
663 
amdgpu_dm_audio_component_bind(struct device * kdev,struct device * hda_kdev,void * data)664 static int amdgpu_dm_audio_component_bind(struct device *kdev,
665 				       struct device *hda_kdev, void *data)
666 {
667 	struct drm_device *dev = dev_get_drvdata(kdev);
668 	struct amdgpu_device *adev = drm_to_adev(dev);
669 	struct drm_audio_component *acomp = data;
670 
671 	acomp->ops = &amdgpu_dm_audio_component_ops;
672 	acomp->dev = kdev;
673 	adev->dm.audio_component = acomp;
674 
675 	return 0;
676 }
677 
amdgpu_dm_audio_component_unbind(struct device * kdev,struct device * hda_kdev,void * data)678 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
679 					  struct device *hda_kdev, void *data)
680 {
681 	struct drm_device *dev = dev_get_drvdata(kdev);
682 	struct amdgpu_device *adev = drm_to_adev(dev);
683 	struct drm_audio_component *acomp = data;
684 
685 	acomp->ops = NULL;
686 	acomp->dev = NULL;
687 	adev->dm.audio_component = NULL;
688 }
689 
690 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
691 	.bind	= amdgpu_dm_audio_component_bind,
692 	.unbind	= amdgpu_dm_audio_component_unbind,
693 };
694 
amdgpu_dm_audio_init(struct amdgpu_device * adev)695 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
696 {
697 	int i, ret;
698 
699 	if (!amdgpu_audio)
700 		return 0;
701 
702 	adev->mode_info.audio.enabled = true;
703 
704 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
705 
706 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
707 		adev->mode_info.audio.pin[i].channels = -1;
708 		adev->mode_info.audio.pin[i].rate = -1;
709 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
710 		adev->mode_info.audio.pin[i].status_bits = 0;
711 		adev->mode_info.audio.pin[i].category_code = 0;
712 		adev->mode_info.audio.pin[i].connected = false;
713 		adev->mode_info.audio.pin[i].id =
714 			adev->dm.dc->res_pool->audios[i]->inst;
715 		adev->mode_info.audio.pin[i].offset = 0;
716 	}
717 
718 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
719 	if (ret < 0)
720 		return ret;
721 
722 	adev->dm.audio_registered = true;
723 
724 	return 0;
725 }
726 
amdgpu_dm_audio_fini(struct amdgpu_device * adev)727 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
728 {
729 	if (!amdgpu_audio)
730 		return;
731 
732 	if (!adev->mode_info.audio.enabled)
733 		return;
734 
735 	if (adev->dm.audio_registered) {
736 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
737 		adev->dm.audio_registered = false;
738 	}
739 
740 	/* TODO: Disable audio? */
741 
742 	adev->mode_info.audio.enabled = false;
743 }
744 
amdgpu_dm_audio_eld_notify(struct amdgpu_device * adev,int pin)745 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
746 {
747 	struct drm_audio_component *acomp = adev->dm.audio_component;
748 
749 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
750 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
751 
752 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
753 						 pin, -1);
754 	}
755 }
756 
dm_dmub_hw_init(struct amdgpu_device * adev)757 static int dm_dmub_hw_init(struct amdgpu_device *adev)
758 {
759 	const struct dmcub_firmware_header_v1_0 *hdr;
760 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
761 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
762 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
763 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
764 	struct abm *abm = adev->dm.dc->res_pool->abm;
765 	struct dmub_srv_hw_params hw_params;
766 	enum dmub_status status;
767 	const unsigned char *fw_inst_const, *fw_bss_data;
768 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
769 	bool has_hw_support;
770 
771 	if (!dmub_srv)
772 		/* DMUB isn't supported on the ASIC. */
773 		return 0;
774 
775 	if (!fb_info) {
776 		DRM_ERROR("No framebuffer info for DMUB service.\n");
777 		return -EINVAL;
778 	}
779 
780 	if (!dmub_fw) {
781 		/* Firmware required for DMUB support. */
782 		DRM_ERROR("No firmware provided for DMUB.\n");
783 		return -EINVAL;
784 	}
785 
786 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
787 	if (status != DMUB_STATUS_OK) {
788 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
789 		return -EINVAL;
790 	}
791 
792 	if (!has_hw_support) {
793 		DRM_INFO("DMUB unsupported on ASIC\n");
794 		return 0;
795 	}
796 
797 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
798 
799 	fw_inst_const = dmub_fw->data +
800 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
801 			PSP_HEADER_BYTES;
802 
803 	fw_bss_data = dmub_fw->data +
804 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
805 		      le32_to_cpu(hdr->inst_const_bytes);
806 
807 	/* Copy firmware and bios info into FB memory. */
808 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
809 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
810 
811 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
812 
813 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
814 	 * amdgpu_ucode_init_single_fw will load dmub firmware
815 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
816 	 * will be done by dm_dmub_hw_init
817 	 */
818 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
819 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
820 				fw_inst_const_size);
821 	}
822 
823 	if (fw_bss_data_size)
824 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
825 		       fw_bss_data, fw_bss_data_size);
826 
827 	/* Copy firmware bios info into FB memory. */
828 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
829 	       adev->bios_size);
830 
831 	/* Reset regions that need to be reset. */
832 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
833 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
834 
835 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
836 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
837 
838 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
839 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
840 
841 	/* Initialize hardware. */
842 	memset(&hw_params, 0, sizeof(hw_params));
843 	hw_params.fb_base = adev->gmc.fb_start;
844 	hw_params.fb_offset = adev->gmc.aper_base;
845 
846 	/* backdoor load firmware and trigger dmub running */
847 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
848 		hw_params.load_inst_const = true;
849 
850 	if (dmcu)
851 		hw_params.psp_version = dmcu->psp_version;
852 
853 	for (i = 0; i < fb_info->num_fb; ++i)
854 		hw_params.fb[i] = &fb_info->fb[i];
855 
856 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
857 	if (status != DMUB_STATUS_OK) {
858 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
859 		return -EINVAL;
860 	}
861 
862 	/* Wait for firmware load to finish. */
863 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
864 	if (status != DMUB_STATUS_OK)
865 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
866 
867 	/* Init DMCU and ABM if available. */
868 	if (dmcu && abm) {
869 		dmcu->funcs->dmcu_init(dmcu);
870 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
871 	}
872 
873 	if (!adev->dm.dc->ctx->dmub_srv)
874 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 	if (!adev->dm.dc->ctx->dmub_srv) {
876 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
877 		return -ENOMEM;
878 	}
879 
880 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 		 adev->dm.dmcub_fw_version);
882 
883 	return 0;
884 }
885 
amdgpu_check_debugfs_connector_property_change(struct amdgpu_device * adev,struct drm_atomic_state * state)886 static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
887 							   struct drm_atomic_state *state)
888 {
889 	struct drm_connector *connector;
890 	struct drm_crtc *crtc;
891 	struct amdgpu_dm_connector *amdgpu_dm_connector;
892 	struct drm_connector_state *conn_state;
893 	struct dm_crtc_state *acrtc_state;
894 	struct drm_crtc_state *crtc_state;
895 	struct dc_stream_state *stream;
896 	struct drm_device *dev = adev_to_drm(adev);
897 
898 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
899 
900 		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
901 		conn_state = connector->state;
902 
903 		if (!(conn_state && conn_state->crtc))
904 			continue;
905 
906 		crtc = conn_state->crtc;
907 		acrtc_state = to_dm_crtc_state(crtc->state);
908 
909 		if (!(acrtc_state && acrtc_state->stream))
910 			continue;
911 
912 		stream = acrtc_state->stream;
913 
914 		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
915 		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
916 		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
917 		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
918 			conn_state = drm_atomic_get_connector_state(state, connector);
919 			crtc_state = drm_atomic_get_crtc_state(state, crtc);
920 			crtc_state->mode_changed = true;
921 		}
922 	}
923 }
924 
925 struct amdgpu_stutter_quirk {
926 	u16 chip_vendor;
927 	u16 chip_device;
928 	u16 subsys_vendor;
929 	u16 subsys_device;
930 	u8 revision;
931 };
932 
933 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
934 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
935 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
936 	{ 0, 0, 0, 0, 0 },
937 };
938 
dm_should_disable_stutter(struct pci_dev * pdev)939 static bool dm_should_disable_stutter(struct pci_dev *pdev)
940 {
941 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
942 
943 	while (p && p->chip_device != 0) {
944 		if (pdev->vendor == p->chip_vendor &&
945 		    pdev->device == p->chip_device &&
946 		    pdev->subsystem_vendor == p->subsys_vendor &&
947 		    pdev->subsystem_device == p->subsys_device &&
948 		    pdev->revision == p->revision) {
949 			return true;
950 		}
951 		++p;
952 	}
953 	return false;
954 }
955 
amdgpu_dm_init(struct amdgpu_device * adev)956 static int amdgpu_dm_init(struct amdgpu_device *adev)
957 {
958 	struct dc_init_data init_data;
959 #ifdef CONFIG_DRM_AMD_DC_HDCP
960 	struct dc_callback_init init_params;
961 #endif
962 	int r;
963 
964 	adev->dm.ddev = adev_to_drm(adev);
965 	adev->dm.adev = adev;
966 
967 	/* Zero all the fields */
968 	memset(&init_data, 0, sizeof(init_data));
969 #ifdef CONFIG_DRM_AMD_DC_HDCP
970 	memset(&init_params, 0, sizeof(init_params));
971 #endif
972 
973 	mutex_init(&adev->dm.dc_lock);
974 	mutex_init(&adev->dm.audio_lock);
975 
976 	if(amdgpu_dm_irq_init(adev)) {
977 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
978 		goto error;
979 	}
980 
981 	init_data.asic_id.chip_family = adev->family;
982 
983 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
984 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
985 	init_data.asic_id.chip_id = adev->pdev->device;
986 
987 	init_data.asic_id.vram_width = adev->gmc.vram_width;
988 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
989 	init_data.asic_id.atombios_base_address =
990 		adev->mode_info.atom_context->bios;
991 
992 	init_data.driver = adev;
993 
994 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
995 
996 	if (!adev->dm.cgs_device) {
997 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
998 		goto error;
999 	}
1000 
1001 	init_data.cgs_device = adev->dm.cgs_device;
1002 
1003 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1004 
1005 	switch (adev->asic_type) {
1006 	case CHIP_CARRIZO:
1007 	case CHIP_STONEY:
1008 	case CHIP_RAVEN:
1009 	case CHIP_RENOIR:
1010 		init_data.flags.gpu_vm_support = true;
1011 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1012 			init_data.flags.disable_dmcu = true;
1013 		break;
1014 	default:
1015 		break;
1016 	}
1017 
1018 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1019 		init_data.flags.fbc_support = true;
1020 
1021 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1022 		init_data.flags.multi_mon_pp_mclk_switch = true;
1023 
1024 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1025 		init_data.flags.disable_fractional_pwm = true;
1026 
1027 	init_data.flags.power_down_display_on_boot = true;
1028 
1029 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1030 
1031 	/* Display Core create. */
1032 	adev->dm.dc = dc_create(&init_data);
1033 
1034 	if (adev->dm.dc) {
1035 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1036 	} else {
1037 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1038 		goto error;
1039 	}
1040 
1041 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1042 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1043 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1044 	}
1045 
1046 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1047 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1048 	if (dm_should_disable_stutter(adev->pdev))
1049 		adev->dm.dc->debug.disable_stutter = true;
1050 
1051 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1052 		adev->dm.dc->debug.disable_stutter = true;
1053 
1054 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1055 		adev->dm.dc->debug.disable_dsc = true;
1056 
1057 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1058 		adev->dm.dc->debug.disable_clock_gate = true;
1059 
1060 	r = dm_dmub_hw_init(adev);
1061 	if (r) {
1062 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1063 		goto error;
1064 	}
1065 
1066 	dc_hardware_init(adev->dm.dc);
1067 
1068 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1069 	if (!adev->dm.freesync_module) {
1070 		DRM_ERROR(
1071 		"amdgpu: failed to initialize freesync_module.\n");
1072 	} else
1073 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1074 				adev->dm.freesync_module);
1075 
1076 	amdgpu_dm_init_color_mod();
1077 
1078 #ifdef CONFIG_DRM_AMD_DC_HDCP
1079 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1080 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1081 
1082 		if (!adev->dm.hdcp_workqueue)
1083 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1084 		else
1085 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1086 
1087 		dc_init_callbacks(adev->dm.dc, &init_params);
1088 	}
1089 #endif
1090 	if (amdgpu_dm_initialize_drm_device(adev)) {
1091 		DRM_ERROR(
1092 		"amdgpu: failed to initialize sw for display support.\n");
1093 		goto error;
1094 	}
1095 
1096 	/* create fake encoders for MST */
1097 	dm_dp_create_fake_mst_encoders(adev);
1098 
1099 	/* TODO: Add_display_info? */
1100 
1101 	/* TODO use dynamic cursor width */
1102 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1103 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1104 
1105 	/* Disable vblank IRQs aggressively for power-saving */
1106 	adev_to_drm(adev)->vblank_disable_immediate = true;
1107 
1108 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1109 		DRM_ERROR(
1110 		"amdgpu: failed to initialize sw for display support.\n");
1111 		goto error;
1112 	}
1113 
1114 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1115 
1116 	return 0;
1117 error:
1118 	amdgpu_dm_fini(adev);
1119 
1120 	return -EINVAL;
1121 }
1122 
amdgpu_dm_fini(struct amdgpu_device * adev)1123 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1124 {
1125 	int i;
1126 
1127 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1128 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1129 	}
1130 
1131 	amdgpu_dm_audio_fini(adev);
1132 
1133 	amdgpu_dm_destroy_drm_device(&adev->dm);
1134 
1135 #ifdef CONFIG_DRM_AMD_DC_HDCP
1136 	if (adev->dm.hdcp_workqueue) {
1137 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1138 		adev->dm.hdcp_workqueue = NULL;
1139 	}
1140 
1141 	if (adev->dm.dc)
1142 		dc_deinit_callbacks(adev->dm.dc);
1143 #endif
1144 	if (adev->dm.dc->ctx->dmub_srv) {
1145 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1146 		adev->dm.dc->ctx->dmub_srv = NULL;
1147 	}
1148 
1149 	if (adev->dm.dmub_bo)
1150 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1151 				      &adev->dm.dmub_bo_gpu_addr,
1152 				      &adev->dm.dmub_bo_cpu_addr);
1153 
1154 	/* DC Destroy TODO: Replace destroy DAL */
1155 	if (adev->dm.dc)
1156 		dc_destroy(&adev->dm.dc);
1157 	/*
1158 	 * TODO: pageflip, vlank interrupt
1159 	 *
1160 	 * amdgpu_dm_irq_fini(adev);
1161 	 */
1162 
1163 	if (adev->dm.cgs_device) {
1164 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1165 		adev->dm.cgs_device = NULL;
1166 	}
1167 	if (adev->dm.freesync_module) {
1168 		mod_freesync_destroy(adev->dm.freesync_module);
1169 		adev->dm.freesync_module = NULL;
1170 	}
1171 
1172 	mutex_destroy(&adev->dm.audio_lock);
1173 	mutex_destroy(&adev->dm.dc_lock);
1174 
1175 	return;
1176 }
1177 
load_dmcu_fw(struct amdgpu_device * adev)1178 static int load_dmcu_fw(struct amdgpu_device *adev)
1179 {
1180 	const char *fw_name_dmcu = NULL;
1181 	int r;
1182 	const struct dmcu_firmware_header_v1_0 *hdr;
1183 
1184 	switch(adev->asic_type) {
1185 #if defined(CONFIG_DRM_AMD_DC_SI)
1186 	case CHIP_TAHITI:
1187 	case CHIP_PITCAIRN:
1188 	case CHIP_VERDE:
1189 	case CHIP_OLAND:
1190 #endif
1191 	case CHIP_BONAIRE:
1192 	case CHIP_HAWAII:
1193 	case CHIP_KAVERI:
1194 	case CHIP_KABINI:
1195 	case CHIP_MULLINS:
1196 	case CHIP_TONGA:
1197 	case CHIP_FIJI:
1198 	case CHIP_CARRIZO:
1199 	case CHIP_STONEY:
1200 	case CHIP_POLARIS11:
1201 	case CHIP_POLARIS10:
1202 	case CHIP_POLARIS12:
1203 	case CHIP_VEGAM:
1204 	case CHIP_VEGA10:
1205 	case CHIP_VEGA12:
1206 	case CHIP_VEGA20:
1207 	case CHIP_NAVI10:
1208 	case CHIP_NAVI14:
1209 	case CHIP_RENOIR:
1210 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1211 	case CHIP_SIENNA_CICHLID:
1212 	case CHIP_NAVY_FLOUNDER:
1213 #endif
1214 		return 0;
1215 	case CHIP_NAVI12:
1216 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1217 		break;
1218 	case CHIP_RAVEN:
1219 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1220 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1221 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1222 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1223 		else
1224 			return 0;
1225 		break;
1226 	default:
1227 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1228 		return -EINVAL;
1229 	}
1230 
1231 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1232 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1233 		return 0;
1234 	}
1235 
1236 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1237 	if (r == -ENOENT) {
1238 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1239 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1240 		adev->dm.fw_dmcu = NULL;
1241 		return 0;
1242 	}
1243 	if (r) {
1244 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1245 			fw_name_dmcu);
1246 		return r;
1247 	}
1248 
1249 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1250 	if (r) {
1251 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1252 			fw_name_dmcu);
1253 		release_firmware(adev->dm.fw_dmcu);
1254 		adev->dm.fw_dmcu = NULL;
1255 		return r;
1256 	}
1257 
1258 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1259 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1260 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1261 	adev->firmware.fw_size +=
1262 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1263 
1264 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1265 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1266 	adev->firmware.fw_size +=
1267 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1268 
1269 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1270 
1271 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1272 
1273 	return 0;
1274 }
1275 
amdgpu_dm_dmub_reg_read(void * ctx,uint32_t address)1276 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1277 {
1278 	struct amdgpu_device *adev = ctx;
1279 
1280 	return dm_read_reg(adev->dm.dc->ctx, address);
1281 }
1282 
amdgpu_dm_dmub_reg_write(void * ctx,uint32_t address,uint32_t value)1283 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1284 				     uint32_t value)
1285 {
1286 	struct amdgpu_device *adev = ctx;
1287 
1288 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1289 }
1290 
dm_dmub_sw_init(struct amdgpu_device * adev)1291 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1292 {
1293 	struct dmub_srv_create_params create_params;
1294 	struct dmub_srv_region_params region_params;
1295 	struct dmub_srv_region_info region_info;
1296 	struct dmub_srv_fb_params fb_params;
1297 	struct dmub_srv_fb_info *fb_info;
1298 	struct dmub_srv *dmub_srv;
1299 	const struct dmcub_firmware_header_v1_0 *hdr;
1300 	const char *fw_name_dmub;
1301 	enum dmub_asic dmub_asic;
1302 	enum dmub_status status;
1303 	int r;
1304 
1305 	switch (adev->asic_type) {
1306 	case CHIP_RENOIR:
1307 		dmub_asic = DMUB_ASIC_DCN21;
1308 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1309 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1310 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1311 		break;
1312 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1313 	case CHIP_SIENNA_CICHLID:
1314 		dmub_asic = DMUB_ASIC_DCN30;
1315 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1316 		break;
1317 	case CHIP_NAVY_FLOUNDER:
1318 		dmub_asic = DMUB_ASIC_DCN30;
1319 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1320 		break;
1321 #endif
1322 
1323 	default:
1324 		/* ASIC doesn't support DMUB. */
1325 		return 0;
1326 	}
1327 
1328 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1329 	if (r) {
1330 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1331 		return 0;
1332 	}
1333 
1334 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1335 	if (r) {
1336 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1337 		return 0;
1338 	}
1339 
1340 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1341 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1342 
1343 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1344 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1345 			AMDGPU_UCODE_ID_DMCUB;
1346 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1347 			adev->dm.dmub_fw;
1348 		adev->firmware.fw_size +=
1349 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1350 
1351 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1352 			 adev->dm.dmcub_fw_version);
1353 	}
1354 
1355 
1356 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1357 	dmub_srv = adev->dm.dmub_srv;
1358 
1359 	if (!dmub_srv) {
1360 		DRM_ERROR("Failed to allocate DMUB service!\n");
1361 		return -ENOMEM;
1362 	}
1363 
1364 	memset(&create_params, 0, sizeof(create_params));
1365 	create_params.user_ctx = adev;
1366 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1367 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1368 	create_params.asic = dmub_asic;
1369 
1370 	/* Create the DMUB service. */
1371 	status = dmub_srv_create(dmub_srv, &create_params);
1372 	if (status != DMUB_STATUS_OK) {
1373 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1374 		return -EINVAL;
1375 	}
1376 
1377 	/* Calculate the size of all the regions for the DMUB service. */
1378 	memset(&region_params, 0, sizeof(region_params));
1379 
1380 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1381 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1382 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1383 	region_params.vbios_size = adev->bios_size;
1384 	region_params.fw_bss_data = region_params.bss_data_size ?
1385 		adev->dm.dmub_fw->data +
1386 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1387 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1388 	region_params.fw_inst_const =
1389 		adev->dm.dmub_fw->data +
1390 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1391 		PSP_HEADER_BYTES;
1392 
1393 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1394 					   &region_info);
1395 
1396 	if (status != DMUB_STATUS_OK) {
1397 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1398 		return -EINVAL;
1399 	}
1400 
1401 	/*
1402 	 * Allocate a framebuffer based on the total size of all the regions.
1403 	 * TODO: Move this into GART.
1404 	 */
1405 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1406 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1407 				    &adev->dm.dmub_bo_gpu_addr,
1408 				    &adev->dm.dmub_bo_cpu_addr);
1409 	if (r)
1410 		return r;
1411 
1412 	/* Rebase the regions on the framebuffer address. */
1413 	memset(&fb_params, 0, sizeof(fb_params));
1414 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1415 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1416 	fb_params.region_info = &region_info;
1417 
1418 	adev->dm.dmub_fb_info =
1419 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1420 	fb_info = adev->dm.dmub_fb_info;
1421 
1422 	if (!fb_info) {
1423 		DRM_ERROR(
1424 			"Failed to allocate framebuffer info for DMUB service!\n");
1425 		return -ENOMEM;
1426 	}
1427 
1428 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1429 	if (status != DMUB_STATUS_OK) {
1430 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1431 		return -EINVAL;
1432 	}
1433 
1434 	return 0;
1435 }
1436 
dm_sw_init(void * handle)1437 static int dm_sw_init(void *handle)
1438 {
1439 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1440 	int r;
1441 
1442 	r = dm_dmub_sw_init(adev);
1443 	if (r)
1444 		return r;
1445 
1446 	return load_dmcu_fw(adev);
1447 }
1448 
dm_sw_fini(void * handle)1449 static int dm_sw_fini(void *handle)
1450 {
1451 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1452 
1453 	kfree(adev->dm.dmub_fb_info);
1454 	adev->dm.dmub_fb_info = NULL;
1455 
1456 	if (adev->dm.dmub_srv) {
1457 		dmub_srv_destroy(adev->dm.dmub_srv);
1458 		adev->dm.dmub_srv = NULL;
1459 	}
1460 
1461 	release_firmware(adev->dm.dmub_fw);
1462 	adev->dm.dmub_fw = NULL;
1463 
1464 	release_firmware(adev->dm.fw_dmcu);
1465 	adev->dm.fw_dmcu = NULL;
1466 
1467 	return 0;
1468 }
1469 
detect_mst_link_for_all_connectors(struct drm_device * dev)1470 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1471 {
1472 	struct amdgpu_dm_connector *aconnector;
1473 	struct drm_connector *connector;
1474 	struct drm_connector_list_iter iter;
1475 	int ret = 0;
1476 
1477 	drm_connector_list_iter_begin(dev, &iter);
1478 	drm_for_each_connector_iter(connector, &iter) {
1479 		aconnector = to_amdgpu_dm_connector(connector);
1480 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1481 		    aconnector->mst_mgr.aux) {
1482 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1483 					 aconnector,
1484 					 aconnector->base.base.id);
1485 
1486 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1487 			if (ret < 0) {
1488 				DRM_ERROR("DM_MST: Failed to start MST\n");
1489 				aconnector->dc_link->type =
1490 					dc_connection_single;
1491 				break;
1492 			}
1493 		}
1494 	}
1495 	drm_connector_list_iter_end(&iter);
1496 
1497 	return ret;
1498 }
1499 
dm_late_init(void * handle)1500 static int dm_late_init(void *handle)
1501 {
1502 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1503 
1504 	struct dmcu_iram_parameters params;
1505 	unsigned int linear_lut[16];
1506 	int i;
1507 	struct dmcu *dmcu = NULL;
1508 	bool ret = true;
1509 
1510 	dmcu = adev->dm.dc->res_pool->dmcu;
1511 
1512 	for (i = 0; i < 16; i++)
1513 		linear_lut[i] = 0xFFFF * i / 15;
1514 
1515 	params.set = 0;
1516 	params.backlight_ramping_start = 0xCCCC;
1517 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1518 	params.backlight_lut_array_size = 16;
1519 	params.backlight_lut_array = linear_lut;
1520 
1521 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1522 	 * 0xFFFF x 0.01 = 0x28F
1523 	 */
1524 	params.min_abm_backlight = 0x28F;
1525 
1526 	/* In the case where abm is implemented on dmcub,
1527 	 * dmcu object will be null.
1528 	 * ABM 2.4 and up are implemented on dmcub.
1529 	 */
1530 	if (dmcu)
1531 		ret = dmcu_load_iram(dmcu, params);
1532 	else if (adev->dm.dc->ctx->dmub_srv)
1533 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1534 
1535 	if (!ret)
1536 		return -EINVAL;
1537 
1538 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1539 }
1540 
s3_handle_mst(struct drm_device * dev,bool suspend)1541 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1542 {
1543 	struct amdgpu_dm_connector *aconnector;
1544 	struct drm_connector *connector;
1545 	struct drm_connector_list_iter iter;
1546 	struct drm_dp_mst_topology_mgr *mgr;
1547 	int ret;
1548 	bool need_hotplug = false;
1549 
1550 	drm_connector_list_iter_begin(dev, &iter);
1551 	drm_for_each_connector_iter(connector, &iter) {
1552 		aconnector = to_amdgpu_dm_connector(connector);
1553 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1554 		    aconnector->mst_port)
1555 			continue;
1556 
1557 		mgr = &aconnector->mst_mgr;
1558 
1559 		if (suspend) {
1560 			drm_dp_mst_topology_mgr_suspend(mgr);
1561 		} else {
1562 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1563 			if (ret < 0) {
1564 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1565 				need_hotplug = true;
1566 			}
1567 		}
1568 	}
1569 	drm_connector_list_iter_end(&iter);
1570 
1571 	if (need_hotplug)
1572 		drm_kms_helper_hotplug_event(dev);
1573 }
1574 
amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device * adev)1575 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1576 {
1577 	struct smu_context *smu = &adev->smu;
1578 	int ret = 0;
1579 
1580 	if (!is_support_sw_smu(adev))
1581 		return 0;
1582 
1583 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1584 	 * on window driver dc implementation.
1585 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1586 	 * should be passed to smu during boot up and resume from s3.
1587 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1588 	 * dcn20_resource_construct
1589 	 * then call pplib functions below to pass the settings to smu:
1590 	 * smu_set_watermarks_for_clock_ranges
1591 	 * smu_set_watermarks_table
1592 	 * navi10_set_watermarks_table
1593 	 * smu_write_watermarks_table
1594 	 *
1595 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1596 	 * dc has implemented different flow for window driver:
1597 	 * dc_hardware_init / dc_set_power_state
1598 	 * dcn10_init_hw
1599 	 * notify_wm_ranges
1600 	 * set_wm_ranges
1601 	 * -- Linux
1602 	 * smu_set_watermarks_for_clock_ranges
1603 	 * renoir_set_watermarks_table
1604 	 * smu_write_watermarks_table
1605 	 *
1606 	 * For Linux,
1607 	 * dc_hardware_init -> amdgpu_dm_init
1608 	 * dc_set_power_state --> dm_resume
1609 	 *
1610 	 * therefore, this function apply to navi10/12/14 but not Renoir
1611 	 * *
1612 	 */
1613 	switch(adev->asic_type) {
1614 	case CHIP_NAVI10:
1615 	case CHIP_NAVI14:
1616 	case CHIP_NAVI12:
1617 		break;
1618 	default:
1619 		return 0;
1620 	}
1621 
1622 	ret = smu_write_watermarks_table(smu);
1623 	if (ret) {
1624 		DRM_ERROR("Failed to update WMTABLE!\n");
1625 		return ret;
1626 	}
1627 
1628 	return 0;
1629 }
1630 
1631 /**
1632  * dm_hw_init() - Initialize DC device
1633  * @handle: The base driver device containing the amdgpu_dm device.
1634  *
1635  * Initialize the &struct amdgpu_display_manager device. This involves calling
1636  * the initializers of each DM component, then populating the struct with them.
1637  *
1638  * Although the function implies hardware initialization, both hardware and
1639  * software are initialized here. Splitting them out to their relevant init
1640  * hooks is a future TODO item.
1641  *
1642  * Some notable things that are initialized here:
1643  *
1644  * - Display Core, both software and hardware
1645  * - DC modules that we need (freesync and color management)
1646  * - DRM software states
1647  * - Interrupt sources and handlers
1648  * - Vblank support
1649  * - Debug FS entries, if enabled
1650  */
dm_hw_init(void * handle)1651 static int dm_hw_init(void *handle)
1652 {
1653 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1654 	/* Create DAL display manager */
1655 	amdgpu_dm_init(adev);
1656 	amdgpu_dm_hpd_init(adev);
1657 
1658 	return 0;
1659 }
1660 
1661 /**
1662  * dm_hw_fini() - Teardown DC device
1663  * @handle: The base driver device containing the amdgpu_dm device.
1664  *
1665  * Teardown components within &struct amdgpu_display_manager that require
1666  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1667  * were loaded. Also flush IRQ workqueues and disable them.
1668  */
dm_hw_fini(void * handle)1669 static int dm_hw_fini(void *handle)
1670 {
1671 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1672 
1673 	amdgpu_dm_hpd_fini(adev);
1674 
1675 	amdgpu_dm_irq_fini(adev);
1676 	amdgpu_dm_fini(adev);
1677 	return 0;
1678 }
1679 
1680 
1681 static int dm_enable_vblank(struct drm_crtc *crtc);
1682 static void dm_disable_vblank(struct drm_crtc *crtc);
1683 
dm_gpureset_toggle_interrupts(struct amdgpu_device * adev,struct dc_state * state,bool enable)1684 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1685 				 struct dc_state *state, bool enable)
1686 {
1687 	enum dc_irq_source irq_source;
1688 	struct amdgpu_crtc *acrtc;
1689 	int rc = -EBUSY;
1690 	int i = 0;
1691 
1692 	for (i = 0; i < state->stream_count; i++) {
1693 		acrtc = get_crtc_by_otg_inst(
1694 				adev, state->stream_status[i].primary_otg_inst);
1695 
1696 		if (acrtc && state->stream_status[i].plane_count != 0) {
1697 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1698 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1699 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1700 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1701 			if (rc)
1702 				DRM_WARN("Failed to %s pflip interrupts\n",
1703 					 enable ? "enable" : "disable");
1704 
1705 			if (enable) {
1706 				rc = dm_enable_vblank(&acrtc->base);
1707 				if (rc)
1708 					DRM_WARN("Failed to enable vblank interrupts\n");
1709 			} else {
1710 				dm_disable_vblank(&acrtc->base);
1711 			}
1712 
1713 		}
1714 	}
1715 
1716 }
1717 
amdgpu_dm_commit_zero_streams(struct dc * dc)1718 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1719 {
1720 	struct dc_state *context = NULL;
1721 	enum dc_status res = DC_ERROR_UNEXPECTED;
1722 	int i;
1723 	struct dc_stream_state *del_streams[MAX_PIPES];
1724 	int del_streams_count = 0;
1725 
1726 	memset(del_streams, 0, sizeof(del_streams));
1727 
1728 	context = dc_create_state(dc);
1729 	if (context == NULL)
1730 		goto context_alloc_fail;
1731 
1732 	dc_resource_state_copy_construct_current(dc, context);
1733 
1734 	/* First remove from context all streams */
1735 	for (i = 0; i < context->stream_count; i++) {
1736 		struct dc_stream_state *stream = context->streams[i];
1737 
1738 		del_streams[del_streams_count++] = stream;
1739 	}
1740 
1741 	/* Remove all planes for removed streams and then remove the streams */
1742 	for (i = 0; i < del_streams_count; i++) {
1743 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1744 			res = DC_FAIL_DETACH_SURFACES;
1745 			goto fail;
1746 		}
1747 
1748 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1749 		if (res != DC_OK)
1750 			goto fail;
1751 	}
1752 
1753 
1754 	res = dc_validate_global_state(dc, context, false);
1755 
1756 	if (res != DC_OK) {
1757 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1758 		goto fail;
1759 	}
1760 
1761 	res = dc_commit_state(dc, context);
1762 
1763 fail:
1764 	dc_release_state(context);
1765 
1766 context_alloc_fail:
1767 	return res;
1768 }
1769 
dm_suspend(void * handle)1770 static int dm_suspend(void *handle)
1771 {
1772 	struct amdgpu_device *adev = handle;
1773 	struct amdgpu_display_manager *dm = &adev->dm;
1774 	int ret = 0;
1775 
1776 	if (amdgpu_in_reset(adev)) {
1777 		mutex_lock(&dm->dc_lock);
1778 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1779 
1780 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1781 
1782 		amdgpu_dm_commit_zero_streams(dm->dc);
1783 
1784 		amdgpu_dm_irq_suspend(adev);
1785 
1786 		return ret;
1787 	}
1788 
1789 	WARN_ON(adev->dm.cached_state);
1790 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1791 
1792 	s3_handle_mst(adev_to_drm(adev), true);
1793 
1794 	amdgpu_dm_irq_suspend(adev);
1795 
1796 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1797 
1798 	return 0;
1799 }
1800 
1801 static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state * state,struct drm_crtc * crtc)1802 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1803 					     struct drm_crtc *crtc)
1804 {
1805 	uint32_t i;
1806 	struct drm_connector_state *new_con_state;
1807 	struct drm_connector *connector;
1808 	struct drm_crtc *crtc_from_state;
1809 
1810 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1811 		crtc_from_state = new_con_state->crtc;
1812 
1813 		if (crtc_from_state == crtc)
1814 			return to_amdgpu_dm_connector(connector);
1815 	}
1816 
1817 	return NULL;
1818 }
1819 
emulated_link_detect(struct dc_link * link)1820 static void emulated_link_detect(struct dc_link *link)
1821 {
1822 	struct dc_sink_init_data sink_init_data = { 0 };
1823 	struct display_sink_capability sink_caps = { 0 };
1824 	enum dc_edid_status edid_status;
1825 	struct dc_context *dc_ctx = link->ctx;
1826 	struct dc_sink *sink = NULL;
1827 	struct dc_sink *prev_sink = NULL;
1828 
1829 	link->type = dc_connection_none;
1830 	prev_sink = link->local_sink;
1831 
1832 	if (prev_sink)
1833 		dc_sink_release(prev_sink);
1834 
1835 	switch (link->connector_signal) {
1836 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1837 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1838 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1839 		break;
1840 	}
1841 
1842 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1843 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1844 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1845 		break;
1846 	}
1847 
1848 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1849 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1850 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1851 		break;
1852 	}
1853 
1854 	case SIGNAL_TYPE_LVDS: {
1855 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1856 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1857 		break;
1858 	}
1859 
1860 	case SIGNAL_TYPE_EDP: {
1861 		sink_caps.transaction_type =
1862 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1863 		sink_caps.signal = SIGNAL_TYPE_EDP;
1864 		break;
1865 	}
1866 
1867 	case SIGNAL_TYPE_DISPLAY_PORT: {
1868 		sink_caps.transaction_type =
1869 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1870 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1871 		break;
1872 	}
1873 
1874 	default:
1875 		DC_ERROR("Invalid connector type! signal:%d\n",
1876 			link->connector_signal);
1877 		return;
1878 	}
1879 
1880 	sink_init_data.link = link;
1881 	sink_init_data.sink_signal = sink_caps.signal;
1882 
1883 	sink = dc_sink_create(&sink_init_data);
1884 	if (!sink) {
1885 		DC_ERROR("Failed to create sink!\n");
1886 		return;
1887 	}
1888 
1889 	/* dc_sink_create returns a new reference */
1890 	link->local_sink = sink;
1891 
1892 	edid_status = dm_helpers_read_local_edid(
1893 			link->ctx,
1894 			link,
1895 			sink);
1896 
1897 	if (edid_status != EDID_OK)
1898 		DC_ERROR("Failed to read EDID");
1899 
1900 }
1901 
dm_gpureset_commit_state(struct dc_state * dc_state,struct amdgpu_display_manager * dm)1902 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1903 				     struct amdgpu_display_manager *dm)
1904 {
1905 	struct {
1906 		struct dc_surface_update surface_updates[MAX_SURFACES];
1907 		struct dc_plane_info plane_infos[MAX_SURFACES];
1908 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1909 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1910 		struct dc_stream_update stream_update;
1911 	} * bundle;
1912 	int k, m;
1913 
1914 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1915 
1916 	if (!bundle) {
1917 		dm_error("Failed to allocate update bundle\n");
1918 		goto cleanup;
1919 	}
1920 
1921 	for (k = 0; k < dc_state->stream_count; k++) {
1922 		bundle->stream_update.stream = dc_state->streams[k];
1923 
1924 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1925 			bundle->surface_updates[m].surface =
1926 				dc_state->stream_status->plane_states[m];
1927 			bundle->surface_updates[m].surface->force_full_update =
1928 				true;
1929 		}
1930 		dc_commit_updates_for_stream(
1931 			dm->dc, bundle->surface_updates,
1932 			dc_state->stream_status->plane_count,
1933 			dc_state->streams[k], &bundle->stream_update, dc_state);
1934 	}
1935 
1936 cleanup:
1937 	kfree(bundle);
1938 
1939 	return;
1940 }
1941 
dm_set_dpms_off(struct dc_link * link)1942 static void dm_set_dpms_off(struct dc_link *link)
1943 {
1944 	struct dc_stream_state *stream_state;
1945 	struct amdgpu_dm_connector *aconnector = link->priv;
1946 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1947 	struct dc_stream_update stream_update;
1948 	bool dpms_off = true;
1949 
1950 	memset(&stream_update, 0, sizeof(stream_update));
1951 	stream_update.dpms_off = &dpms_off;
1952 
1953 	mutex_lock(&adev->dm.dc_lock);
1954 	stream_state = dc_stream_find_from_link(link);
1955 
1956 	if (stream_state == NULL) {
1957 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1958 		mutex_unlock(&adev->dm.dc_lock);
1959 		return;
1960 	}
1961 
1962 	stream_update.stream = stream_state;
1963 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1964 				     stream_state, &stream_update,
1965 				     stream_state->ctx->dc->current_state);
1966 	mutex_unlock(&adev->dm.dc_lock);
1967 }
1968 
dm_resume(void * handle)1969 static int dm_resume(void *handle)
1970 {
1971 	struct amdgpu_device *adev = handle;
1972 	struct drm_device *ddev = adev_to_drm(adev);
1973 	struct amdgpu_display_manager *dm = &adev->dm;
1974 	struct amdgpu_dm_connector *aconnector;
1975 	struct drm_connector *connector;
1976 	struct drm_connector_list_iter iter;
1977 	struct drm_crtc *crtc;
1978 	struct drm_crtc_state *new_crtc_state;
1979 	struct dm_crtc_state *dm_new_crtc_state;
1980 	struct drm_plane *plane;
1981 	struct drm_plane_state *new_plane_state;
1982 	struct dm_plane_state *dm_new_plane_state;
1983 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1984 	enum dc_connection_type new_connection_type = dc_connection_none;
1985 	struct dc_state *dc_state;
1986 	int i, r, j;
1987 
1988 	if (amdgpu_in_reset(adev)) {
1989 		dc_state = dm->cached_dc_state;
1990 
1991 		r = dm_dmub_hw_init(adev);
1992 		if (r)
1993 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1994 
1995 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1996 		dc_resume(dm->dc);
1997 
1998 		amdgpu_dm_irq_resume_early(adev);
1999 
2000 		for (i = 0; i < dc_state->stream_count; i++) {
2001 			dc_state->streams[i]->mode_changed = true;
2002 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2003 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2004 					= 0xffffffff;
2005 			}
2006 		}
2007 
2008 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2009 
2010 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2011 
2012 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2013 
2014 		dc_release_state(dm->cached_dc_state);
2015 		dm->cached_dc_state = NULL;
2016 
2017 		amdgpu_dm_irq_resume_late(adev);
2018 
2019 		mutex_unlock(&dm->dc_lock);
2020 
2021 		return 0;
2022 	}
2023 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2024 	dc_release_state(dm_state->context);
2025 	dm_state->context = dc_create_state(dm->dc);
2026 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2027 	dc_resource_state_construct(dm->dc, dm_state->context);
2028 
2029 	/* Before powering on DC we need to re-initialize DMUB. */
2030 	r = dm_dmub_hw_init(adev);
2031 	if (r)
2032 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2033 
2034 	/* power on hardware */
2035 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2036 
2037 	/* program HPD filter */
2038 	dc_resume(dm->dc);
2039 
2040 	/*
2041 	 * early enable HPD Rx IRQ, should be done before set mode as short
2042 	 * pulse interrupts are used for MST
2043 	 */
2044 	amdgpu_dm_irq_resume_early(adev);
2045 
2046 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2047 	s3_handle_mst(ddev, false);
2048 
2049 	/* Do detection*/
2050 	drm_connector_list_iter_begin(ddev, &iter);
2051 	drm_for_each_connector_iter(connector, &iter) {
2052 		aconnector = to_amdgpu_dm_connector(connector);
2053 
2054 		/*
2055 		 * this is the case when traversing through already created
2056 		 * MST connectors, should be skipped
2057 		 */
2058 		if (aconnector->dc_link &&
2059 		    aconnector->dc_link->type == dc_connection_mst_branch)
2060 			continue;
2061 
2062 		mutex_lock(&aconnector->hpd_lock);
2063 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2064 			DRM_ERROR("KMS: Failed to detect connector\n");
2065 
2066 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2067 			emulated_link_detect(aconnector->dc_link);
2068 		else
2069 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2070 
2071 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2072 			aconnector->fake_enable = false;
2073 
2074 		if (aconnector->dc_sink)
2075 			dc_sink_release(aconnector->dc_sink);
2076 		aconnector->dc_sink = NULL;
2077 		amdgpu_dm_update_connector_after_detect(aconnector);
2078 		mutex_unlock(&aconnector->hpd_lock);
2079 	}
2080 	drm_connector_list_iter_end(&iter);
2081 
2082 	/* Force mode set in atomic commit */
2083 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2084 		new_crtc_state->active_changed = true;
2085 
2086 	/*
2087 	 * atomic_check is expected to create the dc states. We need to release
2088 	 * them here, since they were duplicated as part of the suspend
2089 	 * procedure.
2090 	 */
2091 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2092 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2093 		if (dm_new_crtc_state->stream) {
2094 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2095 			dc_stream_release(dm_new_crtc_state->stream);
2096 			dm_new_crtc_state->stream = NULL;
2097 		}
2098 	}
2099 
2100 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2101 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2102 		if (dm_new_plane_state->dc_state) {
2103 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2104 			dc_plane_state_release(dm_new_plane_state->dc_state);
2105 			dm_new_plane_state->dc_state = NULL;
2106 		}
2107 	}
2108 
2109 	drm_atomic_helper_resume(ddev, dm->cached_state);
2110 
2111 	dm->cached_state = NULL;
2112 
2113 	amdgpu_dm_irq_resume_late(adev);
2114 
2115 	amdgpu_dm_smu_write_watermarks_table(adev);
2116 
2117 	return 0;
2118 }
2119 
2120 /**
2121  * DOC: DM Lifecycle
2122  *
2123  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2124  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2125  * the base driver's device list to be initialized and torn down accordingly.
2126  *
2127  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2128  */
2129 
2130 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2131 	.name = "dm",
2132 	.early_init = dm_early_init,
2133 	.late_init = dm_late_init,
2134 	.sw_init = dm_sw_init,
2135 	.sw_fini = dm_sw_fini,
2136 	.hw_init = dm_hw_init,
2137 	.hw_fini = dm_hw_fini,
2138 	.suspend = dm_suspend,
2139 	.resume = dm_resume,
2140 	.is_idle = dm_is_idle,
2141 	.wait_for_idle = dm_wait_for_idle,
2142 	.check_soft_reset = dm_check_soft_reset,
2143 	.soft_reset = dm_soft_reset,
2144 	.set_clockgating_state = dm_set_clockgating_state,
2145 	.set_powergating_state = dm_set_powergating_state,
2146 };
2147 
2148 const struct amdgpu_ip_block_version dm_ip_block =
2149 {
2150 	.type = AMD_IP_BLOCK_TYPE_DCE,
2151 	.major = 1,
2152 	.minor = 0,
2153 	.rev = 0,
2154 	.funcs = &amdgpu_dm_funcs,
2155 };
2156 
2157 
2158 /**
2159  * DOC: atomic
2160  *
2161  * *WIP*
2162  */
2163 
2164 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2165 	.fb_create = amdgpu_display_user_framebuffer_create,
2166 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2167 	.atomic_check = amdgpu_dm_atomic_check,
2168 	.atomic_commit = amdgpu_dm_atomic_commit,
2169 };
2170 
2171 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2172 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2173 };
2174 
update_connector_ext_caps(struct amdgpu_dm_connector * aconnector)2175 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2176 {
2177 	u32 max_avg, min_cll, max, min, q, r;
2178 	struct amdgpu_dm_backlight_caps *caps;
2179 	struct amdgpu_display_manager *dm;
2180 	struct drm_connector *conn_base;
2181 	struct amdgpu_device *adev;
2182 	struct dc_link *link = NULL;
2183 	static const u8 pre_computed_values[] = {
2184 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2185 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2186 
2187 	if (!aconnector || !aconnector->dc_link)
2188 		return;
2189 
2190 	link = aconnector->dc_link;
2191 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2192 		return;
2193 
2194 	conn_base = &aconnector->base;
2195 	adev = drm_to_adev(conn_base->dev);
2196 	dm = &adev->dm;
2197 	caps = &dm->backlight_caps;
2198 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2199 	caps->aux_support = false;
2200 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2201 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2202 
2203 	if (caps->ext_caps->bits.oled == 1 /*||
2204 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2205 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2206 		caps->aux_support = true;
2207 
2208 	if (amdgpu_backlight == 0)
2209 		caps->aux_support = false;
2210 	else if (amdgpu_backlight == 1)
2211 		caps->aux_support = true;
2212 
2213 	/* From the specification (CTA-861-G), for calculating the maximum
2214 	 * luminance we need to use:
2215 	 *	Luminance = 50*2**(CV/32)
2216 	 * Where CV is a one-byte value.
2217 	 * For calculating this expression we may need float point precision;
2218 	 * to avoid this complexity level, we take advantage that CV is divided
2219 	 * by a constant. From the Euclids division algorithm, we know that CV
2220 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2221 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2222 	 * need to pre-compute the value of r/32. For pre-computing the values
2223 	 * We just used the following Ruby line:
2224 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2225 	 * The results of the above expressions can be verified at
2226 	 * pre_computed_values.
2227 	 */
2228 	q = max_avg >> 5;
2229 	r = max_avg % 32;
2230 	max = (1 << q) * pre_computed_values[r];
2231 
2232 	// min luminance: maxLum * (CV/255)^2 / 100
2233 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2234 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2235 
2236 	caps->aux_max_input_signal = max;
2237 	caps->aux_min_input_signal = min;
2238 }
2239 
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector * aconnector)2240 void amdgpu_dm_update_connector_after_detect(
2241 		struct amdgpu_dm_connector *aconnector)
2242 {
2243 	struct drm_connector *connector = &aconnector->base;
2244 	struct drm_device *dev = connector->dev;
2245 	struct dc_sink *sink;
2246 
2247 	/* MST handled by drm_mst framework */
2248 	if (aconnector->mst_mgr.mst_state == true)
2249 		return;
2250 
2251 	sink = aconnector->dc_link->local_sink;
2252 	if (sink)
2253 		dc_sink_retain(sink);
2254 
2255 	/*
2256 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2257 	 * the connector sink is set to either fake or physical sink depends on link status.
2258 	 * Skip if already done during boot.
2259 	 */
2260 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2261 			&& aconnector->dc_em_sink) {
2262 
2263 		/*
2264 		 * For S3 resume with headless use eml_sink to fake stream
2265 		 * because on resume connector->sink is set to NULL
2266 		 */
2267 		mutex_lock(&dev->mode_config.mutex);
2268 
2269 		if (sink) {
2270 			if (aconnector->dc_sink) {
2271 				amdgpu_dm_update_freesync_caps(connector, NULL);
2272 				/*
2273 				 * retain and release below are used to
2274 				 * bump up refcount for sink because the link doesn't point
2275 				 * to it anymore after disconnect, so on next crtc to connector
2276 				 * reshuffle by UMD we will get into unwanted dc_sink release
2277 				 */
2278 				dc_sink_release(aconnector->dc_sink);
2279 			}
2280 			aconnector->dc_sink = sink;
2281 			dc_sink_retain(aconnector->dc_sink);
2282 			amdgpu_dm_update_freesync_caps(connector,
2283 					aconnector->edid);
2284 		} else {
2285 			amdgpu_dm_update_freesync_caps(connector, NULL);
2286 			if (!aconnector->dc_sink) {
2287 				aconnector->dc_sink = aconnector->dc_em_sink;
2288 				dc_sink_retain(aconnector->dc_sink);
2289 			}
2290 		}
2291 
2292 		mutex_unlock(&dev->mode_config.mutex);
2293 
2294 		if (sink)
2295 			dc_sink_release(sink);
2296 		return;
2297 	}
2298 
2299 	/*
2300 	 * TODO: temporary guard to look for proper fix
2301 	 * if this sink is MST sink, we should not do anything
2302 	 */
2303 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2304 		dc_sink_release(sink);
2305 		return;
2306 	}
2307 
2308 	if (aconnector->dc_sink == sink) {
2309 		/*
2310 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2311 		 * Do nothing!!
2312 		 */
2313 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2314 				aconnector->connector_id);
2315 		if (sink)
2316 			dc_sink_release(sink);
2317 		return;
2318 	}
2319 
2320 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2321 		aconnector->connector_id, aconnector->dc_sink, sink);
2322 
2323 	mutex_lock(&dev->mode_config.mutex);
2324 
2325 	/*
2326 	 * 1. Update status of the drm connector
2327 	 * 2. Send an event and let userspace tell us what to do
2328 	 */
2329 	if (sink) {
2330 		/*
2331 		 * TODO: check if we still need the S3 mode update workaround.
2332 		 * If yes, put it here.
2333 		 */
2334 		if (aconnector->dc_sink) {
2335 			amdgpu_dm_update_freesync_caps(connector, NULL);
2336 			dc_sink_release(aconnector->dc_sink);
2337 		}
2338 
2339 		aconnector->dc_sink = sink;
2340 		dc_sink_retain(aconnector->dc_sink);
2341 		if (sink->dc_edid.length == 0) {
2342 			aconnector->edid = NULL;
2343 			if (aconnector->dc_link->aux_mode) {
2344 				drm_dp_cec_unset_edid(
2345 					&aconnector->dm_dp_aux.aux);
2346 			}
2347 		} else {
2348 			aconnector->edid =
2349 				(struct edid *)sink->dc_edid.raw_edid;
2350 
2351 			if (aconnector->dc_link->aux_mode)
2352 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2353 						    aconnector->edid);
2354 		}
2355 
2356 		drm_connector_update_edid_property(connector, aconnector->edid);
2357 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2358 		update_connector_ext_caps(aconnector);
2359 	} else {
2360 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2361 		amdgpu_dm_update_freesync_caps(connector, NULL);
2362 		drm_connector_update_edid_property(connector, NULL);
2363 		aconnector->num_modes = 0;
2364 		dc_sink_release(aconnector->dc_sink);
2365 		aconnector->dc_sink = NULL;
2366 		aconnector->edid = NULL;
2367 #ifdef CONFIG_DRM_AMD_DC_HDCP
2368 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2369 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2370 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2371 #endif
2372 	}
2373 
2374 	mutex_unlock(&dev->mode_config.mutex);
2375 
2376 	update_subconnector_property(aconnector);
2377 
2378 	if (sink)
2379 		dc_sink_release(sink);
2380 }
2381 
handle_hpd_irq(void * param)2382 static void handle_hpd_irq(void *param)
2383 {
2384 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2385 	struct drm_connector *connector = &aconnector->base;
2386 	struct drm_device *dev = connector->dev;
2387 	enum dc_connection_type new_connection_type = dc_connection_none;
2388 #ifdef CONFIG_DRM_AMD_DC_HDCP
2389 	struct amdgpu_device *adev = drm_to_adev(dev);
2390 #endif
2391 
2392 	/*
2393 	 * In case of failure or MST no need to update connector status or notify the OS
2394 	 * since (for MST case) MST does this in its own context.
2395 	 */
2396 	mutex_lock(&aconnector->hpd_lock);
2397 
2398 #ifdef CONFIG_DRM_AMD_DC_HDCP
2399 	if (adev->dm.hdcp_workqueue)
2400 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2401 #endif
2402 	if (aconnector->fake_enable)
2403 		aconnector->fake_enable = false;
2404 
2405 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2406 		DRM_ERROR("KMS: Failed to detect connector\n");
2407 
2408 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2409 		emulated_link_detect(aconnector->dc_link);
2410 
2411 
2412 		drm_modeset_lock_all(dev);
2413 		dm_restore_drm_connector_state(dev, connector);
2414 		drm_modeset_unlock_all(dev);
2415 
2416 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2417 			drm_kms_helper_hotplug_event(dev);
2418 
2419 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2420 		if (new_connection_type == dc_connection_none &&
2421 		    aconnector->dc_link->type == dc_connection_none)
2422 			dm_set_dpms_off(aconnector->dc_link);
2423 
2424 		amdgpu_dm_update_connector_after_detect(aconnector);
2425 
2426 		drm_modeset_lock_all(dev);
2427 		dm_restore_drm_connector_state(dev, connector);
2428 		drm_modeset_unlock_all(dev);
2429 
2430 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2431 			drm_kms_helper_hotplug_event(dev);
2432 	}
2433 	mutex_unlock(&aconnector->hpd_lock);
2434 
2435 }
2436 
dm_handle_hpd_rx_irq(struct amdgpu_dm_connector * aconnector)2437 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2438 {
2439 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2440 	uint8_t dret;
2441 	bool new_irq_handled = false;
2442 	int dpcd_addr;
2443 	int dpcd_bytes_to_read;
2444 
2445 	const int max_process_count = 30;
2446 	int process_count = 0;
2447 
2448 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2449 
2450 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2451 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2452 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2453 		dpcd_addr = DP_SINK_COUNT;
2454 	} else {
2455 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2456 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2457 		dpcd_addr = DP_SINK_COUNT_ESI;
2458 	}
2459 
2460 	dret = drm_dp_dpcd_read(
2461 		&aconnector->dm_dp_aux.aux,
2462 		dpcd_addr,
2463 		esi,
2464 		dpcd_bytes_to_read);
2465 
2466 	while (dret == dpcd_bytes_to_read &&
2467 		process_count < max_process_count) {
2468 		uint8_t retry;
2469 		dret = 0;
2470 
2471 		process_count++;
2472 
2473 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2474 		/* handle HPD short pulse irq */
2475 		if (aconnector->mst_mgr.mst_state)
2476 			drm_dp_mst_hpd_irq(
2477 				&aconnector->mst_mgr,
2478 				esi,
2479 				&new_irq_handled);
2480 
2481 		if (new_irq_handled) {
2482 			/* ACK at DPCD to notify down stream */
2483 			const int ack_dpcd_bytes_to_write =
2484 				dpcd_bytes_to_read - 1;
2485 
2486 			for (retry = 0; retry < 3; retry++) {
2487 				uint8_t wret;
2488 
2489 				wret = drm_dp_dpcd_write(
2490 					&aconnector->dm_dp_aux.aux,
2491 					dpcd_addr + 1,
2492 					&esi[1],
2493 					ack_dpcd_bytes_to_write);
2494 				if (wret == ack_dpcd_bytes_to_write)
2495 					break;
2496 			}
2497 
2498 			/* check if there is new irq to be handled */
2499 			dret = drm_dp_dpcd_read(
2500 				&aconnector->dm_dp_aux.aux,
2501 				dpcd_addr,
2502 				esi,
2503 				dpcd_bytes_to_read);
2504 
2505 			new_irq_handled = false;
2506 		} else {
2507 			break;
2508 		}
2509 	}
2510 
2511 	if (process_count == max_process_count)
2512 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2513 }
2514 
handle_hpd_rx_irq(void * param)2515 static void handle_hpd_rx_irq(void *param)
2516 {
2517 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2518 	struct drm_connector *connector = &aconnector->base;
2519 	struct drm_device *dev = connector->dev;
2520 	struct dc_link *dc_link = aconnector->dc_link;
2521 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2522 	enum dc_connection_type new_connection_type = dc_connection_none;
2523 #ifdef CONFIG_DRM_AMD_DC_HDCP
2524 	union hpd_irq_data hpd_irq_data;
2525 	struct amdgpu_device *adev = drm_to_adev(dev);
2526 
2527 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2528 #endif
2529 
2530 	/*
2531 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2532 	 * conflict, after implement i2c helper, this mutex should be
2533 	 * retired.
2534 	 */
2535 	if (dc_link->type != dc_connection_mst_branch)
2536 		mutex_lock(&aconnector->hpd_lock);
2537 
2538 
2539 #ifdef CONFIG_DRM_AMD_DC_HDCP
2540 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2541 #else
2542 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2543 #endif
2544 			!is_mst_root_connector) {
2545 		/* Downstream Port status changed. */
2546 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2547 			DRM_ERROR("KMS: Failed to detect connector\n");
2548 
2549 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2550 			emulated_link_detect(dc_link);
2551 
2552 			if (aconnector->fake_enable)
2553 				aconnector->fake_enable = false;
2554 
2555 			amdgpu_dm_update_connector_after_detect(aconnector);
2556 
2557 
2558 			drm_modeset_lock_all(dev);
2559 			dm_restore_drm_connector_state(dev, connector);
2560 			drm_modeset_unlock_all(dev);
2561 
2562 			drm_kms_helper_hotplug_event(dev);
2563 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2564 
2565 			if (aconnector->fake_enable)
2566 				aconnector->fake_enable = false;
2567 
2568 			amdgpu_dm_update_connector_after_detect(aconnector);
2569 
2570 
2571 			drm_modeset_lock_all(dev);
2572 			dm_restore_drm_connector_state(dev, connector);
2573 			drm_modeset_unlock_all(dev);
2574 
2575 			drm_kms_helper_hotplug_event(dev);
2576 		}
2577 	}
2578 #ifdef CONFIG_DRM_AMD_DC_HDCP
2579 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2580 		if (adev->dm.hdcp_workqueue)
2581 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2582 	}
2583 #endif
2584 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2585 	    (dc_link->type == dc_connection_mst_branch))
2586 		dm_handle_hpd_rx_irq(aconnector);
2587 
2588 	if (dc_link->type != dc_connection_mst_branch) {
2589 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2590 		mutex_unlock(&aconnector->hpd_lock);
2591 	}
2592 }
2593 
register_hpd_handlers(struct amdgpu_device * adev)2594 static void register_hpd_handlers(struct amdgpu_device *adev)
2595 {
2596 	struct drm_device *dev = adev_to_drm(adev);
2597 	struct drm_connector *connector;
2598 	struct amdgpu_dm_connector *aconnector;
2599 	const struct dc_link *dc_link;
2600 	struct dc_interrupt_params int_params = {0};
2601 
2602 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2603 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2604 
2605 	list_for_each_entry(connector,
2606 			&dev->mode_config.connector_list, head)	{
2607 
2608 		aconnector = to_amdgpu_dm_connector(connector);
2609 		dc_link = aconnector->dc_link;
2610 
2611 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2612 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2613 			int_params.irq_source = dc_link->irq_source_hpd;
2614 
2615 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2616 					handle_hpd_irq,
2617 					(void *) aconnector);
2618 		}
2619 
2620 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2621 
2622 			/* Also register for DP short pulse (hpd_rx). */
2623 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2624 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2625 
2626 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2627 					handle_hpd_rx_irq,
2628 					(void *) aconnector);
2629 		}
2630 	}
2631 }
2632 
2633 #if defined(CONFIG_DRM_AMD_DC_SI)
2634 /* Register IRQ sources and initialize IRQ callbacks */
dce60_register_irq_handlers(struct amdgpu_device * adev)2635 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2636 {
2637 	struct dc *dc = adev->dm.dc;
2638 	struct common_irq_params *c_irq_params;
2639 	struct dc_interrupt_params int_params = {0};
2640 	int r;
2641 	int i;
2642 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2643 
2644 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2645 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2646 
2647 	/*
2648 	 * Actions of amdgpu_irq_add_id():
2649 	 * 1. Register a set() function with base driver.
2650 	 *    Base driver will call set() function to enable/disable an
2651 	 *    interrupt in DC hardware.
2652 	 * 2. Register amdgpu_dm_irq_handler().
2653 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2654 	 *    coming from DC hardware.
2655 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2656 	 *    for acknowledging and handling. */
2657 
2658 	/* Use VBLANK interrupt */
2659 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2660 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2661 		if (r) {
2662 			DRM_ERROR("Failed to add crtc irq id!\n");
2663 			return r;
2664 		}
2665 
2666 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2667 		int_params.irq_source =
2668 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2669 
2670 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2671 
2672 		c_irq_params->adev = adev;
2673 		c_irq_params->irq_src = int_params.irq_source;
2674 
2675 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2676 				dm_crtc_high_irq, c_irq_params);
2677 	}
2678 
2679 	/* Use GRPH_PFLIP interrupt */
2680 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2681 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2682 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2683 		if (r) {
2684 			DRM_ERROR("Failed to add page flip irq id!\n");
2685 			return r;
2686 		}
2687 
2688 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2689 		int_params.irq_source =
2690 			dc_interrupt_to_irq_source(dc, i, 0);
2691 
2692 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2693 
2694 		c_irq_params->adev = adev;
2695 		c_irq_params->irq_src = int_params.irq_source;
2696 
2697 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2698 				dm_pflip_high_irq, c_irq_params);
2699 
2700 	}
2701 
2702 	/* HPD */
2703 	r = amdgpu_irq_add_id(adev, client_id,
2704 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2705 	if (r) {
2706 		DRM_ERROR("Failed to add hpd irq id!\n");
2707 		return r;
2708 	}
2709 
2710 	register_hpd_handlers(adev);
2711 
2712 	return 0;
2713 }
2714 #endif
2715 
2716 /* Register IRQ sources and initialize IRQ callbacks */
dce110_register_irq_handlers(struct amdgpu_device * adev)2717 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2718 {
2719 	struct dc *dc = adev->dm.dc;
2720 	struct common_irq_params *c_irq_params;
2721 	struct dc_interrupt_params int_params = {0};
2722 	int r;
2723 	int i;
2724 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2725 
2726 	if (adev->asic_type >= CHIP_VEGA10)
2727 		client_id = SOC15_IH_CLIENTID_DCE;
2728 
2729 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2730 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2731 
2732 	/*
2733 	 * Actions of amdgpu_irq_add_id():
2734 	 * 1. Register a set() function with base driver.
2735 	 *    Base driver will call set() function to enable/disable an
2736 	 *    interrupt in DC hardware.
2737 	 * 2. Register amdgpu_dm_irq_handler().
2738 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2739 	 *    coming from DC hardware.
2740 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2741 	 *    for acknowledging and handling. */
2742 
2743 	/* Use VBLANK interrupt */
2744 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2745 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2746 		if (r) {
2747 			DRM_ERROR("Failed to add crtc irq id!\n");
2748 			return r;
2749 		}
2750 
2751 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2752 		int_params.irq_source =
2753 			dc_interrupt_to_irq_source(dc, i, 0);
2754 
2755 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2756 
2757 		c_irq_params->adev = adev;
2758 		c_irq_params->irq_src = int_params.irq_source;
2759 
2760 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2761 				dm_crtc_high_irq, c_irq_params);
2762 	}
2763 
2764 	/* Use VUPDATE interrupt */
2765 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2766 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2767 		if (r) {
2768 			DRM_ERROR("Failed to add vupdate irq id!\n");
2769 			return r;
2770 		}
2771 
2772 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2773 		int_params.irq_source =
2774 			dc_interrupt_to_irq_source(dc, i, 0);
2775 
2776 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2777 
2778 		c_irq_params->adev = adev;
2779 		c_irq_params->irq_src = int_params.irq_source;
2780 
2781 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2782 				dm_vupdate_high_irq, c_irq_params);
2783 	}
2784 
2785 	/* Use GRPH_PFLIP interrupt */
2786 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2787 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2788 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2789 		if (r) {
2790 			DRM_ERROR("Failed to add page flip irq id!\n");
2791 			return r;
2792 		}
2793 
2794 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2795 		int_params.irq_source =
2796 			dc_interrupt_to_irq_source(dc, i, 0);
2797 
2798 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2799 
2800 		c_irq_params->adev = adev;
2801 		c_irq_params->irq_src = int_params.irq_source;
2802 
2803 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2804 				dm_pflip_high_irq, c_irq_params);
2805 
2806 	}
2807 
2808 	/* HPD */
2809 	r = amdgpu_irq_add_id(adev, client_id,
2810 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2811 	if (r) {
2812 		DRM_ERROR("Failed to add hpd irq id!\n");
2813 		return r;
2814 	}
2815 
2816 	register_hpd_handlers(adev);
2817 
2818 	return 0;
2819 }
2820 
2821 #if defined(CONFIG_DRM_AMD_DC_DCN)
2822 /* Register IRQ sources and initialize IRQ callbacks */
dcn10_register_irq_handlers(struct amdgpu_device * adev)2823 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2824 {
2825 	struct dc *dc = adev->dm.dc;
2826 	struct common_irq_params *c_irq_params;
2827 	struct dc_interrupt_params int_params = {0};
2828 	int r;
2829 	int i;
2830 
2831 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2832 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2833 
2834 	/*
2835 	 * Actions of amdgpu_irq_add_id():
2836 	 * 1. Register a set() function with base driver.
2837 	 *    Base driver will call set() function to enable/disable an
2838 	 *    interrupt in DC hardware.
2839 	 * 2. Register amdgpu_dm_irq_handler().
2840 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2841 	 *    coming from DC hardware.
2842 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2843 	 *    for acknowledging and handling.
2844 	 */
2845 
2846 	/* Use VSTARTUP interrupt */
2847 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2848 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2849 			i++) {
2850 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2851 
2852 		if (r) {
2853 			DRM_ERROR("Failed to add crtc irq id!\n");
2854 			return r;
2855 		}
2856 
2857 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2858 		int_params.irq_source =
2859 			dc_interrupt_to_irq_source(dc, i, 0);
2860 
2861 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2862 
2863 		c_irq_params->adev = adev;
2864 		c_irq_params->irq_src = int_params.irq_source;
2865 
2866 		amdgpu_dm_irq_register_interrupt(
2867 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2868 	}
2869 
2870 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2871 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2872 	 * to trigger at end of each vblank, regardless of state of the lock,
2873 	 * matching DCE behaviour.
2874 	 */
2875 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2876 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2877 	     i++) {
2878 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2879 
2880 		if (r) {
2881 			DRM_ERROR("Failed to add vupdate irq id!\n");
2882 			return r;
2883 		}
2884 
2885 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2886 		int_params.irq_source =
2887 			dc_interrupt_to_irq_source(dc, i, 0);
2888 
2889 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2890 
2891 		c_irq_params->adev = adev;
2892 		c_irq_params->irq_src = int_params.irq_source;
2893 
2894 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2895 				dm_vupdate_high_irq, c_irq_params);
2896 	}
2897 
2898 	/* Use GRPH_PFLIP interrupt */
2899 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2900 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2901 			i++) {
2902 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2903 		if (r) {
2904 			DRM_ERROR("Failed to add page flip irq id!\n");
2905 			return r;
2906 		}
2907 
2908 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2909 		int_params.irq_source =
2910 			dc_interrupt_to_irq_source(dc, i, 0);
2911 
2912 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2913 
2914 		c_irq_params->adev = adev;
2915 		c_irq_params->irq_src = int_params.irq_source;
2916 
2917 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2918 				dm_pflip_high_irq, c_irq_params);
2919 
2920 	}
2921 
2922 	/* HPD */
2923 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2924 			&adev->hpd_irq);
2925 	if (r) {
2926 		DRM_ERROR("Failed to add hpd irq id!\n");
2927 		return r;
2928 	}
2929 
2930 	register_hpd_handlers(adev);
2931 
2932 	return 0;
2933 }
2934 #endif
2935 
2936 /*
2937  * Acquires the lock for the atomic state object and returns
2938  * the new atomic state.
2939  *
2940  * This should only be called during atomic check.
2941  */
dm_atomic_get_state(struct drm_atomic_state * state,struct dm_atomic_state ** dm_state)2942 static int dm_atomic_get_state(struct drm_atomic_state *state,
2943 			       struct dm_atomic_state **dm_state)
2944 {
2945 	struct drm_device *dev = state->dev;
2946 	struct amdgpu_device *adev = drm_to_adev(dev);
2947 	struct amdgpu_display_manager *dm = &adev->dm;
2948 	struct drm_private_state *priv_state;
2949 
2950 	if (*dm_state)
2951 		return 0;
2952 
2953 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2954 	if (IS_ERR(priv_state))
2955 		return PTR_ERR(priv_state);
2956 
2957 	*dm_state = to_dm_atomic_state(priv_state);
2958 
2959 	return 0;
2960 }
2961 
2962 static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state * state)2963 dm_atomic_get_new_state(struct drm_atomic_state *state)
2964 {
2965 	struct drm_device *dev = state->dev;
2966 	struct amdgpu_device *adev = drm_to_adev(dev);
2967 	struct amdgpu_display_manager *dm = &adev->dm;
2968 	struct drm_private_obj *obj;
2969 	struct drm_private_state *new_obj_state;
2970 	int i;
2971 
2972 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2973 		if (obj->funcs == dm->atomic_obj.funcs)
2974 			return to_dm_atomic_state(new_obj_state);
2975 	}
2976 
2977 	return NULL;
2978 }
2979 
2980 static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj * obj)2981 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2982 {
2983 	struct dm_atomic_state *old_state, *new_state;
2984 
2985 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2986 	if (!new_state)
2987 		return NULL;
2988 
2989 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2990 
2991 	old_state = to_dm_atomic_state(obj->state);
2992 
2993 	if (old_state && old_state->context)
2994 		new_state->context = dc_copy_state(old_state->context);
2995 
2996 	if (!new_state->context) {
2997 		kfree(new_state);
2998 		return NULL;
2999 	}
3000 
3001 	return &new_state->base;
3002 }
3003 
dm_atomic_destroy_state(struct drm_private_obj * obj,struct drm_private_state * state)3004 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3005 				    struct drm_private_state *state)
3006 {
3007 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3008 
3009 	if (dm_state && dm_state->context)
3010 		dc_release_state(dm_state->context);
3011 
3012 	kfree(dm_state);
3013 }
3014 
3015 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3016 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3017 	.atomic_destroy_state = dm_atomic_destroy_state,
3018 };
3019 
amdgpu_dm_mode_config_init(struct amdgpu_device * adev)3020 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3021 {
3022 	struct dm_atomic_state *state;
3023 	int r;
3024 
3025 	adev->mode_info.mode_config_initialized = true;
3026 
3027 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3028 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3029 
3030 	adev_to_drm(adev)->mode_config.max_width = 16384;
3031 	adev_to_drm(adev)->mode_config.max_height = 16384;
3032 
3033 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3034 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3035 	/* indicates support for immediate flip */
3036 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3037 
3038 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3039 
3040 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3041 	if (!state)
3042 		return -ENOMEM;
3043 
3044 	state->context = dc_create_state(adev->dm.dc);
3045 	if (!state->context) {
3046 		kfree(state);
3047 		return -ENOMEM;
3048 	}
3049 
3050 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3051 
3052 	drm_atomic_private_obj_init(adev_to_drm(adev),
3053 				    &adev->dm.atomic_obj,
3054 				    &state->base,
3055 				    &dm_atomic_state_funcs);
3056 
3057 	r = amdgpu_display_modeset_create_props(adev);
3058 	if (r) {
3059 		dc_release_state(state->context);
3060 		kfree(state);
3061 		return r;
3062 	}
3063 
3064 	r = amdgpu_dm_audio_init(adev);
3065 	if (r) {
3066 		dc_release_state(state->context);
3067 		kfree(state);
3068 		return r;
3069 	}
3070 
3071 	return 0;
3072 }
3073 
3074 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3075 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3076 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3077 
3078 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3079 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3080 
amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager * dm)3081 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3082 {
3083 #if defined(CONFIG_ACPI)
3084 	struct amdgpu_dm_backlight_caps caps;
3085 
3086 	memset(&caps, 0, sizeof(caps));
3087 
3088 	if (dm->backlight_caps.caps_valid)
3089 		return;
3090 
3091 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3092 	if (caps.caps_valid) {
3093 		dm->backlight_caps.caps_valid = true;
3094 		if (caps.aux_support)
3095 			return;
3096 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3097 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3098 	} else {
3099 		dm->backlight_caps.min_input_signal =
3100 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3101 		dm->backlight_caps.max_input_signal =
3102 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3103 	}
3104 #else
3105 	if (dm->backlight_caps.aux_support)
3106 		return;
3107 
3108 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3109 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3110 #endif
3111 }
3112 
get_brightness_range(const struct amdgpu_dm_backlight_caps * caps,unsigned * min,unsigned * max)3113 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3114 				unsigned *min, unsigned *max)
3115 {
3116 	if (!caps)
3117 		return 0;
3118 
3119 	if (caps->aux_support) {
3120 		// Firmware limits are in nits, DC API wants millinits.
3121 		*max = 1000 * caps->aux_max_input_signal;
3122 		*min = 1000 * caps->aux_min_input_signal;
3123 	} else {
3124 		// Firmware limits are 8-bit, PWM control is 16-bit.
3125 		*max = 0x101 * caps->max_input_signal;
3126 		*min = 0x101 * caps->min_input_signal;
3127 	}
3128 	return 1;
3129 }
3130 
convert_brightness_from_user(const struct amdgpu_dm_backlight_caps * caps,uint32_t brightness)3131 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3132 					uint32_t brightness)
3133 {
3134 	unsigned min, max;
3135 
3136 	if (!get_brightness_range(caps, &min, &max))
3137 		return brightness;
3138 
3139 	// Rescale 0..255 to min..max
3140 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3141 				       AMDGPU_MAX_BL_LEVEL);
3142 }
3143 
convert_brightness_to_user(const struct amdgpu_dm_backlight_caps * caps,uint32_t brightness)3144 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3145 				      uint32_t brightness)
3146 {
3147 	unsigned min, max;
3148 
3149 	if (!get_brightness_range(caps, &min, &max))
3150 		return brightness;
3151 
3152 	if (brightness < min)
3153 		return 0;
3154 	// Rescale min..max to 0..255
3155 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3156 				 max - min);
3157 }
3158 
amdgpu_dm_backlight_update_status(struct backlight_device * bd)3159 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3160 {
3161 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3162 	struct amdgpu_dm_backlight_caps caps;
3163 	struct dc_link *link = NULL;
3164 	u32 brightness;
3165 	bool rc;
3166 
3167 	amdgpu_dm_update_backlight_caps(dm);
3168 	caps = dm->backlight_caps;
3169 
3170 	link = (struct dc_link *)dm->backlight_link;
3171 
3172 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3173 	// Change brightness based on AUX property
3174 	if (caps.aux_support)
3175 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3176 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3177 	else
3178 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3179 
3180 	return rc ? 0 : 1;
3181 }
3182 
amdgpu_dm_backlight_get_brightness(struct backlight_device * bd)3183 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3184 {
3185 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3186 	struct amdgpu_dm_backlight_caps caps;
3187 
3188 	amdgpu_dm_update_backlight_caps(dm);
3189 	caps = dm->backlight_caps;
3190 
3191 	if (caps.aux_support) {
3192 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3193 		u32 avg, peak;
3194 		bool rc;
3195 
3196 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3197 		if (!rc)
3198 			return bd->props.brightness;
3199 		return convert_brightness_to_user(&caps, avg);
3200 	} else {
3201 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3202 
3203 		if (ret == DC_ERROR_UNEXPECTED)
3204 			return bd->props.brightness;
3205 		return convert_brightness_to_user(&caps, ret);
3206 	}
3207 }
3208 
3209 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3210 	.options = BL_CORE_SUSPENDRESUME,
3211 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3212 	.update_status	= amdgpu_dm_backlight_update_status,
3213 };
3214 
3215 static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager * dm)3216 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3217 {
3218 	char bl_name[16];
3219 	struct backlight_properties props = { 0 };
3220 
3221 	amdgpu_dm_update_backlight_caps(dm);
3222 
3223 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3224 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3225 	props.type = BACKLIGHT_RAW;
3226 
3227 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3228 		 adev_to_drm(dm->adev)->primary->index);
3229 
3230 	dm->backlight_dev = backlight_device_register(bl_name,
3231 						      adev_to_drm(dm->adev)->dev,
3232 						      dm,
3233 						      &amdgpu_dm_backlight_ops,
3234 						      &props);
3235 
3236 	if (IS_ERR(dm->backlight_dev))
3237 		DRM_ERROR("DM: Backlight registration failed!\n");
3238 	else
3239 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3240 }
3241 
3242 #endif
3243 
initialize_plane(struct amdgpu_display_manager * dm,struct amdgpu_mode_info * mode_info,int plane_id,enum drm_plane_type plane_type,const struct dc_plane_cap * plane_cap)3244 static int initialize_plane(struct amdgpu_display_manager *dm,
3245 			    struct amdgpu_mode_info *mode_info, int plane_id,
3246 			    enum drm_plane_type plane_type,
3247 			    const struct dc_plane_cap *plane_cap)
3248 {
3249 	struct drm_plane *plane;
3250 	unsigned long possible_crtcs;
3251 	int ret = 0;
3252 
3253 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3254 	if (!plane) {
3255 		DRM_ERROR("KMS: Failed to allocate plane\n");
3256 		return -ENOMEM;
3257 	}
3258 	plane->type = plane_type;
3259 
3260 	/*
3261 	 * HACK: IGT tests expect that the primary plane for a CRTC
3262 	 * can only have one possible CRTC. Only expose support for
3263 	 * any CRTC if they're not going to be used as a primary plane
3264 	 * for a CRTC - like overlay or underlay planes.
3265 	 */
3266 	possible_crtcs = 1 << plane_id;
3267 	if (plane_id >= dm->dc->caps.max_streams)
3268 		possible_crtcs = 0xff;
3269 
3270 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3271 
3272 	if (ret) {
3273 		DRM_ERROR("KMS: Failed to initialize plane\n");
3274 		kfree(plane);
3275 		return ret;
3276 	}
3277 
3278 	if (mode_info)
3279 		mode_info->planes[plane_id] = plane;
3280 
3281 	return ret;
3282 }
3283 
3284 
register_backlight_device(struct amdgpu_display_manager * dm,struct dc_link * link)3285 static void register_backlight_device(struct amdgpu_display_manager *dm,
3286 				      struct dc_link *link)
3287 {
3288 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3289 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3290 
3291 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3292 	    link->type != dc_connection_none) {
3293 		/*
3294 		 * Event if registration failed, we should continue with
3295 		 * DM initialization because not having a backlight control
3296 		 * is better then a black screen.
3297 		 */
3298 		amdgpu_dm_register_backlight_device(dm);
3299 
3300 		if (dm->backlight_dev)
3301 			dm->backlight_link = link;
3302 	}
3303 #endif
3304 }
3305 
3306 
3307 /*
3308  * In this architecture, the association
3309  * connector -> encoder -> crtc
3310  * id not really requried. The crtc and connector will hold the
3311  * display_index as an abstraction to use with DAL component
3312  *
3313  * Returns 0 on success
3314  */
amdgpu_dm_initialize_drm_device(struct amdgpu_device * adev)3315 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3316 {
3317 	struct amdgpu_display_manager *dm = &adev->dm;
3318 	int32_t i;
3319 	struct amdgpu_dm_connector *aconnector = NULL;
3320 	struct amdgpu_encoder *aencoder = NULL;
3321 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3322 	uint32_t link_cnt;
3323 	int32_t primary_planes;
3324 	enum dc_connection_type new_connection_type = dc_connection_none;
3325 	const struct dc_plane_cap *plane;
3326 
3327 	dm->display_indexes_num = dm->dc->caps.max_streams;
3328 	/* Update the actual used number of crtc */
3329 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3330 
3331 	link_cnt = dm->dc->caps.max_links;
3332 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3333 		DRM_ERROR("DM: Failed to initialize mode config\n");
3334 		return -EINVAL;
3335 	}
3336 
3337 	/* There is one primary plane per CRTC */
3338 	primary_planes = dm->dc->caps.max_streams;
3339 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3340 
3341 	/*
3342 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3343 	 * Order is reversed to match iteration order in atomic check.
3344 	 */
3345 	for (i = (primary_planes - 1); i >= 0; i--) {
3346 		plane = &dm->dc->caps.planes[i];
3347 
3348 		if (initialize_plane(dm, mode_info, i,
3349 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3350 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3351 			goto fail;
3352 		}
3353 	}
3354 
3355 	/*
3356 	 * Initialize overlay planes, index starting after primary planes.
3357 	 * These planes have a higher DRM index than the primary planes since
3358 	 * they should be considered as having a higher z-order.
3359 	 * Order is reversed to match iteration order in atomic check.
3360 	 *
3361 	 * Only support DCN for now, and only expose one so we don't encourage
3362 	 * userspace to use up all the pipes.
3363 	 */
3364 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3365 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3366 
3367 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3368 			continue;
3369 
3370 		if (!plane->blends_with_above || !plane->blends_with_below)
3371 			continue;
3372 
3373 		if (!plane->pixel_format_support.argb8888)
3374 			continue;
3375 
3376 		if (initialize_plane(dm, NULL, primary_planes + i,
3377 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3378 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3379 			goto fail;
3380 		}
3381 
3382 		/* Only create one overlay plane. */
3383 		break;
3384 	}
3385 
3386 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3387 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3388 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3389 			goto fail;
3390 		}
3391 
3392 	/* loops over all connectors on the board */
3393 	for (i = 0; i < link_cnt; i++) {
3394 		struct dc_link *link = NULL;
3395 
3396 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3397 			DRM_ERROR(
3398 				"KMS: Cannot support more than %d display indexes\n",
3399 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3400 			continue;
3401 		}
3402 
3403 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3404 		if (!aconnector)
3405 			goto fail;
3406 
3407 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3408 		if (!aencoder)
3409 			goto fail;
3410 
3411 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3412 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3413 			goto fail;
3414 		}
3415 
3416 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3417 			DRM_ERROR("KMS: Failed to initialize connector\n");
3418 			goto fail;
3419 		}
3420 
3421 		link = dc_get_link_at_index(dm->dc, i);
3422 
3423 		if (!dc_link_detect_sink(link, &new_connection_type))
3424 			DRM_ERROR("KMS: Failed to detect connector\n");
3425 
3426 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3427 			emulated_link_detect(link);
3428 			amdgpu_dm_update_connector_after_detect(aconnector);
3429 
3430 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3431 			amdgpu_dm_update_connector_after_detect(aconnector);
3432 			register_backlight_device(dm, link);
3433 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3434 				amdgpu_dm_set_psr_caps(link);
3435 		}
3436 
3437 
3438 	}
3439 
3440 	/* Software is initialized. Now we can register interrupt handlers. */
3441 	switch (adev->asic_type) {
3442 #if defined(CONFIG_DRM_AMD_DC_SI)
3443 	case CHIP_TAHITI:
3444 	case CHIP_PITCAIRN:
3445 	case CHIP_VERDE:
3446 	case CHIP_OLAND:
3447 		if (dce60_register_irq_handlers(dm->adev)) {
3448 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3449 			goto fail;
3450 		}
3451 		break;
3452 #endif
3453 	case CHIP_BONAIRE:
3454 	case CHIP_HAWAII:
3455 	case CHIP_KAVERI:
3456 	case CHIP_KABINI:
3457 	case CHIP_MULLINS:
3458 	case CHIP_TONGA:
3459 	case CHIP_FIJI:
3460 	case CHIP_CARRIZO:
3461 	case CHIP_STONEY:
3462 	case CHIP_POLARIS11:
3463 	case CHIP_POLARIS10:
3464 	case CHIP_POLARIS12:
3465 	case CHIP_VEGAM:
3466 	case CHIP_VEGA10:
3467 	case CHIP_VEGA12:
3468 	case CHIP_VEGA20:
3469 		if (dce110_register_irq_handlers(dm->adev)) {
3470 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3471 			goto fail;
3472 		}
3473 		break;
3474 #if defined(CONFIG_DRM_AMD_DC_DCN)
3475 	case CHIP_RAVEN:
3476 	case CHIP_NAVI12:
3477 	case CHIP_NAVI10:
3478 	case CHIP_NAVI14:
3479 	case CHIP_RENOIR:
3480 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3481 	case CHIP_SIENNA_CICHLID:
3482 	case CHIP_NAVY_FLOUNDER:
3483 #endif
3484 		if (dcn10_register_irq_handlers(dm->adev)) {
3485 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3486 			goto fail;
3487 		}
3488 		break;
3489 #endif
3490 	default:
3491 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3492 		goto fail;
3493 	}
3494 
3495 	return 0;
3496 fail:
3497 	kfree(aencoder);
3498 	kfree(aconnector);
3499 
3500 	return -EINVAL;
3501 }
3502 
amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager * dm)3503 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3504 {
3505 	drm_mode_config_cleanup(dm->ddev);
3506 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3507 	return;
3508 }
3509 
3510 /******************************************************************************
3511  * amdgpu_display_funcs functions
3512  *****************************************************************************/
3513 
3514 /*
3515  * dm_bandwidth_update - program display watermarks
3516  *
3517  * @adev: amdgpu_device pointer
3518  *
3519  * Calculate and program the display watermarks and line buffer allocation.
3520  */
dm_bandwidth_update(struct amdgpu_device * adev)3521 static void dm_bandwidth_update(struct amdgpu_device *adev)
3522 {
3523 	/* TODO: implement later */
3524 }
3525 
3526 static const struct amdgpu_display_funcs dm_display_funcs = {
3527 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3528 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3529 	.backlight_set_level = NULL, /* never called for DC */
3530 	.backlight_get_level = NULL, /* never called for DC */
3531 	.hpd_sense = NULL,/* called unconditionally */
3532 	.hpd_set_polarity = NULL, /* called unconditionally */
3533 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3534 	.page_flip_get_scanoutpos =
3535 		dm_crtc_get_scanoutpos,/* called unconditionally */
3536 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3537 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3538 };
3539 
3540 #if defined(CONFIG_DEBUG_KERNEL_DC)
3541 
s3_debug_store(struct device * device,struct device_attribute * attr,const char * buf,size_t count)3542 static ssize_t s3_debug_store(struct device *device,
3543 			      struct device_attribute *attr,
3544 			      const char *buf,
3545 			      size_t count)
3546 {
3547 	int ret;
3548 	int s3_state;
3549 	struct drm_device *drm_dev = dev_get_drvdata(device);
3550 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3551 
3552 	ret = kstrtoint(buf, 0, &s3_state);
3553 
3554 	if (ret == 0) {
3555 		if (s3_state) {
3556 			dm_resume(adev);
3557 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3558 		} else
3559 			dm_suspend(adev);
3560 	}
3561 
3562 	return ret == 0 ? count : 0;
3563 }
3564 
3565 DEVICE_ATTR_WO(s3_debug);
3566 
3567 #endif
3568 
dm_early_init(void * handle)3569 static int dm_early_init(void *handle)
3570 {
3571 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3572 
3573 	switch (adev->asic_type) {
3574 #if defined(CONFIG_DRM_AMD_DC_SI)
3575 	case CHIP_TAHITI:
3576 	case CHIP_PITCAIRN:
3577 	case CHIP_VERDE:
3578 		adev->mode_info.num_crtc = 6;
3579 		adev->mode_info.num_hpd = 6;
3580 		adev->mode_info.num_dig = 6;
3581 		break;
3582 	case CHIP_OLAND:
3583 		adev->mode_info.num_crtc = 2;
3584 		adev->mode_info.num_hpd = 2;
3585 		adev->mode_info.num_dig = 2;
3586 		break;
3587 #endif
3588 	case CHIP_BONAIRE:
3589 	case CHIP_HAWAII:
3590 		adev->mode_info.num_crtc = 6;
3591 		adev->mode_info.num_hpd = 6;
3592 		adev->mode_info.num_dig = 6;
3593 		break;
3594 	case CHIP_KAVERI:
3595 		adev->mode_info.num_crtc = 4;
3596 		adev->mode_info.num_hpd = 6;
3597 		adev->mode_info.num_dig = 7;
3598 		break;
3599 	case CHIP_KABINI:
3600 	case CHIP_MULLINS:
3601 		adev->mode_info.num_crtc = 2;
3602 		adev->mode_info.num_hpd = 6;
3603 		adev->mode_info.num_dig = 6;
3604 		break;
3605 	case CHIP_FIJI:
3606 	case CHIP_TONGA:
3607 		adev->mode_info.num_crtc = 6;
3608 		adev->mode_info.num_hpd = 6;
3609 		adev->mode_info.num_dig = 7;
3610 		break;
3611 	case CHIP_CARRIZO:
3612 		adev->mode_info.num_crtc = 3;
3613 		adev->mode_info.num_hpd = 6;
3614 		adev->mode_info.num_dig = 9;
3615 		break;
3616 	case CHIP_STONEY:
3617 		adev->mode_info.num_crtc = 2;
3618 		adev->mode_info.num_hpd = 6;
3619 		adev->mode_info.num_dig = 9;
3620 		break;
3621 	case CHIP_POLARIS11:
3622 	case CHIP_POLARIS12:
3623 		adev->mode_info.num_crtc = 5;
3624 		adev->mode_info.num_hpd = 5;
3625 		adev->mode_info.num_dig = 5;
3626 		break;
3627 	case CHIP_POLARIS10:
3628 	case CHIP_VEGAM:
3629 		adev->mode_info.num_crtc = 6;
3630 		adev->mode_info.num_hpd = 6;
3631 		adev->mode_info.num_dig = 6;
3632 		break;
3633 	case CHIP_VEGA10:
3634 	case CHIP_VEGA12:
3635 	case CHIP_VEGA20:
3636 		adev->mode_info.num_crtc = 6;
3637 		adev->mode_info.num_hpd = 6;
3638 		adev->mode_info.num_dig = 6;
3639 		break;
3640 #if defined(CONFIG_DRM_AMD_DC_DCN)
3641 	case CHIP_RAVEN:
3642 		adev->mode_info.num_crtc = 4;
3643 		adev->mode_info.num_hpd = 4;
3644 		adev->mode_info.num_dig = 4;
3645 		break;
3646 #endif
3647 	case CHIP_NAVI10:
3648 	case CHIP_NAVI12:
3649 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3650 	case CHIP_SIENNA_CICHLID:
3651 	case CHIP_NAVY_FLOUNDER:
3652 #endif
3653 		adev->mode_info.num_crtc = 6;
3654 		adev->mode_info.num_hpd = 6;
3655 		adev->mode_info.num_dig = 6;
3656 		break;
3657 	case CHIP_NAVI14:
3658 		adev->mode_info.num_crtc = 5;
3659 		adev->mode_info.num_hpd = 5;
3660 		adev->mode_info.num_dig = 5;
3661 		break;
3662 	case CHIP_RENOIR:
3663 		adev->mode_info.num_crtc = 4;
3664 		adev->mode_info.num_hpd = 4;
3665 		adev->mode_info.num_dig = 4;
3666 		break;
3667 	default:
3668 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3669 		return -EINVAL;
3670 	}
3671 
3672 	amdgpu_dm_set_irq_funcs(adev);
3673 
3674 	if (adev->mode_info.funcs == NULL)
3675 		adev->mode_info.funcs = &dm_display_funcs;
3676 
3677 	/*
3678 	 * Note: Do NOT change adev->audio_endpt_rreg and
3679 	 * adev->audio_endpt_wreg because they are initialised in
3680 	 * amdgpu_device_init()
3681 	 */
3682 #if defined(CONFIG_DEBUG_KERNEL_DC)
3683 	device_create_file(
3684 		adev_to_drm(adev)->dev,
3685 		&dev_attr_s3_debug);
3686 #endif
3687 
3688 	return 0;
3689 }
3690 
modeset_required(struct drm_crtc_state * crtc_state,struct dc_stream_state * new_stream,struct dc_stream_state * old_stream)3691 static bool modeset_required(struct drm_crtc_state *crtc_state,
3692 			     struct dc_stream_state *new_stream,
3693 			     struct dc_stream_state *old_stream)
3694 {
3695 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3696 }
3697 
modereset_required(struct drm_crtc_state * crtc_state)3698 static bool modereset_required(struct drm_crtc_state *crtc_state)
3699 {
3700 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3701 }
3702 
amdgpu_dm_encoder_destroy(struct drm_encoder * encoder)3703 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3704 {
3705 	drm_encoder_cleanup(encoder);
3706 	kfree(encoder);
3707 }
3708 
3709 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3710 	.destroy = amdgpu_dm_encoder_destroy,
3711 };
3712 
3713 
fill_dc_scaling_info(const struct drm_plane_state * state,struct dc_scaling_info * scaling_info)3714 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3715 				struct dc_scaling_info *scaling_info)
3716 {
3717 	int scale_w, scale_h;
3718 
3719 	memset(scaling_info, 0, sizeof(*scaling_info));
3720 
3721 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3722 	scaling_info->src_rect.x = state->src_x >> 16;
3723 	scaling_info->src_rect.y = state->src_y >> 16;
3724 
3725 	/*
3726 	 * For reasons we don't (yet) fully understand a non-zero
3727 	 * src_y coordinate into an NV12 buffer can cause a
3728 	 * system hang. To avoid hangs (and maybe be overly cautious)
3729 	 * let's reject both non-zero src_x and src_y.
3730 	 *
3731 	 * We currently know of only one use-case to reproduce a
3732 	 * scenario with non-zero src_x and src_y for NV12, which
3733 	 * is to gesture the YouTube Android app into full screen
3734 	 * on ChromeOS.
3735 	 */
3736 	if (state->fb &&
3737 	    state->fb->format->format == DRM_FORMAT_NV12 &&
3738 	    (scaling_info->src_rect.x != 0 ||
3739 	     scaling_info->src_rect.y != 0))
3740 		return -EINVAL;
3741 
3742 	/*
3743 	 * For reasons we don't (yet) fully understand a non-zero
3744 	 * src_y coordinate into an NV12 buffer can cause a
3745 	 * system hang. To avoid hangs (and maybe be overly cautious)
3746 	 * let's reject both non-zero src_x and src_y.
3747 	 *
3748 	 * We currently know of only one use-case to reproduce a
3749 	 * scenario with non-zero src_x and src_y for NV12, which
3750 	 * is to gesture the YouTube Android app into full screen
3751 	 * on ChromeOS.
3752 	 */
3753 	if (state->fb &&
3754 	    state->fb->format->format == DRM_FORMAT_NV12 &&
3755 	    (scaling_info->src_rect.x != 0 ||
3756 	     scaling_info->src_rect.y != 0))
3757 		return -EINVAL;
3758 
3759 	scaling_info->src_rect.width = state->src_w >> 16;
3760 	if (scaling_info->src_rect.width == 0)
3761 		return -EINVAL;
3762 
3763 	scaling_info->src_rect.height = state->src_h >> 16;
3764 	if (scaling_info->src_rect.height == 0)
3765 		return -EINVAL;
3766 
3767 	scaling_info->dst_rect.x = state->crtc_x;
3768 	scaling_info->dst_rect.y = state->crtc_y;
3769 
3770 	if (state->crtc_w == 0)
3771 		return -EINVAL;
3772 
3773 	scaling_info->dst_rect.width = state->crtc_w;
3774 
3775 	if (state->crtc_h == 0)
3776 		return -EINVAL;
3777 
3778 	scaling_info->dst_rect.height = state->crtc_h;
3779 
3780 	/* DRM doesn't specify clipping on destination output. */
3781 	scaling_info->clip_rect = scaling_info->dst_rect;
3782 
3783 	/* TODO: Validate scaling per-format with DC plane caps */
3784 	scale_w = scaling_info->dst_rect.width * 1000 /
3785 		  scaling_info->src_rect.width;
3786 
3787 	if (scale_w < 250 || scale_w > 16000)
3788 		return -EINVAL;
3789 
3790 	scale_h = scaling_info->dst_rect.height * 1000 /
3791 		  scaling_info->src_rect.height;
3792 
3793 	if (scale_h < 250 || scale_h > 16000)
3794 		return -EINVAL;
3795 
3796 	/*
3797 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3798 	 * assume reasonable defaults based on the format.
3799 	 */
3800 
3801 	return 0;
3802 }
3803 
get_fb_info(const struct amdgpu_framebuffer * amdgpu_fb,uint64_t * tiling_flags,bool * tmz_surface)3804 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3805 		       uint64_t *tiling_flags, bool *tmz_surface)
3806 {
3807 	struct amdgpu_bo *rbo;
3808 	int r;
3809 
3810 	if (!amdgpu_fb) {
3811 		*tiling_flags = 0;
3812 		*tmz_surface = false;
3813 		return 0;
3814 	}
3815 
3816 	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3817 	r = amdgpu_bo_reserve(rbo, false);
3818 
3819 	if (unlikely(r)) {
3820 		/* Don't show error message when returning -ERESTARTSYS */
3821 		if (r != -ERESTARTSYS)
3822 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3823 		return r;
3824 	}
3825 
3826 	if (tiling_flags)
3827 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3828 
3829 	if (tmz_surface)
3830 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3831 
3832 	amdgpu_bo_unreserve(rbo);
3833 
3834 	return r;
3835 }
3836 
get_dcc_address(uint64_t address,uint64_t tiling_flags)3837 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3838 {
3839 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3840 
3841 	return offset ? (address + offset * 256) : 0;
3842 }
3843 
3844 static int
fill_plane_dcc_attributes(struct amdgpu_device * adev,const struct amdgpu_framebuffer * afb,const enum surface_pixel_format format,const enum dc_rotation_angle rotation,const struct plane_size * plane_size,const union dc_tiling_info * tiling_info,const uint64_t info,struct dc_plane_dcc_param * dcc,struct dc_plane_address * address,bool force_disable_dcc)3845 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3846 			  const struct amdgpu_framebuffer *afb,
3847 			  const enum surface_pixel_format format,
3848 			  const enum dc_rotation_angle rotation,
3849 			  const struct plane_size *plane_size,
3850 			  const union dc_tiling_info *tiling_info,
3851 			  const uint64_t info,
3852 			  struct dc_plane_dcc_param *dcc,
3853 			  struct dc_plane_address *address,
3854 			  bool force_disable_dcc)
3855 {
3856 	struct dc *dc = adev->dm.dc;
3857 	struct dc_dcc_surface_param input;
3858 	struct dc_surface_dcc_cap output;
3859 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3860 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3861 	uint64_t dcc_address;
3862 
3863 	memset(&input, 0, sizeof(input));
3864 	memset(&output, 0, sizeof(output));
3865 
3866 	if (force_disable_dcc)
3867 		return 0;
3868 
3869 	if (!offset)
3870 		return 0;
3871 
3872 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3873 		return 0;
3874 
3875 	if (!dc->cap_funcs.get_dcc_compression_cap)
3876 		return -EINVAL;
3877 
3878 	input.format = format;
3879 	input.surface_size.width = plane_size->surface_size.width;
3880 	input.surface_size.height = plane_size->surface_size.height;
3881 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3882 
3883 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3884 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3885 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3886 		input.scan = SCAN_DIRECTION_VERTICAL;
3887 
3888 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3889 		return -EINVAL;
3890 
3891 	if (!output.capable)
3892 		return -EINVAL;
3893 
3894 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3895 		return -EINVAL;
3896 
3897 	dcc->enable = 1;
3898 	dcc->meta_pitch =
3899 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3900 	dcc->independent_64b_blks = i64b;
3901 
3902 	dcc_address = get_dcc_address(afb->address, info);
3903 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3904 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3905 
3906 	return 0;
3907 }
3908 
3909 static int
fill_plane_buffer_attributes(struct amdgpu_device * adev,const struct amdgpu_framebuffer * afb,const enum surface_pixel_format format,const enum dc_rotation_angle rotation,const uint64_t tiling_flags,union dc_tiling_info * tiling_info,struct plane_size * plane_size,struct dc_plane_dcc_param * dcc,struct dc_plane_address * address,bool tmz_surface,bool force_disable_dcc)3910 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3911 			     const struct amdgpu_framebuffer *afb,
3912 			     const enum surface_pixel_format format,
3913 			     const enum dc_rotation_angle rotation,
3914 			     const uint64_t tiling_flags,
3915 			     union dc_tiling_info *tiling_info,
3916 			     struct plane_size *plane_size,
3917 			     struct dc_plane_dcc_param *dcc,
3918 			     struct dc_plane_address *address,
3919 			     bool tmz_surface,
3920 			     bool force_disable_dcc)
3921 {
3922 	const struct drm_framebuffer *fb = &afb->base;
3923 	int ret;
3924 
3925 	memset(tiling_info, 0, sizeof(*tiling_info));
3926 	memset(plane_size, 0, sizeof(*plane_size));
3927 	memset(dcc, 0, sizeof(*dcc));
3928 	memset(address, 0, sizeof(*address));
3929 
3930 	address->tmz_surface = tmz_surface;
3931 
3932 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3933 		plane_size->surface_size.x = 0;
3934 		plane_size->surface_size.y = 0;
3935 		plane_size->surface_size.width = fb->width;
3936 		plane_size->surface_size.height = fb->height;
3937 		plane_size->surface_pitch =
3938 			fb->pitches[0] / fb->format->cpp[0];
3939 
3940 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3941 		address->grph.addr.low_part = lower_32_bits(afb->address);
3942 		address->grph.addr.high_part = upper_32_bits(afb->address);
3943 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3944 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3945 
3946 		plane_size->surface_size.x = 0;
3947 		plane_size->surface_size.y = 0;
3948 		plane_size->surface_size.width = fb->width;
3949 		plane_size->surface_size.height = fb->height;
3950 		plane_size->surface_pitch =
3951 			fb->pitches[0] / fb->format->cpp[0];
3952 
3953 		plane_size->chroma_size.x = 0;
3954 		plane_size->chroma_size.y = 0;
3955 		/* TODO: set these based on surface format */
3956 		plane_size->chroma_size.width = fb->width / 2;
3957 		plane_size->chroma_size.height = fb->height / 2;
3958 
3959 		plane_size->chroma_pitch =
3960 			fb->pitches[1] / fb->format->cpp[1];
3961 
3962 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3963 		address->video_progressive.luma_addr.low_part =
3964 			lower_32_bits(afb->address);
3965 		address->video_progressive.luma_addr.high_part =
3966 			upper_32_bits(afb->address);
3967 		address->video_progressive.chroma_addr.low_part =
3968 			lower_32_bits(chroma_addr);
3969 		address->video_progressive.chroma_addr.high_part =
3970 			upper_32_bits(chroma_addr);
3971 	}
3972 
3973 	/* Fill GFX8 params */
3974 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3975 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3976 
3977 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3978 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3979 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3980 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3981 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3982 
3983 		/* XXX fix me for VI */
3984 		tiling_info->gfx8.num_banks = num_banks;
3985 		tiling_info->gfx8.array_mode =
3986 				DC_ARRAY_2D_TILED_THIN1;
3987 		tiling_info->gfx8.tile_split = tile_split;
3988 		tiling_info->gfx8.bank_width = bankw;
3989 		tiling_info->gfx8.bank_height = bankh;
3990 		tiling_info->gfx8.tile_aspect = mtaspect;
3991 		tiling_info->gfx8.tile_mode =
3992 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3993 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3994 			== DC_ARRAY_1D_TILED_THIN1) {
3995 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3996 	}
3997 
3998 	tiling_info->gfx8.pipe_config =
3999 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4000 
4001 	if (adev->asic_type == CHIP_VEGA10 ||
4002 	    adev->asic_type == CHIP_VEGA12 ||
4003 	    adev->asic_type == CHIP_VEGA20 ||
4004 	    adev->asic_type == CHIP_NAVI10 ||
4005 	    adev->asic_type == CHIP_NAVI14 ||
4006 	    adev->asic_type == CHIP_NAVI12 ||
4007 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
4008 		adev->asic_type == CHIP_SIENNA_CICHLID ||
4009 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
4010 #endif
4011 	    adev->asic_type == CHIP_RENOIR ||
4012 	    adev->asic_type == CHIP_RAVEN) {
4013 		/* Fill GFX9 params */
4014 		tiling_info->gfx9.num_pipes =
4015 			adev->gfx.config.gb_addr_config_fields.num_pipes;
4016 		tiling_info->gfx9.num_banks =
4017 			adev->gfx.config.gb_addr_config_fields.num_banks;
4018 		tiling_info->gfx9.pipe_interleave =
4019 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4020 		tiling_info->gfx9.num_shader_engines =
4021 			adev->gfx.config.gb_addr_config_fields.num_se;
4022 		tiling_info->gfx9.max_compressed_frags =
4023 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4024 		tiling_info->gfx9.num_rb_per_se =
4025 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4026 		tiling_info->gfx9.swizzle =
4027 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
4028 		tiling_info->gfx9.shaderEnable = 1;
4029 
4030 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
4031 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4032 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
4033 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4034 #endif
4035 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4036 						plane_size, tiling_info,
4037 						tiling_flags, dcc, address,
4038 						force_disable_dcc);
4039 		if (ret)
4040 			return ret;
4041 	}
4042 
4043 	return 0;
4044 }
4045 
4046 static void
fill_blending_from_plane_state(const struct drm_plane_state * plane_state,bool * per_pixel_alpha,bool * global_alpha,int * global_alpha_value)4047 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4048 			       bool *per_pixel_alpha, bool *global_alpha,
4049 			       int *global_alpha_value)
4050 {
4051 	*per_pixel_alpha = false;
4052 	*global_alpha = false;
4053 	*global_alpha_value = 0xff;
4054 
4055 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4056 		return;
4057 
4058 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4059 		static const uint32_t alpha_formats[] = {
4060 			DRM_FORMAT_ARGB8888,
4061 			DRM_FORMAT_RGBA8888,
4062 			DRM_FORMAT_ABGR8888,
4063 		};
4064 		uint32_t format = plane_state->fb->format->format;
4065 		unsigned int i;
4066 
4067 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4068 			if (format == alpha_formats[i]) {
4069 				*per_pixel_alpha = true;
4070 				break;
4071 			}
4072 		}
4073 	}
4074 
4075 	if (plane_state->alpha < 0xffff) {
4076 		*global_alpha = true;
4077 		*global_alpha_value = plane_state->alpha >> 8;
4078 	}
4079 }
4080 
4081 static int
fill_plane_color_attributes(const struct drm_plane_state * plane_state,const enum surface_pixel_format format,enum dc_color_space * color_space)4082 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4083 			    const enum surface_pixel_format format,
4084 			    enum dc_color_space *color_space)
4085 {
4086 	bool full_range;
4087 
4088 	*color_space = COLOR_SPACE_SRGB;
4089 
4090 	/* DRM color properties only affect non-RGB formats. */
4091 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4092 		return 0;
4093 
4094 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4095 
4096 	switch (plane_state->color_encoding) {
4097 	case DRM_COLOR_YCBCR_BT601:
4098 		if (full_range)
4099 			*color_space = COLOR_SPACE_YCBCR601;
4100 		else
4101 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4102 		break;
4103 
4104 	case DRM_COLOR_YCBCR_BT709:
4105 		if (full_range)
4106 			*color_space = COLOR_SPACE_YCBCR709;
4107 		else
4108 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4109 		break;
4110 
4111 	case DRM_COLOR_YCBCR_BT2020:
4112 		if (full_range)
4113 			*color_space = COLOR_SPACE_2020_YCBCR;
4114 		else
4115 			return -EINVAL;
4116 		break;
4117 
4118 	default:
4119 		return -EINVAL;
4120 	}
4121 
4122 	return 0;
4123 }
4124 
4125 static int
fill_dc_plane_info_and_addr(struct amdgpu_device * adev,const struct drm_plane_state * plane_state,const uint64_t tiling_flags,struct dc_plane_info * plane_info,struct dc_plane_address * address,bool tmz_surface,bool force_disable_dcc)4126 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4127 			    const struct drm_plane_state *plane_state,
4128 			    const uint64_t tiling_flags,
4129 			    struct dc_plane_info *plane_info,
4130 			    struct dc_plane_address *address,
4131 			    bool tmz_surface,
4132 			    bool force_disable_dcc)
4133 {
4134 	const struct drm_framebuffer *fb = plane_state->fb;
4135 	const struct amdgpu_framebuffer *afb =
4136 		to_amdgpu_framebuffer(plane_state->fb);
4137 	struct drm_format_name_buf format_name;
4138 	int ret;
4139 
4140 	memset(plane_info, 0, sizeof(*plane_info));
4141 
4142 	switch (fb->format->format) {
4143 	case DRM_FORMAT_C8:
4144 		plane_info->format =
4145 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4146 		break;
4147 	case DRM_FORMAT_RGB565:
4148 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4149 		break;
4150 	case DRM_FORMAT_XRGB8888:
4151 	case DRM_FORMAT_ARGB8888:
4152 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4153 		break;
4154 	case DRM_FORMAT_XRGB2101010:
4155 	case DRM_FORMAT_ARGB2101010:
4156 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4157 		break;
4158 	case DRM_FORMAT_XBGR2101010:
4159 	case DRM_FORMAT_ABGR2101010:
4160 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4161 		break;
4162 	case DRM_FORMAT_XBGR8888:
4163 	case DRM_FORMAT_ABGR8888:
4164 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4165 		break;
4166 	case DRM_FORMAT_NV21:
4167 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4168 		break;
4169 	case DRM_FORMAT_NV12:
4170 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4171 		break;
4172 	case DRM_FORMAT_P010:
4173 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4174 		break;
4175 	case DRM_FORMAT_XRGB16161616F:
4176 	case DRM_FORMAT_ARGB16161616F:
4177 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4178 		break;
4179 	case DRM_FORMAT_XBGR16161616F:
4180 	case DRM_FORMAT_ABGR16161616F:
4181 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4182 		break;
4183 	default:
4184 		DRM_ERROR(
4185 			"Unsupported screen format %s\n",
4186 			drm_get_format_name(fb->format->format, &format_name));
4187 		return -EINVAL;
4188 	}
4189 
4190 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4191 	case DRM_MODE_ROTATE_0:
4192 		plane_info->rotation = ROTATION_ANGLE_0;
4193 		break;
4194 	case DRM_MODE_ROTATE_90:
4195 		plane_info->rotation = ROTATION_ANGLE_90;
4196 		break;
4197 	case DRM_MODE_ROTATE_180:
4198 		plane_info->rotation = ROTATION_ANGLE_180;
4199 		break;
4200 	case DRM_MODE_ROTATE_270:
4201 		plane_info->rotation = ROTATION_ANGLE_270;
4202 		break;
4203 	default:
4204 		plane_info->rotation = ROTATION_ANGLE_0;
4205 		break;
4206 	}
4207 
4208 	plane_info->visible = true;
4209 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4210 
4211 	plane_info->layer_index = 0;
4212 
4213 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4214 					  &plane_info->color_space);
4215 	if (ret)
4216 		return ret;
4217 
4218 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4219 					   plane_info->rotation, tiling_flags,
4220 					   &plane_info->tiling_info,
4221 					   &plane_info->plane_size,
4222 					   &plane_info->dcc, address, tmz_surface,
4223 					   force_disable_dcc);
4224 	if (ret)
4225 		return ret;
4226 
4227 	fill_blending_from_plane_state(
4228 		plane_state, &plane_info->per_pixel_alpha,
4229 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4230 
4231 	return 0;
4232 }
4233 
fill_dc_plane_attributes(struct amdgpu_device * adev,struct dc_plane_state * dc_plane_state,struct drm_plane_state * plane_state,struct drm_crtc_state * crtc_state)4234 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4235 				    struct dc_plane_state *dc_plane_state,
4236 				    struct drm_plane_state *plane_state,
4237 				    struct drm_crtc_state *crtc_state)
4238 {
4239 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4240 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4241 	struct dc_scaling_info scaling_info;
4242 	struct dc_plane_info plane_info;
4243 	int ret;
4244 	bool force_disable_dcc = false;
4245 
4246 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4247 	if (ret)
4248 		return ret;
4249 
4250 	dc_plane_state->src_rect = scaling_info.src_rect;
4251 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4252 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4253 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4254 
4255 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4256 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4257 					  dm_plane_state->tiling_flags,
4258 					  &plane_info,
4259 					  &dc_plane_state->address,
4260 					  dm_plane_state->tmz_surface,
4261 					  force_disable_dcc);
4262 	if (ret)
4263 		return ret;
4264 
4265 	dc_plane_state->format = plane_info.format;
4266 	dc_plane_state->color_space = plane_info.color_space;
4267 	dc_plane_state->format = plane_info.format;
4268 	dc_plane_state->plane_size = plane_info.plane_size;
4269 	dc_plane_state->rotation = plane_info.rotation;
4270 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4271 	dc_plane_state->stereo_format = plane_info.stereo_format;
4272 	dc_plane_state->tiling_info = plane_info.tiling_info;
4273 	dc_plane_state->visible = plane_info.visible;
4274 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4275 	dc_plane_state->global_alpha = plane_info.global_alpha;
4276 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4277 	dc_plane_state->dcc = plane_info.dcc;
4278 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4279 
4280 	/*
4281 	 * Always set input transfer function, since plane state is refreshed
4282 	 * every time.
4283 	 */
4284 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4285 	if (ret)
4286 		return ret;
4287 
4288 	return 0;
4289 }
4290 
update_stream_scaling_settings(const struct drm_display_mode * mode,const struct dm_connector_state * dm_state,struct dc_stream_state * stream)4291 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4292 					   const struct dm_connector_state *dm_state,
4293 					   struct dc_stream_state *stream)
4294 {
4295 	enum amdgpu_rmx_type rmx_type;
4296 
4297 	struct rect src = { 0 }; /* viewport in composition space*/
4298 	struct rect dst = { 0 }; /* stream addressable area */
4299 
4300 	/* no mode. nothing to be done */
4301 	if (!mode)
4302 		return;
4303 
4304 	/* Full screen scaling by default */
4305 	src.width = mode->hdisplay;
4306 	src.height = mode->vdisplay;
4307 	dst.width = stream->timing.h_addressable;
4308 	dst.height = stream->timing.v_addressable;
4309 
4310 	if (dm_state) {
4311 		rmx_type = dm_state->scaling;
4312 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4313 			if (src.width * dst.height <
4314 					src.height * dst.width) {
4315 				/* height needs less upscaling/more downscaling */
4316 				dst.width = src.width *
4317 						dst.height / src.height;
4318 			} else {
4319 				/* width needs less upscaling/more downscaling */
4320 				dst.height = src.height *
4321 						dst.width / src.width;
4322 			}
4323 		} else if (rmx_type == RMX_CENTER) {
4324 			dst = src;
4325 		}
4326 
4327 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4328 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4329 
4330 		if (dm_state->underscan_enable) {
4331 			dst.x += dm_state->underscan_hborder / 2;
4332 			dst.y += dm_state->underscan_vborder / 2;
4333 			dst.width -= dm_state->underscan_hborder;
4334 			dst.height -= dm_state->underscan_vborder;
4335 		}
4336 	}
4337 
4338 	stream->src = src;
4339 	stream->dst = dst;
4340 
4341 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4342 			dst.x, dst.y, dst.width, dst.height);
4343 
4344 }
4345 
4346 static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector * connector,bool is_y420,int requested_bpc)4347 convert_color_depth_from_display_info(const struct drm_connector *connector,
4348 				      bool is_y420, int requested_bpc)
4349 {
4350 	uint8_t bpc;
4351 
4352 	if (is_y420) {
4353 		bpc = 8;
4354 
4355 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4356 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4357 			bpc = 16;
4358 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4359 			bpc = 12;
4360 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4361 			bpc = 10;
4362 	} else {
4363 		bpc = (uint8_t)connector->display_info.bpc;
4364 		/* Assume 8 bpc by default if no bpc is specified. */
4365 		bpc = bpc ? bpc : 8;
4366 	}
4367 
4368 	if (requested_bpc > 0) {
4369 		/*
4370 		 * Cap display bpc based on the user requested value.
4371 		 *
4372 		 * The value for state->max_bpc may not correctly updated
4373 		 * depending on when the connector gets added to the state
4374 		 * or if this was called outside of atomic check, so it
4375 		 * can't be used directly.
4376 		 */
4377 		bpc = min_t(u8, bpc, requested_bpc);
4378 
4379 		/* Round down to the nearest even number. */
4380 		bpc = bpc - (bpc & 1);
4381 	}
4382 
4383 	switch (bpc) {
4384 	case 0:
4385 		/*
4386 		 * Temporary Work around, DRM doesn't parse color depth for
4387 		 * EDID revision before 1.4
4388 		 * TODO: Fix edid parsing
4389 		 */
4390 		return COLOR_DEPTH_888;
4391 	case 6:
4392 		return COLOR_DEPTH_666;
4393 	case 8:
4394 		return COLOR_DEPTH_888;
4395 	case 10:
4396 		return COLOR_DEPTH_101010;
4397 	case 12:
4398 		return COLOR_DEPTH_121212;
4399 	case 14:
4400 		return COLOR_DEPTH_141414;
4401 	case 16:
4402 		return COLOR_DEPTH_161616;
4403 	default:
4404 		return COLOR_DEPTH_UNDEFINED;
4405 	}
4406 }
4407 
4408 static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode * mode_in)4409 get_aspect_ratio(const struct drm_display_mode *mode_in)
4410 {
4411 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4412 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4413 }
4414 
4415 static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing * dc_crtc_timing)4416 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4417 {
4418 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4419 
4420 	switch (dc_crtc_timing->pixel_encoding)	{
4421 	case PIXEL_ENCODING_YCBCR422:
4422 	case PIXEL_ENCODING_YCBCR444:
4423 	case PIXEL_ENCODING_YCBCR420:
4424 	{
4425 		/*
4426 		 * 27030khz is the separation point between HDTV and SDTV
4427 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4428 		 * respectively
4429 		 */
4430 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4431 			if (dc_crtc_timing->flags.Y_ONLY)
4432 				color_space =
4433 					COLOR_SPACE_YCBCR709_LIMITED;
4434 			else
4435 				color_space = COLOR_SPACE_YCBCR709;
4436 		} else {
4437 			if (dc_crtc_timing->flags.Y_ONLY)
4438 				color_space =
4439 					COLOR_SPACE_YCBCR601_LIMITED;
4440 			else
4441 				color_space = COLOR_SPACE_YCBCR601;
4442 		}
4443 
4444 	}
4445 	break;
4446 	case PIXEL_ENCODING_RGB:
4447 		color_space = COLOR_SPACE_SRGB;
4448 		break;
4449 
4450 	default:
4451 		WARN_ON(1);
4452 		break;
4453 	}
4454 
4455 	return color_space;
4456 }
4457 
adjust_colour_depth_from_display_info(struct dc_crtc_timing * timing_out,const struct drm_display_info * info)4458 static bool adjust_colour_depth_from_display_info(
4459 	struct dc_crtc_timing *timing_out,
4460 	const struct drm_display_info *info)
4461 {
4462 	enum dc_color_depth depth = timing_out->display_color_depth;
4463 	int normalized_clk;
4464 	do {
4465 		normalized_clk = timing_out->pix_clk_100hz / 10;
4466 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4467 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4468 			normalized_clk /= 2;
4469 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4470 		switch (depth) {
4471 		case COLOR_DEPTH_888:
4472 			break;
4473 		case COLOR_DEPTH_101010:
4474 			normalized_clk = (normalized_clk * 30) / 24;
4475 			break;
4476 		case COLOR_DEPTH_121212:
4477 			normalized_clk = (normalized_clk * 36) / 24;
4478 			break;
4479 		case COLOR_DEPTH_161616:
4480 			normalized_clk = (normalized_clk * 48) / 24;
4481 			break;
4482 		default:
4483 			/* The above depths are the only ones valid for HDMI. */
4484 			return false;
4485 		}
4486 		if (normalized_clk <= info->max_tmds_clock) {
4487 			timing_out->display_color_depth = depth;
4488 			return true;
4489 		}
4490 	} while (--depth > COLOR_DEPTH_666);
4491 	return false;
4492 }
4493 
fill_stream_properties_from_drm_display_mode(struct dc_stream_state * stream,const struct drm_display_mode * mode_in,const struct drm_connector * connector,const struct drm_connector_state * connector_state,const struct dc_stream_state * old_stream,int requested_bpc)4494 static void fill_stream_properties_from_drm_display_mode(
4495 	struct dc_stream_state *stream,
4496 	const struct drm_display_mode *mode_in,
4497 	const struct drm_connector *connector,
4498 	const struct drm_connector_state *connector_state,
4499 	const struct dc_stream_state *old_stream,
4500 	int requested_bpc)
4501 {
4502 	struct dc_crtc_timing *timing_out = &stream->timing;
4503 	const struct drm_display_info *info = &connector->display_info;
4504 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4505 	struct hdmi_vendor_infoframe hv_frame;
4506 	struct hdmi_avi_infoframe avi_frame;
4507 
4508 	memset(&hv_frame, 0, sizeof(hv_frame));
4509 	memset(&avi_frame, 0, sizeof(avi_frame));
4510 
4511 	timing_out->h_border_left = 0;
4512 	timing_out->h_border_right = 0;
4513 	timing_out->v_border_top = 0;
4514 	timing_out->v_border_bottom = 0;
4515 	/* TODO: un-hardcode */
4516 	if (drm_mode_is_420_only(info, mode_in)
4517 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4518 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4519 	else if (drm_mode_is_420_also(info, mode_in)
4520 			&& aconnector->force_yuv420_output)
4521 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4522 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4523 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4524 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4525 	else
4526 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4527 
4528 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4529 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4530 		connector,
4531 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4532 		requested_bpc);
4533 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4534 	timing_out->hdmi_vic = 0;
4535 
4536 	if(old_stream) {
4537 		timing_out->vic = old_stream->timing.vic;
4538 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4539 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4540 	} else {
4541 		timing_out->vic = drm_match_cea_mode(mode_in);
4542 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4543 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4544 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4545 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4546 	}
4547 
4548 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4549 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4550 		timing_out->vic = avi_frame.video_code;
4551 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4552 		timing_out->hdmi_vic = hv_frame.vic;
4553 	}
4554 
4555 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4556 	timing_out->h_total = mode_in->crtc_htotal;
4557 	timing_out->h_sync_width =
4558 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4559 	timing_out->h_front_porch =
4560 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4561 	timing_out->v_total = mode_in->crtc_vtotal;
4562 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4563 	timing_out->v_front_porch =
4564 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4565 	timing_out->v_sync_width =
4566 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4567 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4568 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4569 
4570 	stream->output_color_space = get_output_color_space(timing_out);
4571 
4572 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4573 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4574 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4575 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4576 		    drm_mode_is_420_also(info, mode_in) &&
4577 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4578 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4579 			adjust_colour_depth_from_display_info(timing_out, info);
4580 		}
4581 	}
4582 }
4583 
fill_audio_info(struct audio_info * audio_info,const struct drm_connector * drm_connector,const struct dc_sink * dc_sink)4584 static void fill_audio_info(struct audio_info *audio_info,
4585 			    const struct drm_connector *drm_connector,
4586 			    const struct dc_sink *dc_sink)
4587 {
4588 	int i = 0;
4589 	int cea_revision = 0;
4590 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4591 
4592 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4593 	audio_info->product_id = edid_caps->product_id;
4594 
4595 	cea_revision = drm_connector->display_info.cea_rev;
4596 
4597 	strscpy(audio_info->display_name,
4598 		edid_caps->display_name,
4599 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4600 
4601 	if (cea_revision >= 3) {
4602 		audio_info->mode_count = edid_caps->audio_mode_count;
4603 
4604 		for (i = 0; i < audio_info->mode_count; ++i) {
4605 			audio_info->modes[i].format_code =
4606 					(enum audio_format_code)
4607 					(edid_caps->audio_modes[i].format_code);
4608 			audio_info->modes[i].channel_count =
4609 					edid_caps->audio_modes[i].channel_count;
4610 			audio_info->modes[i].sample_rates.all =
4611 					edid_caps->audio_modes[i].sample_rate;
4612 			audio_info->modes[i].sample_size =
4613 					edid_caps->audio_modes[i].sample_size;
4614 		}
4615 	}
4616 
4617 	audio_info->flags.all = edid_caps->speaker_flags;
4618 
4619 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4620 	if (drm_connector->latency_present[0]) {
4621 		audio_info->video_latency = drm_connector->video_latency[0];
4622 		audio_info->audio_latency = drm_connector->audio_latency[0];
4623 	}
4624 
4625 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4626 
4627 }
4628 
4629 static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode * src_mode,struct drm_display_mode * dst_mode)4630 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4631 				      struct drm_display_mode *dst_mode)
4632 {
4633 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4634 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4635 	dst_mode->crtc_clock = src_mode->crtc_clock;
4636 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4637 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4638 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4639 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4640 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4641 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4642 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4643 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4644 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4645 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4646 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4647 }
4648 
4649 static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode * drm_mode,const struct drm_display_mode * native_mode,bool scale_enabled)4650 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4651 					const struct drm_display_mode *native_mode,
4652 					bool scale_enabled)
4653 {
4654 	if (scale_enabled) {
4655 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4656 	} else if (native_mode->clock == drm_mode->clock &&
4657 			native_mode->htotal == drm_mode->htotal &&
4658 			native_mode->vtotal == drm_mode->vtotal) {
4659 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4660 	} else {
4661 		/* no scaling nor amdgpu inserted, no need to patch */
4662 	}
4663 }
4664 
4665 static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector * aconnector)4666 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4667 {
4668 	struct dc_sink_init_data sink_init_data = { 0 };
4669 	struct dc_sink *sink = NULL;
4670 	sink_init_data.link = aconnector->dc_link;
4671 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4672 
4673 	sink = dc_sink_create(&sink_init_data);
4674 	if (!sink) {
4675 		DRM_ERROR("Failed to create sink!\n");
4676 		return NULL;
4677 	}
4678 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4679 
4680 	return sink;
4681 }
4682 
set_multisync_trigger_params(struct dc_stream_state * stream)4683 static void set_multisync_trigger_params(
4684 		struct dc_stream_state *stream)
4685 {
4686 	if (stream->triggered_crtc_reset.enabled) {
4687 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4688 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4689 	}
4690 }
4691 
set_master_stream(struct dc_stream_state * stream_set[],int stream_count)4692 static void set_master_stream(struct dc_stream_state *stream_set[],
4693 			      int stream_count)
4694 {
4695 	int j, highest_rfr = 0, master_stream = 0;
4696 
4697 	for (j = 0;  j < stream_count; j++) {
4698 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4699 			int refresh_rate = 0;
4700 
4701 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4702 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4703 			if (refresh_rate > highest_rfr) {
4704 				highest_rfr = refresh_rate;
4705 				master_stream = j;
4706 			}
4707 		}
4708 	}
4709 	for (j = 0;  j < stream_count; j++) {
4710 		if (stream_set[j])
4711 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4712 	}
4713 }
4714 
dm_enable_per_frame_crtc_master_sync(struct dc_state * context)4715 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4716 {
4717 	int i = 0;
4718 
4719 	if (context->stream_count < 2)
4720 		return;
4721 	for (i = 0; i < context->stream_count ; i++) {
4722 		if (!context->streams[i])
4723 			continue;
4724 		/*
4725 		 * TODO: add a function to read AMD VSDB bits and set
4726 		 * crtc_sync_master.multi_sync_enabled flag
4727 		 * For now it's set to false
4728 		 */
4729 		set_multisync_trigger_params(context->streams[i]);
4730 	}
4731 	set_master_stream(context->streams, context->stream_count);
4732 }
4733 
4734 static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector * aconnector,const struct drm_display_mode * drm_mode,const struct dm_connector_state * dm_state,const struct dc_stream_state * old_stream,int requested_bpc)4735 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4736 		       const struct drm_display_mode *drm_mode,
4737 		       const struct dm_connector_state *dm_state,
4738 		       const struct dc_stream_state *old_stream,
4739 		       int requested_bpc)
4740 {
4741 	struct drm_display_mode *preferred_mode = NULL;
4742 	struct drm_connector *drm_connector;
4743 	const struct drm_connector_state *con_state =
4744 		dm_state ? &dm_state->base : NULL;
4745 	struct dc_stream_state *stream = NULL;
4746 	struct drm_display_mode mode = *drm_mode;
4747 	bool native_mode_found = false;
4748 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4749 	int mode_refresh;
4750 	int preferred_refresh = 0;
4751 #if defined(CONFIG_DRM_AMD_DC_DCN)
4752 	struct dsc_dec_dpcd_caps dsc_caps;
4753 #endif
4754 	uint32_t link_bandwidth_kbps;
4755 
4756 	struct dc_sink *sink = NULL;
4757 	if (aconnector == NULL) {
4758 		DRM_ERROR("aconnector is NULL!\n");
4759 		return stream;
4760 	}
4761 
4762 	drm_connector = &aconnector->base;
4763 
4764 	if (!aconnector->dc_sink) {
4765 		sink = create_fake_sink(aconnector);
4766 		if (!sink)
4767 			return stream;
4768 	} else {
4769 		sink = aconnector->dc_sink;
4770 		dc_sink_retain(sink);
4771 	}
4772 
4773 	stream = dc_create_stream_for_sink(sink);
4774 
4775 	if (stream == NULL) {
4776 		DRM_ERROR("Failed to create stream for sink!\n");
4777 		goto finish;
4778 	}
4779 
4780 	stream->dm_stream_context = aconnector;
4781 
4782 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4783 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4784 
4785 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4786 		/* Search for preferred mode */
4787 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4788 			native_mode_found = true;
4789 			break;
4790 		}
4791 	}
4792 	if (!native_mode_found)
4793 		preferred_mode = list_first_entry_or_null(
4794 				&aconnector->base.modes,
4795 				struct drm_display_mode,
4796 				head);
4797 
4798 	mode_refresh = drm_mode_vrefresh(&mode);
4799 
4800 	if (preferred_mode == NULL) {
4801 		/*
4802 		 * This may not be an error, the use case is when we have no
4803 		 * usermode calls to reset and set mode upon hotplug. In this
4804 		 * case, we call set mode ourselves to restore the previous mode
4805 		 * and the modelist may not be filled in in time.
4806 		 */
4807 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4808 	} else {
4809 		decide_crtc_timing_for_drm_display_mode(
4810 				&mode, preferred_mode,
4811 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4812 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4813 	}
4814 
4815 	if (!dm_state)
4816 		drm_mode_set_crtcinfo(&mode, 0);
4817 
4818 	/*
4819 	* If scaling is enabled and refresh rate didn't change
4820 	* we copy the vic and polarities of the old timings
4821 	*/
4822 	if (!scale || mode_refresh != preferred_refresh)
4823 		fill_stream_properties_from_drm_display_mode(stream,
4824 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4825 	else
4826 		fill_stream_properties_from_drm_display_mode(stream,
4827 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4828 
4829 	stream->timing.flags.DSC = 0;
4830 
4831 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4832 #if defined(CONFIG_DRM_AMD_DC_DCN)
4833 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4834 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4835 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4836 				      &dsc_caps);
4837 #endif
4838 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4839 							     dc_link_get_link_cap(aconnector->dc_link));
4840 
4841 #if defined(CONFIG_DRM_AMD_DC_DCN)
4842 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4843 			/* Set DSC policy according to dsc_clock_en */
4844 			dc_dsc_policy_set_enable_dsc_when_not_needed(
4845 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4846 
4847 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4848 						  &dsc_caps,
4849 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4850 						  link_bandwidth_kbps,
4851 						  &stream->timing,
4852 						  &stream->timing.dsc_cfg))
4853 				stream->timing.flags.DSC = 1;
4854 			/* Overwrite the stream flag if DSC is enabled through debugfs */
4855 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4856 				stream->timing.flags.DSC = 1;
4857 
4858 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4859 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4860 
4861 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4862 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4863 
4864 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4865 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4866 		}
4867 #endif
4868 	}
4869 
4870 	update_stream_scaling_settings(&mode, dm_state, stream);
4871 
4872 	fill_audio_info(
4873 		&stream->audio_info,
4874 		drm_connector,
4875 		sink);
4876 
4877 	update_stream_signal(stream, sink);
4878 
4879 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4880 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4881 
4882 	if (stream->link->psr_settings.psr_feature_enabled) {
4883 		//
4884 		// should decide stream support vsc sdp colorimetry capability
4885 		// before building vsc info packet
4886 		//
4887 		stream->use_vsc_sdp_for_colorimetry = false;
4888 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4889 			stream->use_vsc_sdp_for_colorimetry =
4890 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4891 		} else {
4892 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4893 				stream->use_vsc_sdp_for_colorimetry = true;
4894 		}
4895 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4896 	}
4897 finish:
4898 	dc_sink_release(sink);
4899 
4900 	return stream;
4901 }
4902 
amdgpu_dm_crtc_destroy(struct drm_crtc * crtc)4903 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4904 {
4905 	drm_crtc_cleanup(crtc);
4906 	kfree(crtc);
4907 }
4908 
dm_crtc_destroy_state(struct drm_crtc * crtc,struct drm_crtc_state * state)4909 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4910 				  struct drm_crtc_state *state)
4911 {
4912 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4913 
4914 	/* TODO Destroy dc_stream objects are stream object is flattened */
4915 	if (cur->stream)
4916 		dc_stream_release(cur->stream);
4917 
4918 
4919 	__drm_atomic_helper_crtc_destroy_state(state);
4920 
4921 
4922 	kfree(state);
4923 }
4924 
dm_crtc_reset_state(struct drm_crtc * crtc)4925 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4926 {
4927 	struct dm_crtc_state *state;
4928 
4929 	if (crtc->state)
4930 		dm_crtc_destroy_state(crtc, crtc->state);
4931 
4932 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4933 	if (WARN_ON(!state))
4934 		return;
4935 
4936 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4937 }
4938 
4939 static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc * crtc)4940 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4941 {
4942 	struct dm_crtc_state *state, *cur;
4943 
4944 	cur = to_dm_crtc_state(crtc->state);
4945 
4946 	if (WARN_ON(!crtc->state))
4947 		return NULL;
4948 
4949 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4950 	if (!state)
4951 		return NULL;
4952 
4953 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4954 
4955 	if (cur->stream) {
4956 		state->stream = cur->stream;
4957 		dc_stream_retain(state->stream);
4958 	}
4959 
4960 	state->active_planes = cur->active_planes;
4961 	state->vrr_infopacket = cur->vrr_infopacket;
4962 	state->abm_level = cur->abm_level;
4963 	state->vrr_supported = cur->vrr_supported;
4964 	state->freesync_config = cur->freesync_config;
4965 	state->crc_src = cur->crc_src;
4966 	state->cm_has_degamma = cur->cm_has_degamma;
4967 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4968 
4969 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4970 
4971 	return &state->base;
4972 }
4973 
dm_set_vupdate_irq(struct drm_crtc * crtc,bool enable)4974 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4975 {
4976 	enum dc_irq_source irq_source;
4977 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4978 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4979 	int rc;
4980 
4981 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4982 
4983 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4984 
4985 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4986 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4987 	return rc;
4988 }
4989 
dm_set_vblank(struct drm_crtc * crtc,bool enable)4990 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4991 {
4992 	enum dc_irq_source irq_source;
4993 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4994 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4995 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4996 	int rc = 0;
4997 
4998 	if (enable) {
4999 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5000 		if (amdgpu_dm_vrr_active(acrtc_state))
5001 			rc = dm_set_vupdate_irq(crtc, true);
5002 	} else {
5003 		/* vblank irq off -> vupdate irq off */
5004 		rc = dm_set_vupdate_irq(crtc, false);
5005 	}
5006 
5007 	if (rc)
5008 		return rc;
5009 
5010 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5011 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5012 }
5013 
dm_enable_vblank(struct drm_crtc * crtc)5014 static int dm_enable_vblank(struct drm_crtc *crtc)
5015 {
5016 	return dm_set_vblank(crtc, true);
5017 }
5018 
dm_disable_vblank(struct drm_crtc * crtc)5019 static void dm_disable_vblank(struct drm_crtc *crtc)
5020 {
5021 	dm_set_vblank(crtc, false);
5022 }
5023 
5024 /* Implemented only the options currently availible for the driver */
5025 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5026 	.reset = dm_crtc_reset_state,
5027 	.destroy = amdgpu_dm_crtc_destroy,
5028 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
5029 	.set_config = drm_atomic_helper_set_config,
5030 	.page_flip = drm_atomic_helper_page_flip,
5031 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5032 	.atomic_destroy_state = dm_crtc_destroy_state,
5033 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5034 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5035 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5036 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5037 	.enable_vblank = dm_enable_vblank,
5038 	.disable_vblank = dm_disable_vblank,
5039 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5040 };
5041 
5042 static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector * connector,bool force)5043 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5044 {
5045 	bool connected;
5046 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5047 
5048 	/*
5049 	 * Notes:
5050 	 * 1. This interface is NOT called in context of HPD irq.
5051 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5052 	 * makes it a bad place for *any* MST-related activity.
5053 	 */
5054 
5055 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5056 	    !aconnector->fake_enable)
5057 		connected = (aconnector->dc_sink != NULL);
5058 	else
5059 		connected = (aconnector->base.force == DRM_FORCE_ON);
5060 
5061 	update_subconnector_property(aconnector);
5062 
5063 	return (connected ? connector_status_connected :
5064 			connector_status_disconnected);
5065 }
5066 
amdgpu_dm_connector_atomic_set_property(struct drm_connector * connector,struct drm_connector_state * connector_state,struct drm_property * property,uint64_t val)5067 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5068 					    struct drm_connector_state *connector_state,
5069 					    struct drm_property *property,
5070 					    uint64_t val)
5071 {
5072 	struct drm_device *dev = connector->dev;
5073 	struct amdgpu_device *adev = drm_to_adev(dev);
5074 	struct dm_connector_state *dm_old_state =
5075 		to_dm_connector_state(connector->state);
5076 	struct dm_connector_state *dm_new_state =
5077 		to_dm_connector_state(connector_state);
5078 
5079 	int ret = -EINVAL;
5080 
5081 	if (property == dev->mode_config.scaling_mode_property) {
5082 		enum amdgpu_rmx_type rmx_type;
5083 
5084 		switch (val) {
5085 		case DRM_MODE_SCALE_CENTER:
5086 			rmx_type = RMX_CENTER;
5087 			break;
5088 		case DRM_MODE_SCALE_ASPECT:
5089 			rmx_type = RMX_ASPECT;
5090 			break;
5091 		case DRM_MODE_SCALE_FULLSCREEN:
5092 			rmx_type = RMX_FULL;
5093 			break;
5094 		case DRM_MODE_SCALE_NONE:
5095 		default:
5096 			rmx_type = RMX_OFF;
5097 			break;
5098 		}
5099 
5100 		if (dm_old_state->scaling == rmx_type)
5101 			return 0;
5102 
5103 		dm_new_state->scaling = rmx_type;
5104 		ret = 0;
5105 	} else if (property == adev->mode_info.underscan_hborder_property) {
5106 		dm_new_state->underscan_hborder = val;
5107 		ret = 0;
5108 	} else if (property == adev->mode_info.underscan_vborder_property) {
5109 		dm_new_state->underscan_vborder = val;
5110 		ret = 0;
5111 	} else if (property == adev->mode_info.underscan_property) {
5112 		dm_new_state->underscan_enable = val;
5113 		ret = 0;
5114 	} else if (property == adev->mode_info.abm_level_property) {
5115 		dm_new_state->abm_level = val;
5116 		ret = 0;
5117 	}
5118 
5119 	return ret;
5120 }
5121 
amdgpu_dm_connector_atomic_get_property(struct drm_connector * connector,const struct drm_connector_state * state,struct drm_property * property,uint64_t * val)5122 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5123 					    const struct drm_connector_state *state,
5124 					    struct drm_property *property,
5125 					    uint64_t *val)
5126 {
5127 	struct drm_device *dev = connector->dev;
5128 	struct amdgpu_device *adev = drm_to_adev(dev);
5129 	struct dm_connector_state *dm_state =
5130 		to_dm_connector_state(state);
5131 	int ret = -EINVAL;
5132 
5133 	if (property == dev->mode_config.scaling_mode_property) {
5134 		switch (dm_state->scaling) {
5135 		case RMX_CENTER:
5136 			*val = DRM_MODE_SCALE_CENTER;
5137 			break;
5138 		case RMX_ASPECT:
5139 			*val = DRM_MODE_SCALE_ASPECT;
5140 			break;
5141 		case RMX_FULL:
5142 			*val = DRM_MODE_SCALE_FULLSCREEN;
5143 			break;
5144 		case RMX_OFF:
5145 		default:
5146 			*val = DRM_MODE_SCALE_NONE;
5147 			break;
5148 		}
5149 		ret = 0;
5150 	} else if (property == adev->mode_info.underscan_hborder_property) {
5151 		*val = dm_state->underscan_hborder;
5152 		ret = 0;
5153 	} else if (property == adev->mode_info.underscan_vborder_property) {
5154 		*val = dm_state->underscan_vborder;
5155 		ret = 0;
5156 	} else if (property == adev->mode_info.underscan_property) {
5157 		*val = dm_state->underscan_enable;
5158 		ret = 0;
5159 	} else if (property == adev->mode_info.abm_level_property) {
5160 		*val = dm_state->abm_level;
5161 		ret = 0;
5162 	}
5163 
5164 	return ret;
5165 }
5166 
amdgpu_dm_connector_unregister(struct drm_connector * connector)5167 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5168 {
5169 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5170 
5171 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5172 }
5173 
amdgpu_dm_connector_destroy(struct drm_connector * connector)5174 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5175 {
5176 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5177 	const struct dc_link *link = aconnector->dc_link;
5178 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5179 	struct amdgpu_display_manager *dm = &adev->dm;
5180 
5181 	/*
5182 	 * Call only if mst_mgr was iniitalized before since it's not done
5183 	 * for all connector types.
5184 	 */
5185 	if (aconnector->mst_mgr.dev)
5186 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5187 
5188 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5189 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5190 
5191 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5192 	    link->type != dc_connection_none &&
5193 	    dm->backlight_dev) {
5194 		backlight_device_unregister(dm->backlight_dev);
5195 		dm->backlight_dev = NULL;
5196 	}
5197 #endif
5198 
5199 	if (aconnector->dc_em_sink)
5200 		dc_sink_release(aconnector->dc_em_sink);
5201 	aconnector->dc_em_sink = NULL;
5202 	if (aconnector->dc_sink)
5203 		dc_sink_release(aconnector->dc_sink);
5204 	aconnector->dc_sink = NULL;
5205 
5206 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5207 	drm_connector_unregister(connector);
5208 	drm_connector_cleanup(connector);
5209 	if (aconnector->i2c) {
5210 		i2c_del_adapter(&aconnector->i2c->base);
5211 		kfree(aconnector->i2c);
5212 	}
5213 	kfree(aconnector->dm_dp_aux.aux.name);
5214 
5215 	kfree(connector);
5216 }
5217 
amdgpu_dm_connector_funcs_reset(struct drm_connector * connector)5218 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5219 {
5220 	struct dm_connector_state *state =
5221 		to_dm_connector_state(connector->state);
5222 
5223 	if (connector->state)
5224 		__drm_atomic_helper_connector_destroy_state(connector->state);
5225 
5226 	kfree(state);
5227 
5228 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5229 
5230 	if (state) {
5231 		state->scaling = RMX_OFF;
5232 		state->underscan_enable = false;
5233 		state->underscan_hborder = 0;
5234 		state->underscan_vborder = 0;
5235 		state->base.max_requested_bpc = 8;
5236 		state->vcpi_slots = 0;
5237 		state->pbn = 0;
5238 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5239 			state->abm_level = amdgpu_dm_abm_level;
5240 
5241 		__drm_atomic_helper_connector_reset(connector, &state->base);
5242 	}
5243 }
5244 
5245 struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector * connector)5246 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5247 {
5248 	struct dm_connector_state *state =
5249 		to_dm_connector_state(connector->state);
5250 
5251 	struct dm_connector_state *new_state =
5252 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5253 
5254 	if (!new_state)
5255 		return NULL;
5256 
5257 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5258 
5259 	new_state->freesync_capable = state->freesync_capable;
5260 	new_state->abm_level = state->abm_level;
5261 	new_state->scaling = state->scaling;
5262 	new_state->underscan_enable = state->underscan_enable;
5263 	new_state->underscan_hborder = state->underscan_hborder;
5264 	new_state->underscan_vborder = state->underscan_vborder;
5265 	new_state->vcpi_slots = state->vcpi_slots;
5266 	new_state->pbn = state->pbn;
5267 	return &new_state->base;
5268 }
5269 
5270 static int
amdgpu_dm_connector_late_register(struct drm_connector * connector)5271 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5272 {
5273 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5274 		to_amdgpu_dm_connector(connector);
5275 	int r;
5276 
5277 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5278 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5279 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5280 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5281 		if (r)
5282 			return r;
5283 	}
5284 
5285 #if defined(CONFIG_DEBUG_FS)
5286 	connector_debugfs_init(amdgpu_dm_connector);
5287 #endif
5288 
5289 	return 0;
5290 }
5291 
5292 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5293 	.reset = amdgpu_dm_connector_funcs_reset,
5294 	.detect = amdgpu_dm_connector_detect,
5295 	.fill_modes = drm_helper_probe_single_connector_modes,
5296 	.destroy = amdgpu_dm_connector_destroy,
5297 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5298 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5299 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5300 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5301 	.late_register = amdgpu_dm_connector_late_register,
5302 	.early_unregister = amdgpu_dm_connector_unregister
5303 };
5304 
get_modes(struct drm_connector * connector)5305 static int get_modes(struct drm_connector *connector)
5306 {
5307 	return amdgpu_dm_connector_get_modes(connector);
5308 }
5309 
create_eml_sink(struct amdgpu_dm_connector * aconnector)5310 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5311 {
5312 	struct dc_sink_init_data init_params = {
5313 			.link = aconnector->dc_link,
5314 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5315 	};
5316 	struct edid *edid;
5317 
5318 	if (!aconnector->base.edid_blob_ptr) {
5319 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5320 				aconnector->base.name);
5321 
5322 		aconnector->base.force = DRM_FORCE_OFF;
5323 		aconnector->base.override_edid = false;
5324 		return;
5325 	}
5326 
5327 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5328 
5329 	aconnector->edid = edid;
5330 
5331 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5332 		aconnector->dc_link,
5333 		(uint8_t *)edid,
5334 		(edid->extensions + 1) * EDID_LENGTH,
5335 		&init_params);
5336 
5337 	if (aconnector->base.force == DRM_FORCE_ON) {
5338 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5339 		aconnector->dc_link->local_sink :
5340 		aconnector->dc_em_sink;
5341 		dc_sink_retain(aconnector->dc_sink);
5342 	}
5343 }
5344 
handle_edid_mgmt(struct amdgpu_dm_connector * aconnector)5345 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5346 {
5347 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5348 
5349 	/*
5350 	 * In case of headless boot with force on for DP managed connector
5351 	 * Those settings have to be != 0 to get initial modeset
5352 	 */
5353 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5354 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5355 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5356 	}
5357 
5358 
5359 	aconnector->base.override_edid = true;
5360 	create_eml_sink(aconnector);
5361 }
5362 
5363 static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector * aconnector,const struct drm_display_mode * drm_mode,const struct dm_connector_state * dm_state,const struct dc_stream_state * old_stream)5364 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5365 				const struct drm_display_mode *drm_mode,
5366 				const struct dm_connector_state *dm_state,
5367 				const struct dc_stream_state *old_stream)
5368 {
5369 	struct drm_connector *connector = &aconnector->base;
5370 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5371 	struct dc_stream_state *stream;
5372 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5373 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5374 	enum dc_status dc_result = DC_OK;
5375 
5376 	do {
5377 		stream = create_stream_for_sink(aconnector, drm_mode,
5378 						dm_state, old_stream,
5379 						requested_bpc);
5380 		if (stream == NULL) {
5381 			DRM_ERROR("Failed to create stream for sink!\n");
5382 			break;
5383 		}
5384 
5385 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5386 
5387 		if (dc_result != DC_OK) {
5388 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5389 				      drm_mode->hdisplay,
5390 				      drm_mode->vdisplay,
5391 				      drm_mode->clock,
5392 				      dc_result,
5393 				      dc_status_to_str(dc_result));
5394 
5395 			dc_stream_release(stream);
5396 			stream = NULL;
5397 			requested_bpc -= 2; /* lower bpc to retry validation */
5398 		}
5399 
5400 	} while (stream == NULL && requested_bpc >= 6);
5401 
5402 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5403 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
5404 
5405 		aconnector->force_yuv420_output = true;
5406 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
5407 						dm_state, old_stream);
5408 		aconnector->force_yuv420_output = false;
5409 	}
5410 
5411 	return stream;
5412 }
5413 
amdgpu_dm_connector_mode_valid(struct drm_connector * connector,struct drm_display_mode * mode)5414 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5415 				   struct drm_display_mode *mode)
5416 {
5417 	int result = MODE_ERROR;
5418 	struct dc_sink *dc_sink;
5419 	/* TODO: Unhardcode stream count */
5420 	struct dc_stream_state *stream;
5421 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5422 
5423 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5424 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5425 		return result;
5426 
5427 	/*
5428 	 * Only run this the first time mode_valid is called to initilialize
5429 	 * EDID mgmt
5430 	 */
5431 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5432 		!aconnector->dc_em_sink)
5433 		handle_edid_mgmt(aconnector);
5434 
5435 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5436 
5437 	if (dc_sink == NULL) {
5438 		DRM_ERROR("dc_sink is NULL!\n");
5439 		goto fail;
5440 	}
5441 
5442 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5443 	if (stream) {
5444 		dc_stream_release(stream);
5445 		result = MODE_OK;
5446 	}
5447 
5448 fail:
5449 	/* TODO: error handling*/
5450 	return result;
5451 }
5452 
fill_hdr_info_packet(const struct drm_connector_state * state,struct dc_info_packet * out)5453 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5454 				struct dc_info_packet *out)
5455 {
5456 	struct hdmi_drm_infoframe frame;
5457 	unsigned char buf[30]; /* 26 + 4 */
5458 	ssize_t len;
5459 	int ret, i;
5460 
5461 	memset(out, 0, sizeof(*out));
5462 
5463 	if (!state->hdr_output_metadata)
5464 		return 0;
5465 
5466 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5467 	if (ret)
5468 		return ret;
5469 
5470 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5471 	if (len < 0)
5472 		return (int)len;
5473 
5474 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5475 	if (len != 30)
5476 		return -EINVAL;
5477 
5478 	/* Prepare the infopacket for DC. */
5479 	switch (state->connector->connector_type) {
5480 	case DRM_MODE_CONNECTOR_HDMIA:
5481 		out->hb0 = 0x87; /* type */
5482 		out->hb1 = 0x01; /* version */
5483 		out->hb2 = 0x1A; /* length */
5484 		out->sb[0] = buf[3]; /* checksum */
5485 		i = 1;
5486 		break;
5487 
5488 	case DRM_MODE_CONNECTOR_DisplayPort:
5489 	case DRM_MODE_CONNECTOR_eDP:
5490 		out->hb0 = 0x00; /* sdp id, zero */
5491 		out->hb1 = 0x87; /* type */
5492 		out->hb2 = 0x1D; /* payload len - 1 */
5493 		out->hb3 = (0x13 << 2); /* sdp version */
5494 		out->sb[0] = 0x01; /* version */
5495 		out->sb[1] = 0x1A; /* length */
5496 		i = 2;
5497 		break;
5498 
5499 	default:
5500 		return -EINVAL;
5501 	}
5502 
5503 	memcpy(&out->sb[i], &buf[4], 26);
5504 	out->valid = true;
5505 
5506 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5507 		       sizeof(out->sb), false);
5508 
5509 	return 0;
5510 }
5511 
5512 static bool
is_hdr_metadata_different(const struct drm_connector_state * old_state,const struct drm_connector_state * new_state)5513 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5514 			  const struct drm_connector_state *new_state)
5515 {
5516 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5517 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5518 
5519 	if (old_blob != new_blob) {
5520 		if (old_blob && new_blob &&
5521 		    old_blob->length == new_blob->length)
5522 			return memcmp(old_blob->data, new_blob->data,
5523 				      old_blob->length);
5524 
5525 		return true;
5526 	}
5527 
5528 	return false;
5529 }
5530 
5531 static int
amdgpu_dm_connector_atomic_check(struct drm_connector * conn,struct drm_atomic_state * state)5532 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5533 				 struct drm_atomic_state *state)
5534 {
5535 	struct drm_connector_state *new_con_state =
5536 		drm_atomic_get_new_connector_state(state, conn);
5537 	struct drm_connector_state *old_con_state =
5538 		drm_atomic_get_old_connector_state(state, conn);
5539 	struct drm_crtc *crtc = new_con_state->crtc;
5540 	struct drm_crtc_state *new_crtc_state;
5541 	int ret;
5542 
5543 	if (!crtc)
5544 		return 0;
5545 
5546 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5547 		struct dc_info_packet hdr_infopacket;
5548 
5549 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5550 		if (ret)
5551 			return ret;
5552 
5553 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5554 		if (IS_ERR(new_crtc_state))
5555 			return PTR_ERR(new_crtc_state);
5556 
5557 		/*
5558 		 * DC considers the stream backends changed if the
5559 		 * static metadata changes. Forcing the modeset also
5560 		 * gives a simple way for userspace to switch from
5561 		 * 8bpc to 10bpc when setting the metadata to enter
5562 		 * or exit HDR.
5563 		 *
5564 		 * Changing the static metadata after it's been
5565 		 * set is permissible, however. So only force a
5566 		 * modeset if we're entering or exiting HDR.
5567 		 */
5568 		new_crtc_state->mode_changed =
5569 			!old_con_state->hdr_output_metadata ||
5570 			!new_con_state->hdr_output_metadata;
5571 	}
5572 
5573 	return 0;
5574 }
5575 
5576 static const struct drm_connector_helper_funcs
5577 amdgpu_dm_connector_helper_funcs = {
5578 	/*
5579 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5580 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5581 	 * are missing after user start lightdm. So we need to renew modes list.
5582 	 * in get_modes call back, not just return the modes count
5583 	 */
5584 	.get_modes = get_modes,
5585 	.mode_valid = amdgpu_dm_connector_mode_valid,
5586 	.atomic_check = amdgpu_dm_connector_atomic_check,
5587 };
5588 
dm_crtc_helper_disable(struct drm_crtc * crtc)5589 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5590 {
5591 }
5592 
count_crtc_active_planes(struct drm_crtc_state * new_crtc_state)5593 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5594 {
5595 	struct drm_atomic_state *state = new_crtc_state->state;
5596 	struct drm_plane *plane;
5597 	int num_active = 0;
5598 
5599 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5600 		struct drm_plane_state *new_plane_state;
5601 
5602 		/* Cursor planes are "fake". */
5603 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5604 			continue;
5605 
5606 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5607 
5608 		if (!new_plane_state) {
5609 			/*
5610 			 * The plane is enable on the CRTC and hasn't changed
5611 			 * state. This means that it previously passed
5612 			 * validation and is therefore enabled.
5613 			 */
5614 			num_active += 1;
5615 			continue;
5616 		}
5617 
5618 		/* We need a framebuffer to be considered enabled. */
5619 		num_active += (new_plane_state->fb != NULL);
5620 	}
5621 
5622 	return num_active;
5623 }
5624 
dm_update_crtc_active_planes(struct drm_crtc * crtc,struct drm_crtc_state * new_crtc_state)5625 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5626 					 struct drm_crtc_state *new_crtc_state)
5627 {
5628 	struct dm_crtc_state *dm_new_crtc_state =
5629 		to_dm_crtc_state(new_crtc_state);
5630 
5631 	dm_new_crtc_state->active_planes = 0;
5632 
5633 	if (!dm_new_crtc_state->stream)
5634 		return;
5635 
5636 	dm_new_crtc_state->active_planes =
5637 		count_crtc_active_planes(new_crtc_state);
5638 }
5639 
dm_crtc_helper_atomic_check(struct drm_crtc * crtc,struct drm_crtc_state * state)5640 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5641 				       struct drm_crtc_state *state)
5642 {
5643 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5644 	struct dc *dc = adev->dm.dc;
5645 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5646 	int ret = -EINVAL;
5647 
5648 	dm_update_crtc_active_planes(crtc, state);
5649 
5650 	if (unlikely(!dm_crtc_state->stream &&
5651 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5652 		WARN_ON(1);
5653 		return ret;
5654 	}
5655 
5656 	/*
5657 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5658 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5659 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5660 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5661 	 */
5662 	if (state->enable &&
5663 	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
5664 		return -EINVAL;
5665 
5666 	/* In some use cases, like reset, no stream is attached */
5667 	if (!dm_crtc_state->stream)
5668 		return 0;
5669 
5670 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5671 		return 0;
5672 
5673 	return ret;
5674 }
5675 
dm_crtc_helper_mode_fixup(struct drm_crtc * crtc,const struct drm_display_mode * mode,struct drm_display_mode * adjusted_mode)5676 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5677 				      const struct drm_display_mode *mode,
5678 				      struct drm_display_mode *adjusted_mode)
5679 {
5680 	return true;
5681 }
5682 
5683 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5684 	.disable = dm_crtc_helper_disable,
5685 	.atomic_check = dm_crtc_helper_atomic_check,
5686 	.mode_fixup = dm_crtc_helper_mode_fixup,
5687 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5688 };
5689 
dm_encoder_helper_disable(struct drm_encoder * encoder)5690 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5691 {
5692 
5693 }
5694 
convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)5695 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5696 {
5697 	switch (display_color_depth) {
5698 		case COLOR_DEPTH_666:
5699 			return 6;
5700 		case COLOR_DEPTH_888:
5701 			return 8;
5702 		case COLOR_DEPTH_101010:
5703 			return 10;
5704 		case COLOR_DEPTH_121212:
5705 			return 12;
5706 		case COLOR_DEPTH_141414:
5707 			return 14;
5708 		case COLOR_DEPTH_161616:
5709 			return 16;
5710 		default:
5711 			break;
5712 		}
5713 	return 0;
5714 }
5715 
dm_encoder_helper_atomic_check(struct drm_encoder * encoder,struct drm_crtc_state * crtc_state,struct drm_connector_state * conn_state)5716 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5717 					  struct drm_crtc_state *crtc_state,
5718 					  struct drm_connector_state *conn_state)
5719 {
5720 	struct drm_atomic_state *state = crtc_state->state;
5721 	struct drm_connector *connector = conn_state->connector;
5722 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5723 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5724 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5725 	struct drm_dp_mst_topology_mgr *mst_mgr;
5726 	struct drm_dp_mst_port *mst_port;
5727 	enum dc_color_depth color_depth;
5728 	int clock, bpp = 0;
5729 	bool is_y420 = false;
5730 
5731 	if (!aconnector->port || !aconnector->dc_sink)
5732 		return 0;
5733 
5734 	mst_port = aconnector->port;
5735 	mst_mgr = &aconnector->mst_port->mst_mgr;
5736 
5737 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5738 		return 0;
5739 
5740 	if (!state->duplicated) {
5741 		int max_bpc = conn_state->max_requested_bpc;
5742 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5743 				aconnector->force_yuv420_output;
5744 		color_depth = convert_color_depth_from_display_info(connector,
5745 								    is_y420,
5746 								    max_bpc);
5747 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5748 		clock = adjusted_mode->clock;
5749 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5750 	}
5751 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5752 									   mst_mgr,
5753 									   mst_port,
5754 									   dm_new_connector_state->pbn,
5755 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5756 	if (dm_new_connector_state->vcpi_slots < 0) {
5757 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5758 		return dm_new_connector_state->vcpi_slots;
5759 	}
5760 	return 0;
5761 }
5762 
5763 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5764 	.disable = dm_encoder_helper_disable,
5765 	.atomic_check = dm_encoder_helper_atomic_check
5766 };
5767 
5768 #if defined(CONFIG_DRM_AMD_DC_DCN)
dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state * state,struct dc_state * dc_state)5769 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5770 					    struct dc_state *dc_state)
5771 {
5772 	struct dc_stream_state *stream = NULL;
5773 	struct drm_connector *connector;
5774 	struct drm_connector_state *new_con_state, *old_con_state;
5775 	struct amdgpu_dm_connector *aconnector;
5776 	struct dm_connector_state *dm_conn_state;
5777 	int i, j, clock, bpp;
5778 	int vcpi, pbn_div, pbn = 0;
5779 
5780 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5781 
5782 		aconnector = to_amdgpu_dm_connector(connector);
5783 
5784 		if (!aconnector->port)
5785 			continue;
5786 
5787 		if (!new_con_state || !new_con_state->crtc)
5788 			continue;
5789 
5790 		dm_conn_state = to_dm_connector_state(new_con_state);
5791 
5792 		for (j = 0; j < dc_state->stream_count; j++) {
5793 			stream = dc_state->streams[j];
5794 			if (!stream)
5795 				continue;
5796 
5797 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5798 				break;
5799 
5800 			stream = NULL;
5801 		}
5802 
5803 		if (!stream)
5804 			continue;
5805 
5806 		if (stream->timing.flags.DSC != 1) {
5807 			drm_dp_mst_atomic_enable_dsc(state,
5808 						     aconnector->port,
5809 						     dm_conn_state->pbn,
5810 						     0,
5811 						     false);
5812 			continue;
5813 		}
5814 
5815 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5816 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5817 		clock = stream->timing.pix_clk_100hz / 10;
5818 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5819 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5820 						    aconnector->port,
5821 						    pbn, pbn_div,
5822 						    true);
5823 		if (vcpi < 0)
5824 			return vcpi;
5825 
5826 		dm_conn_state->pbn = pbn;
5827 		dm_conn_state->vcpi_slots = vcpi;
5828 	}
5829 	return 0;
5830 }
5831 #endif
5832 
dm_drm_plane_reset(struct drm_plane * plane)5833 static void dm_drm_plane_reset(struct drm_plane *plane)
5834 {
5835 	struct dm_plane_state *amdgpu_state = NULL;
5836 
5837 	if (plane->state)
5838 		plane->funcs->atomic_destroy_state(plane, plane->state);
5839 
5840 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5841 	WARN_ON(amdgpu_state == NULL);
5842 
5843 	if (amdgpu_state)
5844 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5845 }
5846 
5847 static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane * plane)5848 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5849 {
5850 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5851 
5852 	old_dm_plane_state = to_dm_plane_state(plane->state);
5853 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5854 	if (!dm_plane_state)
5855 		return NULL;
5856 
5857 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5858 
5859 	if (old_dm_plane_state->dc_state) {
5860 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5861 		dc_plane_state_retain(dm_plane_state->dc_state);
5862 	}
5863 
5864 	/* Framebuffer hasn't been updated yet, so retain old flags. */
5865 	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5866 	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5867 
5868 	return &dm_plane_state->base;
5869 }
5870 
dm_drm_plane_destroy_state(struct drm_plane * plane,struct drm_plane_state * state)5871 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5872 				struct drm_plane_state *state)
5873 {
5874 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5875 
5876 	if (dm_plane_state->dc_state)
5877 		dc_plane_state_release(dm_plane_state->dc_state);
5878 
5879 	drm_atomic_helper_plane_destroy_state(plane, state);
5880 }
5881 
5882 static const struct drm_plane_funcs dm_plane_funcs = {
5883 	.update_plane	= drm_atomic_helper_update_plane,
5884 	.disable_plane	= drm_atomic_helper_disable_plane,
5885 	.destroy	= drm_primary_helper_destroy,
5886 	.reset = dm_drm_plane_reset,
5887 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5888 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5889 };
5890 
dm_plane_helper_prepare_fb(struct drm_plane * plane,struct drm_plane_state * new_state)5891 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5892 				      struct drm_plane_state *new_state)
5893 {
5894 	struct amdgpu_framebuffer *afb;
5895 	struct drm_gem_object *obj;
5896 	struct amdgpu_device *adev;
5897 	struct amdgpu_bo *rbo;
5898 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5899 	struct list_head list;
5900 	struct ttm_validate_buffer tv;
5901 	struct ww_acquire_ctx ticket;
5902 	uint32_t domain;
5903 	int r;
5904 
5905 	if (!new_state->fb) {
5906 		DRM_DEBUG_DRIVER("No FB bound\n");
5907 		return 0;
5908 	}
5909 
5910 	afb = to_amdgpu_framebuffer(new_state->fb);
5911 	obj = new_state->fb->obj[0];
5912 	rbo = gem_to_amdgpu_bo(obj);
5913 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5914 	INIT_LIST_HEAD(&list);
5915 
5916 	tv.bo = &rbo->tbo;
5917 	tv.num_shared = 1;
5918 	list_add(&tv.head, &list);
5919 
5920 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5921 	if (r) {
5922 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5923 		return r;
5924 	}
5925 
5926 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5927 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5928 	else
5929 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5930 
5931 	r = amdgpu_bo_pin(rbo, domain);
5932 	if (unlikely(r != 0)) {
5933 		if (r != -ERESTARTSYS)
5934 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5935 		ttm_eu_backoff_reservation(&ticket, &list);
5936 		return r;
5937 	}
5938 
5939 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5940 	if (unlikely(r != 0)) {
5941 		amdgpu_bo_unpin(rbo);
5942 		ttm_eu_backoff_reservation(&ticket, &list);
5943 		DRM_ERROR("%p bind failed\n", rbo);
5944 		return r;
5945 	}
5946 
5947 	ttm_eu_backoff_reservation(&ticket, &list);
5948 
5949 	afb->address = amdgpu_bo_gpu_offset(rbo);
5950 
5951 	amdgpu_bo_ref(rbo);
5952 
5953 	/**
5954 	 * We don't do surface updates on planes that have been newly created,
5955 	 * but we also don't have the afb->address during atomic check.
5956 	 *
5957 	 * Fill in buffer attributes depending on the address here, but only on
5958 	 * newly created planes since they're not being used by DC yet and this
5959 	 * won't modify global state.
5960 	 */
5961 	dm_plane_state_old = to_dm_plane_state(plane->state);
5962 	dm_plane_state_new = to_dm_plane_state(new_state);
5963 
5964 	if (dm_plane_state_new->dc_state &&
5965 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5966 		struct dc_plane_state *plane_state =
5967 			dm_plane_state_new->dc_state;
5968 		bool force_disable_dcc = !plane_state->dcc.enable;
5969 
5970 		fill_plane_buffer_attributes(
5971 			adev, afb, plane_state->format, plane_state->rotation,
5972 			dm_plane_state_new->tiling_flags,
5973 			&plane_state->tiling_info, &plane_state->plane_size,
5974 			&plane_state->dcc, &plane_state->address,
5975 			dm_plane_state_new->tmz_surface, force_disable_dcc);
5976 	}
5977 
5978 	return 0;
5979 }
5980 
dm_plane_helper_cleanup_fb(struct drm_plane * plane,struct drm_plane_state * old_state)5981 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5982 				       struct drm_plane_state *old_state)
5983 {
5984 	struct amdgpu_bo *rbo;
5985 	int r;
5986 
5987 	if (!old_state->fb)
5988 		return;
5989 
5990 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5991 	r = amdgpu_bo_reserve(rbo, false);
5992 	if (unlikely(r)) {
5993 		DRM_ERROR("failed to reserve rbo before unpin\n");
5994 		return;
5995 	}
5996 
5997 	amdgpu_bo_unpin(rbo);
5998 	amdgpu_bo_unreserve(rbo);
5999 	amdgpu_bo_unref(&rbo);
6000 }
6001 
dm_plane_helper_check_state(struct drm_plane_state * state,struct drm_crtc_state * new_crtc_state)6002 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6003 				       struct drm_crtc_state *new_crtc_state)
6004 {
6005 	int max_downscale = 0;
6006 	int max_upscale = INT_MAX;
6007 
6008 	/* TODO: These should be checked against DC plane caps */
6009 	return drm_atomic_helper_check_plane_state(
6010 		state, new_crtc_state, max_downscale, max_upscale, true, true);
6011 }
6012 
dm_plane_atomic_check(struct drm_plane * plane,struct drm_plane_state * state)6013 static int dm_plane_atomic_check(struct drm_plane *plane,
6014 				 struct drm_plane_state *state)
6015 {
6016 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6017 	struct dc *dc = adev->dm.dc;
6018 	struct dm_plane_state *dm_plane_state;
6019 	struct dc_scaling_info scaling_info;
6020 	struct drm_crtc_state *new_crtc_state;
6021 	int ret;
6022 
6023 	dm_plane_state = to_dm_plane_state(state);
6024 
6025 	if (!dm_plane_state->dc_state)
6026 		return 0;
6027 
6028 	new_crtc_state =
6029 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6030 	if (!new_crtc_state)
6031 		return -EINVAL;
6032 
6033 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6034 	if (ret)
6035 		return ret;
6036 
6037 	ret = fill_dc_scaling_info(state, &scaling_info);
6038 	if (ret)
6039 		return ret;
6040 
6041 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6042 		return 0;
6043 
6044 	return -EINVAL;
6045 }
6046 
dm_plane_atomic_async_check(struct drm_plane * plane,struct drm_plane_state * new_plane_state)6047 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6048 				       struct drm_plane_state *new_plane_state)
6049 {
6050 	/* Only support async updates on cursor planes. */
6051 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6052 		return -EINVAL;
6053 
6054 	return 0;
6055 }
6056 
dm_plane_atomic_async_update(struct drm_plane * plane,struct drm_plane_state * new_state)6057 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6058 					 struct drm_plane_state *new_state)
6059 {
6060 	struct drm_plane_state *old_state =
6061 		drm_atomic_get_old_plane_state(new_state->state, plane);
6062 
6063 	swap(plane->state->fb, new_state->fb);
6064 
6065 	plane->state->src_x = new_state->src_x;
6066 	plane->state->src_y = new_state->src_y;
6067 	plane->state->src_w = new_state->src_w;
6068 	plane->state->src_h = new_state->src_h;
6069 	plane->state->crtc_x = new_state->crtc_x;
6070 	plane->state->crtc_y = new_state->crtc_y;
6071 	plane->state->crtc_w = new_state->crtc_w;
6072 	plane->state->crtc_h = new_state->crtc_h;
6073 
6074 	handle_cursor_update(plane, old_state);
6075 }
6076 
6077 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6078 	.prepare_fb = dm_plane_helper_prepare_fb,
6079 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6080 	.atomic_check = dm_plane_atomic_check,
6081 	.atomic_async_check = dm_plane_atomic_async_check,
6082 	.atomic_async_update = dm_plane_atomic_async_update
6083 };
6084 
6085 /*
6086  * TODO: these are currently initialized to rgb formats only.
6087  * For future use cases we should either initialize them dynamically based on
6088  * plane capabilities, or initialize this array to all formats, so internal drm
6089  * check will succeed, and let DC implement proper check
6090  */
6091 static const uint32_t rgb_formats[] = {
6092 	DRM_FORMAT_XRGB8888,
6093 	DRM_FORMAT_ARGB8888,
6094 	DRM_FORMAT_RGBA8888,
6095 	DRM_FORMAT_XRGB2101010,
6096 	DRM_FORMAT_XBGR2101010,
6097 	DRM_FORMAT_ARGB2101010,
6098 	DRM_FORMAT_ABGR2101010,
6099 	DRM_FORMAT_XBGR8888,
6100 	DRM_FORMAT_ABGR8888,
6101 	DRM_FORMAT_RGB565,
6102 };
6103 
6104 static const uint32_t overlay_formats[] = {
6105 	DRM_FORMAT_XRGB8888,
6106 	DRM_FORMAT_ARGB8888,
6107 	DRM_FORMAT_RGBA8888,
6108 	DRM_FORMAT_XBGR8888,
6109 	DRM_FORMAT_ABGR8888,
6110 	DRM_FORMAT_RGB565
6111 };
6112 
6113 static const u32 cursor_formats[] = {
6114 	DRM_FORMAT_ARGB8888
6115 };
6116 
get_plane_formats(const struct drm_plane * plane,const struct dc_plane_cap * plane_cap,uint32_t * formats,int max_formats)6117 static int get_plane_formats(const struct drm_plane *plane,
6118 			     const struct dc_plane_cap *plane_cap,
6119 			     uint32_t *formats, int max_formats)
6120 {
6121 	int i, num_formats = 0;
6122 
6123 	/*
6124 	 * TODO: Query support for each group of formats directly from
6125 	 * DC plane caps. This will require adding more formats to the
6126 	 * caps list.
6127 	 */
6128 
6129 	switch (plane->type) {
6130 	case DRM_PLANE_TYPE_PRIMARY:
6131 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6132 			if (num_formats >= max_formats)
6133 				break;
6134 
6135 			formats[num_formats++] = rgb_formats[i];
6136 		}
6137 
6138 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6139 			formats[num_formats++] = DRM_FORMAT_NV12;
6140 		if (plane_cap && plane_cap->pixel_format_support.p010)
6141 			formats[num_formats++] = DRM_FORMAT_P010;
6142 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6143 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6144 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6145 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6146 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6147 		}
6148 		break;
6149 
6150 	case DRM_PLANE_TYPE_OVERLAY:
6151 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6152 			if (num_formats >= max_formats)
6153 				break;
6154 
6155 			formats[num_formats++] = overlay_formats[i];
6156 		}
6157 		break;
6158 
6159 	case DRM_PLANE_TYPE_CURSOR:
6160 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6161 			if (num_formats >= max_formats)
6162 				break;
6163 
6164 			formats[num_formats++] = cursor_formats[i];
6165 		}
6166 		break;
6167 	}
6168 
6169 	return num_formats;
6170 }
6171 
amdgpu_dm_plane_init(struct amdgpu_display_manager * dm,struct drm_plane * plane,unsigned long possible_crtcs,const struct dc_plane_cap * plane_cap)6172 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6173 				struct drm_plane *plane,
6174 				unsigned long possible_crtcs,
6175 				const struct dc_plane_cap *plane_cap)
6176 {
6177 	uint32_t formats[32];
6178 	int num_formats;
6179 	int res = -EPERM;
6180 	unsigned int supported_rotations;
6181 
6182 	num_formats = get_plane_formats(plane, plane_cap, formats,
6183 					ARRAY_SIZE(formats));
6184 
6185 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6186 				       &dm_plane_funcs, formats, num_formats,
6187 				       NULL, plane->type, NULL);
6188 	if (res)
6189 		return res;
6190 
6191 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6192 	    plane_cap && plane_cap->per_pixel_alpha) {
6193 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6194 					  BIT(DRM_MODE_BLEND_PREMULTI);
6195 
6196 		drm_plane_create_alpha_property(plane);
6197 		drm_plane_create_blend_mode_property(plane, blend_caps);
6198 	}
6199 
6200 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6201 	    plane_cap &&
6202 	    (plane_cap->pixel_format_support.nv12 ||
6203 	     plane_cap->pixel_format_support.p010)) {
6204 		/* This only affects YUV formats. */
6205 		drm_plane_create_color_properties(
6206 			plane,
6207 			BIT(DRM_COLOR_YCBCR_BT601) |
6208 			BIT(DRM_COLOR_YCBCR_BT709) |
6209 			BIT(DRM_COLOR_YCBCR_BT2020),
6210 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6211 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6212 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6213 	}
6214 
6215 	supported_rotations =
6216 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6217 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6218 
6219 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6220 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6221 						   supported_rotations);
6222 
6223 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6224 
6225 	/* Create (reset) the plane state */
6226 	if (plane->funcs->reset)
6227 		plane->funcs->reset(plane);
6228 
6229 	return 0;
6230 }
6231 
amdgpu_dm_crtc_init(struct amdgpu_display_manager * dm,struct drm_plane * plane,uint32_t crtc_index)6232 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6233 			       struct drm_plane *plane,
6234 			       uint32_t crtc_index)
6235 {
6236 	struct amdgpu_crtc *acrtc = NULL;
6237 	struct drm_plane *cursor_plane;
6238 
6239 	int res = -ENOMEM;
6240 
6241 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6242 	if (!cursor_plane)
6243 		goto fail;
6244 
6245 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6246 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6247 
6248 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6249 	if (!acrtc)
6250 		goto fail;
6251 
6252 	res = drm_crtc_init_with_planes(
6253 			dm->ddev,
6254 			&acrtc->base,
6255 			plane,
6256 			cursor_plane,
6257 			&amdgpu_dm_crtc_funcs, NULL);
6258 
6259 	if (res)
6260 		goto fail;
6261 
6262 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6263 
6264 	/* Create (reset) the plane state */
6265 	if (acrtc->base.funcs->reset)
6266 		acrtc->base.funcs->reset(&acrtc->base);
6267 
6268 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6269 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6270 
6271 	acrtc->crtc_id = crtc_index;
6272 	acrtc->base.enabled = false;
6273 	acrtc->otg_inst = -1;
6274 
6275 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6276 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6277 				   true, MAX_COLOR_LUT_ENTRIES);
6278 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6279 
6280 	return 0;
6281 
6282 fail:
6283 	kfree(acrtc);
6284 	kfree(cursor_plane);
6285 	return res;
6286 }
6287 
6288 
to_drm_connector_type(enum signal_type st)6289 static int to_drm_connector_type(enum signal_type st)
6290 {
6291 	switch (st) {
6292 	case SIGNAL_TYPE_HDMI_TYPE_A:
6293 		return DRM_MODE_CONNECTOR_HDMIA;
6294 	case SIGNAL_TYPE_EDP:
6295 		return DRM_MODE_CONNECTOR_eDP;
6296 	case SIGNAL_TYPE_LVDS:
6297 		return DRM_MODE_CONNECTOR_LVDS;
6298 	case SIGNAL_TYPE_RGB:
6299 		return DRM_MODE_CONNECTOR_VGA;
6300 	case SIGNAL_TYPE_DISPLAY_PORT:
6301 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6302 		return DRM_MODE_CONNECTOR_DisplayPort;
6303 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6304 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6305 		return DRM_MODE_CONNECTOR_DVID;
6306 	case SIGNAL_TYPE_VIRTUAL:
6307 		return DRM_MODE_CONNECTOR_VIRTUAL;
6308 
6309 	default:
6310 		return DRM_MODE_CONNECTOR_Unknown;
6311 	}
6312 }
6313 
amdgpu_dm_connector_to_encoder(struct drm_connector * connector)6314 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6315 {
6316 	struct drm_encoder *encoder;
6317 
6318 	/* There is only one encoder per connector */
6319 	drm_connector_for_each_possible_encoder(connector, encoder)
6320 		return encoder;
6321 
6322 	return NULL;
6323 }
6324 
amdgpu_dm_get_native_mode(struct drm_connector * connector)6325 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6326 {
6327 	struct drm_encoder *encoder;
6328 	struct amdgpu_encoder *amdgpu_encoder;
6329 
6330 	encoder = amdgpu_dm_connector_to_encoder(connector);
6331 
6332 	if (encoder == NULL)
6333 		return;
6334 
6335 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6336 
6337 	amdgpu_encoder->native_mode.clock = 0;
6338 
6339 	if (!list_empty(&connector->probed_modes)) {
6340 		struct drm_display_mode *preferred_mode = NULL;
6341 
6342 		list_for_each_entry(preferred_mode,
6343 				    &connector->probed_modes,
6344 				    head) {
6345 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6346 				amdgpu_encoder->native_mode = *preferred_mode;
6347 
6348 			break;
6349 		}
6350 
6351 	}
6352 }
6353 
6354 static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder * encoder,char * name,int hdisplay,int vdisplay)6355 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6356 			     char *name,
6357 			     int hdisplay, int vdisplay)
6358 {
6359 	struct drm_device *dev = encoder->dev;
6360 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6361 	struct drm_display_mode *mode = NULL;
6362 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6363 
6364 	mode = drm_mode_duplicate(dev, native_mode);
6365 
6366 	if (mode == NULL)
6367 		return NULL;
6368 
6369 	mode->hdisplay = hdisplay;
6370 	mode->vdisplay = vdisplay;
6371 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6372 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6373 
6374 	return mode;
6375 
6376 }
6377 
amdgpu_dm_connector_add_common_modes(struct drm_encoder * encoder,struct drm_connector * connector)6378 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6379 						 struct drm_connector *connector)
6380 {
6381 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6382 	struct drm_display_mode *mode = NULL;
6383 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6384 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6385 				to_amdgpu_dm_connector(connector);
6386 	int i;
6387 	int n;
6388 	struct mode_size {
6389 		char name[DRM_DISPLAY_MODE_LEN];
6390 		int w;
6391 		int h;
6392 	} common_modes[] = {
6393 		{  "640x480",  640,  480},
6394 		{  "800x600",  800,  600},
6395 		{ "1024x768", 1024,  768},
6396 		{ "1280x720", 1280,  720},
6397 		{ "1280x800", 1280,  800},
6398 		{"1280x1024", 1280, 1024},
6399 		{ "1440x900", 1440,  900},
6400 		{"1680x1050", 1680, 1050},
6401 		{"1600x1200", 1600, 1200},
6402 		{"1920x1080", 1920, 1080},
6403 		{"1920x1200", 1920, 1200}
6404 	};
6405 
6406 	n = ARRAY_SIZE(common_modes);
6407 
6408 	for (i = 0; i < n; i++) {
6409 		struct drm_display_mode *curmode = NULL;
6410 		bool mode_existed = false;
6411 
6412 		if (common_modes[i].w > native_mode->hdisplay ||
6413 		    common_modes[i].h > native_mode->vdisplay ||
6414 		   (common_modes[i].w == native_mode->hdisplay &&
6415 		    common_modes[i].h == native_mode->vdisplay))
6416 			continue;
6417 
6418 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6419 			if (common_modes[i].w == curmode->hdisplay &&
6420 			    common_modes[i].h == curmode->vdisplay) {
6421 				mode_existed = true;
6422 				break;
6423 			}
6424 		}
6425 
6426 		if (mode_existed)
6427 			continue;
6428 
6429 		mode = amdgpu_dm_create_common_mode(encoder,
6430 				common_modes[i].name, common_modes[i].w,
6431 				common_modes[i].h);
6432 		if (!mode)
6433 			continue;
6434 
6435 		drm_mode_probed_add(connector, mode);
6436 		amdgpu_dm_connector->num_modes++;
6437 	}
6438 }
6439 
amdgpu_dm_connector_ddc_get_modes(struct drm_connector * connector,struct edid * edid)6440 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6441 					      struct edid *edid)
6442 {
6443 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6444 			to_amdgpu_dm_connector(connector);
6445 
6446 	if (edid) {
6447 		/* empty probed_modes */
6448 		INIT_LIST_HEAD(&connector->probed_modes);
6449 		amdgpu_dm_connector->num_modes =
6450 				drm_add_edid_modes(connector, edid);
6451 
6452 		/* sorting the probed modes before calling function
6453 		 * amdgpu_dm_get_native_mode() since EDID can have
6454 		 * more than one preferred mode. The modes that are
6455 		 * later in the probed mode list could be of higher
6456 		 * and preferred resolution. For example, 3840x2160
6457 		 * resolution in base EDID preferred timing and 4096x2160
6458 		 * preferred resolution in DID extension block later.
6459 		 */
6460 		drm_mode_sort(&connector->probed_modes);
6461 		amdgpu_dm_get_native_mode(connector);
6462 	} else {
6463 		amdgpu_dm_connector->num_modes = 0;
6464 	}
6465 }
6466 
amdgpu_dm_connector_get_modes(struct drm_connector * connector)6467 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6468 {
6469 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6470 			to_amdgpu_dm_connector(connector);
6471 	struct drm_encoder *encoder;
6472 	struct edid *edid = amdgpu_dm_connector->edid;
6473 
6474 	encoder = amdgpu_dm_connector_to_encoder(connector);
6475 
6476 	if (!edid || !drm_edid_is_valid(edid)) {
6477 		amdgpu_dm_connector->num_modes =
6478 				drm_add_modes_noedid(connector, 640, 480);
6479 	} else {
6480 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6481 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6482 	}
6483 	amdgpu_dm_fbc_init(connector);
6484 
6485 	return amdgpu_dm_connector->num_modes;
6486 }
6487 
amdgpu_dm_connector_init_helper(struct amdgpu_display_manager * dm,struct amdgpu_dm_connector * aconnector,int connector_type,struct dc_link * link,int link_index)6488 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6489 				     struct amdgpu_dm_connector *aconnector,
6490 				     int connector_type,
6491 				     struct dc_link *link,
6492 				     int link_index)
6493 {
6494 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6495 
6496 	/*
6497 	 * Some of the properties below require access to state, like bpc.
6498 	 * Allocate some default initial connector state with our reset helper.
6499 	 */
6500 	if (aconnector->base.funcs->reset)
6501 		aconnector->base.funcs->reset(&aconnector->base);
6502 
6503 	aconnector->connector_id = link_index;
6504 	aconnector->dc_link = link;
6505 	aconnector->base.interlace_allowed = false;
6506 	aconnector->base.doublescan_allowed = false;
6507 	aconnector->base.stereo_allowed = false;
6508 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6509 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6510 	aconnector->audio_inst = -1;
6511 	mutex_init(&aconnector->hpd_lock);
6512 
6513 	/*
6514 	 * configure support HPD hot plug connector_>polled default value is 0
6515 	 * which means HPD hot plug not supported
6516 	 */
6517 	switch (connector_type) {
6518 	case DRM_MODE_CONNECTOR_HDMIA:
6519 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6520 		aconnector->base.ycbcr_420_allowed =
6521 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6522 		break;
6523 	case DRM_MODE_CONNECTOR_DisplayPort:
6524 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6525 		aconnector->base.ycbcr_420_allowed =
6526 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6527 		break;
6528 	case DRM_MODE_CONNECTOR_DVID:
6529 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6530 		break;
6531 	default:
6532 		break;
6533 	}
6534 
6535 	drm_object_attach_property(&aconnector->base.base,
6536 				dm->ddev->mode_config.scaling_mode_property,
6537 				DRM_MODE_SCALE_NONE);
6538 
6539 	drm_object_attach_property(&aconnector->base.base,
6540 				adev->mode_info.underscan_property,
6541 				UNDERSCAN_OFF);
6542 	drm_object_attach_property(&aconnector->base.base,
6543 				adev->mode_info.underscan_hborder_property,
6544 				0);
6545 	drm_object_attach_property(&aconnector->base.base,
6546 				adev->mode_info.underscan_vborder_property,
6547 				0);
6548 
6549 	if (!aconnector->mst_port)
6550 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6551 
6552 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6553 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6554 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6555 
6556 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6557 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6558 		drm_object_attach_property(&aconnector->base.base,
6559 				adev->mode_info.abm_level_property, 0);
6560 	}
6561 
6562 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6563 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6564 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6565 		drm_object_attach_property(
6566 			&aconnector->base.base,
6567 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6568 
6569 		if (!aconnector->mst_port)
6570 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6571 
6572 #ifdef CONFIG_DRM_AMD_DC_HDCP
6573 		if (adev->dm.hdcp_workqueue)
6574 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6575 #endif
6576 	}
6577 }
6578 
amdgpu_dm_i2c_xfer(struct i2c_adapter * i2c_adap,struct i2c_msg * msgs,int num)6579 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6580 			      struct i2c_msg *msgs, int num)
6581 {
6582 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6583 	struct ddc_service *ddc_service = i2c->ddc_service;
6584 	struct i2c_command cmd;
6585 	int i;
6586 	int result = -EIO;
6587 
6588 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6589 
6590 	if (!cmd.payloads)
6591 		return result;
6592 
6593 	cmd.number_of_payloads = num;
6594 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6595 	cmd.speed = 100;
6596 
6597 	for (i = 0; i < num; i++) {
6598 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6599 		cmd.payloads[i].address = msgs[i].addr;
6600 		cmd.payloads[i].length = msgs[i].len;
6601 		cmd.payloads[i].data = msgs[i].buf;
6602 	}
6603 
6604 	if (dc_submit_i2c(
6605 			ddc_service->ctx->dc,
6606 			ddc_service->ddc_pin->hw_info.ddc_channel,
6607 			&cmd))
6608 		result = num;
6609 
6610 	kfree(cmd.payloads);
6611 	return result;
6612 }
6613 
amdgpu_dm_i2c_func(struct i2c_adapter * adap)6614 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6615 {
6616 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6617 }
6618 
6619 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6620 	.master_xfer = amdgpu_dm_i2c_xfer,
6621 	.functionality = amdgpu_dm_i2c_func,
6622 };
6623 
6624 static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service * ddc_service,int link_index,int * res)6625 create_i2c(struct ddc_service *ddc_service,
6626 	   int link_index,
6627 	   int *res)
6628 {
6629 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6630 	struct amdgpu_i2c_adapter *i2c;
6631 
6632 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6633 	if (!i2c)
6634 		return NULL;
6635 	i2c->base.owner = THIS_MODULE;
6636 	i2c->base.class = I2C_CLASS_DDC;
6637 	i2c->base.dev.parent = &adev->pdev->dev;
6638 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6639 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6640 	i2c_set_adapdata(&i2c->base, i2c);
6641 	i2c->ddc_service = ddc_service;
6642 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6643 
6644 	return i2c;
6645 }
6646 
6647 
6648 /*
6649  * Note: this function assumes that dc_link_detect() was called for the
6650  * dc_link which will be represented by this aconnector.
6651  */
amdgpu_dm_connector_init(struct amdgpu_display_manager * dm,struct amdgpu_dm_connector * aconnector,uint32_t link_index,struct amdgpu_encoder * aencoder)6652 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6653 				    struct amdgpu_dm_connector *aconnector,
6654 				    uint32_t link_index,
6655 				    struct amdgpu_encoder *aencoder)
6656 {
6657 	int res = 0;
6658 	int connector_type;
6659 	struct dc *dc = dm->dc;
6660 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6661 	struct amdgpu_i2c_adapter *i2c;
6662 
6663 	link->priv = aconnector;
6664 
6665 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6666 
6667 	i2c = create_i2c(link->ddc, link->link_index, &res);
6668 	if (!i2c) {
6669 		DRM_ERROR("Failed to create i2c adapter data\n");
6670 		return -ENOMEM;
6671 	}
6672 
6673 	aconnector->i2c = i2c;
6674 	res = i2c_add_adapter(&i2c->base);
6675 
6676 	if (res) {
6677 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6678 		goto out_free;
6679 	}
6680 
6681 	connector_type = to_drm_connector_type(link->connector_signal);
6682 
6683 	res = drm_connector_init_with_ddc(
6684 			dm->ddev,
6685 			&aconnector->base,
6686 			&amdgpu_dm_connector_funcs,
6687 			connector_type,
6688 			&i2c->base);
6689 
6690 	if (res) {
6691 		DRM_ERROR("connector_init failed\n");
6692 		aconnector->connector_id = -1;
6693 		goto out_free;
6694 	}
6695 
6696 	drm_connector_helper_add(
6697 			&aconnector->base,
6698 			&amdgpu_dm_connector_helper_funcs);
6699 
6700 	amdgpu_dm_connector_init_helper(
6701 		dm,
6702 		aconnector,
6703 		connector_type,
6704 		link,
6705 		link_index);
6706 
6707 	drm_connector_attach_encoder(
6708 		&aconnector->base, &aencoder->base);
6709 
6710 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6711 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6712 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6713 
6714 out_free:
6715 	if (res) {
6716 		kfree(i2c);
6717 		aconnector->i2c = NULL;
6718 	}
6719 	return res;
6720 }
6721 
amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device * adev)6722 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6723 {
6724 	switch (adev->mode_info.num_crtc) {
6725 	case 1:
6726 		return 0x1;
6727 	case 2:
6728 		return 0x3;
6729 	case 3:
6730 		return 0x7;
6731 	case 4:
6732 		return 0xf;
6733 	case 5:
6734 		return 0x1f;
6735 	case 6:
6736 	default:
6737 		return 0x3f;
6738 	}
6739 }
6740 
amdgpu_dm_encoder_init(struct drm_device * dev,struct amdgpu_encoder * aencoder,uint32_t link_index)6741 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6742 				  struct amdgpu_encoder *aencoder,
6743 				  uint32_t link_index)
6744 {
6745 	struct amdgpu_device *adev = drm_to_adev(dev);
6746 
6747 	int res = drm_encoder_init(dev,
6748 				   &aencoder->base,
6749 				   &amdgpu_dm_encoder_funcs,
6750 				   DRM_MODE_ENCODER_TMDS,
6751 				   NULL);
6752 
6753 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6754 
6755 	if (!res)
6756 		aencoder->encoder_id = link_index;
6757 	else
6758 		aencoder->encoder_id = -1;
6759 
6760 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6761 
6762 	return res;
6763 }
6764 
manage_dm_interrupts(struct amdgpu_device * adev,struct amdgpu_crtc * acrtc,bool enable)6765 static void manage_dm_interrupts(struct amdgpu_device *adev,
6766 				 struct amdgpu_crtc *acrtc,
6767 				 bool enable)
6768 {
6769 	/*
6770 	 * We have no guarantee that the frontend index maps to the same
6771 	 * backend index - some even map to more than one.
6772 	 *
6773 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6774 	 */
6775 	int irq_type =
6776 		amdgpu_display_crtc_idx_to_irq_type(
6777 			adev,
6778 			acrtc->crtc_id);
6779 
6780 	if (enable) {
6781 		drm_crtc_vblank_on(&acrtc->base);
6782 		amdgpu_irq_get(
6783 			adev,
6784 			&adev->pageflip_irq,
6785 			irq_type);
6786 	} else {
6787 
6788 		amdgpu_irq_put(
6789 			adev,
6790 			&adev->pageflip_irq,
6791 			irq_type);
6792 		drm_crtc_vblank_off(&acrtc->base);
6793 	}
6794 }
6795 
dm_update_pflip_irq_state(struct amdgpu_device * adev,struct amdgpu_crtc * acrtc)6796 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6797 				      struct amdgpu_crtc *acrtc)
6798 {
6799 	int irq_type =
6800 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6801 
6802 	/**
6803 	 * This reads the current state for the IRQ and force reapplies
6804 	 * the setting to hardware.
6805 	 */
6806 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6807 }
6808 
6809 static bool
is_scaling_state_different(const struct dm_connector_state * dm_state,const struct dm_connector_state * old_dm_state)6810 is_scaling_state_different(const struct dm_connector_state *dm_state,
6811 			   const struct dm_connector_state *old_dm_state)
6812 {
6813 	if (dm_state->scaling != old_dm_state->scaling)
6814 		return true;
6815 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6816 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6817 			return true;
6818 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6819 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6820 			return true;
6821 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6822 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6823 		return true;
6824 	return false;
6825 }
6826 
6827 #ifdef CONFIG_DRM_AMD_DC_HDCP
is_content_protection_different(struct drm_connector_state * state,const struct drm_connector_state * old_state,const struct drm_connector * connector,struct hdcp_workqueue * hdcp_w)6828 static bool is_content_protection_different(struct drm_connector_state *state,
6829 					    const struct drm_connector_state *old_state,
6830 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6831 {
6832 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6833 
6834 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6835 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6836 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6837 		return true;
6838 	}
6839 
6840 	/* CP is being re enabled, ignore this */
6841 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6842 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6843 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6844 		return false;
6845 	}
6846 
6847 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6848 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6849 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6850 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6851 
6852 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6853 	 * hot-plug, headless s3, dpms
6854 	 */
6855 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6856 	    aconnector->dc_sink != NULL)
6857 		return true;
6858 
6859 	if (old_state->content_protection == state->content_protection)
6860 		return false;
6861 
6862 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6863 		return true;
6864 
6865 	return false;
6866 }
6867 
6868 #endif
remove_stream(struct amdgpu_device * adev,struct amdgpu_crtc * acrtc,struct dc_stream_state * stream)6869 static void remove_stream(struct amdgpu_device *adev,
6870 			  struct amdgpu_crtc *acrtc,
6871 			  struct dc_stream_state *stream)
6872 {
6873 	/* this is the update mode case */
6874 
6875 	acrtc->otg_inst = -1;
6876 	acrtc->enabled = false;
6877 }
6878 
get_cursor_position(struct drm_plane * plane,struct drm_crtc * crtc,struct dc_cursor_position * position)6879 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6880 			       struct dc_cursor_position *position)
6881 {
6882 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6883 	int x, y;
6884 	int xorigin = 0, yorigin = 0;
6885 
6886 	if (!crtc || !plane->state->fb)
6887 		return 0;
6888 
6889 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6890 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6891 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6892 			  __func__,
6893 			  plane->state->crtc_w,
6894 			  plane->state->crtc_h);
6895 		return -EINVAL;
6896 	}
6897 
6898 	x = plane->state->crtc_x;
6899 	y = plane->state->crtc_y;
6900 
6901 	if (x <= -amdgpu_crtc->max_cursor_width ||
6902 	    y <= -amdgpu_crtc->max_cursor_height)
6903 		return 0;
6904 
6905 	if (x < 0) {
6906 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6907 		x = 0;
6908 	}
6909 	if (y < 0) {
6910 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6911 		y = 0;
6912 	}
6913 	position->enable = true;
6914 	position->translate_by_source = true;
6915 	position->x = x;
6916 	position->y = y;
6917 	position->x_hotspot = xorigin;
6918 	position->y_hotspot = yorigin;
6919 
6920 	return 0;
6921 }
6922 
handle_cursor_update(struct drm_plane * plane,struct drm_plane_state * old_plane_state)6923 static void handle_cursor_update(struct drm_plane *plane,
6924 				 struct drm_plane_state *old_plane_state)
6925 {
6926 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6927 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6928 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6929 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6930 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6931 	uint64_t address = afb ? afb->address : 0;
6932 	struct dc_cursor_position position = {0};
6933 	struct dc_cursor_attributes attributes;
6934 	int ret;
6935 
6936 	if (!plane->state->fb && !old_plane_state->fb)
6937 		return;
6938 
6939 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6940 			 __func__,
6941 			 amdgpu_crtc->crtc_id,
6942 			 plane->state->crtc_w,
6943 			 plane->state->crtc_h);
6944 
6945 	ret = get_cursor_position(plane, crtc, &position);
6946 	if (ret)
6947 		return;
6948 
6949 	if (!position.enable) {
6950 		/* turn off cursor */
6951 		if (crtc_state && crtc_state->stream) {
6952 			mutex_lock(&adev->dm.dc_lock);
6953 			dc_stream_set_cursor_position(crtc_state->stream,
6954 						      &position);
6955 			mutex_unlock(&adev->dm.dc_lock);
6956 		}
6957 		return;
6958 	}
6959 
6960 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6961 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6962 
6963 	memset(&attributes, 0, sizeof(attributes));
6964 	attributes.address.high_part = upper_32_bits(address);
6965 	attributes.address.low_part  = lower_32_bits(address);
6966 	attributes.width             = plane->state->crtc_w;
6967 	attributes.height            = plane->state->crtc_h;
6968 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6969 	attributes.rotation_angle    = 0;
6970 	attributes.attribute_flags.value = 0;
6971 
6972 	attributes.pitch = attributes.width;
6973 
6974 	if (crtc_state->stream) {
6975 		mutex_lock(&adev->dm.dc_lock);
6976 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6977 							 &attributes))
6978 			DRM_ERROR("DC failed to set cursor attributes\n");
6979 
6980 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6981 						   &position))
6982 			DRM_ERROR("DC failed to set cursor position\n");
6983 		mutex_unlock(&adev->dm.dc_lock);
6984 	}
6985 }
6986 
prepare_flip_isr(struct amdgpu_crtc * acrtc)6987 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6988 {
6989 
6990 	assert_spin_locked(&acrtc->base.dev->event_lock);
6991 	WARN_ON(acrtc->event);
6992 
6993 	acrtc->event = acrtc->base.state->event;
6994 
6995 	/* Set the flip status */
6996 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6997 
6998 	/* Mark this event as consumed */
6999 	acrtc->base.state->event = NULL;
7000 
7001 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7002 						 acrtc->crtc_id);
7003 }
7004 
update_freesync_state_on_stream(struct amdgpu_display_manager * dm,struct dm_crtc_state * new_crtc_state,struct dc_stream_state * new_stream,struct dc_plane_state * surface,u32 flip_timestamp_in_us)7005 static void update_freesync_state_on_stream(
7006 	struct amdgpu_display_manager *dm,
7007 	struct dm_crtc_state *new_crtc_state,
7008 	struct dc_stream_state *new_stream,
7009 	struct dc_plane_state *surface,
7010 	u32 flip_timestamp_in_us)
7011 {
7012 	struct mod_vrr_params vrr_params;
7013 	struct dc_info_packet vrr_infopacket = {0};
7014 	struct amdgpu_device *adev = dm->adev;
7015 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7016 	unsigned long flags;
7017 
7018 	if (!new_stream)
7019 		return;
7020 
7021 	/*
7022 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7023 	 * For now it's sufficient to just guard against these conditions.
7024 	 */
7025 
7026 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7027 		return;
7028 
7029 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7030         vrr_params = acrtc->dm_irq_params.vrr_params;
7031 
7032 	if (surface) {
7033 		mod_freesync_handle_preflip(
7034 			dm->freesync_module,
7035 			surface,
7036 			new_stream,
7037 			flip_timestamp_in_us,
7038 			&vrr_params);
7039 
7040 		if (adev->family < AMDGPU_FAMILY_AI &&
7041 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7042 			mod_freesync_handle_v_update(dm->freesync_module,
7043 						     new_stream, &vrr_params);
7044 
7045 			/* Need to call this before the frame ends. */
7046 			dc_stream_adjust_vmin_vmax(dm->dc,
7047 						   new_crtc_state->stream,
7048 						   &vrr_params.adjust);
7049 		}
7050 	}
7051 
7052 	mod_freesync_build_vrr_infopacket(
7053 		dm->freesync_module,
7054 		new_stream,
7055 		&vrr_params,
7056 		PACKET_TYPE_VRR,
7057 		TRANSFER_FUNC_UNKNOWN,
7058 		&vrr_infopacket);
7059 
7060 	new_crtc_state->freesync_timing_changed |=
7061 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7062 			&vrr_params.adjust,
7063 			sizeof(vrr_params.adjust)) != 0);
7064 
7065 	new_crtc_state->freesync_vrr_info_changed |=
7066 		(memcmp(&new_crtc_state->vrr_infopacket,
7067 			&vrr_infopacket,
7068 			sizeof(vrr_infopacket)) != 0);
7069 
7070 	acrtc->dm_irq_params.vrr_params = vrr_params;
7071 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7072 
7073 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7074 	new_stream->vrr_infopacket = vrr_infopacket;
7075 
7076 	if (new_crtc_state->freesync_vrr_info_changed)
7077 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7078 			      new_crtc_state->base.crtc->base.id,
7079 			      (int)new_crtc_state->base.vrr_enabled,
7080 			      (int)vrr_params.state);
7081 
7082 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7083 }
7084 
update_stream_irq_parameters(struct amdgpu_display_manager * dm,struct dm_crtc_state * new_crtc_state)7085 static void update_stream_irq_parameters(
7086 	struct amdgpu_display_manager *dm,
7087 	struct dm_crtc_state *new_crtc_state)
7088 {
7089 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7090 	struct mod_vrr_params vrr_params;
7091 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7092 	struct amdgpu_device *adev = dm->adev;
7093 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7094 	unsigned long flags;
7095 
7096 	if (!new_stream)
7097 		return;
7098 
7099 	/*
7100 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7101 	 * For now it's sufficient to just guard against these conditions.
7102 	 */
7103 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7104 		return;
7105 
7106 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7107 	vrr_params = acrtc->dm_irq_params.vrr_params;
7108 
7109 	if (new_crtc_state->vrr_supported &&
7110 	    config.min_refresh_in_uhz &&
7111 	    config.max_refresh_in_uhz) {
7112 		config.state = new_crtc_state->base.vrr_enabled ?
7113 			VRR_STATE_ACTIVE_VARIABLE :
7114 			VRR_STATE_INACTIVE;
7115 	} else {
7116 		config.state = VRR_STATE_UNSUPPORTED;
7117 	}
7118 
7119 	mod_freesync_build_vrr_params(dm->freesync_module,
7120 				      new_stream,
7121 				      &config, &vrr_params);
7122 
7123 	new_crtc_state->freesync_timing_changed |=
7124 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7125 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7126 
7127 	new_crtc_state->freesync_config = config;
7128 	/* Copy state for access from DM IRQ handler */
7129 	acrtc->dm_irq_params.freesync_config = config;
7130 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7131 	acrtc->dm_irq_params.vrr_params = vrr_params;
7132 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7133 }
7134 
amdgpu_dm_handle_vrr_transition(struct dm_crtc_state * old_state,struct dm_crtc_state * new_state)7135 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7136 					    struct dm_crtc_state *new_state)
7137 {
7138 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7139 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7140 
7141 	if (!old_vrr_active && new_vrr_active) {
7142 		/* Transition VRR inactive -> active:
7143 		 * While VRR is active, we must not disable vblank irq, as a
7144 		 * reenable after disable would compute bogus vblank/pflip
7145 		 * timestamps if it likely happened inside display front-porch.
7146 		 *
7147 		 * We also need vupdate irq for the actual core vblank handling
7148 		 * at end of vblank.
7149 		 */
7150 		dm_set_vupdate_irq(new_state->base.crtc, true);
7151 		drm_crtc_vblank_get(new_state->base.crtc);
7152 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7153 				 __func__, new_state->base.crtc->base.id);
7154 	} else if (old_vrr_active && !new_vrr_active) {
7155 		/* Transition VRR active -> inactive:
7156 		 * Allow vblank irq disable again for fixed refresh rate.
7157 		 */
7158 		dm_set_vupdate_irq(new_state->base.crtc, false);
7159 		drm_crtc_vblank_put(new_state->base.crtc);
7160 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7161 				 __func__, new_state->base.crtc->base.id);
7162 	}
7163 }
7164 
amdgpu_dm_commit_cursors(struct drm_atomic_state * state)7165 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7166 {
7167 	struct drm_plane *plane;
7168 	struct drm_plane_state *old_plane_state, *new_plane_state;
7169 	int i;
7170 
7171 	/*
7172 	 * TODO: Make this per-stream so we don't issue redundant updates for
7173 	 * commits with multiple streams.
7174 	 */
7175 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7176 				       new_plane_state, i)
7177 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7178 			handle_cursor_update(plane, old_plane_state);
7179 }
7180 
amdgpu_dm_commit_planes(struct drm_atomic_state * state,struct dc_state * dc_state,struct drm_device * dev,struct amdgpu_display_manager * dm,struct drm_crtc * pcrtc,bool wait_for_vblank)7181 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7182 				    struct dc_state *dc_state,
7183 				    struct drm_device *dev,
7184 				    struct amdgpu_display_manager *dm,
7185 				    struct drm_crtc *pcrtc,
7186 				    bool wait_for_vblank)
7187 {
7188 	uint32_t i;
7189 	uint64_t timestamp_ns;
7190 	struct drm_plane *plane;
7191 	struct drm_plane_state *old_plane_state, *new_plane_state;
7192 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7193 	struct drm_crtc_state *new_pcrtc_state =
7194 			drm_atomic_get_new_crtc_state(state, pcrtc);
7195 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7196 	struct dm_crtc_state *dm_old_crtc_state =
7197 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7198 	int planes_count = 0, vpos, hpos;
7199 	long r;
7200 	unsigned long flags;
7201 	struct amdgpu_bo *abo;
7202 	uint32_t target_vblank, last_flip_vblank;
7203 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7204 	bool pflip_present = false;
7205 	struct {
7206 		struct dc_surface_update surface_updates[MAX_SURFACES];
7207 		struct dc_plane_info plane_infos[MAX_SURFACES];
7208 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7209 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7210 		struct dc_stream_update stream_update;
7211 	} *bundle;
7212 
7213 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7214 
7215 	if (!bundle) {
7216 		dm_error("Failed to allocate update bundle\n");
7217 		goto cleanup;
7218 	}
7219 
7220 	/*
7221 	 * Disable the cursor first if we're disabling all the planes.
7222 	 * It'll remain on the screen after the planes are re-enabled
7223 	 * if we don't.
7224 	 */
7225 	if (acrtc_state->active_planes == 0)
7226 		amdgpu_dm_commit_cursors(state);
7227 
7228 	/* update planes when needed */
7229 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7230 		struct drm_crtc *crtc = new_plane_state->crtc;
7231 		struct drm_crtc_state *new_crtc_state;
7232 		struct drm_framebuffer *fb = new_plane_state->fb;
7233 		bool plane_needs_flip;
7234 		struct dc_plane_state *dc_plane;
7235 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7236 
7237 		/* Cursor plane is handled after stream updates */
7238 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7239 			continue;
7240 
7241 		if (!fb || !crtc || pcrtc != crtc)
7242 			continue;
7243 
7244 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7245 		if (!new_crtc_state->active)
7246 			continue;
7247 
7248 		dc_plane = dm_new_plane_state->dc_state;
7249 
7250 		bundle->surface_updates[planes_count].surface = dc_plane;
7251 		if (new_pcrtc_state->color_mgmt_changed) {
7252 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7253 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7254 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7255 		}
7256 
7257 		fill_dc_scaling_info(new_plane_state,
7258 				     &bundle->scaling_infos[planes_count]);
7259 
7260 		bundle->surface_updates[planes_count].scaling_info =
7261 			&bundle->scaling_infos[planes_count];
7262 
7263 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7264 
7265 		pflip_present = pflip_present || plane_needs_flip;
7266 
7267 		if (!plane_needs_flip) {
7268 			planes_count += 1;
7269 			continue;
7270 		}
7271 
7272 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7273 
7274 		/*
7275 		 * Wait for all fences on this FB. Do limited wait to avoid
7276 		 * deadlock during GPU reset when this fence will not signal
7277 		 * but we hold reservation lock for the BO.
7278 		 */
7279 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7280 							false,
7281 							msecs_to_jiffies(5000));
7282 		if (unlikely(r <= 0))
7283 			DRM_ERROR("Waiting for fences timed out!");
7284 
7285 		fill_dc_plane_info_and_addr(
7286 			dm->adev, new_plane_state,
7287 			dm_new_plane_state->tiling_flags,
7288 			&bundle->plane_infos[planes_count],
7289 			&bundle->flip_addrs[planes_count].address,
7290 			dm_new_plane_state->tmz_surface, false);
7291 
7292 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7293 				 new_plane_state->plane->index,
7294 				 bundle->plane_infos[planes_count].dcc.enable);
7295 
7296 		bundle->surface_updates[planes_count].plane_info =
7297 			&bundle->plane_infos[planes_count];
7298 
7299 		/*
7300 		 * Only allow immediate flips for fast updates that don't
7301 		 * change FB pitch, DCC state, rotation or mirroing.
7302 		 */
7303 		bundle->flip_addrs[planes_count].flip_immediate =
7304 			crtc->state->async_flip &&
7305 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7306 
7307 		timestamp_ns = ktime_get_ns();
7308 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7309 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7310 		bundle->surface_updates[planes_count].surface = dc_plane;
7311 
7312 		if (!bundle->surface_updates[planes_count].surface) {
7313 			DRM_ERROR("No surface for CRTC: id=%d\n",
7314 					acrtc_attach->crtc_id);
7315 			continue;
7316 		}
7317 
7318 		if (plane == pcrtc->primary)
7319 			update_freesync_state_on_stream(
7320 				dm,
7321 				acrtc_state,
7322 				acrtc_state->stream,
7323 				dc_plane,
7324 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7325 
7326 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7327 				 __func__,
7328 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7329 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7330 
7331 		planes_count += 1;
7332 
7333 	}
7334 
7335 	if (pflip_present) {
7336 		if (!vrr_active) {
7337 			/* Use old throttling in non-vrr fixed refresh rate mode
7338 			 * to keep flip scheduling based on target vblank counts
7339 			 * working in a backwards compatible way, e.g., for
7340 			 * clients using the GLX_OML_sync_control extension or
7341 			 * DRI3/Present extension with defined target_msc.
7342 			 */
7343 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7344 		}
7345 		else {
7346 			/* For variable refresh rate mode only:
7347 			 * Get vblank of last completed flip to avoid > 1 vrr
7348 			 * flips per video frame by use of throttling, but allow
7349 			 * flip programming anywhere in the possibly large
7350 			 * variable vrr vblank interval for fine-grained flip
7351 			 * timing control and more opportunity to avoid stutter
7352 			 * on late submission of flips.
7353 			 */
7354 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7355 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7356 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7357 		}
7358 
7359 		target_vblank = last_flip_vblank + wait_for_vblank;
7360 
7361 		/*
7362 		 * Wait until we're out of the vertical blank period before the one
7363 		 * targeted by the flip
7364 		 */
7365 		while ((acrtc_attach->enabled &&
7366 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7367 							    0, &vpos, &hpos, NULL,
7368 							    NULL, &pcrtc->hwmode)
7369 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7370 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7371 			(int)(target_vblank -
7372 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7373 			usleep_range(1000, 1100);
7374 		}
7375 
7376 		/**
7377 		 * Prepare the flip event for the pageflip interrupt to handle.
7378 		 *
7379 		 * This only works in the case where we've already turned on the
7380 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7381 		 * from 0 -> n planes we have to skip a hardware generated event
7382 		 * and rely on sending it from software.
7383 		 */
7384 		if (acrtc_attach->base.state->event &&
7385 		    acrtc_state->active_planes > 0) {
7386 			drm_crtc_vblank_get(pcrtc);
7387 
7388 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7389 
7390 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7391 			prepare_flip_isr(acrtc_attach);
7392 
7393 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7394 		}
7395 
7396 		if (acrtc_state->stream) {
7397 			if (acrtc_state->freesync_vrr_info_changed)
7398 				bundle->stream_update.vrr_infopacket =
7399 					&acrtc_state->stream->vrr_infopacket;
7400 		}
7401 	}
7402 
7403 	/* Update the planes if changed or disable if we don't have any. */
7404 	if ((planes_count || acrtc_state->active_planes == 0) &&
7405 		acrtc_state->stream) {
7406 		bundle->stream_update.stream = acrtc_state->stream;
7407 		if (new_pcrtc_state->mode_changed) {
7408 			bundle->stream_update.src = acrtc_state->stream->src;
7409 			bundle->stream_update.dst = acrtc_state->stream->dst;
7410 		}
7411 
7412 		if (new_pcrtc_state->color_mgmt_changed) {
7413 			/*
7414 			 * TODO: This isn't fully correct since we've actually
7415 			 * already modified the stream in place.
7416 			 */
7417 			bundle->stream_update.gamut_remap =
7418 				&acrtc_state->stream->gamut_remap_matrix;
7419 			bundle->stream_update.output_csc_transform =
7420 				&acrtc_state->stream->csc_color_matrix;
7421 			bundle->stream_update.out_transfer_func =
7422 				acrtc_state->stream->out_transfer_func;
7423 		}
7424 
7425 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7426 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7427 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7428 
7429 		/*
7430 		 * If FreeSync state on the stream has changed then we need to
7431 		 * re-adjust the min/max bounds now that DC doesn't handle this
7432 		 * as part of commit.
7433 		 */
7434 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7435 		    amdgpu_dm_vrr_active(acrtc_state)) {
7436 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7437 			dc_stream_adjust_vmin_vmax(
7438 				dm->dc, acrtc_state->stream,
7439 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7440 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7441 		}
7442 		mutex_lock(&dm->dc_lock);
7443 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7444 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7445 			amdgpu_dm_psr_disable(acrtc_state->stream);
7446 
7447 		dc_commit_updates_for_stream(dm->dc,
7448 						     bundle->surface_updates,
7449 						     planes_count,
7450 						     acrtc_state->stream,
7451 						     &bundle->stream_update,
7452 						     dc_state);
7453 
7454 		/**
7455 		 * Enable or disable the interrupts on the backend.
7456 		 *
7457 		 * Most pipes are put into power gating when unused.
7458 		 *
7459 		 * When power gating is enabled on a pipe we lose the
7460 		 * interrupt enablement state when power gating is disabled.
7461 		 *
7462 		 * So we need to update the IRQ control state in hardware
7463 		 * whenever the pipe turns on (since it could be previously
7464 		 * power gated) or off (since some pipes can't be power gated
7465 		 * on some ASICs).
7466 		 */
7467 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7468 			dm_update_pflip_irq_state(drm_to_adev(dev),
7469 						  acrtc_attach);
7470 
7471 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7472 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7473 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7474 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7475 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7476 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7477 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7478 			amdgpu_dm_psr_enable(acrtc_state->stream);
7479 		}
7480 
7481 		mutex_unlock(&dm->dc_lock);
7482 	}
7483 
7484 	/*
7485 	 * Update cursor state *after* programming all the planes.
7486 	 * This avoids redundant programming in the case where we're going
7487 	 * to be disabling a single plane - those pipes are being disabled.
7488 	 */
7489 	if (acrtc_state->active_planes)
7490 		amdgpu_dm_commit_cursors(state);
7491 
7492 cleanup:
7493 	kfree(bundle);
7494 }
7495 
amdgpu_dm_commit_audio(struct drm_device * dev,struct drm_atomic_state * state)7496 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7497 				   struct drm_atomic_state *state)
7498 {
7499 	struct amdgpu_device *adev = drm_to_adev(dev);
7500 	struct amdgpu_dm_connector *aconnector;
7501 	struct drm_connector *connector;
7502 	struct drm_connector_state *old_con_state, *new_con_state;
7503 	struct drm_crtc_state *new_crtc_state;
7504 	struct dm_crtc_state *new_dm_crtc_state;
7505 	const struct dc_stream_status *status;
7506 	int i, inst;
7507 
7508 	/* Notify device removals. */
7509 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7510 		if (old_con_state->crtc != new_con_state->crtc) {
7511 			/* CRTC changes require notification. */
7512 			goto notify;
7513 		}
7514 
7515 		if (!new_con_state->crtc)
7516 			continue;
7517 
7518 		new_crtc_state = drm_atomic_get_new_crtc_state(
7519 			state, new_con_state->crtc);
7520 
7521 		if (!new_crtc_state)
7522 			continue;
7523 
7524 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7525 			continue;
7526 
7527 	notify:
7528 		aconnector = to_amdgpu_dm_connector(connector);
7529 
7530 		mutex_lock(&adev->dm.audio_lock);
7531 		inst = aconnector->audio_inst;
7532 		aconnector->audio_inst = -1;
7533 		mutex_unlock(&adev->dm.audio_lock);
7534 
7535 		amdgpu_dm_audio_eld_notify(adev, inst);
7536 	}
7537 
7538 	/* Notify audio device additions. */
7539 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7540 		if (!new_con_state->crtc)
7541 			continue;
7542 
7543 		new_crtc_state = drm_atomic_get_new_crtc_state(
7544 			state, new_con_state->crtc);
7545 
7546 		if (!new_crtc_state)
7547 			continue;
7548 
7549 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7550 			continue;
7551 
7552 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7553 		if (!new_dm_crtc_state->stream)
7554 			continue;
7555 
7556 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7557 		if (!status)
7558 			continue;
7559 
7560 		aconnector = to_amdgpu_dm_connector(connector);
7561 
7562 		mutex_lock(&adev->dm.audio_lock);
7563 		inst = status->audio_inst;
7564 		aconnector->audio_inst = inst;
7565 		mutex_unlock(&adev->dm.audio_lock);
7566 
7567 		amdgpu_dm_audio_eld_notify(adev, inst);
7568 	}
7569 }
7570 
7571 /*
7572  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7573  * @crtc_state: the DRM CRTC state
7574  * @stream_state: the DC stream state.
7575  *
7576  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7577  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7578  */
amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state * crtc_state,struct dc_stream_state * stream_state)7579 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7580 						struct dc_stream_state *stream_state)
7581 {
7582 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7583 }
7584 
amdgpu_dm_atomic_commit(struct drm_device * dev,struct drm_atomic_state * state,bool nonblock)7585 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7586 				   struct drm_atomic_state *state,
7587 				   bool nonblock)
7588 {
7589 	/*
7590 	 * Add check here for SoC's that support hardware cursor plane, to
7591 	 * unset legacy_cursor_update
7592 	 */
7593 
7594 	return drm_atomic_helper_commit(dev, state, nonblock);
7595 
7596 	/*TODO Handle EINTR, reenable IRQ*/
7597 }
7598 
7599 /**
7600  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7601  * @state: The atomic state to commit
7602  *
7603  * This will tell DC to commit the constructed DC state from atomic_check,
7604  * programming the hardware. Any failures here implies a hardware failure, since
7605  * atomic check should have filtered anything non-kosher.
7606  */
amdgpu_dm_atomic_commit_tail(struct drm_atomic_state * state)7607 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7608 {
7609 	struct drm_device *dev = state->dev;
7610 	struct amdgpu_device *adev = drm_to_adev(dev);
7611 	struct amdgpu_display_manager *dm = &adev->dm;
7612 	struct dm_atomic_state *dm_state;
7613 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7614 	uint32_t i, j;
7615 	struct drm_crtc *crtc;
7616 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7617 	unsigned long flags;
7618 	bool wait_for_vblank = true;
7619 	struct drm_connector *connector;
7620 	struct drm_connector_state *old_con_state, *new_con_state;
7621 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7622 	int crtc_disable_count = 0;
7623 	bool mode_set_reset_required = false;
7624 
7625 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7626 
7627 	dm_state = dm_atomic_get_new_state(state);
7628 	if (dm_state && dm_state->context) {
7629 		dc_state = dm_state->context;
7630 	} else {
7631 		/* No state changes, retain current state. */
7632 		dc_state_temp = dc_create_state(dm->dc);
7633 		ASSERT(dc_state_temp);
7634 		dc_state = dc_state_temp;
7635 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7636 	}
7637 
7638 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7639 				       new_crtc_state, i) {
7640 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7641 
7642 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7643 
7644 		if (old_crtc_state->active &&
7645 		    (!new_crtc_state->active ||
7646 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7647 			manage_dm_interrupts(adev, acrtc, false);
7648 			dc_stream_release(dm_old_crtc_state->stream);
7649 		}
7650 	}
7651 
7652 	drm_atomic_helper_calc_timestamping_constants(state);
7653 
7654 	/* update changed items */
7655 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7656 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7657 
7658 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7659 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7660 
7661 		DRM_DEBUG_DRIVER(
7662 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7663 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7664 			"connectors_changed:%d\n",
7665 			acrtc->crtc_id,
7666 			new_crtc_state->enable,
7667 			new_crtc_state->active,
7668 			new_crtc_state->planes_changed,
7669 			new_crtc_state->mode_changed,
7670 			new_crtc_state->active_changed,
7671 			new_crtc_state->connectors_changed);
7672 
7673 		/* Copy all transient state flags into dc state */
7674 		if (dm_new_crtc_state->stream) {
7675 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7676 							    dm_new_crtc_state->stream);
7677 		}
7678 
7679 		/* handles headless hotplug case, updating new_state and
7680 		 * aconnector as needed
7681 		 */
7682 
7683 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7684 
7685 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7686 
7687 			if (!dm_new_crtc_state->stream) {
7688 				/*
7689 				 * this could happen because of issues with
7690 				 * userspace notifications delivery.
7691 				 * In this case userspace tries to set mode on
7692 				 * display which is disconnected in fact.
7693 				 * dc_sink is NULL in this case on aconnector.
7694 				 * We expect reset mode will come soon.
7695 				 *
7696 				 * This can also happen when unplug is done
7697 				 * during resume sequence ended
7698 				 *
7699 				 * In this case, we want to pretend we still
7700 				 * have a sink to keep the pipe running so that
7701 				 * hw state is consistent with the sw state
7702 				 */
7703 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7704 						__func__, acrtc->base.base.id);
7705 				continue;
7706 			}
7707 
7708 			if (dm_old_crtc_state->stream)
7709 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7710 
7711 			pm_runtime_get_noresume(dev->dev);
7712 
7713 			acrtc->enabled = true;
7714 			acrtc->hw_mode = new_crtc_state->mode;
7715 			crtc->hwmode = new_crtc_state->mode;
7716 			mode_set_reset_required = true;
7717 		} else if (modereset_required(new_crtc_state)) {
7718 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7719 			/* i.e. reset mode */
7720 			if (dm_old_crtc_state->stream)
7721 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7722 			mode_set_reset_required = true;
7723 		}
7724 	} /* for_each_crtc_in_state() */
7725 
7726 	if (dc_state) {
7727 		/* if there mode set or reset, disable eDP PSR */
7728 		if (mode_set_reset_required)
7729 			amdgpu_dm_psr_disable_all(dm);
7730 
7731 		dm_enable_per_frame_crtc_master_sync(dc_state);
7732 		mutex_lock(&dm->dc_lock);
7733 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7734 		mutex_unlock(&dm->dc_lock);
7735 	}
7736 
7737 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7738 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7739 
7740 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7741 
7742 		if (dm_new_crtc_state->stream != NULL) {
7743 			const struct dc_stream_status *status =
7744 					dc_stream_get_status(dm_new_crtc_state->stream);
7745 
7746 			if (!status)
7747 				status = dc_stream_get_status_from_state(dc_state,
7748 									 dm_new_crtc_state->stream);
7749 			if (!status)
7750 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7751 			else
7752 				acrtc->otg_inst = status->primary_otg_inst;
7753 		}
7754 	}
7755 #ifdef CONFIG_DRM_AMD_DC_HDCP
7756 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7757 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7758 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7759 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7760 
7761 		new_crtc_state = NULL;
7762 
7763 		if (acrtc)
7764 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7765 
7766 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7767 
7768 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7769 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7770 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7771 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7772 			continue;
7773 		}
7774 
7775 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7776 			hdcp_update_display(
7777 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7778 				new_con_state->hdcp_content_type,
7779 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7780 													 : false);
7781 	}
7782 #endif
7783 
7784 	/* Handle connector state changes */
7785 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7786 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7787 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7788 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7789 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7790 		struct dc_stream_update stream_update;
7791 		struct dc_info_packet hdr_packet;
7792 		struct dc_stream_status *status = NULL;
7793 		bool abm_changed, hdr_changed, scaling_changed;
7794 
7795 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7796 		memset(&stream_update, 0, sizeof(stream_update));
7797 
7798 		if (acrtc) {
7799 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7800 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7801 		}
7802 
7803 		/* Skip any modesets/resets */
7804 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7805 			continue;
7806 
7807 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7808 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7809 
7810 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7811 							     dm_old_con_state);
7812 
7813 		abm_changed = dm_new_crtc_state->abm_level !=
7814 			      dm_old_crtc_state->abm_level;
7815 
7816 		hdr_changed =
7817 			is_hdr_metadata_different(old_con_state, new_con_state);
7818 
7819 		if (!scaling_changed && !abm_changed && !hdr_changed)
7820 			continue;
7821 
7822 		stream_update.stream = dm_new_crtc_state->stream;
7823 		if (scaling_changed) {
7824 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7825 					dm_new_con_state, dm_new_crtc_state->stream);
7826 
7827 			stream_update.src = dm_new_crtc_state->stream->src;
7828 			stream_update.dst = dm_new_crtc_state->stream->dst;
7829 		}
7830 
7831 		if (abm_changed) {
7832 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7833 
7834 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7835 		}
7836 
7837 		if (hdr_changed) {
7838 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7839 			stream_update.hdr_static_metadata = &hdr_packet;
7840 		}
7841 
7842 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7843 		WARN_ON(!status);
7844 		WARN_ON(!status->plane_count);
7845 
7846 		/*
7847 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7848 		 * Here we create an empty update on each plane.
7849 		 * To fix this, DC should permit updating only stream properties.
7850 		 */
7851 		for (j = 0; j < status->plane_count; j++)
7852 			dummy_updates[j].surface = status->plane_states[0];
7853 
7854 
7855 		mutex_lock(&dm->dc_lock);
7856 		dc_commit_updates_for_stream(dm->dc,
7857 						     dummy_updates,
7858 						     status->plane_count,
7859 						     dm_new_crtc_state->stream,
7860 						     &stream_update,
7861 						     dc_state);
7862 		mutex_unlock(&dm->dc_lock);
7863 	}
7864 
7865 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7866 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7867 				      new_crtc_state, i) {
7868 		if (old_crtc_state->active && !new_crtc_state->active)
7869 			crtc_disable_count++;
7870 
7871 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7872 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7873 
7874 		/* For freesync config update on crtc state and params for irq */
7875 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7876 
7877 		/* Handle vrr on->off / off->on transitions */
7878 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7879 						dm_new_crtc_state);
7880 	}
7881 
7882 	/**
7883 	 * Enable interrupts for CRTCs that are newly enabled or went through
7884 	 * a modeset. It was intentionally deferred until after the front end
7885 	 * state was modified to wait until the OTG was on and so the IRQ
7886 	 * handlers didn't access stale or invalid state.
7887 	 */
7888 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7889 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7890 
7891 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7892 
7893 		if (new_crtc_state->active &&
7894 		    (!old_crtc_state->active ||
7895 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7896 			dc_stream_retain(dm_new_crtc_state->stream);
7897 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7898 			manage_dm_interrupts(adev, acrtc, true);
7899 
7900 #ifdef CONFIG_DEBUG_FS
7901 			/**
7902 			 * Frontend may have changed so reapply the CRC capture
7903 			 * settings for the stream.
7904 			 */
7905 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7906 
7907 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7908 				amdgpu_dm_crtc_configure_crc_source(
7909 					crtc, dm_new_crtc_state,
7910 					dm_new_crtc_state->crc_src);
7911 			}
7912 #endif
7913 		}
7914 	}
7915 
7916 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7917 		if (new_crtc_state->async_flip)
7918 			wait_for_vblank = false;
7919 
7920 	/* update planes when needed per crtc*/
7921 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7922 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7923 
7924 		if (dm_new_crtc_state->stream)
7925 			amdgpu_dm_commit_planes(state, dc_state, dev,
7926 						dm, crtc, wait_for_vblank);
7927 	}
7928 
7929 	/* Update audio instances for each connector. */
7930 	amdgpu_dm_commit_audio(dev, state);
7931 
7932 	/*
7933 	 * send vblank event on all events not handled in flip and
7934 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7935 	 */
7936 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7937 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7938 
7939 		if (new_crtc_state->event)
7940 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7941 
7942 		new_crtc_state->event = NULL;
7943 	}
7944 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7945 
7946 	/* Signal HW programming completion */
7947 	drm_atomic_helper_commit_hw_done(state);
7948 
7949 	if (wait_for_vblank)
7950 		drm_atomic_helper_wait_for_flip_done(dev, state);
7951 
7952 	drm_atomic_helper_cleanup_planes(dev, state);
7953 
7954 	/*
7955 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7956 	 * so we can put the GPU into runtime suspend if we're not driving any
7957 	 * displays anymore
7958 	 */
7959 	for (i = 0; i < crtc_disable_count; i++)
7960 		pm_runtime_put_autosuspend(dev->dev);
7961 	pm_runtime_mark_last_busy(dev->dev);
7962 
7963 	if (dc_state_temp)
7964 		dc_release_state(dc_state_temp);
7965 }
7966 
7967 
dm_force_atomic_commit(struct drm_connector * connector)7968 static int dm_force_atomic_commit(struct drm_connector *connector)
7969 {
7970 	int ret = 0;
7971 	struct drm_device *ddev = connector->dev;
7972 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7973 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7974 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7975 	struct drm_connector_state *conn_state;
7976 	struct drm_crtc_state *crtc_state;
7977 	struct drm_plane_state *plane_state;
7978 
7979 	if (!state)
7980 		return -ENOMEM;
7981 
7982 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7983 
7984 	/* Construct an atomic state to restore previous display setting */
7985 
7986 	/*
7987 	 * Attach connectors to drm_atomic_state
7988 	 */
7989 	conn_state = drm_atomic_get_connector_state(state, connector);
7990 
7991 	ret = PTR_ERR_OR_ZERO(conn_state);
7992 	if (ret)
7993 		goto out;
7994 
7995 	/* Attach crtc to drm_atomic_state*/
7996 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7997 
7998 	ret = PTR_ERR_OR_ZERO(crtc_state);
7999 	if (ret)
8000 		goto out;
8001 
8002 	/* force a restore */
8003 	crtc_state->mode_changed = true;
8004 
8005 	/* Attach plane to drm_atomic_state */
8006 	plane_state = drm_atomic_get_plane_state(state, plane);
8007 
8008 	ret = PTR_ERR_OR_ZERO(plane_state);
8009 	if (ret)
8010 		goto out;
8011 
8012 	/* Call commit internally with the state we just constructed */
8013 	ret = drm_atomic_commit(state);
8014 
8015 out:
8016 	drm_atomic_state_put(state);
8017 	if (ret)
8018 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8019 
8020 	return ret;
8021 }
8022 
8023 /*
8024  * This function handles all cases when set mode does not come upon hotplug.
8025  * This includes when a display is unplugged then plugged back into the
8026  * same port and when running without usermode desktop manager supprot
8027  */
dm_restore_drm_connector_state(struct drm_device * dev,struct drm_connector * connector)8028 void dm_restore_drm_connector_state(struct drm_device *dev,
8029 				    struct drm_connector *connector)
8030 {
8031 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8032 	struct amdgpu_crtc *disconnected_acrtc;
8033 	struct dm_crtc_state *acrtc_state;
8034 
8035 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8036 		return;
8037 
8038 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8039 	if (!disconnected_acrtc)
8040 		return;
8041 
8042 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8043 	if (!acrtc_state->stream)
8044 		return;
8045 
8046 	/*
8047 	 * If the previous sink is not released and different from the current,
8048 	 * we deduce we are in a state where we can not rely on usermode call
8049 	 * to turn on the display, so we do it here
8050 	 */
8051 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8052 		dm_force_atomic_commit(&aconnector->base);
8053 }
8054 
8055 /*
8056  * Grabs all modesetting locks to serialize against any blocking commits,
8057  * Waits for completion of all non blocking commits.
8058  */
do_aquire_global_lock(struct drm_device * dev,struct drm_atomic_state * state)8059 static int do_aquire_global_lock(struct drm_device *dev,
8060 				 struct drm_atomic_state *state)
8061 {
8062 	struct drm_crtc *crtc;
8063 	struct drm_crtc_commit *commit;
8064 	long ret;
8065 
8066 	/*
8067 	 * Adding all modeset locks to aquire_ctx will
8068 	 * ensure that when the framework release it the
8069 	 * extra locks we are locking here will get released to
8070 	 */
8071 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8072 	if (ret)
8073 		return ret;
8074 
8075 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8076 		spin_lock(&crtc->commit_lock);
8077 		commit = list_first_entry_or_null(&crtc->commit_list,
8078 				struct drm_crtc_commit, commit_entry);
8079 		if (commit)
8080 			drm_crtc_commit_get(commit);
8081 		spin_unlock(&crtc->commit_lock);
8082 
8083 		if (!commit)
8084 			continue;
8085 
8086 		/*
8087 		 * Make sure all pending HW programming completed and
8088 		 * page flips done
8089 		 */
8090 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8091 
8092 		if (ret > 0)
8093 			ret = wait_for_completion_interruptible_timeout(
8094 					&commit->flip_done, 10*HZ);
8095 
8096 		if (ret == 0)
8097 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8098 				  "timed out\n", crtc->base.id, crtc->name);
8099 
8100 		drm_crtc_commit_put(commit);
8101 	}
8102 
8103 	return ret < 0 ? ret : 0;
8104 }
8105 
get_freesync_config_for_crtc(struct dm_crtc_state * new_crtc_state,struct dm_connector_state * new_con_state)8106 static void get_freesync_config_for_crtc(
8107 	struct dm_crtc_state *new_crtc_state,
8108 	struct dm_connector_state *new_con_state)
8109 {
8110 	struct mod_freesync_config config = {0};
8111 	struct amdgpu_dm_connector *aconnector =
8112 			to_amdgpu_dm_connector(new_con_state->base.connector);
8113 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8114 	int vrefresh = drm_mode_vrefresh(mode);
8115 
8116 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8117 					vrefresh >= aconnector->min_vfreq &&
8118 					vrefresh <= aconnector->max_vfreq;
8119 
8120 	if (new_crtc_state->vrr_supported) {
8121 		new_crtc_state->stream->ignore_msa_timing_param = true;
8122 		config.state = new_crtc_state->base.vrr_enabled ?
8123 				VRR_STATE_ACTIVE_VARIABLE :
8124 				VRR_STATE_INACTIVE;
8125 		config.min_refresh_in_uhz =
8126 				aconnector->min_vfreq * 1000000;
8127 		config.max_refresh_in_uhz =
8128 				aconnector->max_vfreq * 1000000;
8129 		config.vsif_supported = true;
8130 		config.btr = true;
8131 	}
8132 
8133 	new_crtc_state->freesync_config = config;
8134 }
8135 
reset_freesync_config_for_crtc(struct dm_crtc_state * new_crtc_state)8136 static void reset_freesync_config_for_crtc(
8137 	struct dm_crtc_state *new_crtc_state)
8138 {
8139 	new_crtc_state->vrr_supported = false;
8140 
8141 	memset(&new_crtc_state->vrr_infopacket, 0,
8142 	       sizeof(new_crtc_state->vrr_infopacket));
8143 }
8144 
dm_update_crtc_state(struct amdgpu_display_manager * dm,struct drm_atomic_state * state,struct drm_crtc * crtc,struct drm_crtc_state * old_crtc_state,struct drm_crtc_state * new_crtc_state,bool enable,bool * lock_and_validation_needed)8145 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8146 				struct drm_atomic_state *state,
8147 				struct drm_crtc *crtc,
8148 				struct drm_crtc_state *old_crtc_state,
8149 				struct drm_crtc_state *new_crtc_state,
8150 				bool enable,
8151 				bool *lock_and_validation_needed)
8152 {
8153 	struct dm_atomic_state *dm_state = NULL;
8154 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8155 	struct dc_stream_state *new_stream;
8156 	int ret = 0;
8157 
8158 	/*
8159 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8160 	 * update changed items
8161 	 */
8162 	struct amdgpu_crtc *acrtc = NULL;
8163 	struct amdgpu_dm_connector *aconnector = NULL;
8164 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8165 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8166 
8167 	new_stream = NULL;
8168 
8169 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8170 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8171 	acrtc = to_amdgpu_crtc(crtc);
8172 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8173 
8174 	/* TODO This hack should go away */
8175 	if (aconnector && enable) {
8176 		/* Make sure fake sink is created in plug-in scenario */
8177 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8178 							    &aconnector->base);
8179 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8180 							    &aconnector->base);
8181 
8182 		if (IS_ERR(drm_new_conn_state)) {
8183 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8184 			goto fail;
8185 		}
8186 
8187 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8188 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8189 
8190 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8191 			goto skip_modeset;
8192 
8193 		new_stream = create_validate_stream_for_sink(aconnector,
8194 							     &new_crtc_state->mode,
8195 							     dm_new_conn_state,
8196 							     dm_old_crtc_state->stream);
8197 
8198 		/*
8199 		 * we can have no stream on ACTION_SET if a display
8200 		 * was disconnected during S3, in this case it is not an
8201 		 * error, the OS will be updated after detection, and
8202 		 * will do the right thing on next atomic commit
8203 		 */
8204 
8205 		if (!new_stream) {
8206 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8207 					__func__, acrtc->base.base.id);
8208 			ret = -ENOMEM;
8209 			goto fail;
8210 		}
8211 
8212 		/*
8213 		 * TODO: Check VSDB bits to decide whether this should
8214 		 * be enabled or not.
8215 		 */
8216 		new_stream->triggered_crtc_reset.enabled =
8217 			dm->force_timing_sync;
8218 
8219 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8220 
8221 		ret = fill_hdr_info_packet(drm_new_conn_state,
8222 					   &new_stream->hdr_static_metadata);
8223 		if (ret)
8224 			goto fail;
8225 
8226 		/*
8227 		 * If we already removed the old stream from the context
8228 		 * (and set the new stream to NULL) then we can't reuse
8229 		 * the old stream even if the stream and scaling are unchanged.
8230 		 * We'll hit the BUG_ON and black screen.
8231 		 *
8232 		 * TODO: Refactor this function to allow this check to work
8233 		 * in all conditions.
8234 		 */
8235 		if (dm_new_crtc_state->stream &&
8236 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8237 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8238 			new_crtc_state->mode_changed = false;
8239 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8240 					 new_crtc_state->mode_changed);
8241 		}
8242 	}
8243 
8244 	/* mode_changed flag may get updated above, need to check again */
8245 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8246 		goto skip_modeset;
8247 
8248 	DRM_DEBUG_DRIVER(
8249 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8250 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8251 		"connectors_changed:%d\n",
8252 		acrtc->crtc_id,
8253 		new_crtc_state->enable,
8254 		new_crtc_state->active,
8255 		new_crtc_state->planes_changed,
8256 		new_crtc_state->mode_changed,
8257 		new_crtc_state->active_changed,
8258 		new_crtc_state->connectors_changed);
8259 
8260 	/* Remove stream for any changed/disabled CRTC */
8261 	if (!enable) {
8262 
8263 		if (!dm_old_crtc_state->stream)
8264 			goto skip_modeset;
8265 
8266 		ret = dm_atomic_get_state(state, &dm_state);
8267 		if (ret)
8268 			goto fail;
8269 
8270 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8271 				crtc->base.id);
8272 
8273 		/* i.e. reset mode */
8274 		if (dc_remove_stream_from_ctx(
8275 				dm->dc,
8276 				dm_state->context,
8277 				dm_old_crtc_state->stream) != DC_OK) {
8278 			ret = -EINVAL;
8279 			goto fail;
8280 		}
8281 
8282 		dc_stream_release(dm_old_crtc_state->stream);
8283 		dm_new_crtc_state->stream = NULL;
8284 
8285 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8286 
8287 		*lock_and_validation_needed = true;
8288 
8289 	} else {/* Add stream for any updated/enabled CRTC */
8290 		/*
8291 		 * Quick fix to prevent NULL pointer on new_stream when
8292 		 * added MST connectors not found in existing crtc_state in the chained mode
8293 		 * TODO: need to dig out the root cause of that
8294 		 */
8295 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8296 			goto skip_modeset;
8297 
8298 		if (modereset_required(new_crtc_state))
8299 			goto skip_modeset;
8300 
8301 		if (modeset_required(new_crtc_state, new_stream,
8302 				     dm_old_crtc_state->stream)) {
8303 
8304 			WARN_ON(dm_new_crtc_state->stream);
8305 
8306 			ret = dm_atomic_get_state(state, &dm_state);
8307 			if (ret)
8308 				goto fail;
8309 
8310 			dm_new_crtc_state->stream = new_stream;
8311 
8312 			dc_stream_retain(new_stream);
8313 
8314 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8315 						crtc->base.id);
8316 
8317 			if (dc_add_stream_to_ctx(
8318 					dm->dc,
8319 					dm_state->context,
8320 					dm_new_crtc_state->stream) != DC_OK) {
8321 				ret = -EINVAL;
8322 				goto fail;
8323 			}
8324 
8325 			*lock_and_validation_needed = true;
8326 		}
8327 	}
8328 
8329 skip_modeset:
8330 	/* Release extra reference */
8331 	if (new_stream)
8332 		 dc_stream_release(new_stream);
8333 
8334 	/*
8335 	 * We want to do dc stream updates that do not require a
8336 	 * full modeset below.
8337 	 */
8338 	if (!(enable && aconnector && new_crtc_state->active))
8339 		return 0;
8340 	/*
8341 	 * Given above conditions, the dc state cannot be NULL because:
8342 	 * 1. We're in the process of enabling CRTCs (just been added
8343 	 *    to the dc context, or already is on the context)
8344 	 * 2. Has a valid connector attached, and
8345 	 * 3. Is currently active and enabled.
8346 	 * => The dc stream state currently exists.
8347 	 */
8348 	BUG_ON(dm_new_crtc_state->stream == NULL);
8349 
8350 	/* Scaling or underscan settings */
8351 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8352 				drm_atomic_crtc_needs_modeset(new_crtc_state))
8353 		update_stream_scaling_settings(
8354 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8355 
8356 	/* ABM settings */
8357 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8358 
8359 	/*
8360 	 * Color management settings. We also update color properties
8361 	 * when a modeset is needed, to ensure it gets reprogrammed.
8362 	 */
8363 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8364 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8365 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8366 		if (ret)
8367 			goto fail;
8368 	}
8369 
8370 	/* Update Freesync settings. */
8371 	get_freesync_config_for_crtc(dm_new_crtc_state,
8372 				     dm_new_conn_state);
8373 
8374 	return ret;
8375 
8376 fail:
8377 	if (new_stream)
8378 		dc_stream_release(new_stream);
8379 	return ret;
8380 }
8381 
should_reset_plane(struct drm_atomic_state * state,struct drm_plane * plane,struct drm_plane_state * old_plane_state,struct drm_plane_state * new_plane_state)8382 static bool should_reset_plane(struct drm_atomic_state *state,
8383 			       struct drm_plane *plane,
8384 			       struct drm_plane_state *old_plane_state,
8385 			       struct drm_plane_state *new_plane_state)
8386 {
8387 	struct drm_plane *other;
8388 	struct drm_plane_state *old_other_state, *new_other_state;
8389 	struct drm_crtc_state *new_crtc_state;
8390 	int i;
8391 
8392 	/*
8393 	 * TODO: Remove this hack once the checks below are sufficient
8394 	 * enough to determine when we need to reset all the planes on
8395 	 * the stream.
8396 	 */
8397 	if (state->allow_modeset)
8398 		return true;
8399 
8400 	/* Exit early if we know that we're adding or removing the plane. */
8401 	if (old_plane_state->crtc != new_plane_state->crtc)
8402 		return true;
8403 
8404 	/* old crtc == new_crtc == NULL, plane not in context. */
8405 	if (!new_plane_state->crtc)
8406 		return false;
8407 
8408 	new_crtc_state =
8409 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8410 
8411 	if (!new_crtc_state)
8412 		return true;
8413 
8414 	/* CRTC Degamma changes currently require us to recreate planes. */
8415 	if (new_crtc_state->color_mgmt_changed)
8416 		return true;
8417 
8418 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8419 		return true;
8420 
8421 	/*
8422 	 * If there are any new primary or overlay planes being added or
8423 	 * removed then the z-order can potentially change. To ensure
8424 	 * correct z-order and pipe acquisition the current DC architecture
8425 	 * requires us to remove and recreate all existing planes.
8426 	 *
8427 	 * TODO: Come up with a more elegant solution for this.
8428 	 */
8429 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8430 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8431 
8432 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8433 			continue;
8434 
8435 		if (old_other_state->crtc != new_plane_state->crtc &&
8436 		    new_other_state->crtc != new_plane_state->crtc)
8437 			continue;
8438 
8439 		if (old_other_state->crtc != new_other_state->crtc)
8440 			return true;
8441 
8442 		/* Src/dst size and scaling updates. */
8443 		if (old_other_state->src_w != new_other_state->src_w ||
8444 		    old_other_state->src_h != new_other_state->src_h ||
8445 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8446 		    old_other_state->crtc_h != new_other_state->crtc_h)
8447 			return true;
8448 
8449 		/* Rotation / mirroring updates. */
8450 		if (old_other_state->rotation != new_other_state->rotation)
8451 			return true;
8452 
8453 		/* Blending updates. */
8454 		if (old_other_state->pixel_blend_mode !=
8455 		    new_other_state->pixel_blend_mode)
8456 			return true;
8457 
8458 		/* Alpha updates. */
8459 		if (old_other_state->alpha != new_other_state->alpha)
8460 			return true;
8461 
8462 		/* Colorspace changes. */
8463 		if (old_other_state->color_range != new_other_state->color_range ||
8464 		    old_other_state->color_encoding != new_other_state->color_encoding)
8465 			return true;
8466 
8467 		/* Framebuffer checks fall at the end. */
8468 		if (!old_other_state->fb || !new_other_state->fb)
8469 			continue;
8470 
8471 		/* Pixel format changes can require bandwidth updates. */
8472 		if (old_other_state->fb->format != new_other_state->fb->format)
8473 			return true;
8474 
8475 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8476 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8477 
8478 		/* Tiling and DCC changes also require bandwidth updates. */
8479 		if (old_dm_plane_state->tiling_flags !=
8480 		    new_dm_plane_state->tiling_flags)
8481 			return true;
8482 	}
8483 
8484 	return false;
8485 }
8486 
dm_update_plane_state(struct dc * dc,struct drm_atomic_state * state,struct drm_plane * plane,struct drm_plane_state * old_plane_state,struct drm_plane_state * new_plane_state,bool enable,bool * lock_and_validation_needed)8487 static int dm_update_plane_state(struct dc *dc,
8488 				 struct drm_atomic_state *state,
8489 				 struct drm_plane *plane,
8490 				 struct drm_plane_state *old_plane_state,
8491 				 struct drm_plane_state *new_plane_state,
8492 				 bool enable,
8493 				 bool *lock_and_validation_needed)
8494 {
8495 
8496 	struct dm_atomic_state *dm_state = NULL;
8497 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8498 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8499 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8500 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8501 	struct amdgpu_crtc *new_acrtc;
8502 	bool needs_reset;
8503 	int ret = 0;
8504 
8505 
8506 	new_plane_crtc = new_plane_state->crtc;
8507 	old_plane_crtc = old_plane_state->crtc;
8508 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8509 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8510 
8511 	/*TODO Implement better atomic check for cursor plane */
8512 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8513 		if (!enable || !new_plane_crtc ||
8514 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8515 			return 0;
8516 
8517 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8518 
8519 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8520 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8521 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8522 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8523 			return -EINVAL;
8524 		}
8525 
8526 		return 0;
8527 	}
8528 
8529 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8530 					 new_plane_state);
8531 
8532 	/* Remove any changed/removed planes */
8533 	if (!enable) {
8534 		if (!needs_reset)
8535 			return 0;
8536 
8537 		if (!old_plane_crtc)
8538 			return 0;
8539 
8540 		old_crtc_state = drm_atomic_get_old_crtc_state(
8541 				state, old_plane_crtc);
8542 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8543 
8544 		if (!dm_old_crtc_state->stream)
8545 			return 0;
8546 
8547 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8548 				plane->base.id, old_plane_crtc->base.id);
8549 
8550 		ret = dm_atomic_get_state(state, &dm_state);
8551 		if (ret)
8552 			return ret;
8553 
8554 		if (!dc_remove_plane_from_context(
8555 				dc,
8556 				dm_old_crtc_state->stream,
8557 				dm_old_plane_state->dc_state,
8558 				dm_state->context)) {
8559 
8560 			return -EINVAL;
8561 		}
8562 
8563 
8564 		dc_plane_state_release(dm_old_plane_state->dc_state);
8565 		dm_new_plane_state->dc_state = NULL;
8566 
8567 		*lock_and_validation_needed = true;
8568 
8569 	} else { /* Add new planes */
8570 		struct dc_plane_state *dc_new_plane_state;
8571 
8572 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8573 			return 0;
8574 
8575 		if (!new_plane_crtc)
8576 			return 0;
8577 
8578 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8579 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8580 
8581 		if (!dm_new_crtc_state->stream)
8582 			return 0;
8583 
8584 		if (!needs_reset)
8585 			return 0;
8586 
8587 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8588 		if (ret)
8589 			return ret;
8590 
8591 		WARN_ON(dm_new_plane_state->dc_state);
8592 
8593 		dc_new_plane_state = dc_create_plane_state(dc);
8594 		if (!dc_new_plane_state)
8595 			return -ENOMEM;
8596 
8597 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8598 				plane->base.id, new_plane_crtc->base.id);
8599 
8600 		ret = fill_dc_plane_attributes(
8601 			drm_to_adev(new_plane_crtc->dev),
8602 			dc_new_plane_state,
8603 			new_plane_state,
8604 			new_crtc_state);
8605 		if (ret) {
8606 			dc_plane_state_release(dc_new_plane_state);
8607 			return ret;
8608 		}
8609 
8610 		ret = dm_atomic_get_state(state, &dm_state);
8611 		if (ret) {
8612 			dc_plane_state_release(dc_new_plane_state);
8613 			return ret;
8614 		}
8615 
8616 		/*
8617 		 * Any atomic check errors that occur after this will
8618 		 * not need a release. The plane state will be attached
8619 		 * to the stream, and therefore part of the atomic
8620 		 * state. It'll be released when the atomic state is
8621 		 * cleaned.
8622 		 */
8623 		if (!dc_add_plane_to_context(
8624 				dc,
8625 				dm_new_crtc_state->stream,
8626 				dc_new_plane_state,
8627 				dm_state->context)) {
8628 
8629 			dc_plane_state_release(dc_new_plane_state);
8630 			return -EINVAL;
8631 		}
8632 
8633 		dm_new_plane_state->dc_state = dc_new_plane_state;
8634 
8635 		/* Tell DC to do a full surface update every time there
8636 		 * is a plane change. Inefficient, but works for now.
8637 		 */
8638 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8639 
8640 		*lock_and_validation_needed = true;
8641 	}
8642 
8643 
8644 	return ret;
8645 }
8646 
8647 #if defined(CONFIG_DRM_AMD_DC_DCN)
add_affected_mst_dsc_crtcs(struct drm_atomic_state * state,struct drm_crtc * crtc)8648 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8649 {
8650 	struct drm_connector *connector;
8651 	struct drm_connector_state *conn_state, *old_conn_state;
8652 	struct amdgpu_dm_connector *aconnector = NULL;
8653 	int i;
8654 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
8655 		if (!conn_state->crtc)
8656 			conn_state = old_conn_state;
8657 
8658 		if (conn_state->crtc != crtc)
8659 			continue;
8660 
8661 		aconnector = to_amdgpu_dm_connector(connector);
8662 		if (!aconnector->port || !aconnector->mst_port)
8663 			aconnector = NULL;
8664 		else
8665 			break;
8666 	}
8667 
8668 	if (!aconnector)
8669 		return 0;
8670 
8671 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8672 }
8673 #endif
8674 
validate_overlay(struct drm_atomic_state * state)8675 static int validate_overlay(struct drm_atomic_state *state)
8676 {
8677 	int i;
8678 	struct drm_plane *plane;
8679 	struct drm_plane_state *old_plane_state, *new_plane_state;
8680 	struct drm_plane_state *primary_state, *overlay_state = NULL;
8681 
8682 	/* Check if primary plane is contained inside overlay */
8683 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8684 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
8685 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8686 				return 0;
8687 
8688 			overlay_state = new_plane_state;
8689 			continue;
8690 		}
8691 	}
8692 
8693 	/* check if we're making changes to the overlay plane */
8694 	if (!overlay_state)
8695 		return 0;
8696 
8697 	/* check if overlay plane is enabled */
8698 	if (!overlay_state->crtc)
8699 		return 0;
8700 
8701 	/* find the primary plane for the CRTC that the overlay is enabled on */
8702 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
8703 	if (IS_ERR(primary_state))
8704 		return PTR_ERR(primary_state);
8705 
8706 	/* check if primary plane is enabled */
8707 	if (!primary_state->crtc)
8708 		return 0;
8709 
8710 	/* Perform the bounds check to ensure the overlay plane covers the primary */
8711 	if (primary_state->crtc_x < overlay_state->crtc_x ||
8712 	    primary_state->crtc_y < overlay_state->crtc_y ||
8713 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
8714 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
8715 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
8716 		return -EINVAL;
8717 	}
8718 
8719 	return 0;
8720 }
8721 
8722 /**
8723  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8724  * @dev: The DRM device
8725  * @state: The atomic state to commit
8726  *
8727  * Validate that the given atomic state is programmable by DC into hardware.
8728  * This involves constructing a &struct dc_state reflecting the new hardware
8729  * state we wish to commit, then querying DC to see if it is programmable. It's
8730  * important not to modify the existing DC state. Otherwise, atomic_check
8731  * may unexpectedly commit hardware changes.
8732  *
8733  * When validating the DC state, it's important that the right locks are
8734  * acquired. For full updates case which removes/adds/updates streams on one
8735  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8736  * that any such full update commit will wait for completion of any outstanding
8737  * flip using DRMs synchronization events.
8738  *
8739  * Note that DM adds the affected connectors for all CRTCs in state, when that
8740  * might not seem necessary. This is because DC stream creation requires the
8741  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8742  * be possible but non-trivial - a possible TODO item.
8743  *
8744  * Return: -Error code if validation failed.
8745  */
amdgpu_dm_atomic_check(struct drm_device * dev,struct drm_atomic_state * state)8746 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8747 				  struct drm_atomic_state *state)
8748 {
8749 	struct amdgpu_device *adev = drm_to_adev(dev);
8750 	struct dm_atomic_state *dm_state = NULL;
8751 	struct dc *dc = adev->dm.dc;
8752 	struct drm_connector *connector;
8753 	struct drm_connector_state *old_con_state, *new_con_state;
8754 	struct drm_crtc *crtc;
8755 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8756 	struct drm_plane *plane;
8757 	struct drm_plane_state *old_plane_state, *new_plane_state;
8758 	enum dc_status status;
8759 	int ret, i;
8760 	bool lock_and_validation_needed = false;
8761 
8762 	amdgpu_check_debugfs_connector_property_change(adev, state);
8763 
8764 	ret = drm_atomic_helper_check_modeset(dev, state);
8765 	if (ret)
8766 		goto fail;
8767 
8768 	/* Check connector changes */
8769 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8770 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8771 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8772 
8773 		/* Skip connectors that are disabled or part of modeset already. */
8774 		if (!old_con_state->crtc && !new_con_state->crtc)
8775 			continue;
8776 
8777 		if (!new_con_state->crtc)
8778 			continue;
8779 
8780 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8781 		if (IS_ERR(new_crtc_state)) {
8782 			ret = PTR_ERR(new_crtc_state);
8783 			goto fail;
8784 		}
8785 
8786 		if (dm_old_con_state->abm_level !=
8787 		    dm_new_con_state->abm_level)
8788 			new_crtc_state->connectors_changed = true;
8789 	}
8790 
8791 #if defined(CONFIG_DRM_AMD_DC_DCN)
8792 	if (dc_resource_is_dsc_encoding_supported(dc)) {
8793 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8794 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8795 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8796 				if (ret)
8797 					goto fail;
8798 			}
8799 		}
8800 	}
8801 #endif
8802 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8803 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8804 		    !new_crtc_state->color_mgmt_changed &&
8805 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8806 			continue;
8807 
8808 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
8809 		if (ret)
8810 			goto fail;
8811 
8812 		if (!new_crtc_state->enable)
8813 			continue;
8814 
8815 		ret = drm_atomic_add_affected_connectors(state, crtc);
8816 		if (ret)
8817 			return ret;
8818 
8819 		ret = drm_atomic_add_affected_planes(state, crtc);
8820 		if (ret)
8821 			goto fail;
8822 	}
8823 
8824 	/*
8825 	 * Add all primary and overlay planes on the CRTC to the state
8826 	 * whenever a plane is enabled to maintain correct z-ordering
8827 	 * and to enable fast surface updates.
8828 	 */
8829 	drm_for_each_crtc(crtc, dev) {
8830 		bool modified = false;
8831 
8832 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8833 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8834 				continue;
8835 
8836 			if (new_plane_state->crtc == crtc ||
8837 			    old_plane_state->crtc == crtc) {
8838 				modified = true;
8839 				break;
8840 			}
8841 		}
8842 
8843 		if (!modified)
8844 			continue;
8845 
8846 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8847 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8848 				continue;
8849 
8850 			new_plane_state =
8851 				drm_atomic_get_plane_state(state, plane);
8852 
8853 			if (IS_ERR(new_plane_state)) {
8854 				ret = PTR_ERR(new_plane_state);
8855 				goto fail;
8856 			}
8857 		}
8858 	}
8859 
8860 	/* Prepass for updating tiling flags on new planes. */
8861 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8862 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8863 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8864 
8865 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8866 				  &new_dm_plane_state->tmz_surface);
8867 		if (ret)
8868 			goto fail;
8869 	}
8870 
8871 	/* Remove exiting planes if they are modified */
8872 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8873 		ret = dm_update_plane_state(dc, state, plane,
8874 					    old_plane_state,
8875 					    new_plane_state,
8876 					    false,
8877 					    &lock_and_validation_needed);
8878 		if (ret)
8879 			goto fail;
8880 	}
8881 
8882 	/* Disable all crtcs which require disable */
8883 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8884 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8885 					   old_crtc_state,
8886 					   new_crtc_state,
8887 					   false,
8888 					   &lock_and_validation_needed);
8889 		if (ret)
8890 			goto fail;
8891 	}
8892 
8893 	/* Enable all crtcs which require enable */
8894 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8895 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8896 					   old_crtc_state,
8897 					   new_crtc_state,
8898 					   true,
8899 					   &lock_and_validation_needed);
8900 		if (ret)
8901 			goto fail;
8902 	}
8903 
8904 	ret = validate_overlay(state);
8905 	if (ret)
8906 		goto fail;
8907 
8908 	/* Add new/modified planes */
8909 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8910 		ret = dm_update_plane_state(dc, state, plane,
8911 					    old_plane_state,
8912 					    new_plane_state,
8913 					    true,
8914 					    &lock_and_validation_needed);
8915 		if (ret)
8916 			goto fail;
8917 	}
8918 
8919 	/* Run this here since we want to validate the streams we created */
8920 	ret = drm_atomic_helper_check_planes(dev, state);
8921 	if (ret)
8922 		goto fail;
8923 
8924 	if (state->legacy_cursor_update) {
8925 		/*
8926 		 * This is a fast cursor update coming from the plane update
8927 		 * helper, check if it can be done asynchronously for better
8928 		 * performance.
8929 		 */
8930 		state->async_update =
8931 			!drm_atomic_helper_async_check(dev, state);
8932 
8933 		/*
8934 		 * Skip the remaining global validation if this is an async
8935 		 * update. Cursor updates can be done without affecting
8936 		 * state or bandwidth calcs and this avoids the performance
8937 		 * penalty of locking the private state object and
8938 		 * allocating a new dc_state.
8939 		 */
8940 		if (state->async_update)
8941 			return 0;
8942 	}
8943 
8944 	/* Check scaling and underscan changes*/
8945 	/* TODO Removed scaling changes validation due to inability to commit
8946 	 * new stream into context w\o causing full reset. Need to
8947 	 * decide how to handle.
8948 	 */
8949 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8950 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8951 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8952 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8953 
8954 		/* Skip any modesets/resets */
8955 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8956 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8957 			continue;
8958 
8959 		/* Skip any thing not scale or underscan changes */
8960 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8961 			continue;
8962 
8963 		lock_and_validation_needed = true;
8964 	}
8965 
8966 	/**
8967 	 * Streams and planes are reset when there are changes that affect
8968 	 * bandwidth. Anything that affects bandwidth needs to go through
8969 	 * DC global validation to ensure that the configuration can be applied
8970 	 * to hardware.
8971 	 *
8972 	 * We have to currently stall out here in atomic_check for outstanding
8973 	 * commits to finish in this case because our IRQ handlers reference
8974 	 * DRM state directly - we can end up disabling interrupts too early
8975 	 * if we don't.
8976 	 *
8977 	 * TODO: Remove this stall and drop DM state private objects.
8978 	 */
8979 	if (lock_and_validation_needed) {
8980 		ret = dm_atomic_get_state(state, &dm_state);
8981 		if (ret)
8982 			goto fail;
8983 
8984 		ret = do_aquire_global_lock(dev, state);
8985 		if (ret)
8986 			goto fail;
8987 
8988 #if defined(CONFIG_DRM_AMD_DC_DCN)
8989 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8990 			goto fail;
8991 
8992 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8993 		if (ret)
8994 			goto fail;
8995 #endif
8996 
8997 		/*
8998 		 * Perform validation of MST topology in the state:
8999 		 * We need to perform MST atomic check before calling
9000 		 * dc_validate_global_state(), or there is a chance
9001 		 * to get stuck in an infinite loop and hang eventually.
9002 		 */
9003 		ret = drm_dp_mst_atomic_check(state);
9004 		if (ret)
9005 			goto fail;
9006 		status = dc_validate_global_state(dc, dm_state->context, false);
9007 		if (status != DC_OK) {
9008 			drm_dbg_atomic(dev,
9009 				       "DC global validation failure: %s (%d)",
9010 				       dc_status_to_str(status), status);
9011 			ret = -EINVAL;
9012 			goto fail;
9013 		}
9014 	} else {
9015 		/*
9016 		 * The commit is a fast update. Fast updates shouldn't change
9017 		 * the DC context, affect global validation, and can have their
9018 		 * commit work done in parallel with other commits not touching
9019 		 * the same resource. If we have a new DC context as part of
9020 		 * the DM atomic state from validation we need to free it and
9021 		 * retain the existing one instead.
9022 		 *
9023 		 * Furthermore, since the DM atomic state only contains the DC
9024 		 * context and can safely be annulled, we can free the state
9025 		 * and clear the associated private object now to free
9026 		 * some memory and avoid a possible use-after-free later.
9027 		 */
9028 
9029 		for (i = 0; i < state->num_private_objs; i++) {
9030 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9031 
9032 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9033 				int j = state->num_private_objs-1;
9034 
9035 				dm_atomic_destroy_state(obj,
9036 						state->private_objs[i].state);
9037 
9038 				/* If i is not at the end of the array then the
9039 				 * last element needs to be moved to where i was
9040 				 * before the array can safely be truncated.
9041 				 */
9042 				if (i != j)
9043 					state->private_objs[i] =
9044 						state->private_objs[j];
9045 
9046 				state->private_objs[j].ptr = NULL;
9047 				state->private_objs[j].state = NULL;
9048 				state->private_objs[j].old_state = NULL;
9049 				state->private_objs[j].new_state = NULL;
9050 
9051 				state->num_private_objs = j;
9052 				break;
9053 			}
9054 		}
9055 	}
9056 
9057 	/* Store the overall update type for use later in atomic check. */
9058 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9059 		struct dm_crtc_state *dm_new_crtc_state =
9060 			to_dm_crtc_state(new_crtc_state);
9061 
9062 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9063 							 UPDATE_TYPE_FULL :
9064 							 UPDATE_TYPE_FAST;
9065 	}
9066 
9067 	/* Must be success */
9068 	WARN_ON(ret);
9069 	return ret;
9070 
9071 fail:
9072 	if (ret == -EDEADLK)
9073 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9074 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9075 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9076 	else
9077 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9078 
9079 	return ret;
9080 }
9081 
is_dp_capable_without_timing_msa(struct dc * dc,struct amdgpu_dm_connector * amdgpu_dm_connector)9082 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9083 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9084 {
9085 	uint8_t dpcd_data;
9086 	bool capable = false;
9087 
9088 	if (amdgpu_dm_connector->dc_link &&
9089 		dm_helpers_dp_read_dpcd(
9090 				NULL,
9091 				amdgpu_dm_connector->dc_link,
9092 				DP_DOWN_STREAM_PORT_COUNT,
9093 				&dpcd_data,
9094 				sizeof(dpcd_data))) {
9095 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9096 	}
9097 
9098 	return capable;
9099 }
amdgpu_dm_update_freesync_caps(struct drm_connector * connector,struct edid * edid)9100 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9101 					struct edid *edid)
9102 {
9103 	int i;
9104 	bool edid_check_required;
9105 	struct detailed_timing *timing;
9106 	struct detailed_non_pixel *data;
9107 	struct detailed_data_monitor_range *range;
9108 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9109 			to_amdgpu_dm_connector(connector);
9110 	struct dm_connector_state *dm_con_state = NULL;
9111 
9112 	struct drm_device *dev = connector->dev;
9113 	struct amdgpu_device *adev = drm_to_adev(dev);
9114 	bool freesync_capable = false;
9115 
9116 	if (!connector->state) {
9117 		DRM_ERROR("%s - Connector has no state", __func__);
9118 		goto update;
9119 	}
9120 
9121 	if (!edid) {
9122 		dm_con_state = to_dm_connector_state(connector->state);
9123 
9124 		amdgpu_dm_connector->min_vfreq = 0;
9125 		amdgpu_dm_connector->max_vfreq = 0;
9126 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9127 
9128 		goto update;
9129 	}
9130 
9131 	dm_con_state = to_dm_connector_state(connector->state);
9132 
9133 	edid_check_required = false;
9134 	if (!amdgpu_dm_connector->dc_sink) {
9135 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9136 		goto update;
9137 	}
9138 	if (!adev->dm.freesync_module)
9139 		goto update;
9140 	/*
9141 	 * if edid non zero restrict freesync only for dp and edp
9142 	 */
9143 	if (edid) {
9144 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9145 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9146 			edid_check_required = is_dp_capable_without_timing_msa(
9147 						adev->dm.dc,
9148 						amdgpu_dm_connector);
9149 		}
9150 	}
9151 	if (edid_check_required == true && (edid->version > 1 ||
9152 	   (edid->version == 1 && edid->revision > 1))) {
9153 		for (i = 0; i < 4; i++) {
9154 
9155 			timing	= &edid->detailed_timings[i];
9156 			data	= &timing->data.other_data;
9157 			range	= &data->data.range;
9158 			/*
9159 			 * Check if monitor has continuous frequency mode
9160 			 */
9161 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9162 				continue;
9163 			/*
9164 			 * Check for flag range limits only. If flag == 1 then
9165 			 * no additional timing information provided.
9166 			 * Default GTF, GTF Secondary curve and CVT are not
9167 			 * supported
9168 			 */
9169 			if (range->flags != 1)
9170 				continue;
9171 
9172 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9173 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9174 			amdgpu_dm_connector->pixel_clock_mhz =
9175 				range->pixel_clock_mhz * 10;
9176 			break;
9177 		}
9178 
9179 		if (amdgpu_dm_connector->max_vfreq -
9180 		    amdgpu_dm_connector->min_vfreq > 10) {
9181 
9182 			freesync_capable = true;
9183 		}
9184 	}
9185 
9186 update:
9187 	if (dm_con_state)
9188 		dm_con_state->freesync_capable = freesync_capable;
9189 
9190 	if (connector->vrr_capable_property)
9191 		drm_connector_set_vrr_capable_property(connector,
9192 						       freesync_capable);
9193 }
9194 
amdgpu_dm_set_psr_caps(struct dc_link * link)9195 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9196 {
9197 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9198 
9199 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9200 		return;
9201 	if (link->type == dc_connection_none)
9202 		return;
9203 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9204 					dpcd_data, sizeof(dpcd_data))) {
9205 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9206 
9207 		if (dpcd_data[0] == 0) {
9208 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9209 			link->psr_settings.psr_feature_enabled = false;
9210 		} else {
9211 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9212 			link->psr_settings.psr_feature_enabled = true;
9213 		}
9214 
9215 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9216 	}
9217 }
9218 
9219 /*
9220  * amdgpu_dm_link_setup_psr() - configure psr link
9221  * @stream: stream state
9222  *
9223  * Return: true if success
9224  */
amdgpu_dm_link_setup_psr(struct dc_stream_state * stream)9225 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9226 {
9227 	struct dc_link *link = NULL;
9228 	struct psr_config psr_config = {0};
9229 	struct psr_context psr_context = {0};
9230 	bool ret = false;
9231 
9232 	if (stream == NULL)
9233 		return false;
9234 
9235 	link = stream->link;
9236 
9237 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9238 
9239 	if (psr_config.psr_version > 0) {
9240 		psr_config.psr_exit_link_training_required = 0x1;
9241 		psr_config.psr_frame_capture_indication_req = 0;
9242 		psr_config.psr_rfb_setup_time = 0x37;
9243 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9244 		psr_config.allow_smu_optimizations = 0x0;
9245 
9246 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9247 
9248 	}
9249 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9250 
9251 	return ret;
9252 }
9253 
9254 /*
9255  * amdgpu_dm_psr_enable() - enable psr f/w
9256  * @stream: stream state
9257  *
9258  * Return: true if success
9259  */
amdgpu_dm_psr_enable(struct dc_stream_state * stream)9260 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9261 {
9262 	struct dc_link *link = stream->link;
9263 	unsigned int vsync_rate_hz = 0;
9264 	struct dc_static_screen_params params = {0};
9265 	/* Calculate number of static frames before generating interrupt to
9266 	 * enter PSR.
9267 	 */
9268 	// Init fail safe of 2 frames static
9269 	unsigned int num_frames_static = 2;
9270 
9271 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9272 
9273 	vsync_rate_hz = div64_u64(div64_u64((
9274 			stream->timing.pix_clk_100hz * 100),
9275 			stream->timing.v_total),
9276 			stream->timing.h_total);
9277 
9278 	/* Round up
9279 	 * Calculate number of frames such that at least 30 ms of time has
9280 	 * passed.
9281 	 */
9282 	if (vsync_rate_hz != 0) {
9283 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9284 		num_frames_static = (30000 / frame_time_microsec) + 1;
9285 	}
9286 
9287 	params.triggers.cursor_update = true;
9288 	params.triggers.overlay_update = true;
9289 	params.triggers.surface_update = true;
9290 	params.num_frames = num_frames_static;
9291 
9292 	dc_stream_set_static_screen_params(link->ctx->dc,
9293 					   &stream, 1,
9294 					   &params);
9295 
9296 	return dc_link_set_psr_allow_active(link, true, false);
9297 }
9298 
9299 /*
9300  * amdgpu_dm_psr_disable() - disable psr f/w
9301  * @stream:  stream state
9302  *
9303  * Return: true if success
9304  */
amdgpu_dm_psr_disable(struct dc_stream_state * stream)9305 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9306 {
9307 
9308 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9309 
9310 	return dc_link_set_psr_allow_active(stream->link, false, true);
9311 }
9312 
9313 /*
9314  * amdgpu_dm_psr_disable() - disable psr f/w
9315  * if psr is enabled on any stream
9316  *
9317  * Return: true if success
9318  */
amdgpu_dm_psr_disable_all(struct amdgpu_display_manager * dm)9319 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9320 {
9321 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9322 	return dc_set_psr_allow_active(dm->dc, false);
9323 }
9324 
amdgpu_dm_trigger_timing_sync(struct drm_device * dev)9325 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9326 {
9327 	struct amdgpu_device *adev = drm_to_adev(dev);
9328 	struct dc *dc = adev->dm.dc;
9329 	int i;
9330 
9331 	mutex_lock(&adev->dm.dc_lock);
9332 	if (dc->current_state) {
9333 		for (i = 0; i < dc->current_state->stream_count; ++i)
9334 			dc->current_state->streams[i]
9335 				->triggered_crtc_reset.enabled =
9336 				adev->dm.force_timing_sync;
9337 
9338 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9339 		dc_trigger_sync(dc, dc->current_state);
9340 	}
9341 	mutex_unlock(&adev->dm.dc_lock);
9342 }
9343