xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/display/intel_atomic.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "intel_atomic.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
#include "intel_sprite.h"

/**
 * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
 * @connector: Connector to get the property for.
 * @state: Connector state to retrieve the property from.
 * @property: Property to retrieve.
 * @val: Return value for the property.
 *
 * Returns the atomic property value for a digital connector.
 */
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
						const struct drm_connector_state *state,
						struct drm_property *property,
						u64 *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(state);

	if (property == dev_priv->force_audio_property)
		*val = intel_conn_state->force_audio;
	else if (property == dev_priv->broadcast_rgb_property)
		*val = intel_conn_state->broadcast_rgb;
	else {
		drm_dbg_atomic(&dev_priv->drm,
			       "Unknown property [PROP:%d:%s]\n",
			       property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * intel_digital_connector_atomic_set_property - hook for connector->atomic_set_property.
 * @connector: Connector to set the property for.
 * @state: Connector state to set the property on.
 * @property: Property to set.
 * @val: New value for the property.
 *
 * Sets the atomic property value for a digital connector.
 */
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
						struct drm_connector_state *state,
						struct drm_property *property,
						u64 val)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(state);

	if (property == dev_priv->force_audio_property) {
		intel_conn_state->force_audio = val;
		return 0;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		intel_conn_state->broadcast_rgb = val;
		return 0;
	}

	drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
		       property->base.id, property->name);
	return -EINVAL;
}

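/*
 * Two property blobs are considered equal when both are NULL, or when both
 * are non-NULL with the same length and byte-for-byte identical contents.
 */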
static bool blob_equal(const struct drm_property_blob *a,
		       const struct drm_property_blob *b)
{
	if (a && b)
		return a->length == b->length &&
			!memcmp(a->data, b->data, a->length);

	return !a == !b;
}

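/*
 * Connector ->atomic_check hook for digital connectors: run the HDCP and
 * PSR checks and, if any property that is handled via fastset changed
 * (forced audio, broadcast RGB, colorspace, aspect ratio, content type,
 * scaling mode, HDR metadata), mark the bound CRTC state as mode_changed so
 * its state is recomputed even if no full modeset ends up being required.
 */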
int intel_digital_connector_atomic_check(struct drm_connector *conn,
					 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct intel_digital_connector_state *new_conn_state =
		to_intel_digital_connector_state(new_state);
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct intel_digital_connector_state *old_conn_state =
		to_intel_digital_connector_state(old_state);
	struct drm_crtc_state *crtc_state;

	intel_hdcp_atomic_check(conn, old_state, new_state);
	intel_psr_atomic_check(conn, old_state, new_state);

	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);

	/*
	 * These properties are handled by fastset, and might not end
	 * up in a modeset.
	 */
	if (new_conn_state->force_audio != old_conn_state->force_audio ||
	    new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
	    new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
	    new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
	    new_conn_state->base.content_type != old_conn_state->base.content_type ||
	    new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
	    !blob_equal(new_conn_state->base.hdr_output_metadata,
			old_conn_state->base.hdr_output_metadata))
		crtc_state->mode_changed = true;

	return 0;
}

/**
 * intel_digital_connector_duplicate_state - duplicate connector state
 * @connector: digital connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * digital connector specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
intel_digital_connector_duplicate_state(struct drm_connector *connector)
{
	struct intel_digital_connector_state *state;

	state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &state->base);
	return &state->base;
}

/**
 * intel_connector_needs_modeset - check if connector needs a modeset
 * @state: the atomic state corresponding to this modeset
 * @connector: the connector
 *
 * Returns: %true if @connector is switching to a different CRTC, or if the
 * CRTC it will be using needs a full modeset.
 */
bool
intel_connector_needs_modeset(struct intel_atomic_state *state,
			      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state, *new_conn_state;

	old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector);
	new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector);

	return old_conn_state->crtc != new_conn_state->crtc ||
	       (new_conn_state->crtc &&
		drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base,
									    new_conn_state->crtc)));
}

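/*
 * Add the connector to the atomic state (if not already added) and return
 * its Intel-specific digital connector state, or an ERR_PTR() on failure.
 */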
struct intel_digital_connector_state *
intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
					 struct intel_connector *connector)
{
	struct drm_connector_state *conn_state;

	conn_state = drm_atomic_get_connector_state(&state->base,
						    &connector->base);
	if (IS_ERR(conn_state))
		return ERR_CAST(conn_state);

	return to_intel_digital_connector_state(conn_state);
}

/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state = to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *crtc_state;

	crtc_state = kmemdup(old_crtc_state, sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->uapi);

	/* copy color blobs */
	if (crtc_state->hw.degamma_lut)
		drm_property_blob_get(crtc_state->hw.degamma_lut);
	if (crtc_state->hw.ctm)
		drm_property_blob_get(crtc_state->hw.ctm);
	if (crtc_state->hw.gamma_lut)
		drm_property_blob_get(crtc_state->hw.gamma_lut);

	crtc_state->update_pipe = false;
	crtc_state->disable_lp_wm = false;
	crtc_state->disable_cxsr = false;
	crtc_state->update_wm_pre = false;
	crtc_state->update_wm_post = false;
	crtc_state->fifo_changed = false;
	crtc_state->preload_luts = false;
	crtc_state->inherited = false;
	crtc_state->wm.need_postvbl_update = false;
	crtc_state->fb_bits = 0;
	crtc_state->update_planes = 0;
	crtc_state->dsb = NULL;

	return &crtc_state->uapi;
}

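/* Drop the references on the hw state's degamma/gamma LUT and CTM blobs. */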
static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state)
{
	drm_property_blob_put(crtc_state->hw.degamma_lut);
	drm_property_blob_put(crtc_state->hw.gamma_lut);
	drm_property_blob_put(crtc_state->hw.ctm);
}

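/*
 * Release the objects referenced by the hw part of the CRTC state
 * (currently just the color management blobs).
 */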
void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
{
	intel_crtc_put_color_blobs(crtc_state);
}

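/*
 * Copy the degamma/gamma LUT and CTM blobs from the uapi CRTC state into
 * the hw CRTC state, dropping the references held on the previous blobs.
 */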
void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state)
{
	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
				  crtc_state->uapi.degamma_lut);
	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
				  crtc_state->uapi.gamma_lut);
	drm_property_replace_blob(&crtc_state->hw.ctm,
				  crtc_state->uapi.ctm);
}

/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			 struct drm_crtc_state *state)
{
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);

	drm_WARN_ON(crtc->dev, crtc_state->dsb);

	__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
	intel_crtc_free_hw_state(crtc_state);
	kfree(crtc_state);
}

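/*
 * Assign a hardware scaler to a single user (the CRTC panel fitter or a
 * plane). If the user has no scaler attached yet, the first free scaler is
 * claimed. The scaler mode is then chosen from the platform generation and
 * the framebuffer format: planar YUV needs the NV12/planar modes (except on
 * gen11+ HDR planes, which have a dedicated chroma upsampler), and on gen9
 * (non-GLK) hardware a lone scaler user is moved to scaler 0 to get high
 * quality (HQ) mode.
 */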
static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
				      int num_scalers_need, struct intel_crtc *intel_crtc,
				      const char *name, int idx,
				      struct intel_plane_state *plane_state,
				      int *scaler_id)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int j;
	u32 mode;

	if (*scaler_id < 0) {
		/* find a free scaler */
		for (j = 0; j < intel_crtc->num_scalers; j++) {
			if (scaler_state->scalers[j].in_use)
				continue;

			*scaler_id = j;
			scaler_state->scalers[*scaler_id].in_use = 1;
			break;
		}
	}

	if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
		     "Cannot find scaler for %s:%d\n", name, idx))
		return;

	/* set scaler mode */
	if (plane_state && plane_state->hw.fb &&
	    plane_state->hw.fb->format->is_yuv &&
	    plane_state->hw.fb->format->num_planes > 1) {
		struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

		if (IS_GEN(dev_priv, 9) &&
		    !IS_GEMINILAKE(dev_priv)) {
			mode = SKL_PS_SCALER_MODE_NV12;
		} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/*
			 * On gen11+'s HDR planes we only use the scaler for
			 * scaling. They have a dedicated chroma upsampler, so
			 * we don't need the scaler to upsample the UV plane.
			 */
			mode = PS_SCALER_MODE_NORMAL;
		} else {
			struct intel_plane *linked =
				plane_state->planar_linked_plane;

			mode = PS_SCALER_MODE_PLANAR;

			if (linked)
				mode |= PS_PLANE_Y_SEL(linked->id);
		}
	} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
		mode = PS_SCALER_MODE_NORMAL;
	} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
		/*
		 * when only 1 scaler is in use on a pipe with 2 scalers
		 * scaler 0 operates in high quality (HQ) mode.
		 * In this case use scaler 0 to take advantage of HQ mode
		 */
		scaler_state->scalers[*scaler_id].in_use = 0;
		*scaler_id = 0;
		scaler_state->scalers[0].in_use = 1;
		mode = SKL_PS_SCALER_MODE_HQ;
	} else {
		mode = SKL_PS_SCALER_MODE_DYN;
	}

	drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
		    intel_crtc->pipe, *scaler_id, name, idx);
	scaler_state->scalers[*scaler_id].mode = mode;
}

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev_priv: i915 device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on the staged scaling requests for
 * @intel_crtc and its planes. It is called from the crtc-level check path.
 * If the requests are supportable, it attaches scalers to the requested
 * planes and crtc.
 *
 * This function takes into account the current scaler(s) in use by any planes
 * that are not part of this atomic state.
 *
 * Returns:
 * 0 - scalers were set up successfully
 * error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
			       struct intel_crtc *intel_crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_plane *plane = NULL;
	struct intel_plane *intel_plane;
	struct intel_plane_state *plane_state = NULL;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_atomic_state *drm_state = crtc_state->uapi.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
	int num_scalers_need;
	int i;

	num_scalers_need = hweight32(scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */

	/* fail if required scalers > available scalers */
	if (num_scalers_need > intel_crtc->num_scalers) {
		drm_dbg_kms(&dev_priv->drm,
			    "Too many scaling requests %d > %d\n",
			    num_scalers_need, intel_crtc->num_scalers);
		return -EINVAL;
	}

	/* walkthrough scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		int *scaler_id;
		const char *name;
		int idx;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			name = "CRTC";
			idx = intel_crtc->base.base.id;

			/* panel fitter case: assign as a crtc scaler */
			scaler_id = &scaler_state->scaler_id;
		} else {
			name = "PLANE";

			/* plane scaler case: assign as a plane scaler */
			/* find the plane that set the bit as scaler_user */
			plane = drm_state->planes[i].ptr;

			/*
			 * to enable/disable hq mode, add planes that are using scaler
			 * into this transaction
			 */
			if (!plane) {
				struct drm_plane_state *state;

				/*
				 * GLK+ scalers don't have a HQ mode so it
				 * isn't necessary to change between HQ and dyn mode
				 * on those platforms.
				 */
				if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					continue;

				plane = drm_plane_from_index(&dev_priv->drm, i);
				state = drm_atomic_get_plane_state(drm_state, plane);
				if (IS_ERR(state)) {
					drm_dbg_kms(&dev_priv->drm,
						    "Failed to add [PLANE:%d] to drm_state\n",
						    plane->base.id);
					return PTR_ERR(state);
				}
			}

			intel_plane = to_intel_plane(plane);
			idx = plane->base.id;

			/* plane on different crtc cannot be a scaler user of this crtc */
			if (drm_WARN_ON(&dev_priv->drm,
					intel_plane->pipe != intel_crtc->pipe))
				continue;

			plane_state = intel_atomic_get_new_plane_state(intel_state,
								       intel_plane);
			scaler_id = &plane_state->scaler_id;
		}

		intel_atomic_setup_scaler(scaler_state, num_scalers_need,
					  intel_crtc, name, idx,
					  plane_state, scaler_id);
	}

	return 0;
}

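/*
 * Allocate the i915 subclass of drm_atomic_state and initialize the common
 * base state, or return NULL on failure.
 */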
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}

	return &state->base;
}

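/*
 * Free an intel_atomic_state: release the base state, the global object
 * states array and the commit_ready fence, then free the structure itself.
 */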
void intel_atomic_state_free(struct drm_atomic_state *_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);

	drm_atomic_state_default_release(&state->base);
	kfree(state->global_objs);

	i915_sw_fence_fini(&state->commit_ready);

	kfree(state);
}

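/*
 * Reset an intel_atomic_state so it can be reused, e.g. when the atomic
 * check must be restarted: clear the base state and the global object
 * states, and reset the i915-specific flags.
 */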
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	drm_atomic_state_default_clear(&state->base);
	intel_atomic_clear_global_state(state);

	state->dpll_set = state->modeset = false;
}

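/*
 * Add the CRTC to the atomic state (if not already added) and return its
 * Intel-specific state, or an ERR_PTR() on failure.
 */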
struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
			    struct intel_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
	if (IS_ERR(crtc_state))
		return ERR_CAST(crtc_state);

	return to_intel_crtc_state(crtc_state);
}
542*4882a593Smuzhiyun }
543