/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#include <linux/dma-fence.h>
#include <linux/ktime.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "drm_crtc_helper_internal.h"
#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * This helper library provides implementations of check and commit functions on
 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
 * also provides convenience implementations for the atomic state handling
 * callbacks for drivers which don't need to subclass the drm core structures to
 * add their own additional internal state.
 *
 * This library also provides default implementations for the check callback in
 * drm_atomic_helper_check() and for the commit callback with
 * drm_atomic_helper_commit(). But the individual stages and callbacks are
 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
 * together with a driver private modeset implementation.
 *
 * This library also provides implementations for all the legacy driver
 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
 * drm_atomic_helper_disable_plane() and the various functions to implement
 * set_property callbacks. New drivers must not implement these functions
 * themselves but must use the provided helpers.
 *
 * The atomic helper uses the same function table structures as all other
 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
 * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
 * also shares the &struct drm_plane_helper_funcs function table with the plane
 * helpers.
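 *
 * A minimal sketch of how a driver typically wires the two top-level helpers
 * into &struct drm_mode_config_funcs (drm_gem_fb_create() is just one common
 * choice for the fb_create hook, not something these helpers require):
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};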
 */

static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *plane_state,
				struct drm_plane *plane)
{
	struct drm_crtc_state *crtc_state;

	if (old_plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   old_plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}
}

static int handle_conflicting_encoders(struct drm_atomic_state *state,
				       bool disable_conflicting_encoders)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_encoder *encoder;
	unsigned encoder_mask = 0;
	int i, ret = 0;

	/*
	 * First loop, find all newly assigned encoders from the connectors
	 * part of the state. If the same encoder is assigned to multiple
	 * connectors bail out.
	 */
	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
		struct drm_encoder *new_encoder;

		if (!new_conn_state->crtc)
			continue;

		if (funcs->atomic_best_encoder)
			new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
		else if (funcs->best_encoder)
			new_encoder = funcs->best_encoder(connector);
		else
			new_encoder = drm_connector_get_single_encoder(connector);

		if (new_encoder) {
			if (encoder_mask & drm_encoder_mask(new_encoder)) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
						 new_encoder->base.id, new_encoder->name,
						 connector->base.id, connector->name);

				return -EINVAL;
			}

			encoder_mask |= drm_encoder_mask(new_encoder);
		}
	}

	if (!encoder_mask)
		return 0;

	/*
	 * Second loop, iterate over all connectors not part of the state.
	 *
	 * If a conflicting encoder is found and disable_conflicting_encoders
	 * is not set, an error is returned. Userspace can provide a solution
	 * through the atomic ioctl.
	 *
	 * If the flag is set conflicting connectors are removed from the CRTC
	 * and the CRTC is disabled if no encoder is left. This preserves
	 * compatibility with the legacy set_config behavior.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_crtc_state *crtc_state;

		if (drm_atomic_get_new_connector_state(state, connector))
			continue;

		encoder = connector->state->best_encoder;
		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
			continue;

		if (!disable_conflicting_encoders) {
			DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
					 encoder->base.id, encoder->name,
					 connector->state->crtc->base.id,
					 connector->state->crtc->name,
					 connector->base.id, connector->name);
			ret = -EINVAL;
			goto out;
		}

		new_conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(new_conn_state)) {
			ret = PTR_ERR(new_conn_state);
			goto out;
		}

		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
				 encoder->base.id, encoder->name,
				 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
				 connector->base.id, connector->name);

		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
		if (ret)
			goto out;

		if (!crtc_state->connector_mask) {
			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
								NULL);
			if (ret < 0)
				goto out;

			crtc_state->active = false;
		}
	}
out:
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

static void
set_best_encoder(struct drm_atomic_state *state,
		 struct drm_connector_state *conn_state,
		 struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;

	if (conn_state->best_encoder) {
		/* Unset the encoder_mask in the old crtc state. */
		crtc = conn_state->connector->state->crtc;

		/* A NULL crtc is an error here because we should have
		 * duplicated a NULL best_encoder when crtc was NULL.
		 * As an exception restoring duplicated atomic state
		 * during resume is allowed, so don't warn when
		 * best_encoder is equal to encoder we intend to set.
		 */
		WARN_ON(!crtc && encoder != conn_state->best_encoder);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask &=
				~drm_encoder_mask(conn_state->best_encoder);
		}
	}

	if (encoder) {
		crtc = conn_state->crtc;
		WARN_ON(!crtc);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask |=
				drm_encoder_mask(encoder);
		}
	}

	conn_state->best_encoder = encoder;
}

static void
steal_encoder(struct drm_atomic_state *state,
	      struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		struct drm_crtc *encoder_crtc;

		if (new_connector_state->best_encoder != encoder)
			continue;

		encoder_crtc = old_connector_state->crtc;

		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
				 encoder->base.id, encoder->name,
				 encoder_crtc->base.id, encoder_crtc->name);

		set_best_encoder(state, new_connector_state, NULL);

		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
		crtc_state->connectors_changed = true;

		return;
	}
}

static int
update_connector_routing(struct drm_atomic_state *state,
			 struct drm_connector *connector,
			 struct drm_connector_state *old_connector_state,
			 struct drm_connector_state *new_connector_state)
{
	const struct drm_connector_helper_funcs *funcs;
	struct drm_encoder *new_encoder;
	struct drm_crtc_state *crtc_state;

	DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
			 connector->base.id,
			 connector->name);

	if (old_connector_state->crtc != new_connector_state->crtc) {
		if (old_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}

		if (new_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}
	}

	if (!new_connector_state->crtc) {
		DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
				 connector->base.id,
				 connector->name);

		set_best_encoder(state, new_connector_state, NULL);

		return 0;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state,
						   new_connector_state->crtc);
	/*
	 * For compatibility with legacy users, we want to make sure that
	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
	 * which would result in anything else must be considered invalid, to
	 * avoid turning on new displays on dead connectors.
	 *
	 * Since the connector can be unregistered at any point during an
	 * atomic check or commit, this is racy. But that's OK: all we care
	 * about is ensuring that userspace can't do anything but shut off the
	 * display on a connector that was destroyed after it's been notified,
	 * not before.
	 *
	 * Additionally, we also want to ignore connector registration when
	 * we're trying to restore an atomic state during system resume since
	 * there's a chance the connector may have been destroyed during the
	 * process, but it's better to ignore that than to cause
	 * drm_atomic_helper_resume() to fail.
	 */
	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
	    crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	funcs = connector->helper_private;

	if (funcs->atomic_best_encoder)
		new_encoder = funcs->atomic_best_encoder(connector,
							 new_connector_state);
	else if (funcs->best_encoder)
		new_encoder = funcs->best_encoder(connector);
	else
		new_encoder = drm_connector_get_single_encoder(connector);

	if (!new_encoder) {
		DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
				 connector->base.id,
				 connector->name);
		return -EINVAL;
	}

	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
				 new_encoder->base.id,
				 new_encoder->name,
				 new_connector_state->crtc->base.id,
				 new_connector_state->crtc->name);
		return -EINVAL;
	}

	if (new_encoder == new_connector_state->best_encoder) {
		set_best_encoder(state, new_connector_state, new_encoder);

		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
				 connector->base.id,
				 connector->name,
				 new_encoder->base.id,
				 new_encoder->name,
				 new_connector_state->crtc->base.id,
				 new_connector_state->crtc->name);

		return 0;
	}

	steal_encoder(state, new_encoder);

	set_best_encoder(state, new_connector_state, new_encoder);

	crtc_state->connectors_changed = true;

	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
			 connector->base.id,
			 connector->name,
			 new_encoder->base.id,
			 new_encoder->name,
			 new_connector_state->crtc->base.id,
			 new_connector_state->crtc->name);

	return 0;
}

static int
mode_fixup(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;
	int ret;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
	}

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);

		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
			continue;

		new_crtc_state =
			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call ->mode_fixup twice.
		 */
		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		bridge = drm_bridge_chain_get_first_bridge(encoder);
		ret = drm_atomic_bridge_chain_check(bridge,
						    new_crtc_state,
						    new_conn_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
			return ret;
		}

		if (funcs && funcs->atomic_check) {
			ret = funcs->atomic_check(encoder, new_crtc_state,
						  new_conn_state);
			if (ret) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
						 encoder->base.id, encoder->name);
				return ret;
			}
		} else if (funcs && funcs->mode_fixup) {
			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
						&new_crtc_state->adjusted_mode);
			if (!ret) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
						 encoder->base.id, encoder->name);
				return -EINVAL;
			}
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->enable)
			continue;

		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		funcs = crtc->helper_private;
		if (!funcs || !funcs->mode_fixup)
			continue;

		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
					&new_crtc_state->adjusted_mode);
		if (!ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
					 crtc->base.id, crtc->name);
			return -EINVAL;
		}
	}

	return 0;
}

static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
					    struct drm_encoder *encoder,
					    struct drm_crtc *crtc,
					    const struct drm_display_mode *mode)
{
	struct drm_bridge *bridge;
	enum drm_mode_status ret;

	ret = drm_encoder_mode_valid(encoder, mode);
	if (ret != MODE_OK) {
		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
				 encoder->base.id, encoder->name);
		return ret;
	}

	bridge = drm_bridge_chain_get_first_bridge(encoder);
	ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
					  mode);
	if (ret != MODE_OK) {
		DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
		return ret;
	}

	ret = drm_crtc_mode_valid(crtc, mode);
	if (ret != MODE_OK) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
				 crtc->base.id, crtc->name);
		return ret;
	}

	return ret;
}

static int
mode_valid(struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		struct drm_encoder *encoder = conn_state->best_encoder;
		struct drm_crtc *crtc = conn_state->crtc;
		struct drm_crtc_state *crtc_state;
		enum drm_mode_status mode_status;
		const struct drm_display_mode *mode;

		if (!crtc || !encoder)
			continue;

		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!crtc_state)
			continue;
		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
			continue;

		mode = &crtc_state->mode;

		mode_status = mode_valid_path(connector, encoder, crtc, mode);
		if (mode_status != MODE_OK)
			return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_helper_check_modeset - validate state object for modeset changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the CRTC and connector related computations for an atomic
 * update and adds any additional connectors needed for full modesets. It calls
 * the various per-object callbacks in the following order:
 *
 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
 * 3. If it's determined a modeset is needed then all connectors on the affected
 *    CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
 *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
 *    This function is only called when the encoder will be part of a configured CRTC,
 *    it must not be used for implementing connector property validation.
 *    If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
 *    instead.
 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
 *
 * &drm_crtc_state.mode_changed is set when the input mode is changed.
 * &drm_crtc_state.connectors_changed is set when a connector is added or
 * removed from the CRTC. &drm_crtc_state.active_changed is set when
 * &drm_crtc_state.active changes, which is used for DPMS.
 * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
 * See also: drm_atomic_crtc_needs_modeset()
 *
 * IMPORTANT:
 *
 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
 * without a full modeset) _must_ call this function again after that change.
 * It is permitted to call this function multiple times for the same update,
 * e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
 * adjusted dotclock for fifo space allocation and watermark computation.
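 *
 * A hedged sketch of that re-check pattern in a driver's top-level
 * &drm_mode_config_funcs.atomic_check, where
 * foo_check_planes_maybe_force_modeset() stands in for whatever driver code
 * might set &drm_crtc_state.mode_changed:
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check_modeset(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_check_planes_maybe_force_modeset(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_atomic_helper_check_modeset(dev, state);
 *	}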
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_modeset(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i, ret;
	unsigned connectors_mask = 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		bool has_connectors =
			!!new_crtc_state->connector_mask;

		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
					 crtc->base.id, crtc->name);
			new_crtc_state->mode_changed = true;
		}

		if (old_crtc_state->enable != new_crtc_state->enable) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
					 crtc->base.id, crtc->name);

			/*
			 * For clarity this assignment is done here, but
			 * enable == 0 is only true when there are no
			 * connectors and a NULL mode.
			 *
			 * The other way around is true as well. enable != 0
			 * iff connectors are attached and a mode is set.
			 */
			new_crtc_state->mode_changed = true;
			new_crtc_state->connectors_changed = true;
		}

		if (old_crtc_state->active != new_crtc_state->active) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
					 crtc->base.id, crtc->name);
			new_crtc_state->active_changed = true;
		}

		if (new_crtc_state->enable != has_connectors) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
					 crtc->base.id, crtc->name);

			return -EINVAL;
		}

		if (drm_dev_has_vblank(dev))
			new_crtc_state->no_vblank = false;
		else
			new_crtc_state->no_vblank = true;
	}

	ret = handle_conflicting_encoders(state, false);
	if (ret)
		return ret;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

		/*
		 * This only sets crtc->connectors_changed for routing changes,
		 * drivers must set crtc->connectors_changed themselves when
		 * connector properties need to be updated.
		 */
		ret = update_connector_routing(state, connector,
					       old_connector_state,
					       new_connector_state);
		if (ret)
			return ret;
		if (old_connector_state->crtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state,
								       old_connector_state->crtc);
			if (old_connector_state->link_status !=
			    new_connector_state->link_status)
				new_crtc_state->connectors_changed = true;

			if (old_connector_state->max_requested_bpc !=
			    new_connector_state->max_requested_bpc)
				new_crtc_state->connectors_changed = true;
		}

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, state);
		if (ret)
			return ret;

		connectors_mask |= BIT(i);
	}

	/*
	 * After all the routing has been prepared we need to add in any
	 * connector which is itself unchanged, but whose CRTC changes its
	 * configuration. This must be done before calling mode_fixup in case a
	 * crtc only changed its mode but has the same set of connectors.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
				 crtc->base.id, crtc->name,
				 new_crtc_state->enable ? 'y' : 'n',
				 new_crtc_state->active ? 'y' : 'n');

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret != 0)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret != 0)
			return ret;
	}

	/*
	 * Iterate over all connectors again, to make sure atomic_check()
	 * has been called on them when a modeset is forced.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		if (connectors_mask & BIT(i))
			continue;

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, state);
		if (ret)
			return ret;
	}

	/*
	 * Iterate over all connectors again, and add all affected bridges to
	 * the state.
	 */
	for_each_oldnew_connector_in_state(state, connector,
					   old_connector_state,
					   new_connector_state, i) {
		struct drm_encoder *encoder;

		encoder = old_connector_state->best_encoder;
		ret = drm_atomic_add_encoder_bridges(state, encoder);
		if (ret)
			return ret;

		encoder = new_connector_state->best_encoder;
		ret = drm_atomic_add_encoder_bridges(state, encoder);
		if (ret)
			return ret;
	}

	ret = mode_valid(state);
	if (ret)
		return ret;

	return mode_fixup(state);
}
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);

/**
 * drm_atomic_helper_check_plane_state() - Check plane state for validity
 * @plane_state: plane state to check
 * @crtc_state: CRTC state to check
 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
 * @can_position: is it legal to position the plane such that it
 *                doesn't cover the entire CRTC? This will generally
 *                only be false for primary planes.
 * @can_update_disabled: can the plane be updated while the CRTC
 *                       is disabled?
 *
 * Checks that a desired plane update is valid, and updates various
 * bits of derived state (clipped coordinates etc.). Drivers that provide
 * their own plane handling rather than helper-provided implementations may
 * still wish to call this function to avoid duplication of error checking
 * code.
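 *
 * A minimal sketch of a &drm_plane_helper_funcs.atomic_check hook built on
 * this function, here for a plane that allows neither scaling nor partial
 * CRTC coverage (foo_plane_atomic_check() is a hypothetical driver hook):
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_plane_state *new_state)
 *	{
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!new_state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
 *							   new_state->crtc);
 *		return drm_atomic_helper_check_plane_state(new_state, crtc_state,
 *							   DRM_PLANE_HELPER_NO_SCALING,
 *							   DRM_PLANE_HELPER_NO_SCALING,
 *							   false, false);
 *	}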
 *
 * RETURNS:
 * Zero if update appears valid, error code on failure
 */
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
					const struct drm_crtc_state *crtc_state,
					int min_scale,
					int max_scale,
					bool can_position,
					bool can_update_disabled)
{
	struct drm_framebuffer *fb = plane_state->fb;
	struct drm_rect *src = &plane_state->src;
	struct drm_rect *dst = &plane_state->dst;
	unsigned int rotation = plane_state->rotation;
	struct drm_rect clip = {};
	int hscale, vscale;

	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);

	*src = drm_plane_state_src(plane_state);
	*dst = drm_plane_state_dest(plane_state);

	if (!fb) {
		plane_state->visible = false;
		return 0;
	}

	/* crtc should only be NULL when disabling (i.e., !fb) */
	if (WARN_ON(!plane_state->crtc)) {
		plane_state->visible = false;
		return 0;
	}

	if (!crtc_state->enable && !can_update_disabled) {
		DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
		return -EINVAL;
	}

	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);

	/* Check scaling */
	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
	if (hscale < 0 || vscale < 0) {
		DRM_DEBUG_KMS("Invalid scaling of plane\n");
		drm_rect_debug_print("src: ", &plane_state->src, true);
		drm_rect_debug_print("dst: ", &plane_state->dst, false);
		return -ERANGE;
	}

	if (crtc_state->enable)
		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);

	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);

	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);

	if (!plane_state->visible)
		/*
		 * Plane isn't visible; some drivers can handle this
		 * so we just return success here. Drivers that can't
		 * (including those that use the primary plane helper's
		 * update function) will return an error from their
		 * update_plane handler.
		 */
		return 0;

	if (!can_position && !drm_rect_equals(dst, &clip)) {
		DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
		drm_rect_debug_print("dst: ", dst, false);
		drm_rect_debug_print("clip: ", &clip, false);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);

/**
 * drm_atomic_helper_check_planes - validate state object for planes changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the plane update related checks by calling into the
 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
 * hooks provided by the driver.
 *
 * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
 * updated planes.
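 *
 * A minimal sketch of the &struct drm_plane_helper_funcs table this function
 * calls into (the foo_* hooks are hypothetical driver functions):
 *
 *	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
 *		.atomic_check = foo_plane_atomic_check,
 *		.atomic_update = foo_plane_atomic_update,
 *	};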
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_planes(struct drm_device *dev,
			       struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state, *old_plane_state;
	int i, ret = 0;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;

		WARN_ON(!drm_modeset_is_locked(&plane->mutex));

		funcs = plane->helper_private;

		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);

		drm_atomic_helper_check_plane_damage(state, new_plane_state);

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(plane, new_plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(crtc, new_crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check_planes);

/**
 * drm_atomic_helper_check - validate state object
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * Only CRTCs and planes have check callbacks, so for any additional (global)
 * checking that a driver needs it can simply wrap that around this function.
 * Drivers without such needs can directly use this as their
 * &drm_mode_config_funcs.atomic_check callback.
 *
 * This just wraps the two parts of the state checking for planes and modeset
 * state in the default order: First it calls drm_atomic_helper_check_modeset()
 * and then drm_atomic_helper_check_planes(). The assumption is that the
 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
 * watermarks.
 *
 * Note that zpos normalization will add all enabled planes to the state, which
 * might not be desired for some drivers. For example, enabling or disabling a
 * cursor plane which has a fixed zpos value would force all other enabled
 * planes to be added to the state as well.
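 *
 * A hedged sketch of wrapping this helper with an additional driver-global
 * check (foo_check_global_limits() is a hypothetical driver function):
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return foo_check_global_limits(dev, state);
 *	}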
 *
 * RETURNS:
 * Zero for success or -errno
 */
int drm_atomic_helper_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	if (dev->mode_config.normalize_zpos) {
		ret = drm_atomic_normalize_zpos(dev, state);
		if (ret)
			return ret;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	if (state->legacy_cursor_update)
		state->async_update = !drm_atomic_helper_async_check(dev, state);

	drm_self_refresh_helper_alter_state(state);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);

static bool
crtc_needs_disable(struct drm_crtc_state *old_state,
		   struct drm_crtc_state *new_state)
{
	/*
	 * No new_state means the CRTC is off, so the only criteria is whether
	 * it's currently active or in self refresh mode.
	 */
	if (!new_state)
		return drm_atomic_crtc_effectively_active(old_state);

	/*
	 * We need to disable bridge(s) and CRTC if we're transitioning out of
	 * self-refresh and changing CRTCs at the same time, because the
	 * bridge tracks self-refresh status via CRTC state.
	 */
	if (old_state->self_refresh_active &&
	    old_state->crtc != new_state->crtc)
		return true;

	/*
	 * We also need to run through the crtc_funcs->disable() function if
	 * the CRTC is currently on, if it's transitioning to self refresh
	 * mode, or if it's in self refresh mode and needs to be fully
	 * disabled.
	 */
	return old_state->active ||
	       (old_state->self_refresh_active && !new_state->enable) ||
	       new_state->self_refresh_active;
}

1018*4882a593Smuzhiyun static void
disable_outputs(struct drm_device * dev,struct drm_atomic_state * old_state)1019*4882a593Smuzhiyun disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
1020*4882a593Smuzhiyun {
1021*4882a593Smuzhiyun struct drm_connector *connector;
1022*4882a593Smuzhiyun struct drm_connector_state *old_conn_state, *new_conn_state;
1023*4882a593Smuzhiyun struct drm_crtc *crtc;
1024*4882a593Smuzhiyun struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1025*4882a593Smuzhiyun int i;
1026*4882a593Smuzhiyun
1027*4882a593Smuzhiyun for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1028*4882a593Smuzhiyun const struct drm_encoder_helper_funcs *funcs;
1029*4882a593Smuzhiyun struct drm_encoder *encoder;
1030*4882a593Smuzhiyun struct drm_bridge *bridge;
1031*4882a593Smuzhiyun
1032*4882a593Smuzhiyun /* Shut down everything that's in the changeset and currently
1033*4882a593Smuzhiyun * still on. So need to check the old, saved state. */
1034*4882a593Smuzhiyun if (!old_conn_state->crtc)
1035*4882a593Smuzhiyun continue;
1036*4882a593Smuzhiyun
1037*4882a593Smuzhiyun old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
1038*4882a593Smuzhiyun
1039*4882a593Smuzhiyun if (new_conn_state->crtc)
1040*4882a593Smuzhiyun new_crtc_state = drm_atomic_get_new_crtc_state(
1041*4882a593Smuzhiyun old_state,
1042*4882a593Smuzhiyun new_conn_state->crtc);
1043*4882a593Smuzhiyun else
1044*4882a593Smuzhiyun new_crtc_state = NULL;
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1047*4882a593Smuzhiyun !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1048*4882a593Smuzhiyun continue;
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyun encoder = old_conn_state->best_encoder;
1051*4882a593Smuzhiyun
1052*4882a593Smuzhiyun /* We shouldn't get this far if we didn't previously have
1053*4882a593Smuzhiyun * an encoder.. but WARN_ON() rather than explode.
1054*4882a593Smuzhiyun */
1055*4882a593Smuzhiyun if (WARN_ON(!encoder))
1056*4882a593Smuzhiyun continue;
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun funcs = encoder->helper_private;
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
1061*4882a593Smuzhiyun encoder->base.id, encoder->name);
1062*4882a593Smuzhiyun
1063*4882a593Smuzhiyun /*
1064*4882a593Smuzhiyun * Each encoder has at most one connector (since we always steal
1065*4882a593Smuzhiyun * it away), so we won't call disable hooks twice.
1066*4882a593Smuzhiyun */
1067*4882a593Smuzhiyun bridge = drm_bridge_chain_get_first_bridge(encoder);
1068*4882a593Smuzhiyun drm_atomic_bridge_chain_disable(bridge, old_state);
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun /* Right function depends upon target state. */
1071*4882a593Smuzhiyun if (funcs) {
1072*4882a593Smuzhiyun if (funcs->atomic_disable)
1073*4882a593Smuzhiyun funcs->atomic_disable(encoder, old_state);
1074*4882a593Smuzhiyun else if (new_conn_state->crtc && funcs->prepare)
1075*4882a593Smuzhiyun funcs->prepare(encoder);
1076*4882a593Smuzhiyun else if (funcs->disable)
1077*4882a593Smuzhiyun funcs->disable(encoder);
1078*4882a593Smuzhiyun else if (funcs->dpms)
1079*4882a593Smuzhiyun funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
1080*4882a593Smuzhiyun }
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun drm_atomic_bridge_chain_post_disable(bridge, old_state);
1083*4882a593Smuzhiyun }
1084*4882a593Smuzhiyun
1085*4882a593Smuzhiyun for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1086*4882a593Smuzhiyun const struct drm_crtc_helper_funcs *funcs;
1087*4882a593Smuzhiyun int ret;
1088*4882a593Smuzhiyun
1089*4882a593Smuzhiyun /* Shut down everything that needs a full modeset. */
1090*4882a593Smuzhiyun if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1091*4882a593Smuzhiyun continue;
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
1094*4882a593Smuzhiyun continue;
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun funcs = crtc->helper_private;
1097*4882a593Smuzhiyun
1098*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
1099*4882a593Smuzhiyun crtc->base.id, crtc->name);
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun /* Right function depends upon target state. */
1103*4882a593Smuzhiyun if (new_crtc_state->enable && funcs->prepare)
1104*4882a593Smuzhiyun funcs->prepare(crtc);
1105*4882a593Smuzhiyun else if (funcs->atomic_disable)
1106*4882a593Smuzhiyun funcs->atomic_disable(crtc, old_crtc_state);
1107*4882a593Smuzhiyun else if (funcs->disable)
1108*4882a593Smuzhiyun funcs->disable(crtc);
1109*4882a593Smuzhiyun else if (funcs->dpms)
1110*4882a593Smuzhiyun funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1111*4882a593Smuzhiyun
1112*4882a593Smuzhiyun if (!drm_dev_has_vblank(dev))
1113*4882a593Smuzhiyun continue;
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun ret = drm_crtc_vblank_get(crtc);
1116*4882a593Smuzhiyun WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
1117*4882a593Smuzhiyun if (ret == 0)
1118*4882a593Smuzhiyun drm_crtc_vblank_put(crtc);
1119*4882a593Smuzhiyun }
1120*4882a593Smuzhiyun }

/**
 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function updates all the various legacy modeset state pointers in
 * connectors, encoders and CRTCs.
 *
 * Drivers can use this for building their own atomic commit if they don't have
 * a pure helper-based modeset implementation.
 *
 * Since these updates are not synchronized with any locking, only code paths
 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
 * legacy state filled out by this helper. De facto this means this helper and
 * the legacy state pointers are only really useful for transitioning an
 * existing driver to the atomic world.
 */
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
					      struct drm_atomic_state *old_state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/* clear out existing links and update dpms */
	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
		if (connector->encoder) {
			WARN_ON(!connector->encoder->crtc);

			connector->encoder->crtc = NULL;
			connector->encoder = NULL;
		}

		crtc = new_conn_state->crtc;
		if ((!crtc && old_conn_state->crtc) ||
		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
			int mode = DRM_MODE_DPMS_OFF;

			if (crtc && crtc->state->active)
				mode = DRM_MODE_DPMS_ON;

			connector->dpms = mode;
		}
	}

	/* set new links */
	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		if (!new_conn_state->crtc)
			continue;

		if (WARN_ON(!new_conn_state->best_encoder))
			continue;

		connector->encoder = new_conn_state->best_encoder;
		connector->encoder->crtc = new_conn_state->crtc;
	}

	/* set legacy state in the crtc structure */
	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		struct drm_plane *primary = crtc->primary;
		struct drm_plane_state *new_plane_state;

		crtc->mode = new_crtc_state->mode;
		crtc->enabled = new_crtc_state->enable;

		new_plane_state =
			drm_atomic_get_new_plane_state(old_state, primary);

		if (new_plane_state && new_plane_state->crtc == crtc) {
			crtc->x = new_plane_state->src_x >> 16;
			crtc->y = new_plane_state->src_y >> 16;
		}
	}
}
EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
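
/*
 * Example: a rough sketch of how a transitional driver that still builds its
 * own commit tail might use this helper. All foo_* functions are hypothetical
 * driver code, not part of this library:
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		foo_disable_outputs(dev, old_state);
 *
 *		drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
 *		drm_atomic_helper_calc_timestamping_constants(old_state);
 *
 *		foo_program_new_configuration(dev, old_state);
 *	}
 *
 * Refreshing the legacy pointers right after the disable step means the
 * driver's not-yet-converted code paths keep seeing consistent state while
 * the conversion to atomic proceeds piece by piece.
 */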

/**
 * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
 * @state: atomic state object
 *
 * Updates the timestamping constants used for precise vblank timestamps
 * by calling drm_calc_timestamping_constants() for all enabled CRTCs in @state.
 */
void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->enable)
			drm_calc_timestamping_constants(crtc,
							&new_crtc_state->adjusted_mode);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);

static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->mode_changed)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable && funcs->mode_set_nofb) {
			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
					 crtc->base.id, crtc->name);

			funcs->mode_set_nofb(crtc);
		}
	}

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_display_mode *mode, *adjusted_mode;
		struct drm_bridge *bridge;

		if (!new_conn_state->best_encoder)
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;
		new_crtc_state = new_conn_state->crtc->state;
		mode = &new_crtc_state->mode;
		adjusted_mode = &new_crtc_state->adjusted_mode;

		if (!new_crtc_state->mode_changed)
			continue;

		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call mode_set hooks twice.
		 */
		if (funcs && funcs->atomic_mode_set) {
			funcs->atomic_mode_set(encoder, new_crtc_state,
					       new_conn_state);
		} else if (funcs && funcs->mode_set) {
			funcs->mode_set(encoder, mode, adjusted_mode);
		}

		bridge = drm_bridge_chain_get_first_bridge(encoder);
		drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
	}
}

/**
 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function shuts down all the outputs that need to be shut down and
 * prepares them (if required) with the new mode.
 *
 * For compatibility with legacy CRTC helpers this should be called before
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
					       struct drm_atomic_state *old_state)
{
	disable_outputs(dev, old_state);

	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
	drm_atomic_helper_calc_timestamping_constants(old_state);

	crtc_set_mode(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
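
/*
 * Example: a driver that wants the runtime PM friendly ordering described
 * above groups both modeset phases before touching planes. This is only a
 * sketch of the call order from a custom commit tail, equivalent to what
 * drm_atomic_helper_commit_tail_rpm() below does:
 *
 *	drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *	drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *	drm_atomic_helper_commit_planes(dev, old_state,
 *					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 *
 * With DRM_PLANE_COMMIT_ACTIVE_ONLY the plane hooks are only called for
 * planes on an active CRTC, so the hardware is guaranteed to be powered up.
 */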

static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
						struct drm_atomic_state *old_state)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs;

		funcs = connector->helper_private;
		if (!funcs->atomic_commit)
			continue;

		if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
			WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
			funcs->atomic_commit(connector, new_conn_state);
		}
	}
}

/**
 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function enables all the outputs with the new configuration which had to
 * be turned off for the update.
 *
 * For compatibility with legacy CRTC helpers this should be called after
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
					      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		/* Need to filter out CRTCs where only planes change. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!new_crtc_state->active)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable) {
			DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
					 crtc->base.id, crtc->name);
			if (funcs->atomic_enable)
				funcs->atomic_enable(crtc, old_crtc_state);
			else if (funcs->commit)
				funcs->commit(crtc);
		}
	}

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		if (!new_conn_state->best_encoder)
			continue;

		if (!new_conn_state->crtc->state->active ||
		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call enable hooks twice.
		 */
		bridge = drm_bridge_chain_get_first_bridge(encoder);
		drm_atomic_bridge_chain_pre_enable(bridge, old_state);

		if (funcs) {
			if (funcs->atomic_enable)
				funcs->atomic_enable(encoder, old_state);
			else if (funcs->enable)
				funcs->enable(encoder);
			else if (funcs->commit)
				funcs->commit(encoder);
		}

		drm_atomic_bridge_chain_enable(bridge, old_state);
	}

	drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);

/**
 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
 * @dev: DRM device
 * @state: atomic state object with old state structures
 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
 *	Otherwise @state is the old state.
 *
 * For implicit sync, the driver should fish the exclusive fence out from the
 * incoming fbs and stash it in the drm_plane_state. This is called after
 * drm_atomic_helper_swap_state(), so it uses the current plane state (and
 * just uses the atomic state to find the changed planes).
 *
 * Note that @pre_swap is needed since the point where we block for fences moves
 * around depending upon whether an atomic commit is blocking or
 * non-blocking. For non-blocking commits all waiting needs to happen after
 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
 * to wait **before** we do anything that can't be easily rolled back. That is
 * before we call drm_atomic_helper_swap_state().
 *
 * Returns zero on success or < 0 if dma_fence_wait() fails.
 */
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
				      struct drm_atomic_state *state,
				      bool pre_swap)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int i, ret;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		if (!new_plane_state->fence)
			continue;

		WARN_ON(!new_plane_state->fb);

		/*
		 * If waiting for fences pre-swap (i.e. in a blocking commit,
		 * where the caller is still in the ioctl), userspace can
		 * still interrupt the operation. Instead of blocking until the
		 * timer expires, make the wait interruptible.
		 */
		ret = dma_fence_wait(new_plane_state->fence, pre_swap);
		if (ret)
			return ret;

		dma_fence_put(new_plane_state->fence);
		new_plane_state->fence = NULL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
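
/*
 * Example: the usual place to stash the fence is the plane's
 * &drm_plane_helper_funcs.prepare_fb hook. For a GEM based driver a minimal
 * sketch (assuming kernels of this vintage, where drm_gem_fb_prepare_fb() is
 * declared in drm_gem_framebuffer_helper.h) could simply be:
 *
 *	static int foo_plane_prepare_fb(struct drm_plane *plane,
 *					struct drm_plane_state *state)
 *	{
 *		return drm_gem_fb_prepare_fb(plane, state);
 *	}
 *
 * drm_gem_fb_prepare_fb() fishes the exclusive fence out of the GEM object
 * backing the framebuffer and sets it with drm_atomic_set_fence_for_plane(),
 * which is exactly what this helper then waits on.
 */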

/**
 * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * Helper to, after atomic commit, wait for vblanks on all affected
 * CRTCs (ie. before cleaning up old framebuffers using
 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
 * framebuffers have actually changed to optimize for the legacy cursor and
 * plane update use-case.
 *
 * Drivers using the nonblocking commit tracking support initialized by calling
 * drm_atomic_helper_setup_commit() should look at
 * drm_atomic_helper_wait_for_flip_done() as an alternative.
 */
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
				   struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i, ret;
	unsigned int crtc_mask = 0;

	/*
	 * Legacy cursor ioctls are completely unsynced, and userspace
	 * relies on that (by doing tons of cursor updates).
	 */
	if (old_state->legacy_cursor_update)
		return;

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (ret != 0)
			continue;

		crtc_mask |= drm_crtc_mask(crtc);
		old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
	}

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (!(crtc_mask & drm_crtc_mask(crtc)))
			continue;

		ret = wait_event_timeout(dev->vblank[i].queue,
					 old_state->crtcs[i].last_vblank_count !=
						drm_crtc_vblank_count(crtc),
					 msecs_to_jiffies(100));

		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
		     crtc->base.id, crtc->name);

		drm_crtc_vblank_put(crtc);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);

/**
 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * Helper to, after atomic commit, wait for page flips on all affected
 * CRTCs (ie. before cleaning up old framebuffers using
 * drm_atomic_helper_cleanup_planes()). Compared to
 * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
 * CRTCs, assuming that cursor-only updates are signalling their completion
 * immediately (or using a different path).
 *
 * This requires that drivers use the nonblocking commit tracking support
 * initialized using drm_atomic_helper_setup_commit().
 */
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
					  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	int i;

	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
		int ret;

		crtc = old_state->crtcs[i].ptr;

		if (!crtc || !commit)
			continue;

		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
				  crtc->base.id, crtc->name);
	}

	if (old_state->fake_commit)
		complete_all(&old_state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
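
/*
 * Example: drivers using the setup_commit() tracking can swap the vblank wait
 * in their commit tail for the flip_done wait. A sketch of such a custom
 * &drm_mode_config_helper_funcs.atomic_commit_tail (hypothetical foo_ name):
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *		drm_atomic_helper_commit_planes(dev, old_state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *		drm_atomic_helper_commit_hw_done(old_state);
 *		drm_atomic_helper_wait_for_flip_done(dev, old_state);
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *	}
 */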

/**
 * drm_atomic_helper_commit_tail - commit atomic update to hardware
 * @old_state: atomic state object with old state structures
 *
 * This is the default implementation for the
 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
 * that do not support runtime_pm or do not need the CRTC to be
 * enabled to perform a commit. Otherwise, see
 * drm_atomic_helper_commit_tail_rpm().
 *
 * Note that the default ordering of how the various stages are called is to
 * match the legacy modeset helper library as closely as possible.
 */
void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_fake_vblank(old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);

/**
 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
 * @old_state: new modeset state to be committed
 *
 * This is an alternative implementation for the
 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
 * that support runtime_pm or need the CRTC to be enabled to perform a
 * commit. Otherwise, one should use the default implementation
 * drm_atomic_helper_commit_tail().
 */
void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_fake_vblank(old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
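
/*
 * Example: either implementation is wired up through
 * &drm_mode_config_helper_funcs, for instance (foo_ naming is hypothetical):
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *	};
 *
 *	...
 *	dev->mode_config.helper_private = &foo_mode_config_helpers;
 *
 * Drivers that are happy with drm_atomic_helper_commit_tail() can leave the
 * hook unset; commit_tail() below falls back to it automatically.
 */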

static void commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	const struct drm_mode_config_helper_funcs *funcs;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	ktime_t start;
	s64 commit_time_ms;
	unsigned int i, new_self_refresh_mask = 0;

	funcs = dev->mode_config.helper_private;

	/*
	 * We're measuring the _entire_ commit, so the time will vary depending
	 * on how many fences and objects are involved. For the purposes of self
	 * refresh, this is desirable since it'll give us an idea of how
	 * congested things are. This will inform our decision on how often we
	 * should enter self refresh after idle.
	 *
	 * These times will be averaged out in the self refresh helpers to avoid
	 * overreacting to one outlier frame.
	 */
	start = ktime_get();

	drm_atomic_helper_wait_for_fences(dev, old_state, false);

	drm_atomic_helper_wait_for_dependencies(old_state);

	/*
	 * We cannot safely access new_crtc_state after
	 * drm_atomic_helper_commit_hw_done(), so figure out which CRTCs have
	 * self-refresh active beforehand:
	 */
	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
		if (new_crtc_state->self_refresh_active)
			new_self_refresh_mask |= BIT(i);

	if (funcs && funcs->atomic_commit_tail)
		funcs->atomic_commit_tail(old_state);
	else
		drm_atomic_helper_commit_tail(old_state);

	commit_time_ms = ktime_ms_delta(ktime_get(), start);
	if (commit_time_ms > 0)
		drm_self_refresh_helper_update_avg_times(old_state,
						(unsigned long)commit_time_ms,
						new_self_refresh_mask);

	drm_atomic_helper_commit_cleanup_done(old_state);

	drm_atomic_state_put(old_state);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	commit_tail(state);
}

/**
 * drm_atomic_helper_async_check - check if state can be committed asynchronously
 * @dev: DRM device
 * @state: the driver state object
 *
 * This helper will check if it is possible to commit the state asynchronously.
 * Async commits are not supposed to swap the states like normal sync commits
 * but just do in-place changes on the current state.
 *
 * It will return 0 if the commit can happen in an asynchronous fashion or error
 * if not. Note that an error just means it can't be committed asynchronously;
 * if it fails, the commit should be treated like a normal synchronous commit.
 */
int drm_atomic_helper_async_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane = NULL;
	struct drm_plane_state *old_plane_state = NULL;
	struct drm_plane_state *new_plane_state = NULL;
	const struct drm_plane_helper_funcs *funcs;
	int i, n_planes = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return -EINVAL;
	}

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
		n_planes++;

	/* FIXME: we support only single plane updates for now */
	if (n_planes != 1)
		return -EINVAL;

	if (!new_plane_state->crtc ||
	    old_plane_state->crtc != new_plane_state->crtc)
		return -EINVAL;

	funcs = plane->helper_private;
	if (!funcs->atomic_async_update)
		return -EINVAL;

	if (new_plane_state->fence)
		return -EINVAL;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		return -EBUSY;

	return funcs->atomic_async_check(plane, new_plane_state);
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);

/**
 * drm_atomic_helper_async_commit - commit state asynchronously
 * @dev: DRM device
 * @state: the driver state object
 *
 * This function commits a state asynchronously, i.e., not vblank
 * synchronized. It should be used on a state only when
 * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
 * the states like normal sync commits, but just do in-place changes on the
 * current state.
 *
 * TODO: Implement full swap instead of doing in-place changes.
 */
void drm_atomic_helper_async_commit(struct drm_device *dev,
				    struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	const struct drm_plane_helper_funcs *funcs;
	int i;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *new_fb = plane_state->fb;
		struct drm_framebuffer *old_fb = plane->state->fb;

		funcs = plane->helper_private;
		funcs->atomic_async_update(plane, plane_state);

		/*
		 * ->atomic_async_update() is supposed to update the
		 * plane->state in-place, make sure at least common
		 * properties have been properly updated.
		 */
		WARN_ON_ONCE(plane->state->fb != new_fb);
		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);

		/*
		 * Make sure the FBs have been swapped so that cleanups in the
		 * new_state perform a cleanup in the old FB.
		 */
		WARN_ON_ONCE(plane_state->fb != old_fb);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_async_commit);
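
/*
 * Example: a driver opting in to async plane updates provides the two hooks
 * checked above. The following is only a sketch of the required structure,
 * with hypothetical foo_ names; the in-place update and the fb swap match the
 * WARN_ON_ONCE checks in drm_atomic_helper_async_commit():
 *
 *	static int foo_plane_atomic_async_check(struct drm_plane *plane,
 *						struct drm_plane_state *state)
 *	{
 *		// e.g. only allow async updates for cursor-sized buffers
 *		if (state->crtc_w > 64 || state->crtc_h > 64)
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 *
 *	static void foo_plane_atomic_async_update(struct drm_plane *plane,
 *						  struct drm_plane_state *new_state)
 *	{
 *		struct drm_plane_state *state = plane->state;
 *
 *		state->crtc_x = new_state->crtc_x;
 *		state->crtc_y = new_state->crtc_y;
 *		state->src_x = new_state->src_x;
 *		state->src_y = new_state->src_y;
 *		swap(state->fb, new_state->fb);
 *
 *		foo_plane_program_hw(plane);
 *	}
 */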

/**
 * drm_atomic_helper_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: whether nonblocking behavior is requested.
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. This function implements nonblocking commits, using
 * drm_atomic_helper_setup_commit() and related functions.
 *
 * Committing the actual hardware state is done through the
 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
 * implementation drm_atomic_helper_commit_tail().
 *
 * RETURNS:
 * Zero for success or -errno.
 */
int drm_atomic_helper_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	int ret;

	if (state->async_update) {
		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		drm_atomic_helper_async_commit(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);

		return 0;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto err;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		goto err;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 *
	 * NOTE: Commit work has multiple phases, first hardware commit, then
	 * cleanup. We want them to overlap, hence need system_unbound_wq to
	 * make sure work items don't artificially stall on each other.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		commit_tail(state);

	return 0;

err:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
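
/*
 * Example: most atomic drivers simply plug this helper into their
 * &drm_mode_config_funcs, e.g.:
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 *
 * Only .atomic_check and .atomic_commit matter here; .fb_create is shown for
 * context only and will differ per driver, and the foo_ name is hypothetical.
 */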

/**
 * DOC: implementing nonblocking commit
 *
 * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
 * different operations against each other. Locks, especially struct
 * &drm_modeset_lock, should not be held in worker threads or any other
 * asynchronous context used to commit the hardware state.
 *
 * drm_atomic_helper_commit() implements the recommended sequence for
 * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
 *
 * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
 * need to propagate out of memory/VRAM errors to userspace, it must be called
 * synchronously.
 *
 * 2. Synchronize with any outstanding nonblocking commit worker threads which
 * might be affected by the new state update. This is handled by
 * drm_atomic_helper_setup_commit().
 *
 * Asynchronous workers need to have sufficient parallelism to be able to run
 * different atomic commits on different CRTCs in parallel. The simplest way to
 * achieve this is by running them on the &system_unbound_wq work queue. Note
 * that drivers are not required to split up atomic commits and run an
 * individual commit in parallel - userspace is supposed to do that if it cares.
 * But it might be beneficial to do that for modesets, since those necessarily
 * must be done as one global operation, and enabling or disabling a CRTC can
 * take a long time. But even that is not required.
 *
 * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
 * against all CRTCs therein. Therefore for atomic state updates which only flip
 * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
 * in its atomic check code: This would prevent committing of atomic updates to
 * multiple CRTCs in parallel. In general, adding additional state structures
 * should be avoided as much as possible, because this reduces parallelism in
 * (nonblocking) commits, both due to locking and due to commit sequencing
 * requirements.
 *
 * 3. The software state is updated synchronously with
 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
 * locks means concurrent callers never see inconsistent state. Note that commit
 * workers do not hold any locks; their access is only coordinated through
 * ordering. If workers would access state only through the pointers in the
 * free-standing state objects (currently not the case for any driver) then even
 * multiple pending commits could be in-flight at the same time.
 *
 * 4. Schedule a work item to do all subsequent steps, using the split-out
 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
 * then cleaning up the framebuffers after the old framebuffer is no longer
 * being displayed. The scheduled work should synchronize against other workers
 * using the &drm_crtc_commit infrastructure as needed. See
 * drm_atomic_helper_setup_commit() for more details.
 */

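/*
 * Condensed into code, the sequence above looks roughly like the following
 * sketch (error handling and some ordering details elided; see
 * drm_atomic_helper_commit() above for the authoritative version):
 *
 *	// step 1: acquire buffers, the only stage that may fail with -ENOMEM
 *	ret = drm_atomic_helper_prepare_planes(dev, state);
 *
 *	// step 2: synchronize against outstanding commit workers
 *	ret = drm_atomic_helper_setup_commit(state, nonblock);
 *
 *	// step 3: publish the new software state under the modeset locks
 *	ret = drm_atomic_helper_swap_state(state, true);
 *
 *	// step 4: hand the hardware commit off to a worker
 *	drm_atomic_state_get(state);
 *	queue_work(system_unbound_wq, &state->commit_work);
 */
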
stall_checks(struct drm_crtc * crtc,bool nonblock)1952*4882a593Smuzhiyun static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1953*4882a593Smuzhiyun {
1954*4882a593Smuzhiyun struct drm_crtc_commit *commit, *stall_commit = NULL;
1955*4882a593Smuzhiyun bool completed = true;
1956*4882a593Smuzhiyun int i;
1957*4882a593Smuzhiyun long ret = 0;
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun spin_lock(&crtc->commit_lock);
1960*4882a593Smuzhiyun i = 0;
1961*4882a593Smuzhiyun list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1962*4882a593Smuzhiyun if (i == 0) {
1963*4882a593Smuzhiyun completed = try_wait_for_completion(&commit->flip_done);
1964*4882a593Smuzhiyun /* Userspace is not allowed to get ahead of the previous
1965*4882a593Smuzhiyun * commit with nonblocking ones. */
1966*4882a593Smuzhiyun if (!completed && nonblock) {
1967*4882a593Smuzhiyun spin_unlock(&crtc->commit_lock);
1968*4882a593Smuzhiyun return -EBUSY;
1969*4882a593Smuzhiyun }
1970*4882a593Smuzhiyun } else if (i == 1) {
1971*4882a593Smuzhiyun stall_commit = drm_crtc_commit_get(commit);
1972*4882a593Smuzhiyun break;
1973*4882a593Smuzhiyun }
1974*4882a593Smuzhiyun
1975*4882a593Smuzhiyun i++;
1976*4882a593Smuzhiyun }
1977*4882a593Smuzhiyun spin_unlock(&crtc->commit_lock);
1978*4882a593Smuzhiyun
1979*4882a593Smuzhiyun if (!stall_commit)
1980*4882a593Smuzhiyun return 0;
1981*4882a593Smuzhiyun
1982*4882a593Smuzhiyun /* We don't want to let commits get ahead of cleanup work too much,
1983*4882a593Smuzhiyun * stalling on 2nd previous commit means triple-buffer won't ever stall.
1984*4882a593Smuzhiyun */
1985*4882a593Smuzhiyun ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
1986*4882a593Smuzhiyun 10*HZ);
1987*4882a593Smuzhiyun if (ret == 0)
1988*4882a593Smuzhiyun DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
1989*4882a593Smuzhiyun crtc->base.id, crtc->name);
1990*4882a593Smuzhiyun
1991*4882a593Smuzhiyun drm_crtc_commit_put(stall_commit);
1992*4882a593Smuzhiyun
1993*4882a593Smuzhiyun return ret < 0 ? ret : 0;
1994*4882a593Smuzhiyun }
1995*4882a593Smuzhiyun
release_crtc_commit(struct completion * completion)1996*4882a593Smuzhiyun static void release_crtc_commit(struct completion *completion)
1997*4882a593Smuzhiyun {
1998*4882a593Smuzhiyun struct drm_crtc_commit *commit = container_of(completion,
1999*4882a593Smuzhiyun typeof(*commit),
2000*4882a593Smuzhiyun flip_done);
2001*4882a593Smuzhiyun
2002*4882a593Smuzhiyun drm_crtc_commit_put(commit);
2003*4882a593Smuzhiyun }
2004*4882a593Smuzhiyun
init_commit(struct drm_crtc_commit * commit,struct drm_crtc * crtc)2005*4882a593Smuzhiyun static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
2006*4882a593Smuzhiyun {
2007*4882a593Smuzhiyun init_completion(&commit->flip_done);
2008*4882a593Smuzhiyun init_completion(&commit->hw_done);
2009*4882a593Smuzhiyun init_completion(&commit->cleanup_done);
2010*4882a593Smuzhiyun INIT_LIST_HEAD(&commit->commit_entry);
2011*4882a593Smuzhiyun kref_init(&commit->ref);
2012*4882a593Smuzhiyun commit->crtc = crtc;
2013*4882a593Smuzhiyun }
2014*4882a593Smuzhiyun
2015*4882a593Smuzhiyun static struct drm_crtc_commit *
crtc_or_fake_commit(struct drm_atomic_state * state,struct drm_crtc * crtc)2016*4882a593Smuzhiyun crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
2017*4882a593Smuzhiyun {
2018*4882a593Smuzhiyun if (crtc) {
2019*4882a593Smuzhiyun struct drm_crtc_state *new_crtc_state;
2020*4882a593Smuzhiyun
2021*4882a593Smuzhiyun new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
2022*4882a593Smuzhiyun
2023*4882a593Smuzhiyun return new_crtc_state->commit;
2024*4882a593Smuzhiyun }
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun if (!state->fake_commit) {
2027*4882a593Smuzhiyun state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
2028*4882a593Smuzhiyun if (!state->fake_commit)
2029*4882a593Smuzhiyun return NULL;
2030*4882a593Smuzhiyun
2031*4882a593Smuzhiyun init_commit(state->fake_commit, NULL);
2032*4882a593Smuzhiyun }
2033*4882a593Smuzhiyun
2034*4882a593Smuzhiyun return state->fake_commit;
2035*4882a593Smuzhiyun }
2036*4882a593Smuzhiyun
2037*4882a593Smuzhiyun /**
2038*4882a593Smuzhiyun * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
2039*4882a593Smuzhiyun * @state: new modeset state to be committed
2040*4882a593Smuzhiyun * @nonblock: whether nonblocking behavior is requested.
2041*4882a593Smuzhiyun *
2042*4882a593Smuzhiyun * This function prepares @state to be used by the atomic helper's support for
2043*4882a593Smuzhiyun * nonblocking commits. Drivers using the nonblocking commit infrastructure
2044*4882a593Smuzhiyun * should always call this function from their
2045*4882a593Smuzhiyun * &drm_mode_config_funcs.atomic_commit hook.
2046*4882a593Smuzhiyun *
2047*4882a593Smuzhiyun * To be able to use this support drivers need to use a few more helper
2048*4882a593Smuzhiyun * functions. drm_atomic_helper_wait_for_dependencies() must be called before
2049*4882a593Smuzhiyun * actually committing the hardware state, and for nonblocking commits this call
2050*4882a593Smuzhiyun * must be placed in the async worker. See also drm_atomic_helper_swap_state()
2051*4882a593Smuzhiyun * and its stall parameter, for when a driver's commit hooks look at the
2052*4882a593Smuzhiyun * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
2053*4882a593Smuzhiyun *
2054*4882a593Smuzhiyun * Completion of the hardware commit step must be signalled using
2055*4882a593Smuzhiyun * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
2056*4882a593Smuzhiyun * to read or change any permanent software or hardware modeset state. The only
2057*4882a593Smuzhiyun * exception is state protected by other means than &drm_modeset_lock locks.
2058*4882a593Smuzhiyun * Only the free standing @state with pointers to the old state structures can
2059*4882a593Smuzhiyun * be inspected, e.g. to clean up old buffers using
2060*4882a593Smuzhiyun * drm_atomic_helper_cleanup_planes().
2061*4882a593Smuzhiyun *
2062*4882a593Smuzhiyun * At the very end, before cleaning up @state drivers must call
2063*4882a593Smuzhiyun * drm_atomic_helper_commit_cleanup_done().
2064*4882a593Smuzhiyun *
2065*4882a593Smuzhiyun * This is all implemented by in drm_atomic_helper_commit(), giving drivers a
2066*4882a593Smuzhiyun * complete and easy-to-use default implementation of the atomic_commit() hook.
2067*4882a593Smuzhiyun *
2068*4882a593Smuzhiyun * The tracking of asynchronously executed and still pending commits is done
2069*4882a593Smuzhiyun * using the core structure &drm_crtc_commit.
2070*4882a593Smuzhiyun *
2071*4882a593Smuzhiyun * By default there's no need to clean up resources allocated by this function
2072*4882a593Smuzhiyun * explicitly: drm_atomic_state_default_clear() will take care of that
2073*4882a593Smuzhiyun * automatically.
2074*4882a593Smuzhiyun *
2075*4882a593Smuzhiyun * Returns:
2076*4882a593Smuzhiyun *
2077*4882a593Smuzhiyun * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
2078*4882a593Smuzhiyun * -ENOMEM on allocation failures and -EINTR when a signal is pending.
2079*4882a593Smuzhiyun */
int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_crtc_commit *commit;
	int i, ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		commit = kzalloc(sizeof(*commit), GFP_KERNEL);
		if (!commit)
			return -ENOMEM;

		init_commit(commit, crtc);

		new_crtc_state->commit = commit;

		ret = stall_checks(crtc, nonblock);
		if (ret)
			return ret;

		/* Drivers only send out events when at least either current or
		 * new CRTC state is active. Complete right away if everything
		 * stays off. */
		if (!old_crtc_state->active && !new_crtc_state->active) {
			complete_all(&commit->flip_done);
			continue;
		}

		/* Legacy cursor updates are fully unsynced. */
		if (state->legacy_cursor_update) {
			complete_all(&commit->flip_done);
			continue;
		}

		if (!new_crtc_state->event) {
			commit->event = kzalloc(sizeof(*commit->event),
						GFP_KERNEL);
			if (!commit->event)
				return -ENOMEM;

			new_crtc_state->event = commit->event;
		}

		new_crtc_state->event->base.completion = &commit->flip_done;
		new_crtc_state->event->base.completion_release = release_crtc_commit;
		drm_crtc_commit_get(commit);

		commit->abort_completion = true;

		state->crtcs[i].commit = commit;
		drm_crtc_commit_get(commit);
	}

	for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
		/* Userspace is not allowed to get ahead of the previous
		 * commit with nonblocking ones. */
		if (nonblock && old_conn_state->commit &&
		    !try_wait_for_completion(&old_conn_state->commit->flip_done))
			return -EBUSY;

		/* Always track connectors explicitly for e.g. link retraining. */
		commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
		if (!commit)
			return -ENOMEM;

		new_conn_state->commit = drm_crtc_commit_get(commit);
	}

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		/* Userspace is not allowed to get ahead of the previous
		 * commit with nonblocking ones. */
		if (nonblock && old_plane_state->commit &&
		    !try_wait_for_completion(&old_plane_state->commit->flip_done))
			return -EBUSY;

		/* Always track planes explicitly for async pageflip support. */
		commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
		if (!commit)
			return -ENOMEM;

		new_plane_state->commit = drm_crtc_commit_get(commit);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
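
/*
 * Example (illustrative sketch only, not part of this file's API): a
 * driver's nonblocking commit work item would typically use the helpers
 * above in this order, mirroring drm_atomic_helper_commit_tail(). The
 * example_* name is hypothetical.
 *
 *	static void example_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_wait_for_dependencies(old_state);
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *		drm_atomic_helper_commit_planes(dev, old_state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *
 *		drm_atomic_helper_fake_vblank(old_state);
 *		drm_atomic_helper_commit_hw_done(old_state);
 *		drm_atomic_helper_wait_for_vblanks(dev, old_state);
 *
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *		drm_atomic_helper_commit_cleanup_done(old_state);
 *	}
 */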

/**
 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
 * @old_state: atomic state object with old state structures
 *
 * This function waits for all preceding commits that touch the same CRTC as
 * @old_state to both be committed to the hardware (as signalled by
 * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	struct drm_connector *conn;
	struct drm_connector_state *old_conn_state;
	struct drm_crtc_commit *commit;
	int i;
	long ret;

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		commit = old_crtc_state->commit;

		if (!commit)
			continue;

		ret = wait_for_completion_timeout(&commit->hw_done,
						  10*HZ);
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
				  crtc->base.id, crtc->name);

		/* Currently no support for overwriting flips, hence
		 * stall for previous one to execute completely. */
		ret = wait_for_completion_timeout(&commit->flip_done,
						  10*HZ);
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
				  crtc->base.id, crtc->name);
	}

	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
		commit = old_conn_state->commit;

		if (!commit)
			continue;

		ret = wait_for_completion_timeout(&commit->hw_done,
						  10*HZ);
		if (ret == 0)
			DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
				  conn->base.id, conn->name);

		/* Currently no support for overwriting flips, hence
		 * stall for previous one to execute completely. */
		ret = wait_for_completion_timeout(&commit->flip_done,
						  10*HZ);
		if (ret == 0)
			DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
				  conn->base.id, conn->name);
	}

	for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
		commit = old_plane_state->commit;

		if (!commit)
			continue;

		ret = wait_for_completion_timeout(&commit->hw_done,
						  10*HZ);
		if (ret == 0)
			DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
				  plane->base.id, plane->name);

		/* Currently no support for overwriting flips, hence
		 * stall for previous one to execute completely. */
		ret = wait_for_completion_timeout(&commit->flip_done,
						  10*HZ);
		if (ret == 0)
			DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
				  plane->base.id, plane->name);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);

/**
 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
 * @old_state: atomic state object with old state structures
 *
 * This function walks all CRTCs and fakes VBLANK events on those with
 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
 * The primary use of this function is writeback connectors working in oneshot
 * mode. They only fake the VBLANK event when a job is queued, so any change to
 * the pipeline that does not touch the connector would otherwise lead to
 * timeouts when calling
 * drm_atomic_helper_wait_for_vblanks() or
 * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
 * connectors, this function can also fake VBLANK events for CRTCs without
 * a VBLANK interrupt.
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		unsigned long flags;

		if (!new_crtc_state->no_vblank)
			continue;

		spin_lock_irqsave(&old_state->dev->event_lock, flags);
		if (new_crtc_state->event) {
			drm_crtc_send_vblank_event(crtc,
						   new_crtc_state->event);
			new_crtc_state->event = NULL;
		}
		spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
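
/*
 * Example (illustrative sketch only): a driver for hardware without a
 * VBLANK interrupt can opt into the faked events by setting no_vblank
 * from its CRTC atomic_check; the example_* name is hypothetical.
 *
 *	static int example_crtc_atomic_check(struct drm_crtc *crtc,
 *					     struct drm_crtc_state *state)
 *	{
 *		state->no_vblank = true;
 *
 *		return 0;
 *	}
 */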

/**
 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit
 * @old_state: atomic state object with old state structures
 *
 * This function is used to signal completion of the hardware commit step. After
 * this step the driver is not allowed to read or change any permanent software
 * or hardware modeset state. The only exception is state protected by other
 * means than &drm_modeset_lock locks.
 *
 * Drivers should try to postpone any expensive or delayed cleanup work until
 * after this function is called.
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc_commit *commit;
	int i;

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		commit = new_crtc_state->commit;
		if (!commit)
			continue;

		/*
		 * copy new_crtc_state->commit to old_crtc_state->commit,
		 * it's unsafe to touch new_crtc_state after hw_done,
		 * but we still need to do so in cleanup_done().
		 */
		if (old_crtc_state->commit)
			drm_crtc_commit_put(old_crtc_state->commit);

		old_crtc_state->commit = drm_crtc_commit_get(commit);

		/* backend must have consumed any event by now */
		WARN_ON(new_crtc_state->event);
		complete_all(&commit->hw_done);
	}

	if (old_state->fake_commit) {
		complete_all(&old_state->fake_commit->hw_done);
		complete_all(&old_state->fake_commit->flip_done);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);

/**
 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
 * @old_state: atomic state object with old state structures
 *
 * This signals completion of the atomic update @old_state, including any
 * cleanup work. If used, it must be called right before calling
 * drm_atomic_state_put().
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_commit *commit;
	int i;

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		commit = old_crtc_state->commit;
		if (WARN_ON(!commit))
			continue;

		complete_all(&commit->cleanup_done);
		WARN_ON(!try_wait_for_completion(&commit->hw_done));

		spin_lock(&crtc->commit_lock);
		list_del(&commit->commit_entry);
		spin_unlock(&crtc->commit_lock);
	}

	if (old_state->fake_commit) {
		complete_all(&old_state->fake_commit->cleanup_done);
		WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);

/**
 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
 * @dev: DRM device
 * @state: atomic state object with new state structures
 *
 * This function prepares plane state, specifically framebuffers, for the new
 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
 * any already successfully prepared framebuffer.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
				     struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int ret, i, j;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		if (!new_conn_state->writeback_job)
			continue;

		ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
		if (ret < 0)
			return ret;
	}

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;

		funcs = plane->helper_private;

		if (funcs->prepare_fb) {
			ret = funcs->prepare_fb(plane, new_plane_state);
			if (ret)
				goto fail;
		}
	}

	return 0;

fail:
	for_each_new_plane_in_state(state, plane, new_plane_state, j) {
		const struct drm_plane_helper_funcs *funcs;

		if (j >= i)
			continue;

		funcs = plane->helper_private;

		if (funcs->cleanup_fb)
			funcs->cleanup_fb(plane, new_plane_state);
	}

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
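
/*
 * Example (illustrative sketch only): prepare_fb/cleanup_fb are meant to
 * be paired, e.g. to pin and unpin framebuffer memory around a commit.
 * The example_* helpers are hypothetical.
 *
 *	static int example_plane_prepare_fb(struct drm_plane *plane,
 *					    struct drm_plane_state *new_state)
 *	{
 *		if (!new_state->fb)
 *			return 0;
 *
 *		return example_pin_fb(new_state->fb);
 *	}
 *
 *	static void example_plane_cleanup_fb(struct drm_plane *plane,
 *					     struct drm_plane_state *old_state)
 *	{
 *		if (old_state->fb)
 *			example_unpin_fb(old_state->fb);
 *	}
 */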

static bool plane_crtc_active(const struct drm_plane_state *state)
{
	return state->crtc && state->crtc->state->active;
}

/**
 * drm_atomic_helper_commit_planes - commit plane state
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 * @flags: flags for committing plane state
 *
 * This function commits the new plane state using the plane and atomic helper
 * functions for planes and CRTCs. It assumes that the atomic state has already
 * been pushed into the relevant object state pointers, since this step can no
 * longer fail.
 *
 * It still requires the global state object @old_state to know which planes and
 * CRTCs need to be updated though.
 *
 * Note that this function does all plane updates across all CRTCs in one step.
 * If the hardware can't support this approach look at
 * drm_atomic_helper_commit_planes_on_crtc() instead.
 *
 * Plane parameters can be updated by applications while the associated CRTC is
 * disabled. The DRM/KMS core will store the parameters in the plane state,
 * which will be available to the driver when the CRTC is turned on. As a result
 * most drivers don't need to be immediately notified of plane updates for a
 * disabled CRTC.
 *
 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
 * @flags in order not to receive plane update notifications related to a
 * disabled CRTC. This avoids the need to manually ignore plane updates in
 * driver code when the driver and/or hardware can't or just don't need to deal
 * with updates on disabled CRTCs, for example when supporting runtime PM.
 *
 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
 * display controllers require disabling a CRTC's planes when the CRTC is
 * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
 * call for a plane if the CRTC of the old plane state needs a modesetting
 * operation. Of course, the drivers need to disable the planes in their CRTC
 * disable callbacks since no one else would do that.
 *
 * The drm_atomic_helper_commit() default implementation doesn't set the
 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
 * This should not be copied blindly by drivers.
 */
void drm_atomic_helper_commit_planes(struct drm_device *dev,
				     struct drm_atomic_state *old_state,
				     uint32_t flags)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;
	bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
	bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_begin)
			continue;

		if (active_only && !new_crtc_state->active)
			continue;

		funcs->atomic_begin(crtc, old_crtc_state);
	}

	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;
		bool disabling;

		funcs = plane->helper_private;

		if (!funcs)
			continue;

		disabling = drm_atomic_plane_disabling(old_plane_state,
						       new_plane_state);

		if (active_only) {
			/*
			 * Skip planes related to inactive CRTCs. If the plane
			 * is enabled use the state of the current CRTC. If the
			 * plane is being disabled use the state of the old
			 * CRTC to avoid skipping planes being disabled on an
			 * active CRTC.
			 */
			if (!disabling && !plane_crtc_active(new_plane_state))
				continue;
			if (disabling && !plane_crtc_active(old_plane_state))
				continue;
		}

		/*
		 * Special-case disabling the plane if drivers support it.
		 */
		if (disabling && funcs->atomic_disable) {
			struct drm_crtc_state *crtc_state;

			crtc_state = old_plane_state->crtc->state;

			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
			    no_disable)
				continue;

			funcs->atomic_disable(plane, old_plane_state);
		} else if (new_plane_state->crtc || disabling) {
			funcs->atomic_update(plane, old_plane_state);
		}
	}

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_flush)
			continue;

		if (active_only && !new_crtc_state->active)
			continue;

		funcs->atomic_flush(crtc, old_crtc_state);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
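
/*
 * Example (illustrative sketch only): a runtime-PM aware commit tail
 * passes DRM_PLANE_COMMIT_ACTIVE_ONLY and commits planes after the
 * enables, as drm_atomic_helper_commit_tail_rpm() does:
 *
 *	drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *	drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *	drm_atomic_helper_commit_planes(dev, old_state,
 *					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 */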

/**
 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
 * @old_crtc_state: atomic state object with the old CRTC state
 *
 * This function commits the new plane state using the plane and atomic helper
 * functions for planes on the specific CRTC. It assumes that the atomic state
 * has already been pushed into the relevant object state pointers, since this
 * step can no longer fail.
 *
 * This function is useful when plane updates should be done CRTC-by-CRTC
 * instead of one global step like drm_atomic_helper_commit_planes() does.
 *
 * This function can only be safely used when planes are not allowed to move
 * between different CRTCs because this function doesn't handle inter-CRTC
 * dependencies. Callers need to ensure that either no such dependencies exist,
 * resolve them through ordering of commit calls or through some other means.
 */
void
drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
{
	const struct drm_crtc_helper_funcs *crtc_funcs;
	struct drm_crtc *crtc = old_crtc_state->crtc;
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_crtc_state *new_crtc_state =
		drm_atomic_get_new_crtc_state(old_state, crtc);
	struct drm_plane *plane;
	unsigned plane_mask;

	plane_mask = old_crtc_state->plane_mask;
	plane_mask |= new_crtc_state->plane_mask;

	crtc_funcs = crtc->helper_private;
	if (crtc_funcs && crtc_funcs->atomic_begin)
		crtc_funcs->atomic_begin(crtc, old_crtc_state);

	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
		struct drm_plane_state *old_plane_state =
			drm_atomic_get_old_plane_state(old_state, plane);
		struct drm_plane_state *new_plane_state =
			drm_atomic_get_new_plane_state(old_state, plane);
		const struct drm_plane_helper_funcs *plane_funcs;

		plane_funcs = plane->helper_private;

		if (!old_plane_state || !plane_funcs)
			continue;

		WARN_ON(new_plane_state->crtc &&
			new_plane_state->crtc != crtc);

		if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
		    plane_funcs->atomic_disable)
			plane_funcs->atomic_disable(plane, old_plane_state);
		else if (new_plane_state->crtc ||
			 drm_atomic_plane_disabling(old_plane_state, new_plane_state))
			plane_funcs->atomic_update(plane, old_plane_state);
	}

	if (crtc_funcs && crtc_funcs->atomic_flush)
		crtc_funcs->atomic_flush(crtc, old_crtc_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
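
/*
 * Example (illustrative sketch only): a per-CRTC commit tail would loop
 * over the CRTCs in the commit instead of using the global helper:
 *
 *	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
 *		drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 */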

/**
 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
 * @old_crtc_state: atomic state object with the old CRTC state
 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
 *
 * Disables all planes associated with the given CRTC. This can be
 * used for instance in the CRTC helper atomic_disable callback to disable
 * all planes.
 *
 * If the atomic-parameter is set the function calls the CRTC's
 * atomic_begin hook before and atomic_flush hook after disabling the
 * planes.
 *
 * It is a bug to call this function without having implemented the
 * &drm_plane_helper_funcs.atomic_disable plane hook.
 */
void
drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
					 bool atomic)
{
	struct drm_crtc *crtc = old_crtc_state->crtc;
	const struct drm_crtc_helper_funcs *crtc_funcs =
		crtc->helper_private;
	struct drm_plane *plane;

	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
		crtc_funcs->atomic_begin(crtc, NULL);

	drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
		const struct drm_plane_helper_funcs *plane_funcs =
			plane->helper_private;

		if (!plane_funcs)
			continue;

		WARN_ON(!plane_funcs->atomic_disable);
		if (plane_funcs->atomic_disable)
			plane_funcs->atomic_disable(plane, NULL);
	}

	if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
		crtc_funcs->atomic_flush(crtc, NULL);
}
EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
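
/*
 * Example (illustrative sketch only): usage from a CRTC atomic_disable
 * callback, with @atomic set so the plane disables are bracketed by the
 * CRTC's atomic_begin/atomic_flush hooks; the example_* names are
 * hypothetical.
 *
 *	static void example_crtc_atomic_disable(struct drm_crtc *crtc,
 *						struct drm_crtc_state *old_state)
 *	{
 *		drm_atomic_helper_disable_planes_on_crtc(old_state, true);
 *		example_hw_power_off(crtc);
 *	}
 */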

/**
 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function cleans up plane state, specifically framebuffers, from the old
 * configuration. Hence the old configuration must be preserved in @old_state to
 * be able to call this function.
 *
 * This function must also be called on the new state when the atomic update
 * fails at any point after calling drm_atomic_helper_prepare_planes().
 */
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
				      struct drm_atomic_state *old_state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;
		struct drm_plane_state *plane_state;

		/*
		 * This might be called before swapping when commit is aborted,
		 * in which case we have to cleanup the new state.
		 */
		if (old_plane_state == plane->state)
			plane_state = new_plane_state;
		else
			plane_state = old_plane_state;

		funcs = plane->helper_private;

		if (funcs->cleanup_fb)
			funcs->cleanup_fb(plane, plane_state);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);

/**
 * drm_atomic_helper_swap_state - store atomic state into current sw state
 * @state: atomic state
 * @stall: stall for preceding commits
 *
 * This function stores the atomic state into the current state pointers in all
 * driver objects. It should be called after all failing steps have been done
 * and succeeded, but before the actual hardware state is committed.
 *
 * For cleanup and error recovery the current state for all changed objects will
 * be swapped into @state.
 *
 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
 *
 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
 *
 * 2. Do any other steps that might fail.
 *
 * 3. Put the staged state into the current state pointers with this function.
 *
 * 4. Actually commit the hardware state.
 *
 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
 * contains the old state. Also do any other cleanup required with that state.
 *
 * @stall must be set when nonblocking commits for this driver directly access
 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
 * the current atomic helpers this is almost always the case, since the helpers
 * don't pass the right state structures to the callbacks.
 *
 * Returns:
 *
 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
 * waiting for the previous commits has been interrupted.
 */
int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
				 bool stall)
{
	int i, ret;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_crtc_commit *commit;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state, *new_obj_state;

	if (stall) {
		/*
		 * We have to stall for hw_done here before
		 * drm_atomic_helper_wait_for_dependencies() because flip
		 * depth > 1 is not yet supported by all drivers. As long as
		 * obj->state is directly dereferenced anywhere in the drivers
		 * atomic_commit_tail function, then it's unsafe to swap state
		 * before drm_atomic_helper_commit_hw_done() is called.
		 */

		for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
			commit = old_crtc_state->commit;

			if (!commit)
				continue;

			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				return ret;
		}

		for_each_old_connector_in_state(state, connector, old_conn_state, i) {
			commit = old_conn_state->commit;

			if (!commit)
				continue;

			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				return ret;
		}

		for_each_old_plane_in_state(state, plane, old_plane_state, i) {
			commit = old_plane_state->commit;

			if (!commit)
				continue;

			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				return ret;
		}
	}

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
		WARN_ON(connector->state != old_conn_state);

		old_conn_state->state = state;
		new_conn_state->state = NULL;

		state->connectors[i].state = old_conn_state;
		connector->state = new_conn_state;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		WARN_ON(crtc->state != old_crtc_state);

		old_crtc_state->state = state;
		new_crtc_state->state = NULL;

		state->crtcs[i].state = old_crtc_state;
		crtc->state = new_crtc_state;

		if (new_crtc_state->commit) {
			spin_lock(&crtc->commit_lock);
			list_add(&new_crtc_state->commit->commit_entry,
				 &crtc->commit_list);
			spin_unlock(&crtc->commit_lock);

			new_crtc_state->commit->event = NULL;
		}
	}

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		WARN_ON(plane->state != old_plane_state);

		old_plane_state->state = state;
		new_plane_state->state = NULL;

		state->planes[i].state = old_plane_state;
		plane->state = new_plane_state;
	}

	for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
		WARN_ON(obj->state != old_obj_state);

		old_obj_state->state = state;
		new_obj_state->state = NULL;

		state->private_objs[i].state = old_obj_state;
		obj->state = new_obj_state;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_swap_state);
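
/*
 * Example (illustrative sketch only): the five-step sequence from the
 * kerneldoc above, written out. Error handling for the "other steps" is
 * elided.
 *
 *	ret = drm_atomic_helper_prepare_planes(dev, state);
 *	if (ret)
 *		return ret;
 *
 *	(do any other steps that might fail)
 *
 *	ret = drm_atomic_helper_swap_state(state, true);
 *	if (ret) {
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *		return ret;
 *	}
 *
 *	(commit the hardware state)
 *
 *	drm_atomic_helper_cleanup_planes(dev, state);
 */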

/**
 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
 * @plane: plane object to update
 * @crtc: owning CRTC of owning plane
 * @fb: framebuffer to flip onto plane
 * @crtc_x: x offset of primary plane on @crtc
 * @crtc_y: y offset of primary plane on @crtc
 * @crtc_w: width of primary plane rectangle on @crtc
 * @crtc_h: height of primary plane rectangle on @crtc
 * @src_x: x offset of @fb for panning
 * @src_y: y offset of @fb for panning
 * @src_w: width of source rectangle in @fb
 * @src_h: height of source rectangle in @fb
 * @ctx: lock acquire context
 *
 * Provides a default plane update handler using the atomic driver interface.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_helper_update_plane(struct drm_plane *plane,
				   struct drm_crtc *crtc,
				   struct drm_framebuffer *fb,
				   int crtc_x, int crtc_y,
				   unsigned int crtc_w, unsigned int crtc_h,
				   uint32_t src_x, uint32_t src_y,
				   uint32_t src_w, uint32_t src_h,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		goto fail;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = crtc_x;
	plane_state->crtc_y = crtc_y;
	plane_state->crtc_w = crtc_w;
	plane_state->crtc_h = crtc_h;
	plane_state->src_x = src_x;
	plane_state->src_y = src_y;
	plane_state->src_w = src_w;
	plane_state->src_h = src_h;

	if (plane == crtc->cursor)
		state->legacy_cursor_update = true;

	ret = drm_atomic_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_update_plane);

/**
 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
 * @plane: plane to disable
 * @ctx: lock acquire context
 *
 * Provides a default plane disable handler using the atomic driver interface.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_helper_disable_plane(struct drm_plane *plane,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	if (plane_state->crtc && plane_state->crtc->cursor == plane)
		plane_state->state->legacy_cursor_update = true;

	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
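
/*
 * Example (illustrative sketch only): atomic drivers typically plug the
 * two helpers above straight into their &drm_plane_funcs:
 *
 *	static const struct drm_plane_funcs example_plane_funcs = {
 *		.update_plane		= drm_atomic_helper_update_plane,
 *		.disable_plane		= drm_atomic_helper_disable_plane,
 *		.destroy		= drm_plane_cleanup,
 *		.reset			= drm_atomic_helper_plane_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
 *	};
 */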

/**
 * drm_atomic_helper_set_config - set a new config from userspace
 * @set: mode set configuration
 * @ctx: lock acquisition context
 *
 * Provides a default CRTC set_config handler using the atomic driver interface.
 *
 * NOTE: For backwards compatibility with old userspace this automatically
 * resets the "link-status" property to GOOD, to force any link
 * re-training. The SETCRTC ioctl does not define whether an update does
 * need a full modeset or just a plane update, hence we're allowed to do
 * that. See also drm_connector_set_link_status_property().
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_set_config(struct drm_mode_set *set,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_crtc *crtc = set->crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	ret = __drm_atomic_helper_set_config(set, state);
	if (ret != 0)
		goto fail;

	ret = handle_conflicting_encoders(state, true);
	if (ret)
		goto fail;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_set_config);
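
/*
 * Example (illustrative sketch only): the matching &drm_crtc_funcs
 * wiring for an atomic driver:
 *
 *	static const struct drm_crtc_funcs example_crtc_funcs = {
 *		.set_config		= drm_atomic_helper_set_config,
 *		.page_flip		= drm_atomic_helper_page_flip,
 *		.destroy		= drm_crtc_cleanup,
 *		.reset			= drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
 *	};
 */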

/**
 * drm_atomic_helper_disable_all - disable all currently active outputs
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Loops through all connectors, finding those that aren't turned off and then
 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
 * that they are connected to.
 *
 * This is used for example in suspend/resume to disable all currently active
 * functions when suspending. If you just want to shut down everything at e.g.
 * driver unload, look at drm_atomic_helper_shutdown().
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
 * drm_atomic_helper_shutdown().
 */
int drm_atomic_helper_disable_all(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto free;
		}

		crtc_state->active = false;

		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret < 0)
			goto free;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
		if (ret < 0)
			goto free;
	}

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret < 0)
			goto free;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	ret = drm_atomic_commit(state);
free:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
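
/*
 * Example (illustrative sketch only): drm_atomic_helper_suspend() pairs
 * this helper with drm_atomic_helper_duplicate_state() roughly like so:
 *
 *	state = drm_atomic_helper_duplicate_state(dev, &ctx);
 *	if (IS_ERR(state))
 *		return state;
 *
 *	ret = drm_atomic_helper_disable_all(dev, &ctx);
 *	if (ret) {
 *		drm_atomic_state_put(state);
 *		state = ERR_PTR(ret);
 *	}
 */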

/**
 * drm_atomic_helper_shutdown - shutdown all CRTCs
 * @dev: DRM device
 *
 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
 * suspend should instead be handled with drm_atomic_helper_suspend(), since
 * that also takes a snapshot of the modeset state to be restored on resume.
 *
 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
 * and it is the atomic version of drm_crtc_force_disable_all().
 */
void drm_atomic_helper_shutdown(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	ret = drm_atomic_helper_disable_all(dev, &ctx);
	if (ret)
		DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
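
/*
 * Example (illustrative sketch only): typical use from a driver's remove
 * callback, after the device is no longer visible to userspace; the
 * example_* name is hypothetical.
 *
 *	static int example_platform_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *
 *		return 0;
 *	}
 */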
3139*4882a593Smuzhiyun
/**
 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Makes a copy of the current atomic state by looping over all objects and
 * duplicating their respective states. This is used for example by suspend/
 * resume support code to save the state prior to suspend such that it can
 * be restored upon resume.
 *
 * Note that this treats atomic state as persistent between save and restore.
 * Drivers must make sure that this is possible and won't result in confusion
 * or erroneous behaviour.
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * A pointer to the copy of the atomic state object on success or an
 * ERR_PTR()-encoded error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
 */
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->acquire_ctx = ctx;
	state->duplicated = true;

	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto free;
		}
	}

	drm_for_each_plane(plane, dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			err = PTR_ERR(plane_state);
			goto free;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			drm_connector_list_iter_end(&conn_iter);
			goto free;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* clear the acquire context so that it isn't accidentally reused */
	state->acquire_ctx = NULL;

free:
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
	}

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);

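/*
 * Usage sketch (illustrative only): because drm_atomic_helper_duplicate_state()
 * can return -EDEADLK when the caller doesn't yet hold all modeset locks,
 * callers typically take the locks with a retry loop first, much like
 * drm_atomic_helper_suspend() below does:
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	int err;
 *
 *	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
 *	state = drm_atomic_helper_duplicate_state(dev, &ctx);
 *	if (IS_ERR(state))
 *		err = PTR_ERR(state);
 *	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
 */
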
/**
 * drm_atomic_helper_suspend - subsystem-level suspend helper
 * @dev: DRM device
 *
 * Duplicates the current atomic state, disables all active outputs and then
 * returns a pointer to the original atomic state to the caller. Drivers can
 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
 * restore the output configuration that was active at the time the system
 * entered suspend.
 *
 * Note that it is potentially unsafe to use this. The atomic state object
 * returned by this function is assumed to be persistent. Drivers must ensure
 * that this holds true. Before calling this function, drivers must make sure
 * to suspend fbdev emulation so that nothing can be using the device.
 *
 * Returns:
 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
 * encoded error code on failure. Drivers should store the returned atomic
 * state object and pass it to the drm_atomic_helper_resume() helper upon
 * resume.
 *
 * See also:
 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
 */
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int err;

	/* This can never be returned, but it makes the compiler happy */
	state = ERR_PTR(-EINVAL);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (IS_ERR(state))
		goto unlock;

	err = drm_atomic_helper_disable_all(dev, &ctx);
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
		goto unlock;
	}

unlock:
	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	if (err)
		return ERR_PTR(err);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);

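/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical): a
 * driver's system-suspend PM callback stores the returned state so the
 * matching resume callback can hand it to drm_atomic_helper_resume():
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		foo->saved_state = drm_atomic_helper_suspend(foo->drm);
 *		return PTR_ERR_OR_ZERO(foo->saved_state);
 *	}
 */
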
/**
 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
 * @state: duplicated atomic state to commit
 * @ctx: pointer to acquire_ctx to use for commit.
 *
 * The state returned by drm_atomic_helper_duplicate_state() and
 * drm_atomic_helper_suspend() is partially invalid, and needs to
 * be fixed up before commit.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
					      struct drm_modeset_acquire_ctx *ctx)
{
	int i, ret;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;

	state->acquire_ctx = ctx;

	for_each_new_plane_in_state(state, plane, new_plane_state, i)
		state->planes[i].old_state = plane->state;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		state->crtcs[i].old_state = crtc->state;

	for_each_new_connector_in_state(state, connector, new_conn_state, i)
		state->connectors[i].old_state = connector->state;

	ret = drm_atomic_commit(state);

	state->acquire_ctx = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);

/**
 * drm_atomic_helper_resume - subsystem-level resume helper
 * @dev: DRM device
 * @state: atomic state to resume to
 *
 * Calls drm_mode_config_reset() to synchronize hardware and software states,
 * grabs all modeset locks and commits the atomic state object. This can be
 * used in conjunction with the drm_atomic_helper_suspend() helper to
 * implement suspend/resume for drivers that support atomic mode-setting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_resume(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_mode_config_reset(dev);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	drm_atomic_state_put(state);

	return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);

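/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical),
 * completing the suspend example above:
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		return drm_atomic_helper_resume(foo->drm, foo->saved_state);
 *	}
 *
 * Note that drm_atomic_helper_resume() consumes the state, so the driver must
 * not put the saved reference again afterwards.
 */
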
static int page_flip_common(struct drm_atomic_state *state,
			    struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    struct drm_pending_vblank_event *event,
			    uint32_t flags)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->event = event;
	crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Make sure we don't accidentally do a full modeset. */
	state->allow_modeset = false;
	if (!crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return ret;
}

/**
 * drm_atomic_helper_page_flip - execute a legacy page flip
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip implementation
 * using the atomic driver interface.
 *
 * Returns:
 * 0 on success, negative errno numbers on failure.
 *
 * See also:
 * drm_atomic_helper_page_flip_target()
 */
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);

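/*
 * Usage sketch (illustrative only; foo_crtc_funcs is hypothetical): atomic
 * drivers plug this helper straight into their &drm_crtc_funcs:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config = drm_atomic_helper_set_config,
 *		.page_flip = drm_atomic_helper_page_flip,
 *		.reset = drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *	};
 */
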
/**
 * drm_atomic_helper_page_flip_target - do a page flip on a target vblank period.
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @target: target vblank period when the flip should take effect
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
 * Similar to drm_atomic_helper_page_flip(), with an extra parameter to specify
 * the target vblank period to flip.
 *
 * Returns:
 * 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
				       struct drm_framebuffer *fb,
				       struct drm_pending_vblank_event *event,
				       uint32_t flags,
				       uint32_t target,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state)) {
		ret = -EINVAL;
		goto fail;
	}
	crtc_state->target_vblank = target;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);

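/*
 * Usage sketch (illustrative only; foo_crtc_funcs is hypothetical): drivers
 * whose commit code honours &drm_crtc_state.target_vblank can wire this up
 * alongside the plain page-flip helper:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		...
 *		.page_flip = drm_atomic_helper_page_flip,
 *		.page_flip_target = drm_atomic_helper_page_flip_target,
 *	};
 */
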
/**
 * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
 * @crtc: CRTC object
 * @red: red correction table
 * @green: green correction table
 * @blue: blue correction table
 * @size: size of the tables
 * @ctx: lock acquire context
 *
 * Implements support for the legacy gamma correction table for drivers
 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
 * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
 * how the atomic color management and gamma tables work.
 */
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
				       u16 *red, u16 *green, u16 *blue,
				       uint32_t size,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	struct drm_property_blob *blob = NULL;
	struct drm_color_lut *blob_data;
	int i, ret = 0;
	bool replaced;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	blob = drm_property_create_blob(dev,
					sizeof(struct drm_color_lut) * size,
					NULL);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		blob = NULL;
		goto fail;
	}

	/* Prepare GAMMA_LUT with the legacy values. */
	blob_data = blob->data;
	for (i = 0; i < size; i++) {
		blob_data[i].red = red[i];
		blob_data[i].green = green[i];
		blob_data[i].blue = blue[i];
	}

	state->acquire_ctx = ctx;
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	/* Reset DEGAMMA_LUT and CTM properties. */
	replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
	crtc_state->color_mgmt_changed |= replaced;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	drm_property_blob_put(blob);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);

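/*
 * Usage sketch (illustrative only; foo_crtc_funcs is hypothetical): drivers
 * that expose GAMMA_LUT via drm_crtc_enable_color_mgmt() can route the legacy
 * gamma ioctl through this helper:
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		...
 *		.gamma_set = drm_atomic_helper_legacy_gamma_set,
 *	};
 */
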
/**
 * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
 *						  the input end of a bridge
 * @bridge: bridge control structure
 * @bridge_state: new bridge state
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 * @output_fmt: tested output bus format
 * @num_input_fmts: will contain the size of the returned array
 *
 * This helper is a pluggable implementation of the
 * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
 * modify the bus configuration between their input and their output. It
 * returns an array of input formats with a single element set to @output_fmt.
 *
 * RETURNS:
 * a valid format array of size @num_input_fmts, or NULL if the allocation
 * failed
 */
u32 *
drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
					   struct drm_bridge_state *bridge_state,
					   struct drm_crtc_state *crtc_state,
					   struct drm_connector_state *conn_state,
					   u32 output_fmt,
					   unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts) {
		*num_input_fmts = 0;
		return NULL;
	}

	*num_input_fmts = 1;
	input_fmts[0] = output_fmt;
	return input_fmts;
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
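
/*
 * Usage sketch (illustrative only; foo_bridge_funcs is hypothetical): a
 * pass-through bridge that keeps the bus format unchanged can plug this in as
 * its &drm_bridge_funcs.atomic_get_input_bus_fmts hook:
 *
 *	static const struct drm_bridge_funcs foo_bridge_funcs = {
 *		...
 *		.atomic_get_input_bus_fmts =
 *			drm_atomic_helper_bridge_propagate_bus_fmt,
 *	};
 */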