/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

29*4882a593Smuzhiyun #include <linux/sync_file.h>
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #include <drm/drm_atomic.h>
32*4882a593Smuzhiyun #include <drm/drm_atomic_uapi.h>
33*4882a593Smuzhiyun #include <drm/drm_bridge.h>
34*4882a593Smuzhiyun #include <drm/drm_debugfs.h>
35*4882a593Smuzhiyun #include <drm/drm_device.h>
36*4882a593Smuzhiyun #include <drm/drm_drv.h>
37*4882a593Smuzhiyun #include <drm/drm_file.h>
38*4882a593Smuzhiyun #include <drm/drm_fourcc.h>
39*4882a593Smuzhiyun #include <drm/drm_mode.h>
40*4882a593Smuzhiyun #include <drm/drm_print.h>
41*4882a593Smuzhiyun #include <drm/drm_writeback.h>
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun #include "drm_crtc_internal.h"
44*4882a593Smuzhiyun #include "drm_internal.h"
45*4882a593Smuzhiyun
__drm_crtc_commit_free(struct kref * kref)46*4882a593Smuzhiyun void __drm_crtc_commit_free(struct kref *kref)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun struct drm_crtc_commit *commit =
49*4882a593Smuzhiyun container_of(kref, struct drm_crtc_commit, ref);
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun kfree(commit);
52*4882a593Smuzhiyun }
53*4882a593Smuzhiyun EXPORT_SYMBOL(__drm_crtc_commit_free);
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /**
56*4882a593Smuzhiyun * drm_atomic_state_default_release -
57*4882a593Smuzhiyun * release memory initialized by drm_atomic_state_init
58*4882a593Smuzhiyun * @state: atomic state
59*4882a593Smuzhiyun *
60*4882a593Smuzhiyun * Free all the memory allocated by drm_atomic_state_init.
61*4882a593Smuzhiyun * This should only be used by drivers which are still subclassing
62*4882a593Smuzhiyun * &drm_atomic_state and haven't switched to &drm_private_state yet.
63*4882a593Smuzhiyun */
drm_atomic_state_default_release(struct drm_atomic_state * state)64*4882a593Smuzhiyun void drm_atomic_state_default_release(struct drm_atomic_state *state)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun kfree(state->connectors);
67*4882a593Smuzhiyun kfree(state->crtcs);
68*4882a593Smuzhiyun kfree(state->planes);
69*4882a593Smuzhiyun kfree(state->private_objs);
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_state_default_release);
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun /**
74*4882a593Smuzhiyun * drm_atomic_state_init - init new atomic state
75*4882a593Smuzhiyun * @dev: DRM device
76*4882a593Smuzhiyun * @state: atomic state
77*4882a593Smuzhiyun *
78*4882a593Smuzhiyun * Default implementation for filling in a new atomic state.
79*4882a593Smuzhiyun * This should only be used by drivers which are still subclassing
80*4882a593Smuzhiyun * &drm_atomic_state and haven't switched to &drm_private_state yet.
81*4882a593Smuzhiyun */
82*4882a593Smuzhiyun int
drm_atomic_state_init(struct drm_device * dev,struct drm_atomic_state * state)83*4882a593Smuzhiyun drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
84*4882a593Smuzhiyun {
85*4882a593Smuzhiyun kref_init(&state->ref);
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun /* TODO legacy paths should maybe do a better job about
88*4882a593Smuzhiyun * setting this appropriately?
89*4882a593Smuzhiyun */
90*4882a593Smuzhiyun state->allow_modeset = true;
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun state->crtcs = kcalloc(dev->mode_config.num_crtc,
93*4882a593Smuzhiyun sizeof(*state->crtcs), GFP_KERNEL);
94*4882a593Smuzhiyun if (!state->crtcs)
95*4882a593Smuzhiyun goto fail;
96*4882a593Smuzhiyun state->planes = kcalloc(dev->mode_config.num_total_plane,
97*4882a593Smuzhiyun sizeof(*state->planes), GFP_KERNEL);
98*4882a593Smuzhiyun if (!state->planes)
99*4882a593Smuzhiyun goto fail;
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun state->dev = dev;
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun return 0;
106*4882a593Smuzhiyun fail:
107*4882a593Smuzhiyun drm_atomic_state_default_release(state);
108*4882a593Smuzhiyun return -ENOMEM;
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_state_init);
111*4882a593Smuzhiyun
112*4882a593Smuzhiyun /**
113*4882a593Smuzhiyun * drm_atomic_state_alloc - allocate atomic state
114*4882a593Smuzhiyun * @dev: DRM device
115*4882a593Smuzhiyun *
116*4882a593Smuzhiyun * This allocates an empty atomic state to track updates.
117*4882a593Smuzhiyun */
118*4882a593Smuzhiyun struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device * dev)119*4882a593Smuzhiyun drm_atomic_state_alloc(struct drm_device *dev)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun struct drm_mode_config *config = &dev->mode_config;
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun if (!config->funcs->atomic_state_alloc) {
124*4882a593Smuzhiyun struct drm_atomic_state *state;
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun state = kzalloc(sizeof(*state), GFP_KERNEL);
127*4882a593Smuzhiyun if (!state)
128*4882a593Smuzhiyun return NULL;
129*4882a593Smuzhiyun if (drm_atomic_state_init(dev, state) < 0) {
130*4882a593Smuzhiyun kfree(state);
131*4882a593Smuzhiyun return NULL;
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun return state;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun return config->funcs->atomic_state_alloc(dev);
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_state_alloc);
139*4882a593Smuzhiyun
/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 *
 * Destroys every per-object state held by @state (connectors, CRTCs,
 * planes, private objects), drops the references @state took on those
 * objects, and releases any pending commit references, so the state can
 * be reused for a fresh atomic sequence.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	/* Connectors: destroy the duplicated state first, then drop the
	 * reference taken when the connector was added to @state. */
	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		state->connectors[i].old_state = NULL;
		state->connectors[i].new_state = NULL;
		drm_connector_put(connector);
	}

	/* CRTCs: also release the per-CRTC commit reference, if any. */
	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
		state->crtcs[i].old_state = NULL;
		state->crtcs[i].new_state = NULL;

		if (state->crtcs[i].commit) {
			drm_crtc_commit_put(state->crtcs[i].commit);
			state->crtcs[i].commit = NULL;
		}
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
		state->planes[i].old_state = NULL;
		state->planes[i].new_state = NULL;
	}

	/* Private objects are appended dynamically, so the count resets
	 * to zero (unlike the fixed-size connector/CRTC/plane arrays). */
	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
		state->private_objs[i].old_state = NULL;
		state->private_objs[i].new_state = NULL;
	}
	state->num_private_objs = 0;

	/* Drop the fake commit used for stalling on nonblocking commits. */
	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun /**
225*4882a593Smuzhiyun * drm_atomic_state_clear - clear state object
226*4882a593Smuzhiyun * @state: atomic state
227*4882a593Smuzhiyun *
228*4882a593Smuzhiyun * When the w/w mutex algorithm detects a deadlock we need to back off and drop
229*4882a593Smuzhiyun * all locks. So someone else could sneak in and change the current modeset
230*4882a593Smuzhiyun * configuration. Which means that all the state assembled in @state is no
231*4882a593Smuzhiyun * longer an atomic update to the current state, but to some arbitrary earlier
232*4882a593Smuzhiyun * state. Which could break assumptions the driver's
233*4882a593Smuzhiyun * &drm_mode_config_funcs.atomic_check likely relies on.
234*4882a593Smuzhiyun *
235*4882a593Smuzhiyun * Hence we must clear all cached state and completely start over, using this
236*4882a593Smuzhiyun * function.
237*4882a593Smuzhiyun */
drm_atomic_state_clear(struct drm_atomic_state * state)238*4882a593Smuzhiyun void drm_atomic_state_clear(struct drm_atomic_state *state)
239*4882a593Smuzhiyun {
240*4882a593Smuzhiyun struct drm_device *dev = state->dev;
241*4882a593Smuzhiyun struct drm_mode_config *config = &dev->mode_config;
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun if (config->funcs->atomic_state_clear)
244*4882a593Smuzhiyun config->funcs->atomic_state_clear(state);
245*4882a593Smuzhiyun else
246*4882a593Smuzhiyun drm_atomic_state_default_clear(state);
247*4882a593Smuzhiyun }
248*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_state_clear);
249*4882a593Smuzhiyun
250*4882a593Smuzhiyun /**
251*4882a593Smuzhiyun * __drm_atomic_state_free - free all memory for an atomic state
252*4882a593Smuzhiyun * @ref: This atomic state to deallocate
253*4882a593Smuzhiyun *
254*4882a593Smuzhiyun * This frees all memory associated with an atomic state, including all the
255*4882a593Smuzhiyun * per-object state for planes, CRTCs and connectors.
256*4882a593Smuzhiyun */
__drm_atomic_state_free(struct kref * ref)257*4882a593Smuzhiyun void __drm_atomic_state_free(struct kref *ref)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
260*4882a593Smuzhiyun struct drm_mode_config *config = &state->dev->mode_config;
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun drm_atomic_state_clear(state);
263*4882a593Smuzhiyun
264*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun if (config->funcs->atomic_state_free) {
267*4882a593Smuzhiyun config->funcs->atomic_state_free(state);
268*4882a593Smuzhiyun } else {
269*4882a593Smuzhiyun drm_atomic_state_default_release(state);
270*4882a593Smuzhiyun kfree(state);
271*4882a593Smuzhiyun }
272*4882a593Smuzhiyun }
273*4882a593Smuzhiyun EXPORT_SYMBOL(__drm_atomic_state_free);
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun /**
276*4882a593Smuzhiyun * drm_atomic_get_crtc_state - get CRTC state
277*4882a593Smuzhiyun * @state: global atomic state object
278*4882a593Smuzhiyun * @crtc: CRTC to get state object for
279*4882a593Smuzhiyun *
280*4882a593Smuzhiyun * This function returns the CRTC state for the given CRTC, allocating it if
281*4882a593Smuzhiyun * needed. It will also grab the relevant CRTC lock to make sure that the state
282*4882a593Smuzhiyun * is consistent.
283*4882a593Smuzhiyun *
284*4882a593Smuzhiyun * Returns:
285*4882a593Smuzhiyun *
286*4882a593Smuzhiyun * Either the allocated state or the error code encoded into the pointer. When
287*4882a593Smuzhiyun * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
288*4882a593Smuzhiyun * entire atomic sequence must be restarted. All other errors are fatal.
289*4882a593Smuzhiyun */
290*4882a593Smuzhiyun struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state * state,struct drm_crtc * crtc)291*4882a593Smuzhiyun drm_atomic_get_crtc_state(struct drm_atomic_state *state,
292*4882a593Smuzhiyun struct drm_crtc *crtc)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun int ret, index = drm_crtc_index(crtc);
295*4882a593Smuzhiyun struct drm_crtc_state *crtc_state;
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun WARN_ON(!state->acquire_ctx);
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
300*4882a593Smuzhiyun if (crtc_state)
301*4882a593Smuzhiyun return crtc_state;
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
304*4882a593Smuzhiyun if (ret)
305*4882a593Smuzhiyun return ERR_PTR(ret);
306*4882a593Smuzhiyun
307*4882a593Smuzhiyun crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
308*4882a593Smuzhiyun if (!crtc_state)
309*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
310*4882a593Smuzhiyun
311*4882a593Smuzhiyun state->crtcs[index].state = crtc_state;
312*4882a593Smuzhiyun state->crtcs[index].old_state = crtc->state;
313*4882a593Smuzhiyun state->crtcs[index].new_state = crtc_state;
314*4882a593Smuzhiyun state->crtcs[index].ptr = crtc;
315*4882a593Smuzhiyun crtc_state->state = state;
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
318*4882a593Smuzhiyun crtc->base.id, crtc->name, crtc_state, state);
319*4882a593Smuzhiyun
320*4882a593Smuzhiyun return crtc_state;
321*4882a593Smuzhiyun }
322*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_get_crtc_state);
323*4882a593Smuzhiyun
drm_atomic_crtc_check(const struct drm_crtc_state * old_crtc_state,const struct drm_crtc_state * new_crtc_state)324*4882a593Smuzhiyun static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
325*4882a593Smuzhiyun const struct drm_crtc_state *new_crtc_state)
326*4882a593Smuzhiyun {
327*4882a593Smuzhiyun struct drm_crtc *crtc = new_crtc_state->crtc;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun /* NOTE: we explicitly don't enforce constraints such as primary
330*4882a593Smuzhiyun * layer covering entire screen, since that is something we want
331*4882a593Smuzhiyun * to allow (on hw that supports it). For hw that does not, it
332*4882a593Smuzhiyun * should be checked in driver's crtc->atomic_check() vfunc.
333*4882a593Smuzhiyun *
334*4882a593Smuzhiyun * TODO: Add generic modeset state checks once we support those.
335*4882a593Smuzhiyun */
336*4882a593Smuzhiyun
337*4882a593Smuzhiyun if (new_crtc_state->active && !new_crtc_state->enable) {
338*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
339*4882a593Smuzhiyun crtc->base.id, crtc->name);
340*4882a593Smuzhiyun return -EINVAL;
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun
343*4882a593Smuzhiyun /* The state->enable vs. state->mode_blob checks can be WARN_ON,
344*4882a593Smuzhiyun * as this is a kernel-internal detail that userspace should never
345*4882a593Smuzhiyun * be able to trigger. */
346*4882a593Smuzhiyun if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
347*4882a593Smuzhiyun WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
348*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
349*4882a593Smuzhiyun crtc->base.id, crtc->name);
350*4882a593Smuzhiyun return -EINVAL;
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
354*4882a593Smuzhiyun WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
355*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
356*4882a593Smuzhiyun crtc->base.id, crtc->name);
357*4882a593Smuzhiyun return -EINVAL;
358*4882a593Smuzhiyun }
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun /*
361*4882a593Smuzhiyun * Reject event generation for when a CRTC is off and stays off.
362*4882a593Smuzhiyun * It wouldn't be hard to implement this, but userspace has a track
363*4882a593Smuzhiyun * record of happily burning through 100% cpu (or worse, crash) when the
364*4882a593Smuzhiyun * display pipe is suspended. To avoid all that fun just reject updates
365*4882a593Smuzhiyun * that ask for events since likely that indicates a bug in the
366*4882a593Smuzhiyun * compositor's drawing loop. This is consistent with the vblank IOCTL
367*4882a593Smuzhiyun * and legacy page_flip IOCTL which also reject service on a disabled
368*4882a593Smuzhiyun * pipe.
369*4882a593Smuzhiyun */
370*4882a593Smuzhiyun if (new_crtc_state->event &&
371*4882a593Smuzhiyun !new_crtc_state->active && !old_crtc_state->active) {
372*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
373*4882a593Smuzhiyun crtc->base.id, crtc->name);
374*4882a593Smuzhiyun return -EINVAL;
375*4882a593Smuzhiyun }
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun return 0;
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun
/*
 * Dump a CRTC state to @p for debugfs/dmesg, one line per tracked field,
 * then chain to the driver's optional &drm_crtc_funcs.atomic_print_state
 * hook for driver-private state.
 */
static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tself_refresh_active=%d\n", state->self_refresh_active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	/* Let the driver append its subclassed state, if it has any. */
	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}
402*4882a593Smuzhiyun
drm_atomic_connector_check(struct drm_connector * connector,struct drm_connector_state * state)403*4882a593Smuzhiyun static int drm_atomic_connector_check(struct drm_connector *connector,
404*4882a593Smuzhiyun struct drm_connector_state *state)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun struct drm_crtc_state *crtc_state;
407*4882a593Smuzhiyun struct drm_writeback_job *writeback_job = state->writeback_job;
408*4882a593Smuzhiyun const struct drm_display_info *info = &connector->display_info;
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun state->max_bpc = info->bpc ? info->bpc : 8;
411*4882a593Smuzhiyun if (connector->max_bpc_property)
412*4882a593Smuzhiyun state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
413*4882a593Smuzhiyun
414*4882a593Smuzhiyun if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
415*4882a593Smuzhiyun return 0;
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun if (writeback_job->fb && !state->crtc) {
418*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
419*4882a593Smuzhiyun connector->base.id, connector->name);
420*4882a593Smuzhiyun return -EINVAL;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun if (state->crtc)
424*4882a593Smuzhiyun crtc_state = drm_atomic_get_existing_crtc_state(state->state,
425*4882a593Smuzhiyun state->crtc);
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun if (writeback_job->fb && !crtc_state->active) {
428*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
429*4882a593Smuzhiyun connector->base.id, connector->name,
430*4882a593Smuzhiyun state->crtc->base.id);
431*4882a593Smuzhiyun return -EINVAL;
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun if (!writeback_job->fb) {
435*4882a593Smuzhiyun if (writeback_job->out_fence) {
436*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
437*4882a593Smuzhiyun connector->base.id, connector->name);
438*4882a593Smuzhiyun return -EINVAL;
439*4882a593Smuzhiyun }
440*4882a593Smuzhiyun
441*4882a593Smuzhiyun drm_writeback_cleanup_job(writeback_job);
442*4882a593Smuzhiyun state->writeback_job = NULL;
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun return 0;
446*4882a593Smuzhiyun }
447*4882a593Smuzhiyun
448*4882a593Smuzhiyun /**
449*4882a593Smuzhiyun * drm_atomic_get_plane_state - get plane state
450*4882a593Smuzhiyun * @state: global atomic state object
451*4882a593Smuzhiyun * @plane: plane to get state object for
452*4882a593Smuzhiyun *
453*4882a593Smuzhiyun * This function returns the plane state for the given plane, allocating it if
454*4882a593Smuzhiyun * needed. It will also grab the relevant plane lock to make sure that the state
455*4882a593Smuzhiyun * is consistent.
456*4882a593Smuzhiyun *
457*4882a593Smuzhiyun * Returns:
458*4882a593Smuzhiyun *
459*4882a593Smuzhiyun * Either the allocated state or the error code encoded into the pointer. When
460*4882a593Smuzhiyun * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
461*4882a593Smuzhiyun * entire atomic sequence must be restarted. All other errors are fatal.
462*4882a593Smuzhiyun */
463*4882a593Smuzhiyun struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state * state,struct drm_plane * plane)464*4882a593Smuzhiyun drm_atomic_get_plane_state(struct drm_atomic_state *state,
465*4882a593Smuzhiyun struct drm_plane *plane)
466*4882a593Smuzhiyun {
467*4882a593Smuzhiyun int ret, index = drm_plane_index(plane);
468*4882a593Smuzhiyun struct drm_plane_state *plane_state;
469*4882a593Smuzhiyun
470*4882a593Smuzhiyun WARN_ON(!state->acquire_ctx);
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun /* the legacy pointers should never be set */
473*4882a593Smuzhiyun WARN_ON(plane->fb);
474*4882a593Smuzhiyun WARN_ON(plane->old_fb);
475*4882a593Smuzhiyun WARN_ON(plane->crtc);
476*4882a593Smuzhiyun
477*4882a593Smuzhiyun plane_state = drm_atomic_get_existing_plane_state(state, plane);
478*4882a593Smuzhiyun if (plane_state)
479*4882a593Smuzhiyun return plane_state;
480*4882a593Smuzhiyun
481*4882a593Smuzhiyun ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
482*4882a593Smuzhiyun if (ret)
483*4882a593Smuzhiyun return ERR_PTR(ret);
484*4882a593Smuzhiyun
485*4882a593Smuzhiyun plane_state = plane->funcs->atomic_duplicate_state(plane);
486*4882a593Smuzhiyun if (!plane_state)
487*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun state->planes[index].state = plane_state;
490*4882a593Smuzhiyun state->planes[index].ptr = plane;
491*4882a593Smuzhiyun state->planes[index].old_state = plane->state;
492*4882a593Smuzhiyun state->planes[index].new_state = plane_state;
493*4882a593Smuzhiyun plane_state->state = state;
494*4882a593Smuzhiyun
495*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
496*4882a593Smuzhiyun plane->base.id, plane->name, plane_state, state);
497*4882a593Smuzhiyun
498*4882a593Smuzhiyun if (plane_state->crtc) {
499*4882a593Smuzhiyun struct drm_crtc_state *crtc_state;
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun crtc_state = drm_atomic_get_crtc_state(state,
502*4882a593Smuzhiyun plane_state->crtc);
503*4882a593Smuzhiyun if (IS_ERR(crtc_state))
504*4882a593Smuzhiyun return ERR_CAST(crtc_state);
505*4882a593Smuzhiyun }
506*4882a593Smuzhiyun
507*4882a593Smuzhiyun return plane_state;
508*4882a593Smuzhiyun }
509*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_get_plane_state);
510*4882a593Smuzhiyun
511*4882a593Smuzhiyun static bool
plane_switching_crtc(const struct drm_plane_state * old_plane_state,const struct drm_plane_state * new_plane_state)512*4882a593Smuzhiyun plane_switching_crtc(const struct drm_plane_state *old_plane_state,
513*4882a593Smuzhiyun const struct drm_plane_state *new_plane_state)
514*4882a593Smuzhiyun {
515*4882a593Smuzhiyun if (!old_plane_state->crtc || !new_plane_state->crtc)
516*4882a593Smuzhiyun return false;
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun if (old_plane_state->crtc == new_plane_state->crtc)
519*4882a593Smuzhiyun return false;
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun /* This could be refined, but currently there's no helper or driver code
522*4882a593Smuzhiyun * to implement direct switching of active planes nor userspace to take
523*4882a593Smuzhiyun * advantage of more direct plane switching without the intermediate
524*4882a593Smuzhiyun * full OFF state.
525*4882a593Smuzhiyun */
526*4882a593Smuzhiyun return true;
527*4882a593Smuzhiyun }
528*4882a593Smuzhiyun
529*4882a593Smuzhiyun /**
530*4882a593Smuzhiyun * drm_atomic_plane_check - check plane state
531*4882a593Smuzhiyun * @old_plane_state: old plane state to check
532*4882a593Smuzhiyun * @new_plane_state: new plane state to check
533*4882a593Smuzhiyun *
534*4882a593Smuzhiyun * Provides core sanity checks for plane state.
535*4882a593Smuzhiyun *
536*4882a593Smuzhiyun * RETURNS:
537*4882a593Smuzhiyun * Zero on success, error code on failure
538*4882a593Smuzhiyun */
drm_atomic_plane_check(const struct drm_plane_state * old_plane_state,const struct drm_plane_state * new_plane_state)539*4882a593Smuzhiyun static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
540*4882a593Smuzhiyun const struct drm_plane_state *new_plane_state)
541*4882a593Smuzhiyun {
542*4882a593Smuzhiyun struct drm_plane *plane = new_plane_state->plane;
543*4882a593Smuzhiyun struct drm_crtc *crtc = new_plane_state->crtc;
544*4882a593Smuzhiyun const struct drm_framebuffer *fb = new_plane_state->fb;
545*4882a593Smuzhiyun unsigned int fb_width, fb_height;
546*4882a593Smuzhiyun struct drm_mode_rect *clips;
547*4882a593Smuzhiyun uint32_t num_clips;
548*4882a593Smuzhiyun int ret;
549*4882a593Smuzhiyun
550*4882a593Smuzhiyun /* either *both* CRTC and FB must be set, or neither */
551*4882a593Smuzhiyun if (crtc && !fb) {
552*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
553*4882a593Smuzhiyun plane->base.id, plane->name);
554*4882a593Smuzhiyun return -EINVAL;
555*4882a593Smuzhiyun } else if (fb && !crtc) {
556*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
557*4882a593Smuzhiyun plane->base.id, plane->name);
558*4882a593Smuzhiyun return -EINVAL;
559*4882a593Smuzhiyun }
560*4882a593Smuzhiyun
561*4882a593Smuzhiyun /* if disabled, we don't care about the rest of the state: */
562*4882a593Smuzhiyun if (!crtc)
563*4882a593Smuzhiyun return 0;
564*4882a593Smuzhiyun
565*4882a593Smuzhiyun /* Check whether this plane is usable on this CRTC */
566*4882a593Smuzhiyun if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
567*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
568*4882a593Smuzhiyun crtc->base.id, crtc->name,
569*4882a593Smuzhiyun plane->base.id, plane->name);
570*4882a593Smuzhiyun return -EINVAL;
571*4882a593Smuzhiyun }
572*4882a593Smuzhiyun
573*4882a593Smuzhiyun /* Check whether this plane supports the fb pixel format. */
574*4882a593Smuzhiyun ret = drm_plane_check_pixel_format(plane, fb->format->format,
575*4882a593Smuzhiyun fb->modifier);
576*4882a593Smuzhiyun if (ret) {
577*4882a593Smuzhiyun struct drm_format_name_buf format_name;
578*4882a593Smuzhiyun
579*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
580*4882a593Smuzhiyun plane->base.id, plane->name,
581*4882a593Smuzhiyun drm_get_format_name(fb->format->format,
582*4882a593Smuzhiyun &format_name),
583*4882a593Smuzhiyun fb->modifier);
584*4882a593Smuzhiyun return ret;
585*4882a593Smuzhiyun }
586*4882a593Smuzhiyun
587*4882a593Smuzhiyun /* Give drivers some help against integer overflows */
588*4882a593Smuzhiyun if (new_plane_state->crtc_w > INT_MAX ||
589*4882a593Smuzhiyun new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
590*4882a593Smuzhiyun new_plane_state->crtc_h > INT_MAX ||
591*4882a593Smuzhiyun new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
592*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
593*4882a593Smuzhiyun plane->base.id, plane->name,
594*4882a593Smuzhiyun new_plane_state->crtc_w, new_plane_state->crtc_h,
595*4882a593Smuzhiyun new_plane_state->crtc_x, new_plane_state->crtc_y);
596*4882a593Smuzhiyun return -ERANGE;
597*4882a593Smuzhiyun }
598*4882a593Smuzhiyun
599*4882a593Smuzhiyun fb_width = fb->width << 16;
600*4882a593Smuzhiyun fb_height = fb->height << 16;
601*4882a593Smuzhiyun
602*4882a593Smuzhiyun /* Make sure source coordinates are inside the fb. */
603*4882a593Smuzhiyun if (new_plane_state->src_w > fb_width ||
604*4882a593Smuzhiyun new_plane_state->src_x > fb_width - new_plane_state->src_w ||
605*4882a593Smuzhiyun new_plane_state->src_h > fb_height ||
606*4882a593Smuzhiyun new_plane_state->src_y > fb_height - new_plane_state->src_h) {
607*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
608*4882a593Smuzhiyun "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
609*4882a593Smuzhiyun plane->base.id, plane->name,
610*4882a593Smuzhiyun new_plane_state->src_w >> 16,
611*4882a593Smuzhiyun ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
612*4882a593Smuzhiyun new_plane_state->src_h >> 16,
613*4882a593Smuzhiyun ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
614*4882a593Smuzhiyun new_plane_state->src_x >> 16,
615*4882a593Smuzhiyun ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
616*4882a593Smuzhiyun new_plane_state->src_y >> 16,
617*4882a593Smuzhiyun ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
618*4882a593Smuzhiyun fb->width, fb->height);
619*4882a593Smuzhiyun return -ENOSPC;
620*4882a593Smuzhiyun }
621*4882a593Smuzhiyun
622*4882a593Smuzhiyun clips = drm_plane_get_damage_clips(new_plane_state);
623*4882a593Smuzhiyun num_clips = drm_plane_get_damage_clips_count(new_plane_state);
624*4882a593Smuzhiyun
625*4882a593Smuzhiyun /* Make sure damage clips are valid and inside the fb. */
626*4882a593Smuzhiyun while (num_clips > 0) {
627*4882a593Smuzhiyun if (clips->x1 >= clips->x2 ||
628*4882a593Smuzhiyun clips->y1 >= clips->y2 ||
629*4882a593Smuzhiyun clips->x1 < 0 ||
630*4882a593Smuzhiyun clips->y1 < 0 ||
631*4882a593Smuzhiyun clips->x2 > fb_width ||
632*4882a593Smuzhiyun clips->y2 > fb_height) {
633*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
634*4882a593Smuzhiyun plane->base.id, plane->name, clips->x1,
635*4882a593Smuzhiyun clips->y1, clips->x2, clips->y2);
636*4882a593Smuzhiyun return -EINVAL;
637*4882a593Smuzhiyun }
638*4882a593Smuzhiyun clips++;
639*4882a593Smuzhiyun num_clips--;
640*4882a593Smuzhiyun }
641*4882a593Smuzhiyun
642*4882a593Smuzhiyun if (plane_switching_crtc(old_plane_state, new_plane_state)) {
643*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
644*4882a593Smuzhiyun plane->base.id, plane->name);
645*4882a593Smuzhiyun return -EINVAL;
646*4882a593Smuzhiyun }
647*4882a593Smuzhiyun
648*4882a593Smuzhiyun return 0;
649*4882a593Smuzhiyun }
650*4882a593Smuzhiyun
/* Dump the contents of a plane state into @p for debugfs/atomic debugging. */
static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	/* src is in 16.16 fixed point, dest in whole pixels. */
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));

	/* Let the driver append any driver-private plane state it tracks. */
	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}
675*4882a593Smuzhiyun
676*4882a593Smuzhiyun /**
677*4882a593Smuzhiyun * DOC: handling driver private state
678*4882a593Smuzhiyun *
679*4882a593Smuzhiyun * Very often the DRM objects exposed to userspace in the atomic modeset api
680*4882a593Smuzhiyun * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
681*4882a593Smuzhiyun * underlying hardware. Especially for any kind of shared resources (e.g. shared
682*4882a593Smuzhiyun * clocks, scaler units, bandwidth and fifo limits shared among a group of
683*4882a593Smuzhiyun * planes or CRTCs, and so on) it makes sense to model these as independent
684*4882a593Smuzhiyun * objects. Drivers then need to do similar state tracking and commit ordering for
 * such private (since not exposed to userspace) objects as the atomic core and
686*4882a593Smuzhiyun * helpers already provide for connectors, planes and CRTCs.
687*4882a593Smuzhiyun *
688*4882a593Smuzhiyun * To make this easier on drivers the atomic core provides some support to track
689*4882a593Smuzhiyun * driver private state objects using struct &drm_private_obj, with the
690*4882a593Smuzhiyun * associated state struct &drm_private_state.
691*4882a593Smuzhiyun *
692*4882a593Smuzhiyun * Similar to userspace-exposed objects, private state structures can be
693*4882a593Smuzhiyun * acquired by calling drm_atomic_get_private_obj_state(). This also takes care
694*4882a593Smuzhiyun * of locking, hence drivers should not have a need to call drm_modeset_lock()
695*4882a593Smuzhiyun * directly. Sequence of the actual hardware state commit is not handled,
696*4882a593Smuzhiyun * drivers might need to keep track of struct drm_crtc_commit within subclassed
697*4882a593Smuzhiyun * structure of &drm_private_state as necessary, e.g. similar to
698*4882a593Smuzhiyun * &drm_plane_state.commit. See also &drm_atomic_state.fake_commit.
699*4882a593Smuzhiyun *
700*4882a593Smuzhiyun * All private state structures contained in a &drm_atomic_state update can be
701*4882a593Smuzhiyun * iterated using for_each_oldnew_private_obj_in_state(),
702*4882a593Smuzhiyun * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
703*4882a593Smuzhiyun * Drivers are recommended to wrap these for each type of driver private state
704*4882a593Smuzhiyun * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
705*4882a593Smuzhiyun * least if they want to iterate over all objects of a given type.
706*4882a593Smuzhiyun *
707*4882a593Smuzhiyun * An earlier way to handle driver private state was by subclassing struct
708*4882a593Smuzhiyun * &drm_atomic_state. But since that encourages non-standard ways to implement
 * the check/commit split atomic requires (by using e.g. "check and rollback or
 * commit" instead of "duplicate state, check, then either commit or release
 * duplicated state") it is deprecated in favour of using &drm_private_state.
712*4882a593Smuzhiyun */
713*4882a593Smuzhiyun
714*4882a593Smuzhiyun /**
715*4882a593Smuzhiyun * drm_atomic_private_obj_init - initialize private object
716*4882a593Smuzhiyun * @dev: DRM device this object will be attached to
717*4882a593Smuzhiyun * @obj: private object
718*4882a593Smuzhiyun * @state: initial private object state
719*4882a593Smuzhiyun * @funcs: pointer to the struct of function pointers that identify the object
720*4882a593Smuzhiyun * type
721*4882a593Smuzhiyun *
722*4882a593Smuzhiyun * Initialize the private object, which can be embedded into any
723*4882a593Smuzhiyun * driver private object that needs its own atomic state.
724*4882a593Smuzhiyun */
void
drm_atomic_private_obj_init(struct drm_device *dev,
			    struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	/* @obj may be embedded in uninitialized driver memory; start clean. */
	memset(obj, 0, sizeof(*obj));

	drm_modeset_lock_init(&obj->lock);

	obj->state = state;
	obj->funcs = funcs;
	/* Register on the per-device list so the object can be found later. */
	list_add_tail(&obj->head, &dev->mode_config.privobj_list);
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);
740*4882a593Smuzhiyun
741*4882a593Smuzhiyun /**
742*4882a593Smuzhiyun * drm_atomic_private_obj_fini - finalize private object
743*4882a593Smuzhiyun * @obj: private object
744*4882a593Smuzhiyun *
745*4882a593Smuzhiyun * Finalize the private object.
746*4882a593Smuzhiyun */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	/* Reverse of drm_atomic_private_obj_init(): unlink, destroy the
	 * current state via the object's own hook, then tear down the lock. */
	list_del(&obj->head);
	obj->funcs->atomic_destroy_state(obj, obj->state);
	drm_modeset_lock_fini(&obj->lock);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);
755*4882a593Smuzhiyun
756*4882a593Smuzhiyun /**
757*4882a593Smuzhiyun * drm_atomic_get_private_obj_state - get private object state
758*4882a593Smuzhiyun * @state: global atomic state
759*4882a593Smuzhiyun * @obj: private object to get the state for
760*4882a593Smuzhiyun *
761*4882a593Smuzhiyun * This function returns the private object state for the given private object,
762*4882a593Smuzhiyun * allocating the state if needed. It will also grab the relevant private
763*4882a593Smuzhiyun * object lock to make sure that the state is consistent.
764*4882a593Smuzhiyun *
765*4882a593Smuzhiyun * RETURNS:
766*4882a593Smuzhiyun *
767*4882a593Smuzhiyun * Either the allocated state or the error code encoded into a pointer.
768*4882a593Smuzhiyun */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i, ret;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	/* Already part of this atomic update? Return the cached state. */
	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	/* May fail with -EDEADLK, in which case the whole atomic sequence
	 * must be restarted by the caller. */
	ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	/* Grow the array by one slot; on failure the old array stays valid
	 * and is released with the rest of @state. */
	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	/* Record old/new pointers for the for_each_*_private_obj_in_state()
	 * iterators: old is the object's current state, new the duplicate. */
	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	obj_state->state = state;

	/* Only publish the new count once the slot is fully populated. */
	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
814*4882a593Smuzhiyun
815*4882a593Smuzhiyun /**
816*4882a593Smuzhiyun * drm_atomic_get_old_private_obj_state
817*4882a593Smuzhiyun * @state: global atomic state object
818*4882a593Smuzhiyun * @obj: private_obj to grab
819*4882a593Smuzhiyun *
820*4882a593Smuzhiyun * This function returns the old private object state for the given private_obj,
821*4882a593Smuzhiyun * or NULL if the private_obj is not part of the global atomic state.
822*4882a593Smuzhiyun */
823*4882a593Smuzhiyun struct drm_private_state *
drm_atomic_get_old_private_obj_state(struct drm_atomic_state * state,struct drm_private_obj * obj)824*4882a593Smuzhiyun drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
825*4882a593Smuzhiyun struct drm_private_obj *obj)
826*4882a593Smuzhiyun {
827*4882a593Smuzhiyun int i;
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun for (i = 0; i < state->num_private_objs; i++)
830*4882a593Smuzhiyun if (obj == state->private_objs[i].ptr)
831*4882a593Smuzhiyun return state->private_objs[i].old_state;
832*4882a593Smuzhiyun
833*4882a593Smuzhiyun return NULL;
834*4882a593Smuzhiyun }
835*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
836*4882a593Smuzhiyun
837*4882a593Smuzhiyun /**
838*4882a593Smuzhiyun * drm_atomic_get_new_private_obj_state
839*4882a593Smuzhiyun * @state: global atomic state object
840*4882a593Smuzhiyun * @obj: private_obj to grab
841*4882a593Smuzhiyun *
842*4882a593Smuzhiyun * This function returns the new private object state for the given private_obj,
843*4882a593Smuzhiyun * or NULL if the private_obj is not part of the global atomic state.
844*4882a593Smuzhiyun */
845*4882a593Smuzhiyun struct drm_private_state *
drm_atomic_get_new_private_obj_state(struct drm_atomic_state * state,struct drm_private_obj * obj)846*4882a593Smuzhiyun drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
847*4882a593Smuzhiyun struct drm_private_obj *obj)
848*4882a593Smuzhiyun {
849*4882a593Smuzhiyun int i;
850*4882a593Smuzhiyun
851*4882a593Smuzhiyun for (i = 0; i < state->num_private_objs; i++)
852*4882a593Smuzhiyun if (obj == state->private_objs[i].ptr)
853*4882a593Smuzhiyun return state->private_objs[i].new_state;
854*4882a593Smuzhiyun
855*4882a593Smuzhiyun return NULL;
856*4882a593Smuzhiyun }
857*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
858*4882a593Smuzhiyun
859*4882a593Smuzhiyun /**
860*4882a593Smuzhiyun * drm_atomic_get_old_connector_for_encoder - Get old connector for an encoder
861*4882a593Smuzhiyun * @state: Atomic state
862*4882a593Smuzhiyun * @encoder: The encoder to fetch the connector state for
863*4882a593Smuzhiyun *
864*4882a593Smuzhiyun * This function finds and returns the connector that was connected to @encoder
865*4882a593Smuzhiyun * as specified by the @state.
866*4882a593Smuzhiyun *
867*4882a593Smuzhiyun * If there is no connector in @state which previously had @encoder connected to
868*4882a593Smuzhiyun * it, this function will return NULL. While this may seem like an invalid use
869*4882a593Smuzhiyun * case, it is sometimes useful to differentiate commits which had no prior
870*4882a593Smuzhiyun * connectors attached to @encoder vs ones that did (and to inspect their
871*4882a593Smuzhiyun * state). This is especially true in enable hooks because the pipeline has
872*4882a593Smuzhiyun * changed.
873*4882a593Smuzhiyun *
874*4882a593Smuzhiyun * Returns: The old connector connected to @encoder, or NULL if the encoder is
875*4882a593Smuzhiyun * not connected.
876*4882a593Smuzhiyun */
struct drm_connector *
drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
					 struct drm_encoder *encoder)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	unsigned int i;

	/* First match wins; an encoder is expected to be bound to at most
	 * one connector at a time, so at most one old state can reference
	 * it via best_encoder. */
	for_each_old_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->best_encoder == encoder)
			return connector;
	}

	return NULL;
}
EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
893*4882a593Smuzhiyun
894*4882a593Smuzhiyun /**
895*4882a593Smuzhiyun * drm_atomic_get_new_connector_for_encoder - Get new connector for an encoder
896*4882a593Smuzhiyun * @state: Atomic state
897*4882a593Smuzhiyun * @encoder: The encoder to fetch the connector state for
898*4882a593Smuzhiyun *
899*4882a593Smuzhiyun * This function finds and returns the connector that will be connected to
900*4882a593Smuzhiyun * @encoder as specified by the @state.
901*4882a593Smuzhiyun *
902*4882a593Smuzhiyun * If there is no connector in @state which will have @encoder connected to it,
903*4882a593Smuzhiyun * this function will return NULL. While this may seem like an invalid use case,
904*4882a593Smuzhiyun * it is sometimes useful to differentiate commits which have no connectors
905*4882a593Smuzhiyun * attached to @encoder vs ones that do (and to inspect their state). This is
906*4882a593Smuzhiyun * especially true in disable hooks because the pipeline will change.
907*4882a593Smuzhiyun *
908*4882a593Smuzhiyun * Returns: The new connector connected to @encoder, or NULL if the encoder is
909*4882a593Smuzhiyun * not connected.
910*4882a593Smuzhiyun */
struct drm_connector *
drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
					 struct drm_encoder *encoder)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	unsigned int i;

	/* First match wins; an encoder is expected to be bound to at most
	 * one connector at a time, so at most one new state can reference
	 * it via best_encoder. */
	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->best_encoder == encoder)
			return connector;
	}

	return NULL;
}
EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
927*4882a593Smuzhiyun
928*4882a593Smuzhiyun /**
929*4882a593Smuzhiyun * drm_atomic_get_connector_state - get connector state
930*4882a593Smuzhiyun * @state: global atomic state object
931*4882a593Smuzhiyun * @connector: connector to get state object for
932*4882a593Smuzhiyun *
933*4882a593Smuzhiyun * This function returns the connector state for the given connector,
934*4882a593Smuzhiyun * allocating it if needed. It will also grab the relevant connector lock to
935*4882a593Smuzhiyun * make sure that the state is consistent.
936*4882a593Smuzhiyun *
937*4882a593Smuzhiyun * Returns:
938*4882a593Smuzhiyun *
939*4882a593Smuzhiyun * Either the allocated state or the error code encoded into the pointer. When
940*4882a593Smuzhiyun * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
941*4882a593Smuzhiyun * entire atomic sequence must be restarted. All other errors are fatal.
942*4882a593Smuzhiyun */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	/* Locking below requires an acquire context; catch misuse loudly. */
	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	/* The connector array may be smaller than this connector's index —
	 * presumably because connectors can be registered after @state was
	 * allocated (NOTE(review): confirm hotplug is the cause) — so grow
	 * it on demand. */
	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		/* Zero only the newly-added tail of the array. */
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	/* Already duplicated in this update? Return the cached state. */
	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	/* Hold a reference for as long as the state is part of the update. */
	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	/* Pulling in a connector also pulls in the CRTC it is bound to. */
	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
1004*4882a593Smuzhiyun
drm_atomic_connector_print_state(struct drm_printer * p,const struct drm_connector_state * state)1005*4882a593Smuzhiyun static void drm_atomic_connector_print_state(struct drm_printer *p,
1006*4882a593Smuzhiyun const struct drm_connector_state *state)
1007*4882a593Smuzhiyun {
1008*4882a593Smuzhiyun struct drm_connector *connector = state->connector;
1009*4882a593Smuzhiyun
1010*4882a593Smuzhiyun drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
1011*4882a593Smuzhiyun drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
1012*4882a593Smuzhiyun drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
1015*4882a593Smuzhiyun if (state->writeback_job && state->writeback_job->fb)
1016*4882a593Smuzhiyun drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun if (connector->funcs->atomic_print_state)
1019*4882a593Smuzhiyun connector->funcs->atomic_print_state(p, state);
1020*4882a593Smuzhiyun }
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun /**
1023*4882a593Smuzhiyun * drm_atomic_get_bridge_state - get bridge state
1024*4882a593Smuzhiyun * @state: global atomic state object
1025*4882a593Smuzhiyun * @bridge: bridge to get state object for
1026*4882a593Smuzhiyun *
1027*4882a593Smuzhiyun * This function returns the bridge state for the given bridge, allocating it
1028*4882a593Smuzhiyun * if needed. It will also grab the relevant bridge lock to make sure that the
1029*4882a593Smuzhiyun * state is consistent.
1030*4882a593Smuzhiyun *
1031*4882a593Smuzhiyun * Returns:
1032*4882a593Smuzhiyun *
1033*4882a593Smuzhiyun * Either the allocated state or the error code encoded into the pointer. When
1034*4882a593Smuzhiyun * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
1035*4882a593Smuzhiyun * entire atomic sequence must be restarted.
1036*4882a593Smuzhiyun */
1037*4882a593Smuzhiyun struct drm_bridge_state *
drm_atomic_get_bridge_state(struct drm_atomic_state * state,struct drm_bridge * bridge)1038*4882a593Smuzhiyun drm_atomic_get_bridge_state(struct drm_atomic_state *state,
1039*4882a593Smuzhiyun struct drm_bridge *bridge)
1040*4882a593Smuzhiyun {
1041*4882a593Smuzhiyun struct drm_private_state *obj_state;
1042*4882a593Smuzhiyun
1043*4882a593Smuzhiyun obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
1044*4882a593Smuzhiyun if (IS_ERR(obj_state))
1045*4882a593Smuzhiyun return ERR_CAST(obj_state);
1046*4882a593Smuzhiyun
1047*4882a593Smuzhiyun return drm_priv_to_bridge_state(obj_state);
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_get_bridge_state);
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun /**
1052*4882a593Smuzhiyun * drm_atomic_get_old_bridge_state - get old bridge state, if it exists
1053*4882a593Smuzhiyun * @state: global atomic state object
1054*4882a593Smuzhiyun * @bridge: bridge to grab
1055*4882a593Smuzhiyun *
1056*4882a593Smuzhiyun * This function returns the old bridge state for the given bridge, or NULL if
1057*4882a593Smuzhiyun * the bridge is not part of the global atomic state.
1058*4882a593Smuzhiyun */
1059*4882a593Smuzhiyun struct drm_bridge_state *
drm_atomic_get_old_bridge_state(struct drm_atomic_state * state,struct drm_bridge * bridge)1060*4882a593Smuzhiyun drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
1061*4882a593Smuzhiyun struct drm_bridge *bridge)
1062*4882a593Smuzhiyun {
1063*4882a593Smuzhiyun struct drm_private_state *obj_state;
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyun obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
1066*4882a593Smuzhiyun if (!obj_state)
1067*4882a593Smuzhiyun return NULL;
1068*4882a593Smuzhiyun
1069*4882a593Smuzhiyun return drm_priv_to_bridge_state(obj_state);
1070*4882a593Smuzhiyun }
1071*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
1072*4882a593Smuzhiyun
1073*4882a593Smuzhiyun /**
1074*4882a593Smuzhiyun * drm_atomic_get_new_bridge_state - get new bridge state, if it exists
1075*4882a593Smuzhiyun * @state: global atomic state object
1076*4882a593Smuzhiyun * @bridge: bridge to grab
1077*4882a593Smuzhiyun *
1078*4882a593Smuzhiyun * This function returns the new bridge state for the given bridge, or NULL if
1079*4882a593Smuzhiyun * the bridge is not part of the global atomic state.
1080*4882a593Smuzhiyun */
1081*4882a593Smuzhiyun struct drm_bridge_state *
drm_atomic_get_new_bridge_state(struct drm_atomic_state * state,struct drm_bridge * bridge)1082*4882a593Smuzhiyun drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
1083*4882a593Smuzhiyun struct drm_bridge *bridge)
1084*4882a593Smuzhiyun {
1085*4882a593Smuzhiyun struct drm_private_state *obj_state;
1086*4882a593Smuzhiyun
1087*4882a593Smuzhiyun obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
1088*4882a593Smuzhiyun if (!obj_state)
1089*4882a593Smuzhiyun return NULL;
1090*4882a593Smuzhiyun
1091*4882a593Smuzhiyun return drm_priv_to_bridge_state(obj_state);
1092*4882a593Smuzhiyun }
1093*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun /**
1096*4882a593Smuzhiyun * drm_atomic_add_encoder_bridges - add bridges attached to an encoder
1097*4882a593Smuzhiyun * @state: atomic state
1098*4882a593Smuzhiyun * @encoder: DRM encoder
1099*4882a593Smuzhiyun *
1100*4882a593Smuzhiyun * This function adds all bridges attached to @encoder. This is needed to add
1101*4882a593Smuzhiyun * bridge states to @state and make them available when
1102*4882a593Smuzhiyun * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
1103*4882a593Smuzhiyun * &drm_bridge_funcs.atomic_enable(),
1104*4882a593Smuzhiyun * &drm_bridge_funcs.atomic_disable_post_disable() are called.
1105*4882a593Smuzhiyun *
1106*4882a593Smuzhiyun * Returns:
1107*4882a593Smuzhiyun * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1108*4882a593Smuzhiyun * then the w/w mutex code has detected a deadlock and the entire atomic
1109*4882a593Smuzhiyun * sequence must be restarted. All other errors are fatal.
1110*4882a593Smuzhiyun */
1111*4882a593Smuzhiyun int
drm_atomic_add_encoder_bridges(struct drm_atomic_state * state,struct drm_encoder * encoder)1112*4882a593Smuzhiyun drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
1113*4882a593Smuzhiyun struct drm_encoder *encoder)
1114*4882a593Smuzhiyun {
1115*4882a593Smuzhiyun struct drm_bridge_state *bridge_state;
1116*4882a593Smuzhiyun struct drm_bridge *bridge;
1117*4882a593Smuzhiyun
1118*4882a593Smuzhiyun if (!encoder)
1119*4882a593Smuzhiyun return 0;
1120*4882a593Smuzhiyun
1121*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n",
1122*4882a593Smuzhiyun encoder->base.id, encoder->name, state);
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun drm_for_each_bridge_in_chain(encoder, bridge) {
1125*4882a593Smuzhiyun /* Skip bridges that don't implement the atomic state hooks. */
1126*4882a593Smuzhiyun if (!bridge->funcs->atomic_duplicate_state)
1127*4882a593Smuzhiyun continue;
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun bridge_state = drm_atomic_get_bridge_state(state, bridge);
1130*4882a593Smuzhiyun if (IS_ERR(bridge_state))
1131*4882a593Smuzhiyun return PTR_ERR(bridge_state);
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyun return 0;
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
1137*4882a593Smuzhiyun
1138*4882a593Smuzhiyun /**
1139*4882a593Smuzhiyun * drm_atomic_add_affected_connectors - add connectors for CRTC
1140*4882a593Smuzhiyun * @state: atomic state
1141*4882a593Smuzhiyun * @crtc: DRM CRTC
1142*4882a593Smuzhiyun *
1143*4882a593Smuzhiyun * This function walks the current configuration and adds all connectors
1144*4882a593Smuzhiyun * currently using @crtc to the atomic configuration @state. Note that this
1145*4882a593Smuzhiyun * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one CRTC. Hence
1147*4882a593Smuzhiyun * drivers and helpers should only call this when really needed (e.g. when a
1148*4882a593Smuzhiyun * full modeset needs to happen due to some change).
1149*4882a593Smuzhiyun *
1150*4882a593Smuzhiyun * Returns:
1151*4882a593Smuzhiyun * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1152*4882a593Smuzhiyun * then the w/w mutex code has detected a deadlock and the entire atomic
1153*4882a593Smuzhiyun * sequence must be restarted. All other errors are fatal.
1154*4882a593Smuzhiyun */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	/* Pull in the CRTC state first; its connector_mask drives the scan. */
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* connection_mutex keeps the connector<->CRTC bindings stable while
	 * we walk the list below. May fail with -EDEADLK. */
	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			/* The iterator must be ended on every exit path. */
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
1197*4882a593Smuzhiyun
/**
 * drm_atomic_add_affected_planes - add planes for CRTC
 * @state: atomic state
 * @crtc: DRM CRTC
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any) adding all the plane states for
 * a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	/* Use the old state: planes currently scanned out by @crtc. */
	const struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_plane *plane;

	/* Callers must have added the CRTC's own state to @state already. */
	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

	DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
		/* Acquiring the plane state is what adds it to @state. */
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}
	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
1241*4882a593Smuzhiyun
/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Runs the core per-object checks for every plane, CRTC and connector in
 * @state, then the driver's &drm_mode_config_funcs.atomic_check hook, and
 * finally rejects updates that require a full modeset when
 * &drm_atomic_state.allow_modeset is not set.
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	struct drm_plane_state *new_plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	/* Core checks for each plane in the update. */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	/* Core checks for each CRTC in the update. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	/* Core checks for each connector in the update. */
	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_connector_check(conn, conn_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
					 conn->base.id, conn->name);
			return ret;
		}
	}

	/* Driver-specific checks; the hook is optional. */
	if (config->funcs->atomic_check) {
		ret = config->funcs->atomic_check(state->dev, state);

		if (ret) {
			DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
					 state, ret);
			return ret;
		}
	}

	/*
	 * The checks above (in particular the driver hook) may have flagged
	 * CRTCs as needing a full modeset; reject that if the caller did not
	 * allow modesets on this update.
	 */
	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun /**
1321*4882a593Smuzhiyun * drm_atomic_commit - commit configuration atomically
1322*4882a593Smuzhiyun * @state: atomic configuration to check
1323*4882a593Smuzhiyun *
1324*4882a593Smuzhiyun * Note that this function can return -EDEADLK if the driver needed to acquire
1325*4882a593Smuzhiyun * more locks but encountered a deadlock. The caller must then do the usual w/w
1326*4882a593Smuzhiyun * backoff dance and restart. All other errors are fatal.
1327*4882a593Smuzhiyun *
1328*4882a593Smuzhiyun * This function will take its own reference on @state.
1329*4882a593Smuzhiyun * Callers should always release their reference with drm_atomic_state_put().
1330*4882a593Smuzhiyun *
1331*4882a593Smuzhiyun * Returns:
1332*4882a593Smuzhiyun * 0 on success, negative error code on failure.
1333*4882a593Smuzhiyun */
drm_atomic_commit(struct drm_atomic_state * state)1334*4882a593Smuzhiyun int drm_atomic_commit(struct drm_atomic_state *state)
1335*4882a593Smuzhiyun {
1336*4882a593Smuzhiyun struct drm_mode_config *config = &state->dev->mode_config;
1337*4882a593Smuzhiyun int ret;
1338*4882a593Smuzhiyun
1339*4882a593Smuzhiyun ret = drm_atomic_check_only(state);
1340*4882a593Smuzhiyun if (ret)
1341*4882a593Smuzhiyun return ret;
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("committing %p\n", state);
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun return config->funcs->atomic_commit(state->dev, state, false);
1346*4882a593Smuzhiyun }
1347*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_commit);
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun /**
1350*4882a593Smuzhiyun * drm_atomic_nonblocking_commit - atomic nonblocking commit
1351*4882a593Smuzhiyun * @state: atomic configuration to check
1352*4882a593Smuzhiyun *
1353*4882a593Smuzhiyun * Note that this function can return -EDEADLK if the driver needed to acquire
1354*4882a593Smuzhiyun * more locks but encountered a deadlock. The caller must then do the usual w/w
1355*4882a593Smuzhiyun * backoff dance and restart. All other errors are fatal.
1356*4882a593Smuzhiyun *
1357*4882a593Smuzhiyun * This function will take its own reference on @state.
1358*4882a593Smuzhiyun * Callers should always release their reference with drm_atomic_state_put().
1359*4882a593Smuzhiyun *
1360*4882a593Smuzhiyun * Returns:
1361*4882a593Smuzhiyun * 0 on success, negative error code on failure.
1362*4882a593Smuzhiyun */
drm_atomic_nonblocking_commit(struct drm_atomic_state * state)1363*4882a593Smuzhiyun int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
1364*4882a593Smuzhiyun {
1365*4882a593Smuzhiyun struct drm_mode_config *config = &state->dev->mode_config;
1366*4882a593Smuzhiyun int ret;
1367*4882a593Smuzhiyun
1368*4882a593Smuzhiyun ret = drm_atomic_check_only(state);
1369*4882a593Smuzhiyun if (ret)
1370*4882a593Smuzhiyun return ret;
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
1373*4882a593Smuzhiyun
1374*4882a593Smuzhiyun return config->funcs->atomic_commit(state->dev, state, true);
1375*4882a593Smuzhiyun }
1376*4882a593Smuzhiyun EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun /* just used from drm-client and atomic-helper: */
__drm_atomic_helper_disable_plane(struct drm_plane * plane,struct drm_plane_state * plane_state)1379*4882a593Smuzhiyun int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
1380*4882a593Smuzhiyun struct drm_plane_state *plane_state)
1381*4882a593Smuzhiyun {
1382*4882a593Smuzhiyun int ret;
1383*4882a593Smuzhiyun
1384*4882a593Smuzhiyun ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
1385*4882a593Smuzhiyun if (ret != 0)
1386*4882a593Smuzhiyun return ret;
1387*4882a593Smuzhiyun
1388*4882a593Smuzhiyun drm_atomic_set_fb_for_plane(plane_state, NULL);
1389*4882a593Smuzhiyun plane_state->crtc_x = 0;
1390*4882a593Smuzhiyun plane_state->crtc_y = 0;
1391*4882a593Smuzhiyun plane_state->crtc_w = 0;
1392*4882a593Smuzhiyun plane_state->crtc_h = 0;
1393*4882a593Smuzhiyun plane_state->src_x = 0;
1394*4882a593Smuzhiyun plane_state->src_y = 0;
1395*4882a593Smuzhiyun plane_state->src_w = 0;
1396*4882a593Smuzhiyun plane_state->src_h = 0;
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun return 0;
1399*4882a593Smuzhiyun }
1400*4882a593Smuzhiyun EXPORT_SYMBOL(__drm_atomic_helper_disable_plane);
1401*4882a593Smuzhiyun
update_output_state(struct drm_atomic_state * state,struct drm_mode_set * set)1402*4882a593Smuzhiyun static int update_output_state(struct drm_atomic_state *state,
1403*4882a593Smuzhiyun struct drm_mode_set *set)
1404*4882a593Smuzhiyun {
1405*4882a593Smuzhiyun struct drm_device *dev = set->crtc->dev;
1406*4882a593Smuzhiyun struct drm_crtc *crtc;
1407*4882a593Smuzhiyun struct drm_crtc_state *new_crtc_state;
1408*4882a593Smuzhiyun struct drm_connector *connector;
1409*4882a593Smuzhiyun struct drm_connector_state *new_conn_state;
1410*4882a593Smuzhiyun int ret, i;
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
1413*4882a593Smuzhiyun state->acquire_ctx);
1414*4882a593Smuzhiyun if (ret)
1415*4882a593Smuzhiyun return ret;
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun /* First disable all connectors on the target crtc. */
1418*4882a593Smuzhiyun ret = drm_atomic_add_affected_connectors(state, set->crtc);
1419*4882a593Smuzhiyun if (ret)
1420*4882a593Smuzhiyun return ret;
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1423*4882a593Smuzhiyun if (new_conn_state->crtc == set->crtc) {
1424*4882a593Smuzhiyun ret = drm_atomic_set_crtc_for_connector(new_conn_state,
1425*4882a593Smuzhiyun NULL);
1426*4882a593Smuzhiyun if (ret)
1427*4882a593Smuzhiyun return ret;
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun /* Make sure legacy setCrtc always re-trains */
1430*4882a593Smuzhiyun new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun }
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun /* Then set all connectors from set->connectors on the target crtc */
1435*4882a593Smuzhiyun for (i = 0; i < set->num_connectors; i++) {
1436*4882a593Smuzhiyun new_conn_state = drm_atomic_get_connector_state(state,
1437*4882a593Smuzhiyun set->connectors[i]);
1438*4882a593Smuzhiyun if (IS_ERR(new_conn_state))
1439*4882a593Smuzhiyun return PTR_ERR(new_conn_state);
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun ret = drm_atomic_set_crtc_for_connector(new_conn_state,
1442*4882a593Smuzhiyun set->crtc);
1443*4882a593Smuzhiyun if (ret)
1444*4882a593Smuzhiyun return ret;
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1448*4882a593Smuzhiyun /*
1449*4882a593Smuzhiyun * Don't update ->enable for the CRTC in the set_config request,
1450*4882a593Smuzhiyun * since a mismatch would indicate a bug in the upper layers.
1451*4882a593Smuzhiyun * The actual modeset code later on will catch any
1452*4882a593Smuzhiyun * inconsistencies here.
1453*4882a593Smuzhiyun */
1454*4882a593Smuzhiyun if (crtc == set->crtc)
1455*4882a593Smuzhiyun continue;
1456*4882a593Smuzhiyun
1457*4882a593Smuzhiyun if (!new_crtc_state->connector_mask) {
1458*4882a593Smuzhiyun ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
1459*4882a593Smuzhiyun NULL);
1460*4882a593Smuzhiyun if (ret < 0)
1461*4882a593Smuzhiyun return ret;
1462*4882a593Smuzhiyun
1463*4882a593Smuzhiyun new_crtc_state->active = false;
1464*4882a593Smuzhiyun }
1465*4882a593Smuzhiyun }
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun return 0;
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun
/* just used from drm-client and atomic-helper: */
/*
 * Translate a legacy drm_mode_set request into changes on @state: either a
 * full disable (no mode) or a full mode set of the CRTC, its primary plane
 * and the requested connectors.
 *
 * Returns 0 on success or a negative error code; -EDEADLK requires the
 * usual w/w backoff and restart by the caller.
 */
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
				   struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *primary_state;
	struct drm_crtc *crtc = set->crtc;
	int hdisplay, vdisplay;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	/* No mode means: disable the CRTC and its primary plane. */
	if (!set->mode) {
		WARN_ON(set->fb);
		WARN_ON(set->num_connectors);

		ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
		if (ret != 0)
			return ret;

		crtc_state->active = false;

		ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
		if (ret != 0)
			return ret;

		drm_atomic_set_fb_for_plane(primary_state, NULL);

		/* Connector routing still needs updating below. */
		goto commit;
	}

	/* Enable path: a mode requires both an fb and connectors. */
	WARN_ON(!set->fb);
	WARN_ON(!set->num_connectors);

	ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
	if (ret != 0)
		return ret;

	crtc_state->active = true;

	ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
	if (ret != 0)
		return ret;

	drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);

	/* Full-screen primary plane, panned to set->x/y (16.16 fixed point). */
	drm_atomic_set_fb_for_plane(primary_state, set->fb);
	primary_state->crtc_x = 0;
	primary_state->crtc_y = 0;
	primary_state->crtc_w = hdisplay;
	primary_state->crtc_h = vdisplay;
	primary_state->src_x = set->x << 16;
	primary_state->src_y = set->y << 16;
	/* A 90/270 degree rotation swaps the source rectangle dimensions. */
	if (drm_rotation_90_or_270(primary_state->rotation)) {
		primary_state->src_w = vdisplay << 16;
		primary_state->src_h = hdisplay << 16;
	} else {
		primary_state->src_w = hdisplay << 16;
		primary_state->src_h = vdisplay << 16;
	}

commit:
	ret = update_output_state(state, set);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(__drm_atomic_helper_set_config);
1545*4882a593Smuzhiyun
drm_atomic_print_state(const struct drm_atomic_state * state)1546*4882a593Smuzhiyun void drm_atomic_print_state(const struct drm_atomic_state *state)
1547*4882a593Smuzhiyun {
1548*4882a593Smuzhiyun struct drm_printer p = drm_info_printer(state->dev->dev);
1549*4882a593Smuzhiyun struct drm_plane *plane;
1550*4882a593Smuzhiyun struct drm_plane_state *plane_state;
1551*4882a593Smuzhiyun struct drm_crtc *crtc;
1552*4882a593Smuzhiyun struct drm_crtc_state *crtc_state;
1553*4882a593Smuzhiyun struct drm_connector *connector;
1554*4882a593Smuzhiyun struct drm_connector_state *connector_state;
1555*4882a593Smuzhiyun int i;
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun DRM_DEBUG_ATOMIC("checking %p\n", state);
1558*4882a593Smuzhiyun
1559*4882a593Smuzhiyun for_each_new_plane_in_state(state, plane, plane_state, i)
1560*4882a593Smuzhiyun drm_atomic_plane_print_state(&p, plane_state);
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun for_each_new_crtc_in_state(state, crtc, crtc_state, i)
1563*4882a593Smuzhiyun drm_atomic_crtc_print_state(&p, crtc_state);
1564*4882a593Smuzhiyun
1565*4882a593Smuzhiyun for_each_new_connector_in_state(state, connector, connector_state, i)
1566*4882a593Smuzhiyun drm_atomic_connector_print_state(&p, connector_state);
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun
__drm_state_dump(struct drm_device * dev,struct drm_printer * p,bool take_locks)1569*4882a593Smuzhiyun static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
1570*4882a593Smuzhiyun bool take_locks)
1571*4882a593Smuzhiyun {
1572*4882a593Smuzhiyun struct drm_mode_config *config = &dev->mode_config;
1573*4882a593Smuzhiyun struct drm_plane *plane;
1574*4882a593Smuzhiyun struct drm_crtc *crtc;
1575*4882a593Smuzhiyun struct drm_connector *connector;
1576*4882a593Smuzhiyun struct drm_connector_list_iter conn_iter;
1577*4882a593Smuzhiyun
1578*4882a593Smuzhiyun if (!drm_drv_uses_atomic_modeset(dev))
1579*4882a593Smuzhiyun return;
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun list_for_each_entry(plane, &config->plane_list, head) {
1582*4882a593Smuzhiyun if (take_locks)
1583*4882a593Smuzhiyun drm_modeset_lock(&plane->mutex, NULL);
1584*4882a593Smuzhiyun drm_atomic_plane_print_state(p, plane->state);
1585*4882a593Smuzhiyun if (take_locks)
1586*4882a593Smuzhiyun drm_modeset_unlock(&plane->mutex);
1587*4882a593Smuzhiyun }
1588*4882a593Smuzhiyun
1589*4882a593Smuzhiyun list_for_each_entry(crtc, &config->crtc_list, head) {
1590*4882a593Smuzhiyun if (take_locks)
1591*4882a593Smuzhiyun drm_modeset_lock(&crtc->mutex, NULL);
1592*4882a593Smuzhiyun drm_atomic_crtc_print_state(p, crtc->state);
1593*4882a593Smuzhiyun if (take_locks)
1594*4882a593Smuzhiyun drm_modeset_unlock(&crtc->mutex);
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun drm_connector_list_iter_begin(dev, &conn_iter);
1598*4882a593Smuzhiyun if (take_locks)
1599*4882a593Smuzhiyun drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1600*4882a593Smuzhiyun drm_for_each_connector_iter(connector, &conn_iter)
1601*4882a593Smuzhiyun drm_atomic_connector_print_state(p, connector->state);
1602*4882a593Smuzhiyun if (take_locks)
1603*4882a593Smuzhiyun drm_modeset_unlock(&dev->mode_config.connection_mutex);
1604*4882a593Smuzhiyun drm_connector_list_iter_end(&conn_iter);
1605*4882a593Smuzhiyun }
1606*4882a593Smuzhiyun
/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error irq's. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must drm_modeset_lock_all(), or if this is called
 * from error irq handler, it should not be enabled by default.
 * (Ie. if you are debugging errors you might not care that this
 * is racy. But calling this without all modeset locks held is
 * not inherently safe.)
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
	/* take_locks = false: locking is the caller's responsibility here. */
	__drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);
1627*4882a593Smuzhiyun
#ifdef CONFIG_DEBUG_FS
/* Backing callback for the debugfs "state" file: dump the whole device. */
static int drm_state_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	/* take_locks = true: debugfs readers hold no modeset locks. */
	__drm_state_dump(node->minor->dev, &p, true);

	return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

/* Register the atomic debugfs files on @minor. */
void drm_atomic_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(drm_atomic_debugfs_list,
				 ARRAY_SIZE(drm_atomic_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif
1652