/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine-grained locking, and the atomic ioctl,
 * where userspace can indirectly control locking order, it becomes
 * necessary to use &ww_mutex and acquire-contexts to avoid deadlocks. But
 * because the locking is more distributed around the driver code, we want
 * a bit of extra utility/tracking out of our acquire-ctx. This is provided
 * by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
 *
 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.rst
 *
 * The basic usage pattern is to::
 *
 *     drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
 *     retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, ctx)
 *         if (ret == -EDEADLK) {
 *             ret = drm_modeset_backoff(ctx);
 *             if (!ret)
 *                 goto retry;
 *         }
 *         if (ret)
 *             goto out;
 *     }
 *     ... do stuff ...
 *     out:
 *     drm_modeset_drop_locks(ctx);
 *     drm_modeset_acquire_fini(ctx);
 *
 * For convenience this control flow is implemented in
 * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
 * where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
 *
 * If all that is needed is a single modeset lock, then the &struct
 * drm_modeset_acquire_ctx is not needed and the locking can be simplified
 * by passing a NULL instead of ctx in the drm_modeset_lock() call or
 * calling drm_modeset_lock_single_interruptible(). To unlock afterwards
 * call drm_modeset_unlock().
 *
 * On top of these per-object locks using &ww_mutex there's also an overall
 * &drm_mode_config.mutex, for protecting everything else. Mostly this means
 * probe state of connectors, and preventing hotplug add/removal of connectors.
 *
 * Finally there's a bunch of dedicated locks to protect drm core internal
 * lists and lookup data structures.
 */
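/*
 * To illustrate the convenience macros, here is a sketch of hypothetical
 * driver code (not part of this file); my_modeset_update() is a made-up
 * helper, and the DRM_MODESET_LOCK_ALL_END() form that also takes @dev is
 * assumed from recent kernels (older kernels omit that argument)::
 *
 *     struct drm_modeset_acquire_ctx ctx;
 *     int ret;
 *
 *     DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
 *     ret = my_modeset_update(dev, &ctx);
 *     DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 *     return ret;
 */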

static DEFINE_WW_CLASS(crtc_ww_class);

/**
 * drm_modeset_lock_all - take all modeset locks
 * @dev: DRM device
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented. Locks must be dropped by calling the
 * drm_modeset_unlock_all() function.
 *
 * This function is deprecated. It allocates a lock acquisition context and
 * stores it in &drm_device.mode_config. This facilitates conversion of
 * existing code because it removes the need to manually deal with the
 * acquisition context, but it is also brittle because the context is global
 * and care must be taken not to nest calls. New code should use the
 * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
 */
void drm_modeset_lock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
	if (WARN_ON(!ctx))
		return;

	mutex_lock(&config->mutex);

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, ctx);
	if (ret < 0) {
		if (ret == -EDEADLK) {
			drm_modeset_backoff(ctx);
			goto retry;
		}

		drm_modeset_acquire_fini(ctx);
		kfree(ctx);
		return;
	}
	ww_acquire_done(&ctx->ww_ctx);

	WARN_ON(config->acquire_ctx);

	/*
	 * We hold the locks now, so it is safe to stash the acquisition
	 * context for drm_modeset_unlock_all().
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);
}
EXPORT_SYMBOL(drm_modeset_lock_all);

/**
 * drm_modeset_unlock_all - drop all modeset locks
 * @dev: DRM device
 *
 * This function drops all modeset locks taken by a previous call to the
 * drm_modeset_lock_all() function.
 *
 * This function is deprecated. It uses the lock acquisition context stored
 * in &drm_device.mode_config. This facilitates conversion of existing
 * code because it removes the need to manually deal with the acquisition
 * context, but it is also brittle because the context is global and care must
 * be taken not to nest calls. New code should pass the acquisition context
 * directly to the drm_modeset_drop_locks() function.
 */
void drm_modeset_unlock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	config->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);

	mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
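
/*
 * A sketch of the deprecated pattern (hypothetical caller code, not part
 * of this file): legacy drivers simply bracket their update, with the
 * acquire context hidden away in &drm_device.mode_config::
 *
 *     drm_modeset_lock_all(dev);
 *     ... touch modeset state ...
 *     drm_modeset_unlock_all(dev);
 */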

/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	drm_for_each_crtc(crtc, dev)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);

/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
 *
 * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
 * all calls to drm_modeset_lock() will perform an interruptible
 * wait.
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
			      uint32_t flags)
{
	memset(ctx, 0, sizeof(*ctx));
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);

	if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
		ctx->interruptible = true;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);

/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);

/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	WARN_ON(ctx->contended);
	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock *lock;

		lock = list_first_entry(&ctx->locked,
					struct drm_modeset_lock, head);

		drm_modeset_unlock(lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);

static inline int modeset_lock(struct drm_modeset_lock *lock,
			       struct drm_modeset_acquire_ctx *ctx,
			       bool interruptible, bool slow)
{
	int ret;

	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/*
		 * We already hold the lock.. this is fine. For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
	}

	return ret;
}

/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 *
 * This function returns 0 on success, or -ERESTARTSYS if this context
 * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
 * wait has been interrupted.
 */
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_modeset_lock *contended = ctx->contended;

	ctx->contended = NULL;

	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);

/**
 * drm_modeset_lock_init - initialize lock
 * @lock: lock to init
 */
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
	ww_mutex_init(&lock->mutex, &crtc_ww_class);
	INIT_LIST_HEAD(&lock->head);
}
EXPORT_SYMBOL(drm_modeset_lock_init);
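
/*
 * A sketch of typical use (hypothetical driver code, not part of this
 * file): a driver embeds a &struct drm_modeset_lock in its own object,
 * e.g. to protect driver-private state; "my_obj" is a made-up structure::
 *
 *     struct my_obj {
 *         struct drm_modeset_lock lock;
 *     };
 *
 *     drm_modeset_lock_init(&obj->lock);
 */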

/**
 * drm_modeset_lock - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * If @ctx is not NULL, then its ww acquire context is used and the
 * lock will be tracked by the context and can be released by calling
 * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
 * deadlock scenario has been detected and it is an error to attempt
 * to take any more locks without first calling drm_modeset_backoff().
 *
 * If the @ctx is not NULL and initialized with
 * %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
 * -ERESTARTSYS when interrupted.
 *
 * If @ctx is NULL then the function call behaves like a normal,
 * uninterruptible non-nesting mutex_lock() call.
 */
int drm_modeset_lock(struct drm_modeset_lock *lock,
		     struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, ctx->interruptible, false);

	ww_mutex_lock(&lock->mutex, NULL);
	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);

/**
 * drm_modeset_lock_single_interruptible - take a single modeset lock
 * @lock: lock to take
 *
 * This function behaves as drm_modeset_lock() with a NULL context,
 * but performs interruptible waits.
 *
 * This function returns 0 on success, or -ERESTARTSYS when interrupted.
 */
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
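
/*
 * A sketch of single-lock use (hypothetical caller code, not part of
 * this file); a -ERESTARTSYS return must be propagated to userspace::
 *
 *     ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
 *     if (ret)
 *         return ret;
 *     ... inspect or update state protected by crtc->mutex ...
 *     drm_modeset_unlock(&crtc->mutex);
 */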

/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);

/**
 * drm_modeset_lock_all_ctx - take all modeset locks
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented.
 *
 * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
 * since that lock isn't required for modeset state changes. Callers which
 * need to grab that lock too need to do so outside of the acquire context
 * @ctx.
 *
 * Locks acquired with this function should be released by calling the
 * drm_modeset_drop_locks() function on @ctx.
 *
 * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int drm_modeset_lock_all_ctx(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_private_obj *privobj;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret;

	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_plane(plane, dev) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_privobj(privobj, dev) {
		ret = drm_modeset_lock(&privobj->lock, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
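
/*
 * A sketch of explicit use without the convenience macros (hypothetical
 * caller code, not part of this file), mirroring the retry pattern from
 * the DOC comment at the top::
 *
 *     struct drm_modeset_acquire_ctx ctx;
 *     int ret;
 *
 *     drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *     ret = drm_modeset_lock_all_ctx(dev, &ctx);
 *     if (ret == -EDEADLK) {
 *         ret = drm_modeset_backoff(&ctx);
 *         if (!ret)
 *             goto retry;
 *     }
 *     if (!ret) {
 *         ... do stuff ...
 *     }
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */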