/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef DRM_MODESET_LOCK_H_
#define DRM_MODESET_LOCK_H_

#include <linux/ww_mutex.h>

struct drm_modeset_lock;

/**
 * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
 * @ww_ctx: base acquire ctx
 * @contended: used internally for -EDEADLK handling
 * @locked: list of held locks
 * @trylock_only: trylock mode used in atomic contexts/panic notifiers
 * @interruptible: whether interruptible locking should be used.
 *
 * Each thread competing for a set of locks must use one acquire
 * ctx. If any lock function returns -EDEADLK, it must back off and
 * retry.
 */
struct drm_modeset_acquire_ctx {

	struct ww_acquire_ctx ww_ctx;

	/*
	 * Contended lock: if a lock is contended you should only call
	 * drm_modeset_backoff() which drops locks and slow-locks the
	 * contended lock.
	 */
	struct drm_modeset_lock *contended;

	/*
	 * list of held locks (drm_modeset_lock)
	 */
	struct list_head locked;

	/*
	 * Trylock mode, use only for panic handlers!
	 */
	bool trylock_only;

	/* Perform interruptible waits on this context. */
	bool interruptible;
};
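
/*
 * A minimal sketch of the acquire-context protocol described above,
 * assuming two already-initialized locks; the function and variable
 * names are placeholders, not part of this header. The key point is
 * that -EDEADLK from drm_modeset_lock() must be answered with
 * drm_modeset_backoff() followed by a full retry:
 *
 *	static int example_lock_pair(struct drm_modeset_lock *a,
 *				     struct drm_modeset_lock *b)
 *	{
 *		struct drm_modeset_acquire_ctx ctx;
 *		int ret;
 *
 *		drm_modeset_acquire_init(&ctx, 0);
 *	retry:
 *		ret = drm_modeset_lock(a, &ctx);
 *		if (!ret)
 *			ret = drm_modeset_lock(b, &ctx);
 *		if (ret == -EDEADLK) {
 *			ret = drm_modeset_backoff(&ctx);
 *			if (!ret)
 *				goto retry;
 *		}
 *
 *		if (!ret) {
 *			... both locks held: touch the protected state here ...
 *		}
 *
 *		drm_modeset_drop_locks(&ctx);
 *		drm_modeset_acquire_fini(&ctx);
 *		return ret;
 *	}
 */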

/**
 * struct drm_modeset_lock - used for locking modeset resources.
 * @mutex: resource locking
 * @head: used to hold its place on the &drm_modeset_acquire_ctx.locked
 *    list when part of an atomic update
 *
 * Used for locking CRTCs and other modeset resources.
 */
struct drm_modeset_lock {
	/*
	 * modeset lock
	 */
	struct ww_mutex mutex;

	/*
	 * Resources that are locked as part of an atomic update are added
	 * to a list (so we know what to unlock at the end).
	 */
	struct list_head head;
};

#define DRM_MODESET_ACQUIRE_INTERRUPTIBLE BIT(0)

void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags);
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);

void drm_modeset_lock_init(struct drm_modeset_lock *lock);

/**
 * drm_modeset_lock_fini - cleanup lock
 * @lock: lock to cleanup
 */
static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
{
	WARN_ON(!list_empty(&lock->head));
}

/**
 * drm_modeset_is_locked - equivalent to mutex_is_locked()
 * @lock: lock to check
 */
static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
{
	return ww_mutex_is_locked(&lock->mutex);
}

/**
 * drm_modeset_lock_assert_held - equivalent to lockdep_assert_held()
 * @lock: lock to check
 */
static inline void drm_modeset_lock_assert_held(struct drm_modeset_lock *lock)
{
	lockdep_assert_held(&lock->mutex.base);
}

int drm_modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx);
int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock);
void drm_modeset_unlock(struct drm_modeset_lock *lock);

struct drm_device;
struct drm_crtc;
struct drm_plane;

void drm_modeset_lock_all(struct drm_device *dev);
void drm_modeset_unlock_all(struct drm_device *dev);
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);

int drm_modeset_lock_all_ctx(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx);

/**
 * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks
 * @dev: drm device
 * @ctx: local modeset acquire context, will be dereferenced
 * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init()
 * @ret: local ret/err/etc variable to track error status
 *
 * Use these macros to simplify grabbing all modeset locks using a local
 * context. This has the advantage of reducing boilerplate, but also properly
 * checking return values where appropriate.
 *
 * Any code run between BEGIN and END will be holding the modeset locks.
 *
 * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and
 * forth between the labels on deadlock and error conditions.
 *
 * Drivers can acquire additional modeset locks. If any lock acquisition
 * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with
 * the @ret parameter containing the return value of drm_modeset_lock().
 *
 * Returns:
 * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN()
 * is 0, so no error checking is necessary
 */
#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret)		\
	if (!drm_drv_uses_atomic_modeset(dev))				\
		mutex_lock(&dev->mode_config.mutex);			\
	drm_modeset_acquire_init(&ctx, flags);				\
modeset_lock_retry:							\
	ret = drm_modeset_lock_all_ctx(dev, &ctx);			\
	if (ret)							\
		goto modeset_lock_fail;

/**
 * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
 * @dev: drm device
 * @ctx: local modeset acquire context, will be dereferenced
 * @ret: local ret/err/etc variable to track error status
 *
 * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN
 * if ret is -EDEADLK.
 *
 * It's important that you use the same ret variable for begin and end so
 * deadlock conditions are properly handled.
 *
 * Returns:
 * ret will be untouched unless it is -EDEADLK on entry. That means that if you
 * successfully acquire the locks, ret will be whatever your code sets it to. If
 * there is a deadlock or other failure with acquire or backoff, ret will be set
 * to that failure. In both of these cases the code between BEGIN/END will not
 * be run, so the failure will reflect the inability to grab the locks.
 */
#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret)				\
modeset_lock_fail:							\
	if (ret == -EDEADLK) {						\
		ret = drm_modeset_backoff(&ctx);			\
		if (!ret)						\
			goto modeset_lock_retry;			\
	}								\
	drm_modeset_drop_locks(&ctx);					\
	drm_modeset_acquire_fini(&ctx);					\
	if (!drm_drv_uses_atomic_modeset(dev))				\
		mutex_unlock(&dev->mode_config.mutex);
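
/*
 * A minimal sketch of how DRM_MODESET_LOCK_ALL_BEGIN()/END() are meant to
 * be paired; the function name and the work done while the locks are held
 * are placeholders, not part of this header:
 *
 *	static int example_with_all_locks(struct drm_device *dev)
 *	{
 *		struct drm_modeset_acquire_ctx ctx;
 *		int ret;
 *
 *		DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
 *
 *		... all modeset locks are held here and ret is 0 ...
 *
 *		DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 *		return ret;
 *	}
 */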

#endif /* DRM_MODESET_LOCK_H_ */