xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/msm/msm_atomic.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

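/*
 * Plane ->prepare_fb hook: attach the framebuffer's implicit fence to
 * the new plane state and pin the fb's backing GEM objects into the
 * kms address space, so they are mapped before the flush.
 */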
int msm_atomic_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!new_state->fb)
		return 0;

	drm_gem_fb_prepare_fb(plane, new_state);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}

/*
 * Helpers to control vblanks while we flush: basically just to ensure
 * that vblank accounting is switched on, so we get valid seqn/timestamps
 * on pageflip events (if requested).
 */

static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_get(crtc);
	}
}

static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_put(crtc);
	}
}

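/*
 * Run a deferred ("async") commit for a single crtc: flush the updates
 * that commit_tail already pushed to the hw, then wait for the flush to
 * complete and retire the commit.  Called from the pending-timer
 * workqueue shortly before vsync.
 */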
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
	unsigned crtc_mask = BIT(crtc_idx);

	trace_msm_atomic_async_commit_start(crtc_mask);

	mutex_lock(&kms->commit_lock);

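	/*
	 * If the crtc is no longer in pending_crtc_mask, a later
	 * commit_tail has already folded this flush into its own,
	 * so there is nothing left to do:
	 */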
	if (!(kms->pending_crtc_mask & crtc_mask)) {
		mutex_unlock(&kms->commit_lock);
		goto out;
	}

	kms->pending_crtc_mask &= ~crtc_mask;

	kms->funcs->enable_commit(kms);

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);
	mutex_unlock(&kms->commit_lock);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	mutex_lock(&kms->commit_lock);
	kms->funcs->complete_commit(kms, crtc_mask);
	mutex_unlock(&kms->commit_lock);
	kms->funcs->disable_commit(kms);

out:
	trace_msm_atomic_async_commit_finish(crtc_mask);
}

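/*
 * The hrtimer callback fires in hard-irq context, so it cannot run the
 * commit directly (flushing takes mutexes and may sleep); just punt to
 * the workqueue:
 */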
static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
{
	struct msm_pending_timer *timer = container_of(t,
			struct msm_pending_timer, timer);
	struct msm_drm_private *priv = timer->kms->dev->dev_private;

	queue_work(priv->wq, &timer->work);

	return HRTIMER_NORESTART;
}

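/* Process-context half of the pending timer: run the async commit. */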
static void msm_atomic_pending_work(struct work_struct *work)
{
	struct msm_pending_timer *timer = container_of(work,
			struct msm_pending_timer, work);

	msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

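/*
 * Set up the per-crtc pending timer used to defer async commits until
 * just before vsync:
 */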
void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx)
{
	timer->kms = kms;
	timer->crtc_idx = crtc_idx;
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = msm_atomic_pending_timer;
	INIT_WORK(&timer->work, msm_atomic_pending_work);
}

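/*
 * An update may take the async path only if it is a legacy-cursor or
 * explicitly-async update touching a single crtc, with no connector
 * changes and no modeset.  On success, *async_crtc is set to the one
 * crtc in the state.
 */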
static bool can_do_async(struct drm_atomic_state *state,
		struct drm_crtc **async_crtc)
{
	struct drm_connector_state *connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, num_crtcs = 0;

	if (!(state->legacy_cursor_update || state->async_update))
		return false;

	/* any connector change means slow path: */
	for_each_new_connector_in_state(state, connector, connector_state, i)
		return false;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return false;
		if (++num_crtcs > 1)
			return false;
		*async_crtc = crtc;
	}

	return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with the for_each_crtc_mask() iterator to iterate
 * over the affected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned i, mask = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}

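/*
 * The atomic commit_tail implementation: push the new state down to the
 * hw and either flush it synchronously, or, for cursor/async updates on
 * a single crtc, defer the flush to the pending timer that fires shortly
 * before vsync (only taken when the kms backend implements ->vsync_time).
 */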
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *async_crtc = NULL;
	unsigned crtc_mask = get_crtc_mask(state);
	bool async = kms->funcs->vsync_time &&
			can_do_async(state, &async_crtc);

	trace_msm_atomic_commit_tail_start(async, crtc_mask);

	kms->funcs->enable_commit(kms);

	/*
	 * Ensure any previous (potentially async) commit has
	 * completed:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	mutex_lock(&kms->commit_lock);

	/*
	 * Now that there is no in-progress flush, prepare the
	 * current update:
	 */
	kms->funcs->prepare_commit(kms, state);

	/*
	 * Push atomic updates down to hardware:
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	if (async) {
		struct msm_pending_timer *timer =
			&kms->pending_timers[drm_crtc_index(async_crtc)];

		/* async updates are limited to single-crtc updates: */
		WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

		/*
		 * Start timer if we don't already have an update pending
		 * on this crtc:
		 */
		if (!(kms->pending_crtc_mask & crtc_mask)) {
			ktime_t vsync_time, wakeup_time;

			kms->pending_crtc_mask |= crtc_mask;

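			/*
			 * Schedule the wakeup ~1ms before the expected
			 * vsync, so the deferred flush runs just ahead
			 * of the frame boundary:
			 */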
			vsync_time = kms->funcs->vsync_time(kms, async_crtc);
			wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

			hrtimer_start(&timer->timer, wakeup_time,
					HRTIMER_MODE_ABS);
		}

		kms->funcs->disable_commit(kms);
		mutex_unlock(&kms->commit_lock);

		/*
		 * At this point, from drm core's perspective, we
		 * are done with the atomic update, so we can just
		 * go ahead and signal that it is done:
		 */
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);

		trace_msm_atomic_commit_tail_finish(async, crtc_mask);

		return;
	}

	/*
	 * If there is any async flush pending on updated crtcs, fold
	 * them into the current flush.
	 */
	kms->pending_crtc_mask &= ~crtc_mask;

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);
	mutex_unlock(&kms->commit_lock);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	mutex_lock(&kms->commit_lock);
	kms->funcs->complete_commit(kms, crtc_mask);
	mutex_unlock(&kms->commit_lock);
	kms->funcs->disable_commit(kms);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);

	trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}