xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/vc4/vc4_kms.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

#define HVS_NUM_CHANNELS 3

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned int unassigned_channels;
};

static struct vc4_hvs_state *
to_vc4_hvs_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
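/*
 * Example: 0.5 in S31.32 is 0x0000000080000000. The integer bits are all
 * zero, so we keep the 9 most significant fraction bits:
 * (in >> 23) & GENMASK(8, 0) gives 0x100, i.e. 256/512 = 0.5 in S0.9.
 */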
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

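/*
 * Latches the current CTM into the hardware: the nine S0.9 coefficients go
 * into the SCALER_OLEDCOEF0..2 registers, and SCALER_OLEDOFFS selects the
 * FIFO the CTM applies to (a fifo value of 0 leaves the CTM disabled).
 */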
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

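/*
 * Pixelvalve muxing for the original VC4 HVS. Only the DSP3 mux is updated
 * here: CRTCs assigned to channel 2 either keep the FIFO2 -> pixelvalve1
 * route or, when the transposer (TXP) owns FIFO2, have it disabled.
 */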
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_state->feed_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

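/*
 * Pixelvalve muxing for the BCM2711 ("VC5") HVS: each output with a
 * configurable mux (hvs_output 2 to 5) has its field in SCALER_DISPECTRL,
 * SCALER_DISPCTRL, SCALER_DISPEOLN or SCALER_DISPDITHER rewritten whenever
 * vc4_pv_muxing_atomic_check() flagged the CRTC with update_muxing.
 */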
static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}

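/*
 * Tail end of a commit: underruns are masked on the channels being
 * committed, the HVS core clock is raised to 500 MHz on HVS5 (BCM2711) for
 * the duration of the commit, the standard atomic-helper sequence runs, and
 * finally the async_modeset semaphore taken in vc4_atomic_commit() is
 * released.
 */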
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
	}

	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 500000000);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->hvs->hvs5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 0);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. For now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

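/*
 * .fb_create hook: when userspace doesn't pass an explicit modifier, the
 * modifier is derived from the tiling state previously set on the BO
 * through vc4_set_tiling_ioctl() (T-tiled vs. linear).
 */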
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

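/*
 * Updates the global memory-bus and HVS load estimates by removing the
 * contribution of planes leaving the screen and adding the contribution of
 * planes entering it, then rejects the state when the 1.5 GB/s
 * (SZ_1G + SZ_512M) or 240M cycles/s budgets below would be exceeded.
 */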
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	if (!vc4->load_tracker_available)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2 Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* The HVS clock is supposed to run at 250 MHz, let's take a margin
	 * and consider the maximum number of cycles to be 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->load_tracker_available)
		return;

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	if (!vc4->load_tracker_available)
		return 0;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

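/*
 * Note that, unlike the kmemdup()-based duplicators above, this one
 * zero-allocates the new state and copies unassigned_channels explicitly,
 * so any field later added to struct vc4_hvs_state must be copied here too.
 */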
static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	state->unassigned_channels = old_state->unassigned_channels;

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);

	kfree(hvs_state);
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   a page flip would start considering their vblank to complete. Since
 *   we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and shouldn't even be considered if we
 *   want to do a page flip on a single CRTC. That can be tested by
 *   doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (!hvs_new_state)
		return -EINVAL;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct vc4_crtc_state *old_vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		struct vc4_crtc_state *new_vc4_crtc_state =
			to_vc4_crtc_state(new_crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int matching_channels;

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable)
			continue;

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/*
		 * The problem we have to solve here is that we have
		 * up to 7 encoders, connected to up to 6 CRTCs.
		 *
		 * Those CRTCs, depending on the instance, can be
		 * routed to 1, 2 or 3 HVS FIFOs, and we need to change
		 * the muxing between FIFOs and outputs in the HVS
		 * accordingly.
		 *
		 * It would be pretty hard to come up with an
		 * algorithm that would generically solve
		 * this. However, the current routing trees we support
		 * allow us to simplify the problem a bit.
		 *
		 * Indeed, with the current supported layouts, if we
		 * try to assign the FIFOs in ascending crtc index
		 * order, we can't fall into the situation where an
		 * earlier CRTC that had multiple routes is assigned
		 * one that was the only option for a later CRTC.
		 *
		 * If the layout changes and doesn't give us that in
		 * the future, we will need to have something smarter,
		 * but it works so far.
		 */
		matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (matching_channels) {
			unsigned int channel = ffs(matching_channels) - 1;

			new_vc4_crtc_state->assigned_channel = channel;
			hvs_new_state->unassigned_channels &= ~BIT(channel);
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

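/*
 * KMS initialization, called once at driver bind time: detects whether we
 * are running on a BCM2711 ("brcm,bcm2711-vc5"), sizes the mode config
 * limits accordingly (7680x7680 vs 2048x2048), sets up vblank handling and
 * registers the CTM, load tracker and HVS channels private objects.
 */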
int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
					      "brcm,bcm2711-vc5");
	int ret;

	if (!is_vc5) {
		vc4->load_tracker_available = true;

		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	dev->irq_enabled = true;
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}