// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>

#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sideband.h"

/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	u8 num_points;
	u8 num_channels;
	u8 t_bl;
	enum intel_dram_type dram_type;
};

static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
					  struct intel_qgv_info *qi)
{
	u32 val = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
				     &val, NULL);
	if (ret)
		return ret;

	if (IS_GEN(dev_priv, 12)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		case 4:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 5:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else if (IS_GEN(dev_priv, 11)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 1:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 2:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else {
		MISSING_CASE(INTEL_GEN(dev_priv));
		qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
	}

	qi->num_channels = (val & 0xf0) >> 4;
	qi->num_points = (val & 0xf00) >> 8;

	if (IS_GEN(dev_priv, 12))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
	else if (IS_GEN(dev_priv, 11))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;

	return 0;
}

static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
				     &val, &val2);
	if (ret)
		return ret;

	sp->dclk = val & 0xffff;
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
				  u32 points_mask)
{
	int ret;

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				points_mask,
				ICL_PCODE_POINTS_RESTRICTED_MASK,
				ICL_PCODE_POINTS_RESTRICTED,
				1);

	if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
		return ret;
	}

	return 0;
}

static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi)
{
	int i, ret;

	ret = icl_pcode_read_mem_global_info(dev_priv, qi);
	if (ret)
		return ret;

	if (drm_WARN_ON(&dev_priv->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
		if (ret)
			return ret;

		drm_dbg_kms(&dev_priv->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	return 0;
}

static int icl_calc_bw(int dclk, int num, int den)
{
	/* multiples of 16.666MHz (100/6) */
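	/*
	 * Worked example with assumed, illustrative values (not from
	 * Bspec): icl_calc_bw(1200, 16, 1)
	 *   = DIV_ROUND_CLOSEST(16 * 1200 * 100, 1 * 6) = 320000.
	 */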
	return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
}

static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}

struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
};

static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels;
	int deinterleave;
	int ipqdepth, ipqdepthpch;
	int dclk_max;
	int maxdebw;
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}
	num_channels = qi.num_channels;

	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
	dclk_max = icl_sagv_max_dclk(&qi);

	ipqdepthpch = 16;

	maxdebw = min(sa->deprogbwlimit * 1000,
		      icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
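	/*
	 * Illustrative numbers (assumed, not from Bspec): with
	 * deprogbwlimit = 25 GB/s and dclk_max = 1200, this is
	 * min(25000, 320000 * 6 / 10) = 25000, i.e. the DE programmed
	 * bandwidth limit is the binding constraint.
	 */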
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		struct intel_bw_info *bi = &dev_priv->max_bw[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
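		/*
		 * Illustrative numbers (assumed): with deburst = 8,
		 * num_channels = 2 and deinterleave = 1, clpchgroup is
		 * 4 << i; for i = 0 and ipqdepth = 16 that gives
		 * num_planes = (16 - 4) / 4 + 1 = 4.
		 */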

		bi->num_qgv_points = qi.num_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
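			/*
			 * Illustrative numbers (assumed, not from Bspec):
			 * with t_rc = 40, t_rp = 15, t_rcd = 15,
			 * t_rdpre = 8, t_bl = 16 and clpchgroup = 8,
			 * ct = max(40, 15 + 15 + 7 * 16 + 8) = 150.
			 */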
			bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * 9 / 10); /* 90% */

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}

		if (bi->num_planes == 1)
			break;
	}

	/*
	 * If SAGV is disabled in BIOS, we always get 1 QGV point, but we
	 * can't send PCode commands to restrict it, as they would fail
	 * and be pointless anyway.
	 */
	if (qi.num_points == 1)
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
	else
		dev_priv->sagv_status = I915_SAGV_ENABLED;

	return 0;
}

static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
			       int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return bi->deratedbw[qgv_point];
	}

	return 0;
}

void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ROCKETLAKE(dev_priv))
		icl_get_bw_info(dev_priv, &rkl_sa_info);
	else if (IS_GEN(dev_priv, 12))
		icl_get_bw_info(dev_priv, &tgl_sa_info);
	else if (IS_GEN(dev_priv, 11))
		icl_get_bw_info(dev_priv, &icl_sa_info);
}

static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
	/*
	 * We assume cursors are small enough
	 * to not cause bandwidth problems.
	 */
	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}

static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned int data_rate = 0;
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->data_rate[plane_id];
	}

	return data_rate;
}

void intel_bw_crtc_update(struct intel_bw_state *bw_state,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	bw_state->data_rate[crtc->pipe] =
		intel_bw_crtc_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_bw_crtc_num_active_planes(crtc_state);

	drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}

static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}

static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		data_rate += bw_state->data_rate[pipe];

	return data_rate;
}

struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int max_bw = 0;
	int slice_id;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		enum plane_id plane_id;
		struct intel_dbuf_bw *crtc_bw;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];

		memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));

		if (!crtc_state->hw.active)
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_ddb_entry *plane_alloc =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];
			const struct skl_ddb_entry *uv_plane_alloc =
				&crtc_state->wm.skl.plane_ddb_uv[plane_id];
			unsigned int data_rate = crtc_state->data_rate[plane_id];
			unsigned int dbuf_mask = 0;

			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);

			/*
			 * FIXME: To calculate this more properly we probably
			 * need to split the per-plane data_rate into
			 * data_rate_y and data_rate_uv for multi-planar
			 * formats, in order not to account it twice when the
			 * color planes happen to reside on different slices.
			 * However, this works anyway for pre-icl because we
			 * have only a single slice, and for icl+ because the
			 * uv plane has a non-zero data rate. So in the worst
			 * case these calculations are a bit pessimistic,
			 * which shouldn't pose any significant problem
			 * anyway.
			 */
			for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
				crtc_bw->used_bw[slice_id] += data_rate;
		}
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_dbuf_bw *crtc_bw;

		crtc_bw = &new_bw_state->dbuf_bw[pipe];

		for_each_dbuf_slice(slice_id) {
			/*
			 * Current experimental observations show that,
			 * contrary to BSpec, we get underruns once we exceed
			 * 64 * CDCLK for slices in total. As a temporary
			 * measure, in order not to keep CDCLK bumped up all
			 * the time, we calculate CDCLK according to this
			 * formula for the overall bandwidth consumed by the
			 * slices.
			 */
			max_bw += crtc_bw->used_bw[slice_id];
		}
	}

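	/*
	 * Illustrative numbers (assumed): if the slices' used_bw above
	 * sums to 38400 in data_rate units, the CDCLK floor below
	 * becomes 38400 / 64 = 600.
	 */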
	new_bw_state->min_cdclk = max_bw / 64;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int min_cdclk = 0;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_new_cdclk_state(state);
		if (!cdclk_state)
			return 0;

		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
	}

	new_bw_state->min_cdclk = min_cdclk;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_bw_state *new_bw_state = NULL;
	const struct intel_bw_state *old_bw_state = NULL;
	unsigned int data_rate;
	unsigned int num_active_planes;
	struct intel_crtc *crtc;
	int i, ret;
	u32 allowed_points = 0;
	unsigned int max_bw_point = 0, max_bw = 0;
	unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
	u32 mask = (1 << num_qgv_points) - 1;
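	/*
	 * e.g. with 3 QGV points exposed, mask = 0b111; any point not
	 * allowed below ends up set in qgv_points_mask instead.
	 */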

	/* FIXME earlier gens need some checks too */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_bw_crtc_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_bw_crtc_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_bw_crtc_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_bw_crtc_num_active_planes(new_crtc_state);

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		drm_dbg_kms(&dev_priv->drm,
			    "pipe %c data rate %u num active planes %u\n",
			    pipe_name(crtc->pipe),
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	if (!new_bw_state)
		return 0;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as they are calculated earlier
		 * in intel_atomic_check.
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			allowed_points |= BIT(i);
		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we should always have at least one allowed point
	 * left, so if we couldn't find any, simply reject the configuration.
	 */
	if (allowed_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only the single point with the highest bandwidth if we
	 * can't enable SAGV, due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
		allowed_points = BIT(max_bw_point);
		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}
	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask = ~allowed_points & mask;

	old_bw_state = intel_atomic_get_old_bw_state(state);
	/*
	 * If the actual mask has changed we need to make sure that the
	 * commits are serialized (in case this is a nomodeset, nonblocking
	 * commit).
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};

int intel_bw_init(struct drm_i915_private *dev_priv)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
				     &state->base, &intel_bw_funcs);

	return 0;
}