// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
 */

#include <linux/pm_opp.h>
#include "a5xx_gpu.h"

/*
 * The GPMU data block is a block of shared registers that can be used to
 * communicate back and forth. These "registers" are a convention shared with
 * the GPMU firmware and are not bound to any specific hardware design.
 */

#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
#define AGC_MSG_BASE (AGC_INIT_BASE + 7)

#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))

#define AGC_POWER_CONFIG_PRODUCTION_ID 1
#define AGC_INIT_MSG_VALUE 0xBABEFACE
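
/*
 * By convention an AGC message in the data RAM is laid out, relative to
 * AGC_MSG_BASE, as: +0 state, +1 command, +3 payload size (in bytes),
 * +5 payload. After a successful init the firmware is expected to echo
 * the magic value in REG_A5XX_GPMU_GENERAL_0, which a5xx_gpmu_init()
 * polls for below.
 */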

/* AGC_LM_CONFIG (A540+) */
#define AGC_LM_CONFIG (136/4)
#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)

#define AGC_LEVEL_CONFIG (140/4)
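/* Both offsets above are byte offsets divided by four, i.e. dword indices suitable for AGC_MSG_PAYLOAD() */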

static struct {
	uint32_t reg;
	uint32_t value;
} a5xx_sequence_regs[] = {
	{ 0xB9A1, 0x00010303 },
	{ 0xB9A2, 0x13000000 },
	{ 0xB9A3, 0x00460020 },
	{ 0xB9A4, 0x10000000 },
	{ 0xB9A5, 0x040A1707 },
	{ 0xB9A6, 0x00010000 },
	{ 0xB9A7, 0x0E000904 },
	{ 0xB9A8, 0x10000000 },
	{ 0xB9A9, 0x01165000 },
	{ 0xB9AA, 0x000E0002 },
	{ 0xB9AB, 0x03884141 },
	{ 0xB9AC, 0x10000840 },
	{ 0xB9AD, 0x572A5000 },
	{ 0xB9AE, 0x00000003 },
	{ 0xB9AF, 0x00000000 },
	{ 0xB9B0, 0x10000000 },
	{ 0xB828, 0x6C204010 },
	{ 0xB829, 0x6C204011 },
	{ 0xB82A, 0x6C204012 },
	{ 0xB82B, 0x6C204013 },
	{ 0xB82C, 0x6C204014 },
	{ 0xB90F, 0x00000004 },
	{ 0xB910, 0x00000002 },
	{ 0xB911, 0x00000002 },
	{ 0xB912, 0x00000002 },
	{ 0xB913, 0x00000002 },
	{ 0xB92F, 0x00000004 },
	{ 0xB930, 0x00000005 },
	{ 0xB931, 0x00000005 },
	{ 0xB932, 0x00000005 },
	{ 0xB933, 0x00000005 },
	{ 0xB96F, 0x00000001 },
	{ 0xB970, 0x00000003 },
	{ 0xB94F, 0x00000004 },
	{ 0xB950, 0x0000000B },
	{ 0xB951, 0x0000000B },
	{ 0xB952, 0x0000000B },
	{ 0xB953, 0x0000000B },
	{ 0xB907, 0x00000019 },
	{ 0xB927, 0x00000019 },
	{ 0xB947, 0x00000019 },
	{ 0xB967, 0x00000019 },
	{ 0xB987, 0x00000019 },
	{ 0xB906, 0x00220001 },
	{ 0xB926, 0x00220001 },
	{ 0xB946, 0x00220001 },
	{ 0xB966, 0x00220001 },
	{ 0xB986, 0x00300000 },
	{ 0xAC40, 0x0340FF41 },
	{ 0xAC41, 0x03BEFED0 },
	{ 0xAC42, 0x00331FED },
	{ 0xAC43, 0x021FFDD3 },
	{ 0xAC44, 0x5555AAAA },
	{ 0xAC45, 0x5555AAAA },
	{ 0xB9BA, 0x00000008 },
};

/*
 * Get the actual voltage value for the operating point at the specified
 * frequency
 */
static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct dev_pm_opp *opp;
	u32 ret = 0;

	opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);

	if (!IS_ERR(opp)) {
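		/* The OPP framework reports voltage in microvolts; convert to mV */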
		ret = dev_pm_opp_get_voltage(opp) / 1000;
		dev_pm_opp_put(opp);
	}

	return ret;
}

/* Set up thermal limit management */
static void a530_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	unsigned int i;

	/* Write the block of sequence registers */
	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
		gpu_write(gpu, a5xx_sequence_regs[i].reg,
			a5xx_sequence_regs[i].value);

	/* Hard code the A530 GPU thermal sensor ID for the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);

	/* Until we get clock scaling, 0 is always the active power level */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);

	/* The threshold is fixed at 6000 for A530 */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);

	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);

	/* Write the voltage table */
	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);

	gpu_write(gpu, AGC_MSG_STATE, 1);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	/* Write the max power - hard coded to 5448 for A530 */
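	/* Payload dword 1 appears to be the number of power levels that follow */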
	gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	/*
	 * For now just write the one voltage level - we will do more when we
	 * can do scaling
	 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
#define LM_DCVS_LIMIT 1
#define LEVEL_CONFIG ~(0x303)

static void a540_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	u32 config;

	/* The battery current limiter isn't enabled for A540 */
	config = AGC_LM_CONFIG_BCL_DISABLED;
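	/* The GPU_VERSION field presumably tells the firmware which silicon revision it is driving */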
	config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;

	/* For now disable GPMU side throttling */
	config |= AGC_LM_CONFIG_THROTTLE_DISABLE;

	/* Until we get clock scaling, 0 is always the active power level */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	/* Fixed at 6000 for now */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);

	gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
		PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));

	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

/* Enable SP/TP power collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
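	/*
	 * Magic hysteresis and stagger values for inter-frame SP/TP power
	 * collapse, presumably inherited from the downstream driver
	 */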
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
}

/* Enable the GPMU microcontroller */
static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	if (!a5xx_gpu->gpmu_dwords)
		return 0;

	/* Turn off protected mode for this operation */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Kick off the IB to load the GPMU microcode */
	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, a5xx_gpu->gpmu_dwords);

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	a5xx_flush(gpu, ring, true);
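
	/*
	 * Wait for the CP to go idle so we know the microcode made it into
	 * the GPMU instruction RAM before releasing the GPMU from reset
	 */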
	if (!a5xx_idle(gpu, ring)) {
		DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
			gpu->name);
		return -EINVAL;
	}

	if (adreno_is_a530(adreno_gpu))
		gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);

	/* Kick off the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);

	/*
	 * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
	 * won't have advanced power collapse.
	 */
	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
		0xBABEFACE))
		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
			gpu->name);

	if (!adreno_is_a530(adreno_gpu)) {
		u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);

		if (val)
			DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
				gpu->name, val);
	}

	return 0;
}

/* Enable limits management */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	/* This init sequence only applies to A530 */
	if (!adreno_is_a530(adreno_gpu))
		return;

	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);

	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
}

int a5xx_power_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	/* Not all A5xx chips have a GPMU */
	if (adreno_is_a510(adreno_gpu))
		return 0;

	/* Set up the limits management */
	if (adreno_is_a530(adreno_gpu))
		a530_lm_setup(gpu);
	else if (adreno_is_a540(adreno_gpu))
		a540_lm_setup(gpu);

	/* Set up SP/TP power collapse */
	a5xx_pc_init(gpu);

	/* Start the GPMU */
	ret = a5xx_gpmu_init(gpu);
	if (ret)
		return ret;

	/* Start the limits management */
	a5xx_lm_enable(gpu);

	return 0;
}

void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct drm_device *drm = gpu->dev;
	uint32_t dwords = 0, offset = 0, bosize;
	unsigned int *data, *ptr, *cmds;
	unsigned int cmds_size;

	if (adreno_is_a510(adreno_gpu))
		return;

	if (a5xx_gpu->gpmu_bo)
		return;

	data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;

	/*
	 * The first dword is the size of the remaining data in dwords. Use it
	 * as a checksum of sorts and make sure it matches the actual size of
	 * the firmware that we read
	 */

	if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
		(data[0] < 2) || (data[0] >=
			(adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
		return;

	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
	if (data[1] != 2)
		return;
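
	/*
	 * The third header dword (data[2]) gives the number of dwords between
	 * the three-dword header and the command stream that gets loaded into
	 * the GPMU instruction RAM below
	 */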
	cmds = data + data[2] + 3;
	cmds_size = data[0] - data[2] - 2;

	/*
	 * A single type4 opcode can only have so many values attached, so add
	 * enough opcodes to load all the commands: one PKT4 header dword per
	 * TYPE4_MAX_PAYLOAD chunk, plus one for the final partial chunk
	 */
	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

	ptr = msm_gem_kernel_new_locked(drm, bosize,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
	if (IS_ERR(ptr))
		return;

	msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");

	while (cmds_size > 0) {
		int i;
		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
			TYPE4_MAX_PAYLOAD : cmds_size;

		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
			_size);

		for (i = 0; i < _size; i++)
			ptr[dwords++] = *cmds++;

		offset += _size;
		cmds_size -= _size;
	}

	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
	a5xx_gpu->gpmu_dwords = dwords;
}