// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 */

#include "a3xx_gpu.h"

#define A3XX_INT0_MASK \
	(A3XX_INT0_RBBM_AHB_ERROR |        \
	 A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
	 A3XX_INT0_CP_T0_PACKET_IN_IB |    \
	 A3XX_INT0_CP_OPCODE_ERROR |       \
	 A3XX_INT0_CP_RESERVED_BIT_ERROR | \
	 A3XX_INT0_CP_HW_FAULT |           \
	 A3XX_INT0_CP_IB1_INT |            \
	 A3XX_INT0_CP_IB2_INT |            \
	 A3XX_INT0_CP_RB_INT |             \
	 A3XX_INT0_CP_REG_PROTECT_FAULT |  \
	 A3XX_INT0_CP_AHB_ERROR_HALT |     \
	 A3XX_INT0_CACHE_FLUSH_TS |        \
	 A3XX_INT0_UCHE_OOB_ACCESS)

extern bool hang_debug;

static void a3xx_dump(struct msm_gpu *gpu);
static bool a3xx_idle(struct msm_gpu *gpu);

static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == submit->queue->ctx)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

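	/* Record the fence seqno in a scratch register; a3xx_recover() dumps
	 * CP_SCRATCH_REG0..7, so the last submitted seqno shows up in the
	 * hang output.
	 */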
	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->seqno);

	/* Flush HLSQ lazy updates to make sure there is nothing
	 * pending for indirect loads after the timestamp has
	 * passed:
	 */
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, HLSQ_FLUSH);

	/* wait for idle before cache flush/interrupt */
	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
	OUT_RING(ring, 0x00000000);

	/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
	OUT_RING(ring, rbmemptr(ring, fence));
	OUT_RING(ring, submit->seqno);

#if 0
	/* Dummy set-constant to trigger context rollover */
	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
	OUT_RING(ring, 0x00000000);
#endif

	adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
}

static bool a3xx_me_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

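	/* One-time CP micro-engine init, issued after the PM4/PFP firmware
	 * has been loaded; the payload below is a fixed set of default
	 * state values.
	 */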
	OUT_PKT3(ring, CP_ME_INIT, 17);
	OUT_RING(ring, 0x000003f7);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000080);
	OUT_RING(ring, 0x00000100);
	OUT_RING(ring, 0x00000180);
	OUT_RING(ring, 0x00006600);
	OUT_RING(ring, 0x00000150);
	OUT_RING(ring, 0x0000014e);
	OUT_RING(ring, 0x00000154);
	OUT_RING(ring, 0x00000001);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
	return a3xx_idle(gpu);
}

static int a3xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
	uint32_t *ptr, len;
	int i, ret;

	DBG("%s", gpu->name);

	if (adreno_is_a305(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
	} else if (adreno_is_a306(adreno_gpu)) {
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
	} else if (adreno_is_a320(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);

	} else if (adreno_is_a330v2(adreno_gpu)) {
		/*
		 * Most of the VBIF registers on 8974v2 have the correct
		 * values at power on, so we won't modify those if we don't
		 * need to
		 */
		/* Enable 1k sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);

	} else if (adreno_is_a330(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Disable VBIF clock gating.  This allows AXI to run at a
		 * higher frequency than the GPU:
		 */
		gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);

	} else {
		BUG();
	}

	/* Make all blocks contribute to the GPU BUSY perf counter: */
	gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

	/* Tune the hysteresis counters for SP and CP idle detection: */
	gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
	gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	/* Enable the RBBM error reporting bits.  This lets us get
	 * useful information on failure:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting: */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);

	/* Turn on the power counters: */
	gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);

	/* Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);

	/* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
	gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);

	/* Enable Clock gating: */
	if (adreno_is_a306(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
	else if (adreno_is_a320(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
	else if (adreno_is_a330v2(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
	else if (adreno_is_a330(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);

	if (adreno_is_a330v2(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
	else if (adreno_is_a330(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);

	/* Set the OCMEM base address for A330, etc */
	if (a3xx_gpu->ocmem.hdl) {
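		/* The register appears to take the base in 16 KiB units,
		 * hence the >> 14:
		 */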
		gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
			(unsigned int)(a3xx_gpu->ocmem.base >> 14));
	}

	/* Turn on performance counters: */
	gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);

	/* Enable the perfcntrs that we use.. */
	for (i = 0; i < gpu->num_perfcntrs; i++) {
		const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
		gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
	}

	gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	/*
	 * Use the default ringbuffer size and block size but disable the RPTR
	 * shadow
	 */
	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);

	/* Set the ringbuffer address */
	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));

	/* setup access protection: */
	gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);

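	/* Each CP_PROTECT entry below encodes a protected register range;
	 * the magic values are presumably carried over from the downstream
	 * driver.
	 */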
	/* RBBM registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);

	/* CP registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);

	/* RB registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);

	/* VBIF registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);

	/* NOTE: PM4/micro-engine firmware registers look to be the same
	 * for a2xx and a3xx.. we could possibly push that part down to
	 * adreno_gpu base class.  Or push both PM4 and PFP but
	 * parameterize the pfp ucode addr/data registers..
	 */

	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
	DBG("loading PM4 ucode version: %x", ptr[1]);

	gpu_write(gpu, REG_AXXX_CP_DEBUG,
			AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
			AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
	gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
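	/* The first dword of the firmware image looks like a header rather
	 * than an instruction, so start writing at index 1 (the PFP load
	 * below does the same):
	 */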
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
	len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
	DBG("loading PFP ucode version: %x", ptr[5]);

	gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);

	/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
	if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
			adreno_is_a320(adreno_gpu)) {
		gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
	} else if (adreno_is_a330(adreno_gpu)) {
		/* NOTE: this (value taken from the downstream android driver)
		 * includes some bits outside of the known bitfields.  But
		 * A330 has this "MERCIU queue" thing too, which might
		 * explain a new bitfield or reshuffling:
		 */
		gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
	}

	/* clear ME_HALT to start micro engine */
	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);

	return a3xx_me_init(gpu) ? 0 : -EINVAL;
}

static void a3xx_recover(struct msm_gpu *gpu)
{
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
	}

	/* dump registers before resetting gpu, if enabled: */
	if (hang_debug)
		a3xx_dump(gpu);

	gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}

static void a3xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	adreno_gpu_cleanup(adreno_gpu);

	adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);

	kfree(a3xx_gpu);
}

static bool a3xx_idle(struct msm_gpu *gpu)
{
	/* wait for ringbuffer to drain: */
	if (!adreno_idle(gpu, gpu->rb[0]))
		return false;

	/* then wait for GPU to finish: */
	if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
			A3XX_RBBM_STATUS_GPU_BUSY))) {
		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);

		/* TODO maybe we need to reset GPU here to recover from hang? */
		return false;
	}

	return true;
}

static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
{
	uint32_t status;

	status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
	DBG("%s: %08x", gpu->name, status);

	// TODO

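	/* For now, just ack every pending A3XX_INT0_* bit and kick retire
	 * processing to figure out what completed:
	 */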
	gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);

	msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

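/* Pairs of (start, end) register offsets dumped for debugfs show and GPU
 * crash state; the list is terminated by the ~0 sentinel.
 */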
static const unsigned int a3xx_registers[] = {
	0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
	0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
	0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
	0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
	0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
	0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
	0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
	0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
	0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
	0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
	0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
	0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
	0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
	0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
	0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
	0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
	0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
	0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
	0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
	0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
	0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
	0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
	0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
	0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
	0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
	0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
	0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
	0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
	0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
	0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
	0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
	0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
	0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
	0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
	~0   /* sentinel */
};

/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
	printk("status: %08x\n",
			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
	adreno_dump(gpu);
}

static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return ERR_PTR(-ENOMEM);

	adreno_gpu_state_get(gpu, state);

	state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);

	return state;
}

static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
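	/* The RPTR shadow is disabled in a3xx_hw_init() (AXXX_CP_RB_CNTL_NO_UPDATE),
	 * so read the read-pointer straight from the register and mirror it
	 * into the memptrs for the rest of the driver:
	 */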
	ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
	return ring->memptrs->rptr;
}

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a3xx_hw_init,
		.pm_suspend = msm_gpu_pm_suspend,
		.pm_resume = msm_gpu_pm_resume,
		.recover = a3xx_recover,
		.submit = a3xx_submit,
		.active_ring = adreno_active_ring,
		.irq = a3xx_irq,
		.destroy = a3xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
		.show = adreno_show,
#endif
		.gpu_state_get = a3xx_gpu_state_get,
		.gpu_state_put = adreno_gpu_state_put,
		.create_address_space = adreno_iommu_create_address_space,
		.get_rptr = a3xx_get_rptr,
	},
};

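/* Perf counters sampled by the msm_gpu core; each entry gives the select
 * register, the LO half of the counter register, the select value and a
 * name.  The select registers are programmed in a3xx_hw_init().
 */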
static const struct msm_gpu_perfcntr perfcntrs[] = {
	{ REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
			SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
	{ REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
			SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
};

struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
	struct a3xx_gpu *a3xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	int ret;

	if (!pdev) {
		DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
		ret = -ENXIO;
		goto fail;
	}

	a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
	if (!a3xx_gpu) {
		ret = -ENOMEM;
		goto fail;
	}

	adreno_gpu = &a3xx_gpu->base;
	gpu = &adreno_gpu->base;

	gpu->perfcntrs = perfcntrs;
	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);

	adreno_gpu->registers = a3xx_registers;

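	/* a3xx uses a single ringbuffer; the final argument is the ring count: */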
	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret)
		goto fail;

	/* if needed, allocate gmem: */
	if (adreno_is_a330(adreno_gpu)) {
		ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
					    adreno_gpu, &a3xx_gpu->ocmem);
		if (ret)
			goto fail;
	}

	if (!gpu->aspace) {
		/* TODO we think it is possible to configure the GPU to
		 * restrict access to VRAM carveout.  But the required
		 * registers are unknown.  For now just bail out and
		 * limp along with just modesetting.  If it turns out
		 * to not be possible to restrict access, then we must
		 * implement a cmdstream validator.
		 */
		DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the ICC path to maximum speed for now by multiplying the fastest
	 * frequency by the bus width (8). We'll want to scale this later on to
	 * improve battery life.
	 */
	icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
	icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);

	return gpu;

fail:
	if (a3xx_gpu)
		a3xx_destroy(&a3xx_gpu->base.base);

	return ERR_PTR(ret);
}