// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"

extern bool hang_debug;
static void a5xx_dump(struct msm_gpu *gpu);

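/*
 * Peripheral Authentication Service (PAS) id for the GPU, passed to the SCM
 * calls that load and resume the zap shader (see a5xx_zap_shader_init() and
 * a5xx_zap_shader_resume() below).
 */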
#define GPU_PAS_ID 13

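/*
 * Kick the ringbuffer: publish the new write pointer to the CP.  When @sync
 * is true (and the microcode supports it) a WHERE_AM_I packet is emitted
 * first so that the CP keeps the privileged rptr shadow up to date.
 */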
void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		bool sync)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	uint32_t wptr;
	unsigned long flags;

	/*
	 * Most flush operations need to issue a WHERE_AM_I opcode to sync up
	 * the rptr shadow
	 */
	if (a5xx_gpu->has_whereami && sync) {
		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
		OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
		OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
	}

	spin_lock_irqsave(&ring->lock, flags);

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/* Make sure to wrap wptr if we need to */
	wptr = get_wptr(ring);

	spin_unlock_irqrestore(&ring->lock, flags);

	/* Make sure everything is posted before making a decision */
	mb();

	/* Update HW if this is the current ring and we are not in preempt */
	if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
		gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}

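/*
 * "sudo" submit path (CONFIG_DRM_MSM_GPU_SUDO): instead of executing the
 * userspace command streams as indirect buffers, copy them straight into the
 * ringbuffer and retire the submit synchronously.  Debug/bring-up use only.
 */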
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	struct msm_gem_object *obj;
	uint32_t *ptr, dwords;
	unsigned int i, j;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == submit->queue->ctx)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			/* copy commands into RB: */
			obj = submit->bos[submit->cmd[i].idx].obj;
			dwords = submit->cmd[i].size;

			ptr = msm_gem_get_vaddr(&obj->base);

			/* _get_vaddr() shouldn't fail at this point,
			 * since we've already mapped it once in
			 * submit_reloc()
			 */
			if (WARN_ON(!ptr))
				return;

			for (j = 0; j < dwords; j++) {
				/* normally the OUT_PKTn() would wait
				 * for space for the packet.  But since
				 * we just OUT_RING() the whole thing,
				 * need to call adreno_wait_ring()
				 * ourself:
				 */
				adreno_wait_ring(ring, 1);
				OUT_RING(ring, ptr[j]);
			}

			msm_gem_put_vaddr(&obj->base);

			break;
		}
	}

	a5xx_flush(gpu, ring, true);
	a5xx_preempt_trigger(gpu);

	/* we might not necessarily have a cmd from userspace to
	 * trigger an event to know that submit has completed, so
	 * do this manually:
	 */
	a5xx_idle(gpu, ring);
	ring->memptrs->fence = submit->seqno;
	msm_gpu_retire(gpu);
}

static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i, ibs = 0;

	if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
		priv->lastctx = NULL;
		a5xx_submit_in_rb(gpu, submit);
		return;
	}

	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x02);

	/* Turn off protected mode to write to special registers */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Set the save preemption record for the ring/command */
	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	/* Enable local preemption for finegrain preemption */
	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
	OUT_RING(ring, 0x02);

	/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
	OUT_RING(ring, 0x02);

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == submit->queue->ctx)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	/*
	 * Write the render mode to NULL (0) to indicate to the CP that the IBs
	 * are done rendering - otherwise a lucky preemption would start
	 * replaying from the last checkpoint
	 */
	OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
	OUT_RING(ring, 0);
	OUT_RING(ring, 0);
	OUT_RING(ring, 0);
	OUT_RING(ring, 0);
	OUT_RING(ring, 0);

	/* Turn off IB level preemptions */
	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
	OUT_RING(ring, 0x01);

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);

	/*
	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
	 * timestamp is written to the memory and then triggers the interrupt
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
		CP_EVENT_WRITE_0_IRQ);
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	/* Yield the floor on command completion */
	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
	/*
	 * If dword[2:1] are non zero, they specify an address for the CP to
	 * write the value of dword[3] to on preemption complete. Write 0 to
	 * skip the write
	 */
	OUT_RING(ring, 0x00);
	OUT_RING(ring, 0x00);
	/* Data value - not used if the address above is 0 */
	OUT_RING(ring, 0x01);
	/* Set bit 0 to trigger an interrupt on preempt complete */
	OUT_RING(ring, 0x01);

	/* A WHERE_AM_I packet is not needed after a YIELD */
	a5xx_flush(gpu, ring, false);

	/* Check to see if we need to start preemption */
	a5xx_preempt_trigger(gpu);
}

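/*
 * Static hardware clock gating (HWCG) configuration: register/value pairs
 * that a5xx_set_hwcg() programs when clock gating is enabled, and clears
 * when it is disabled.
 */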
static const struct {
	u32 offset;
	u32 value;
} a5xx_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};

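/*
 * Enable or disable hardware clock gating.  Callers may turn it off
 * temporarily (for example while reading registers back for a GPU crash
 * state) and re-enable it afterwards.
 */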
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
		gpu_write(gpu, a5xx_hwcg[i].offset,
			state ? a5xx_hwcg[i].value : 0);

	if (adreno_is_a540(adreno_gpu)) {
		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
	}

	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}

static int a5xx_me_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002F);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Specify workarounds for various microcode issues */
	if (adreno_is_a530(adreno_gpu)) {
		/* Workaround for token end syncs
		 * Force a WFI after every direct-render 3D mode draw and every
		 * 2D mode 3 draw
		 */
		OUT_RING(ring, 0x0000000B);
	} else if (adreno_is_a510(adreno_gpu)) {
		/* Workaround for token and syncs */
		OUT_RING(ring, 0x00000001);
	} else {
		/* No workarounds enabled */
		OUT_RING(ring, 0x00000000);
	}

	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	a5xx_flush(gpu, ring, true);
	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}

static int a5xx_preempt_start(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	if (gpu->nr_rings == 1)
		return 0;

	/* Turn off protected mode to write to special registers */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Set the save preemption record for the ring/command */
	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x00);

	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
	OUT_RING(ring, 0x01);

	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
	OUT_RING(ring, 0x01);

	/* Yield the floor on command completion */
	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
	OUT_RING(ring, 0x00);
	OUT_RING(ring, 0x00);
	OUT_RING(ring, 0x01);
	OUT_RING(ring, 0x01);

	/* The WHERE_AM_I packet is not needed after a YIELD is issued */
	a5xx_flush(gpu, ring, false);

	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}

static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
		struct drm_gem_object *obj)
{
	u32 *buf = msm_gem_get_vaddr_active(obj);

	if (IS_ERR(buf))
		return;

	/*
	 * If the lowest nibble is 0xa that is an indication that this microcode
	 * has been patched. The actual version is in dword [3] but we only care
	 * about the patchlevel which is the lowest nibble of dword [3]
	 */
	if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
		a5xx_gpu->has_whereami = true;

	msm_gem_put_vaddr(obj);
}

static int a5xx_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	int ret;

	if (!a5xx_gpu->pm4_bo) {
		a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);

		if (IS_ERR(a5xx_gpu->pm4_bo)) {
			ret = PTR_ERR(a5xx_gpu->pm4_bo);
			a5xx_gpu->pm4_bo = NULL;
			DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
				ret);
			return ret;
		}

		msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
	}

	if (!a5xx_gpu->pfp_bo) {
		a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);

		if (IS_ERR(a5xx_gpu->pfp_bo)) {
			ret = PTR_ERR(a5xx_gpu->pfp_bo);
			a5xx_gpu->pfp_bo = NULL;
			DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
				ret);
			return ret;
		}

		msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
		a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
	}

	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
		REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);

	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
		REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);

	return 0;
}

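/*
 * State argument for qcom_scm_set_remote_state(): ask the secure world to
 * re-initialize an already-loaded zap shader after the GPU has been power
 * cycled.
 */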
#define SCM_GPU_ZAP_SHADER_RESUME 0

static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
{
	int ret;

	ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
	if (ret)
		DRM_ERROR("%s: zap-shader resume failed: %d\n",
			gpu->name, ret);

	return ret;
}

static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	int ret;

	/*
	 * If the zap shader is already loaded into memory we just need to kick
	 * the remote processor to reinitialize it
	 */
	if (loaded)
		return a5xx_zap_shader_resume(gpu);

	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);

	loaded = !ret;
	return ret;
}

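/* The set of RBBM interrupts that get unmasked during a5xx_hw_init() */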
#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
	A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
	A5XX_RBBM_INT_0_MASK_CP_SW | \
	A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)

static int a5xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	int ret;

	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);

	if (adreno_is_a540(adreno_gpu))
		gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/* Enable RBBM error reporting bits */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);

	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
		/*
		 * Mask out the activity signals from RB1-3 to avoid false
		 * positives
		 */

		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
			0xFFFFFFFF);
	}

	/* Enable fault detection */
	gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
		(1 << 30) | 0xFFFF);

	/* Turn on performance counters */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);

	/* Select CP0 to always count cycles */
	gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);

	/* Select RBBM0 to countable 6 to get the busy status for devfreq */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);

	/* Increase VFD cache access so LRZ and other data gets evicted less */
	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);

	/* Disable L2 bypass in the UCHE */
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);

	/* Set the GMEM VA range (0 to gpu->gmem) */
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
		0x00100000 + adreno_gpu->gmem - 1);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);

	if (adreno_is_a510(adreno_gpu)) {
		gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
		gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
		gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
			(0x200 << 11 | 0x200 << 22));
	} else {
		gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
		if (adreno_is_a530(adreno_gpu))
			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
		if (adreno_is_a540(adreno_gpu))
			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
		gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
			(0x400 << 11 | 0x300 << 22));
	}

	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	/* Enable USE_RETENTION_FLOPS */
	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);

	/* Enable ME/PFP split notification */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);

	/*
	 * In A5x, the CCU can send the context_done event of a particular
	 * context to the UCHE (which ultimately reaches the CP) even when a
	 * valid transaction for that context is still pending inside the CCU.
	 * This can cause the CP to program config registers, which will make
	 * the pending transaction inside the CCU be interpreted differently
	 * and can lead to a GPU fault. This bug is fixed in the latest A510
	 * revision. To enable the fix, bit[11] of RB_DBG_ECO_CNTL needs to be
	 * set to 0; the default is 1 (disabled). For older A510 revisions this
	 * bit is unused.
	 */
	if (adreno_is_a510(adreno_gpu))
		gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);

	/* Enable HWCG */
	a5xx_set_hwcg(gpu, true);

	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

	/* Set the highest bank bit */
	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
	if (adreno_is_a540(adreno_gpu))
		gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2);

	/* Protect registers from the CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);

	/* RBBM */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));

	/* Content protect */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
			16));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));

	/* CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));

	/* RB */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));

	/* VPC */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));

	/* UCHE */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));

	if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu))
		gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
			ADRENO_PROTECT_RW(0x10000, 0x8000));

	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block off
	 * part of the virtual memory space.
	 */
	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Put the GPU into 64 bit by default */
	gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

	/*
	 * VPC corner case with local memory load kill leads to corrupt
	 * internal state. Normal Disable does not work for all a5x chips.
	 * So do the following setting to disable it.
	 */
	if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
		gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
		gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
	}

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	if (!adreno_is_a510(adreno_gpu))
		a5xx_gpmu_ucode_init(gpu);

	ret = a5xx_ucode_init(gpu);
	if (ret)
		return ret;

	/* Set the ringbuffer address */
	gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
		gpu->rb[0]->iova);

	/*
	 * If the microcode supports the WHERE_AM_I opcode then we can use that
	 * in lieu of the RPTR shadow and enable preemption. Otherwise, we
	 * can't safely use the RPTR shadow or preemption. In either case, the
	 * RPTR shadow should be disabled in hardware.
	 */
	gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);

	/* Create a privileged buffer for the RPTR shadow */
	if (a5xx_gpu->has_whereami) {
		if (!a5xx_gpu->shadow_bo) {
			a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
				sizeof(u32) * gpu->nr_rings,
				MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
				gpu->aspace, &a5xx_gpu->shadow_bo,
				&a5xx_gpu->shadow_iova);

			if (IS_ERR(a5xx_gpu->shadow))
				return PTR_ERR(a5xx_gpu->shadow);
		}

		gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
			REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
	} else if (gpu->nr_rings > 1) {
		/* Disable preemption if WHERE_AM_I isn't available */
		a5xx_preempt_fini(gpu);
		gpu->nr_rings = 1;
	}

	a5xx_preempt_hw_init(gpu);

	/* Disable the interrupts through the initial bringup stage */
	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);

	/* Clear ME_HALT to start the micro engine */
	gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
	ret = a5xx_me_init(gpu);
	if (ret)
		return ret;

	ret = a5xx_power_init(gpu);
	if (ret)
		return ret;

	/*
	 * Send a pipeline event stat to get misbehaving counters to start
	 * ticking correctly
	 */
	if (adreno_is_a530(adreno_gpu)) {
		OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
		OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));

		a5xx_flush(gpu, gpu->rb[0], true);
		if (!a5xx_idle(gpu, gpu->rb[0]))
			return -EINVAL;
	}

	/*
	 * If the chip that we are using does support loading one, then
	 * try to load a zap shader into the secure world. If successful
	 * we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
	 * be blocked and a permissions violation will soon follow.
	 */
	ret = a5xx_zap_shader_init(gpu);
	if (!ret) {
		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
		OUT_RING(gpu->rb[0], 0x00000000);

		a5xx_flush(gpu, gpu->rb[0], true);
		if (!a5xx_idle(gpu, gpu->rb[0]))
			return -EINVAL;
	} else if (ret == -ENODEV) {
		/*
		 * This device does not use zap shader (but print a warning
		 * just in case someone got their dt wrong.. hopefully they
		 * have a debug UART to realize the error of their ways...
		 * if you mess this up you are about to crash horribly)
		 */
		dev_warn_once(gpu->dev->dev,
			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
		gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
	} else {
		return ret;
	}

	/* Last step - yield the ringbuffer */
	a5xx_preempt_start(gpu);

	return 0;
}

static void a5xx_recover(struct msm_gpu *gpu)
{
	int i;

	adreno_dump_info(gpu);

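	/*
	 * The CP scratch registers can hold useful breadcrumbs; for instance,
	 * a5xx_submit() writes the fence seqno of each submit to scratch 2.
	 * Dump them before resetting the GPU.
	 */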
	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
	}

	if (hang_debug)
		a5xx_dump(gpu);

	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}

static void a5xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	a5xx_preempt_fini(gpu);

	if (a5xx_gpu->pm4_bo) {
		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
		drm_gem_object_put(a5xx_gpu->pm4_bo);
	}

	if (a5xx_gpu->pfp_bo) {
		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
		drm_gem_object_put(a5xx_gpu->pfp_bo);
	}

	if (a5xx_gpu->gpmu_bo) {
		msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
		drm_gem_object_put(a5xx_gpu->gpmu_bo);
	}

	if (a5xx_gpu->shadow_bo) {
		msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
		drm_gem_object_put(a5xx_gpu->shadow_bo);
	}

	adreno_gpu_cleanup(adreno_gpu);
	kfree(a5xx_gpu);
}

static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
{
	if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
		return false;

	/*
	 * Nearly every abnormality ends up pausing the GPU and triggering a
	 * fault so we can safely just watch for this one interrupt to fire
	 */
	return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
		A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}

bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	if (ring != a5xx_gpu->cur_ring) {
		WARN(1, "Tried to idle a non-current ringbuffer\n");
		return false;
	}

	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu, ring))
		return false;

	if (spin_until(_a5xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
			gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
			gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
		return false;
	}

	return true;
}

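/*
 * IOMMU pagefault handler: log the faulting iova plus a few CP scratch
 * registers for context.  The fault itself is not resolved here.
 */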
a5xx_fault_handler(void * arg,unsigned long iova,int flags)934*4882a593Smuzhiyun static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
935*4882a593Smuzhiyun {
936*4882a593Smuzhiyun struct msm_gpu *gpu = arg;
937*4882a593Smuzhiyun pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
938*4882a593Smuzhiyun iova, flags,
939*4882a593Smuzhiyun gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
940*4882a593Smuzhiyun gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
941*4882a593Smuzhiyun gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
942*4882a593Smuzhiyun gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
943*4882a593Smuzhiyun
944*4882a593Smuzhiyun return -EFAULT;
945*4882a593Smuzhiyun }
946*4882a593Smuzhiyun
a5xx_cp_err_irq(struct msm_gpu * gpu)947*4882a593Smuzhiyun static void a5xx_cp_err_irq(struct msm_gpu *gpu)
948*4882a593Smuzhiyun {
949*4882a593Smuzhiyun u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
950*4882a593Smuzhiyun
951*4882a593Smuzhiyun if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
952*4882a593Smuzhiyun u32 val;
953*4882a593Smuzhiyun
954*4882a593Smuzhiyun gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
955*4882a593Smuzhiyun
956*4882a593Smuzhiyun /*
957*4882a593Smuzhiyun * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
958*4882a593Smuzhiyun * read it twice
959*4882a593Smuzhiyun */
960*4882a593Smuzhiyun
961*4882a593Smuzhiyun gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
962*4882a593Smuzhiyun val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
963*4882a593Smuzhiyun
964*4882a593Smuzhiyun dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
965*4882a593Smuzhiyun val);
966*4882a593Smuzhiyun }
967*4882a593Smuzhiyun
968*4882a593Smuzhiyun if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
969*4882a593Smuzhiyun dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
970*4882a593Smuzhiyun gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
971*4882a593Smuzhiyun
972*4882a593Smuzhiyun if (status & A5XX_CP_INT_CP_DMA_ERROR)
973*4882a593Smuzhiyun dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
974*4882a593Smuzhiyun
975*4882a593Smuzhiyun if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
976*4882a593Smuzhiyun u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
977*4882a593Smuzhiyun
978*4882a593Smuzhiyun dev_err_ratelimited(gpu->dev->dev,
979*4882a593Smuzhiyun "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
980*4882a593Smuzhiyun val & (1 << 24) ? "WRITE" : "READ",
981*4882a593Smuzhiyun (val & 0xFFFFF) >> 2, val);
982*4882a593Smuzhiyun }
983*4882a593Smuzhiyun
984*4882a593Smuzhiyun if (status & A5XX_CP_INT_CP_AHB_ERROR) {
985*4882a593Smuzhiyun u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
986*4882a593Smuzhiyun const char *access[16] = { "reserved", "reserved",
987*4882a593Smuzhiyun "timestamp lo", "timestamp hi", "pfp read", "pfp write",
988*4882a593Smuzhiyun "", "", "me read", "me write", "", "", "crashdump read",
989*4882a593Smuzhiyun "crashdump write" };
990*4882a593Smuzhiyun
991*4882a593Smuzhiyun dev_err_ratelimited(gpu->dev->dev,
992*4882a593Smuzhiyun "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
993*4882a593Smuzhiyun status & 0xFFFFF, access[(status >> 24) & 0xF],
994*4882a593Smuzhiyun (status & (1 << 31)), status);
995*4882a593Smuzhiyun }
996*4882a593Smuzhiyun }
997*4882a593Smuzhiyun
a5xx_rbbm_err_irq(struct msm_gpu * gpu,u32 status)998*4882a593Smuzhiyun static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
999*4882a593Smuzhiyun {
1000*4882a593Smuzhiyun if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
1001*4882a593Smuzhiyun u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
1002*4882a593Smuzhiyun
1003*4882a593Smuzhiyun dev_err_ratelimited(gpu->dev->dev,
1004*4882a593Smuzhiyun "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
1005*4882a593Smuzhiyun val & (1 << 28) ? "WRITE" : "READ",
1006*4882a593Smuzhiyun (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
1007*4882a593Smuzhiyun (val >> 24) & 0xF);
1008*4882a593Smuzhiyun
1009*4882a593Smuzhiyun /* Clear the error */
1010*4882a593Smuzhiyun gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));

		/* Clear the interrupt */
		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
	}

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
}

static void a5xx_uche_err_irq(struct msm_gpu *gpu)
{
	uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);

	addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);

	dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
		addr);
}

static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
{
	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}

static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

	DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
		ring ? ring->id : -1, ring ? ring->seqno : 0,
		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
		gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));

	/* Turn off the hangcheck timer to keep it from bothering us */
	del_timer(&gpu->hangcheck_timer);

	queue_work(priv->wq, &gpu->recover_work);
}

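/*
 * Group of RBBM error interrupts dispatched to a5xx_rbbm_err_irq(). Note that
 * RBBM_AHB_ERROR is cleared there, after the error source has been handled,
 * rather than up front in a5xx_irq().
 */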
#define RBBM_ERROR_MASK \
	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)

static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

	/*
	 * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
	 * before the source is cleared the interrupt will storm.
	 */
	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
		status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

	/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
	if (status & RBBM_ERROR_MASK)
		a5xx_rbbm_err_irq(gpu, status);

	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a5xx_cp_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
		a5xx_fault_detect_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		a5xx_uche_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
		a5xx_gpmu_err_irq(gpu);

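	/*
	 * CACHE_FLUSH_TS is raised after the CP writes out the fence for a
	 * completed submit, so retire finished submits and give preemption a
	 * chance to switch rings.
	 */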
	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
		a5xx_preempt_trigger(gpu);
		msm_gpu_retire(gpu);
	}

	if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
		a5xx_preempt_irq(gpu);

	return IRQ_HANDLED;
}

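/*
 * Register ranges to capture when dumping GPU state: pairs of inclusive
 * (start, end) offsets, terminated by ~0. The common adreno dump code walks
 * this list via adreno_gpu->registers.
 */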
static const u32 a5xx_registers[] = {
	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
	0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
	0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
	0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
	0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
	0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
	0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
	0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
	0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
	0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
	0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
	0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
	0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
	0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
	0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
	0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
	0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
	0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
	0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
	0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
	0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
	0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
	0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
	0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
	0xAC60, 0xAC60, ~0,
};

static void a5xx_dump(struct msm_gpu *gpu)
{
	DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
	adreno_dump(gpu);
}

static int a5xx_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	/* Turn on the core power */
	ret = msm_gpu_pm_resume(gpu);
	if (ret)
		return ret;

	if (adreno_is_a510(adreno_gpu)) {
		/* Halt the sp_input_clk at HM level */
		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
		a5xx_set_hwcg(gpu, true);
		/* Turn on sp_input_clk at HM level */
		gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
		return 0;
	}

	/* Turn on the RBCCU domain first to limit the chances of voltage droop */
	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

	/* Wait 3 usecs before polling */
	udelay(3);

	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret) {
		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
			gpu->name,
			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
		return ret;
	}

	/* Turn on the SP domain */
	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret)
		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
			gpu->name);

	return ret;
}

static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	u32 mask = 0xf;
	int i, ret;

	/* A510 has 3 XIN ports in VBIF */
	if (adreno_is_a510(adreno_gpu))
		mask = 0x7;

	/* Clear the VBIF pipe before shutting down */
	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
			mask) == mask);

	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);

	/*
	 * Reset the VBIF before power collapse to avoid issues with stale
	 * FIFO entries
	 */
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);

	ret = msm_gpu_pm_suspend(gpu);
	if (ret)
		return ret;

	if (a5xx_gpu->has_whereami)
		for (i = 0; i < gpu->nr_rings; i++)
			a5xx_gpu->shadow[i] = 0;

	return 0;
}

static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	*value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
		REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);

	return 0;
}

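/*
 * The crashdumper is a small helper engine in the CP that executes a script
 * of read commands from a GPU buffer and writes the results back into that
 * buffer. It is used here to capture registers (such as the HLSQ aperture
 * below) that the CPU cannot always read directly.
 */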
struct a5xx_crashdumper {
	void *ptr;
	struct drm_gem_object *bo;
	u64 iova;
};

struct a5xx_gpu_state {
	struct msm_gpu_state base;
	u32 *hlsqregs;
};

static int a5xx_crashdumper_init(struct msm_gpu *gpu,
		struct a5xx_crashdumper *dumper)
{
	dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
		SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
		&dumper->bo, &dumper->iova);

	if (!IS_ERR(dumper->ptr))
		msm_gem_object_set_name(dumper->bo, "crashdump");

	return PTR_ERR_OR_ZERO(dumper->ptr);
}

static int a5xx_crashdumper_run(struct msm_gpu *gpu,
		struct a5xx_crashdumper *dumper)
{
	u32 val;

	if (IS_ERR_OR_NULL(dumper->ptr))
		return -EINVAL;

	gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
		REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);

	gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);

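	/*
	 * Wait up to 10ms for the dumper to finish; bit 2 of CRASH_DUMP_CNTL
	 * appears to be the completion indication.
	 */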
	return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
		val & 0x04, 100, 10000);
}

/*
 * This is the list of registers that must be read through the HLSQ aperture
 * using the crashdumper, since they are not normally accessible from the CPU
 * on a secure platform.
 */
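/*
 * 'type' is the bank select value written to HLSQ_DBG_READ_SEL, 'regoffset'
 * is the register offset used when printing the captured values, and 'count'
 * is the number of dwords to read from the aperture.
 */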
static const struct {
	u32 type;
	u32 regoffset;
	u32 count;
} a5xx_hlsq_aperture_regs[] = {
	{ 0x35, 0xe00, 0x32 }, /* HLSQ non-context */
	{ 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
	{ 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
	{ 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */
	{ 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */
	{ 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
	{ 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
	{ 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
	{ 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
	{ 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
	{ 0x3a, 0x0f00, 0x1c }, /* TP non-context */
	{ 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
	{ 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
	{ 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */
	{ 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */
};

static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
		struct a5xx_gpu_state *a5xx_state)
{
	struct a5xx_crashdumper dumper = { 0 };
	u32 offset, count = 0;
	u64 *ptr;
	int i;

	if (a5xx_crashdumper_init(gpu, &dumper))
		return;

	/* The script will be written at offset 0 */
	ptr = dumper.ptr;

	/* Start writing the data at offset 256k */
	offset = dumper.iova + (256 * SZ_1K);

	/* Count how many additional registers to get from the HLSQ aperture */
	for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
		count += a5xx_hlsq_aperture_regs[i].count;

	a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
	if (!a5xx_state->hlsqregs)
		return;

	/* Build the crashdump script */
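	/*
	 * Each script entry is a pair of 64-bit words: a value or destination
	 * iova followed by the register offset in the upper bits plus a dword
	 * count. The first pair below selects the register bank, the second
	 * reads 'c' dwords from the aperture into the dump buffer.
	 */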
	for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
		u32 type = a5xx_hlsq_aperture_regs[i].type;
		u32 c = a5xx_hlsq_aperture_regs[i].count;

		/* Write the register to select the desired bank */
		*ptr++ = ((u64) type << 8);
		*ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
			(1 << 21) | 1;

		*ptr++ = offset;
		*ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
			| c;

		offset += c * sizeof(u32);
	}

	/* Write two zeros to close off the script */
	*ptr++ = 0;
	*ptr++ = 0;

	if (a5xx_crashdumper_run(gpu, &dumper)) {
		kfree(a5xx_state->hlsqregs);
		msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
		return;
	}

	/* Copy the data from the crashdumper to the state */
	memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
		count * sizeof(u32));

	msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
}

static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
{
	struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
		GFP_KERNEL);

	if (!a5xx_state)
		return ERR_PTR(-ENOMEM);

	/* Temporarily disable hardware clock gating before reading the hw */
	a5xx_set_hwcg(gpu, false);

	/* First get the generic state from the adreno core */
	adreno_gpu_state_get(gpu, &(a5xx_state->base));

	a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);

	/* Get the HLSQ regs with the help of the crashdumper */
	a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);

	a5xx_set_hwcg(gpu, true);

	return &a5xx_state->base;
}

static void a5xx_gpu_state_destroy(struct kref *kref)
{
	struct msm_gpu_state *state = container_of(kref,
		struct msm_gpu_state, ref);
	struct a5xx_gpu_state *a5xx_state = container_of(state,
		struct a5xx_gpu_state, base);

	kfree(a5xx_state->hlsqregs);

	adreno_gpu_state_destroy(state);
	kfree(a5xx_state);
}

static int a5xx_gpu_state_put(struct msm_gpu_state *state)
{
	if (IS_ERR_OR_NULL(state))
		return 1;

	return kref_put(&state->ref, a5xx_gpu_state_destroy);
}


#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p)
{
	int i, j;
	u32 pos = 0;
	struct a5xx_gpu_state *a5xx_state = container_of(state,
		struct a5xx_gpu_state, base);

	if (IS_ERR_OR_NULL(state))
		return;

	adreno_show(gpu, state, p);

	/* Dump the additional a5xx HLSQ registers */
	if (!a5xx_state->hlsqregs)
		return;

	drm_printf(p, "registers-hlsq:\n");

	for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
		u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
		u32 c = a5xx_hlsq_aperture_regs[i].count;

		for (j = 0; j < c; j++, pos++, o++) {
			/*
			 * To keep the crashdump simple we pull the entire range
			 * for each register type but not all of the registers
			 * in the range are valid. Fortunately invalid registers
			 * stick out like a sore thumb with a value of
			 * 0xdeadbeef
			 */
			if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
				continue;

			drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
				o << 2, a5xx_state->hlsqregs[pos]);
		}
	}
}
#endif

static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	return a5xx_gpu->cur_ring;
}

static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
{
	u64 busy_cycles, busy_time;

	/* Only read the gpu busy if the hardware is already active */
	if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0)
		return 0;

	busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
		REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);

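	/* Convert the cycle delta into microseconds of busy time (cycles / MHz) */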
	busy_time = busy_cycles - gpu->devfreq.busy_cycles;
	do_div(busy_time, clk_get_rate(gpu->core_clk) / 1000000);

	gpu->devfreq.busy_cycles = busy_cycles;

	pm_runtime_put(&gpu->pdev->dev);

	if (WARN_ON(busy_time > ~0LU))
		return ~0LU;

	return (unsigned long)busy_time;
}

static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

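	/*
	 * Use the per-ring rptr shadow when the hardware keeps one; otherwise
	 * fall back to reading CP_RB_RPTR directly.
	 */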
	if (a5xx_gpu->has_whereami)
		return a5xx_gpu->shadow[ring->id];

	return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
}

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a5xx_hw_init,
		.pm_suspend = a5xx_pm_suspend,
		.pm_resume = a5xx_pm_resume,
		.recover = a5xx_recover,
		.submit = a5xx_submit,
		.active_ring = a5xx_active_ring,
		.irq = a5xx_irq,
		.destroy = a5xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
		.show = a5xx_show,
#endif
#if defined(CONFIG_DEBUG_FS)
		.debugfs_init = a5xx_debugfs_init,
#endif
		.gpu_busy = a5xx_gpu_busy,
		.gpu_state_get = a5xx_gpu_state_get,
		.gpu_state_put = a5xx_gpu_state_put,
		.create_address_space = adreno_iommu_create_address_space,
		.get_rptr = a5xx_get_rptr,
	},
	.get_timestamp = a5xx_get_timestamp,
};

static void check_speed_bin(struct device *dev)
{
	struct nvmem_cell *cell;
	u32 val;

	/*
	 * If the OPP table specifies an opp-supported-hw property then we have
	 * to set something with dev_pm_opp_set_supported_hw() or the table
	 * doesn't get populated, so pick an arbitrary value that ensures the
	 * default frequencies are selected without conflicting with any actual
	 * bins.
	 */
	val = 0x80;

	cell = nvmem_cell_get(dev, "speed_bin");

	if (!IS_ERR(cell)) {
		void *buf = nvmem_cell_read(cell, NULL);

		if (!IS_ERR(buf)) {
			u8 bin = *((u8 *) buf);

			val = (1 << bin);
			kfree(buf);
		}

		nvmem_cell_put(cell);
	}

	dev_pm_opp_set_supported_hw(dev, &val, 1);
}

struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct a5xx_gpu *a5xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	int ret;

	if (!pdev) {
		DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
		return ERR_PTR(-ENXIO);
	}

	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
	if (!a5xx_gpu)
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a5xx_gpu->base;
	gpu = &adreno_gpu->base;

	adreno_gpu->registers = a5xx_registers;

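	/*
	 * Default LM (limits management) leakage value, presumably consumed
	 * by the GPMU power setup code.
	 */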
	a5xx_gpu->lm_leakage = 0x4E001A;

	check_speed_bin(&pdev->dev);

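	/*
	 * Ask for four ringbuffers, one per priority level, so that
	 * preemption can switch between them.
	 */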
	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
	if (ret) {
		a5xx_destroy(&(a5xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	if (gpu->aspace)
		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);

	/* Set up the preemption specific bits and pieces for each ringbuffer */
	a5xx_preempt_init(gpu);

	return gpu;
}