// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_irq.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

static const struct debugfs_reg32 v3d_regs[] = {
	VC4_REG32(V3D_IDENT0),
	VC4_REG32(V3D_IDENT1),
	VC4_REG32(V3D_IDENT2),
	VC4_REG32(V3D_SCRATCH),
	VC4_REG32(V3D_L2CACTL),
	VC4_REG32(V3D_SLCACTL),
	VC4_REG32(V3D_INTCTL),
	VC4_REG32(V3D_INTENA),
	VC4_REG32(V3D_INTDIS),
	VC4_REG32(V3D_CT0CS),
	VC4_REG32(V3D_CT1CS),
	VC4_REG32(V3D_CT0EA),
	VC4_REG32(V3D_CT1EA),
	VC4_REG32(V3D_CT0CA),
	VC4_REG32(V3D_CT1CA),
	VC4_REG32(V3D_CT00RA0),
	VC4_REG32(V3D_CT01RA0),
	VC4_REG32(V3D_CT0LC),
	VC4_REG32(V3D_CT1LC),
	VC4_REG32(V3D_CT0PC),
	VC4_REG32(V3D_CT1PC),
	VC4_REG32(V3D_PCS),
	VC4_REG32(V3D_BFC),
	VC4_REG32(V3D_RFC),
	VC4_REG32(V3D_BPCA),
	VC4_REG32(V3D_BPCS),
	VC4_REG32(V3D_BPOA),
	VC4_REG32(V3D_BPOS),
	VC4_REG32(V3D_BXCF),
	VC4_REG32(V3D_SQRSV0),
	VC4_REG32(V3D_SQRSV1),
	VC4_REG32(V3D_SQCNTL),
	VC4_REG32(V3D_SRQPC),
	VC4_REG32(V3D_SRQUA),
	VC4_REG32(V3D_SRQUL),
	VC4_REG32(V3D_SRQCS),
	VC4_REG32(V3D_VPACNTL),
	VC4_REG32(V3D_VPMBASE),
	VC4_REG32(V3D_PCTRC),
	VC4_REG32(V3D_PCTRE),
	VC4_REG32(V3D_PCTR(0)),
	VC4_REG32(V3D_PCTRS(0)),
	VC4_REG32(V3D_PCTR(1)),
	VC4_REG32(V3D_PCTRS(1)),
	VC4_REG32(V3D_PCTR(2)),
	VC4_REG32(V3D_PCTRS(2)),
	VC4_REG32(V3D_PCTR(3)),
	VC4_REG32(V3D_PCTRS(3)),
	VC4_REG32(V3D_PCTR(4)),
	VC4_REG32(V3D_PCTRS(4)),
	VC4_REG32(V3D_PCTR(5)),
	VC4_REG32(V3D_PCTRS(5)),
	VC4_REG32(V3D_PCTR(6)),
	VC4_REG32(V3D_PCTRS(6)),
	VC4_REG32(V3D_PCTR(7)),
	VC4_REG32(V3D_PCTRS(7)),
	VC4_REG32(V3D_PCTR(8)),
	VC4_REG32(V3D_PCTRS(8)),
	VC4_REG32(V3D_PCTR(9)),
	VC4_REG32(V3D_PCTRS(9)),
	VC4_REG32(V3D_PCTR(10)),
	VC4_REG32(V3D_PCTRS(10)),
	VC4_REG32(V3D_PCTR(11)),
	VC4_REG32(V3D_PCTRS(11)),
	VC4_REG32(V3D_PCTR(12)),
	VC4_REG32(V3D_PCTRS(12)),
	VC4_REG32(V3D_PCTR(13)),
	VC4_REG32(V3D_PCTRS(13)),
	VC4_REG32(V3D_PCTR(14)),
	VC4_REG32(V3D_PCTRS(14)),
	VC4_REG32(V3D_PCTR(15)),
	VC4_REG32(V3D_PCTRS(15)),
	VC4_REG32(V3D_DBGE),
	VC4_REG32(V3D_FDBGO),
	VC4_REG32(V3D_FDBGB),
	VC4_REG32(V3D_FDBGR),
	VC4_REG32(V3D_FDBGS),
	VC4_REG32(V3D_ERRSTAT),
};

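/*
 * Dump the V3D core configuration (revision, slice/TMU/QPU counts and
 * semaphore count) derived from V3D_IDENT1, powering the core up for
 * the register read if needed.
 */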
static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = vc4_v3d_pm_get(vc4);

	if (ret == 0) {
		uint32_t ident1 = V3D_READ(V3D_IDENT1);
		uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
		uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
		uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);

		seq_printf(m, "Revision: %d\n",
			   VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
		seq_printf(m, "Slices: %d\n", nslc);
		seq_printf(m, "TMUs: %d\n", nslc * tups);
		seq_printf(m, "QPUs: %d\n", nslc * qups);
		seq_printf(m, "Semaphores: %d\n",
			   VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
		vc4_v3d_pm_put(vc4);
	}

	return 0;
}

/**
 * Wraps pm_runtime_get_sync() in a refcount, so that we can reliably
 * get the pm_runtime refcount to 0 in vc4_reset().
 */
int
vc4_v3d_pm_get(struct vc4_dev *vc4)
{
	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);

		if (ret < 0) {
			vc4->power_refcount--;
			mutex_unlock(&vc4->power_lock);
			return ret;
		}
	}
	mutex_unlock(&vc4->power_lock);

	return 0;
}

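/*
 * Drop a reference taken by vc4_v3d_pm_get(); once the last reference
 * is gone, let the V3D power domain autosuspend.  Callers bracket V3D
 * register access with the get/put pair, roughly (sketch):
 *
 *	if (vc4_v3d_pm_get(vc4) == 0) {
 *		... touch V3D registers ...
 *		vc4_v3d_pm_put(vc4);
 *	}
 */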
void
vc4_v3d_pm_put(struct vc4_dev *vc4)
{
	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);
}

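/* One-time hardware setup, done at bind time and on each runtime resume. */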
static void vc4_v3d_init_hw(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Take all the memory that would have been reserved for user
	 * QPU programs, since we don't have an interface for running
	 * them, anyway.
	 */
	V3D_WRITE(V3D_VPMBASE, 0);
}

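/*
 * Find a free bin_alloc_size slot in the binner BO for a new job.  If
 * all slots are in use, wait for the last outstanding render job to
 * finish and retry.  Returns the slot index or a negative errno.
 */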
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
{
	struct drm_device *dev = &vc4->base;
	unsigned long irqflags;
	int slot;
	uint64_t seqno = 0;
	struct vc4_exec_info *exec;

try_again:
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	slot = ffs(~vc4->bin_alloc_used);
	if (slot != 0) {
		/* Switch from ffs() bit index to a 0-based index. */
		slot--;
		vc4->bin_alloc_used |= BIT(slot);
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return slot;
	}

	/* Couldn't find an open slot.  Wait for render to complete
	 * and try again.
	 */
	exec = vc4_last_render_job(vc4);
	if (exec)
		seqno = exec->seqno;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	if (seqno) {
		int ret = vc4_wait_for_seqno(dev, seqno, ~0ull, true);

		if (ret == 0)
			goto try_again;

		return ret;
	}

	return -ENOMEM;
}

/**
 * bin_bo_alloc() - allocates the memory that will be used for
 * tile binning.
 *
 * The binner has a limitation that the addresses in the tile state
 * buffer that point into the tile alloc buffer or binner overflow
 * memory only have 28 bits (256MB), and the top 4 bits on the bus for
 * tile alloc references end up coming from the tile state buffer's
 * address.
 *
 * To work around this, we allocate a single large buffer while V3D is
 * in use, make sure that it has the top 4 bits constant across its
 * entire extent, and then put the tile state, tile alloc, and binner
 * overflow memory inside that buffer.
 *
 * This creates a limitation where we may not be able to execute a job
 * if it doesn't fit within the buffer that we allocated up front.
 * However, it turns out that 16MB is "enough for anybody", and
 * real-world applications run into allocation failures from the
 * overall CMA pool before they make scenes complicated enough to run
 * out of bin space.
 */
static int bin_bo_alloc(struct vc4_dev *vc4)
{
	struct vc4_v3d *v3d = vc4->v3d;
	uint32_t size = 16 * 1024 * 1024;
	int ret = 0;
	struct list_head list;

	if (!v3d)
		return -ENODEV;

	/* We may need to try allocating more than once to get a BO
	 * that doesn't cross 256MB.  Track the ones we've allocated
	 * that failed so far, so that we can free them when we've got
	 * one that succeeded (if we freed them right away, our next
	 * allocation would probably be the same chunk of memory).
	 */
	INIT_LIST_HEAD(&list);

	while (true) {
		struct vc4_bo *bo = vc4_bo_create(&vc4->base, size, true,
						  VC4_BO_TYPE_BIN);

		if (IS_ERR(bo)) {
			ret = PTR_ERR(bo);

			dev_err(&v3d->pdev->dev,
				"Failed to allocate memory for tile binning: "
				"%d. You may need to enable CMA or give it "
				"more memory.",
				ret);
			break;
		}

		/* Check if this BO won't trigger the addressing bug. */
		if ((bo->base.paddr & 0xf0000000) ==
		    ((bo->base.paddr + bo->base.base.size - 1) & 0xf0000000)) {
			vc4->bin_bo = bo;

			/* Set up for allocating 512KB chunks of
			 * binner memory.  The biggest allocation we
			 * need to do is for the initial tile alloc +
			 * tile state buffer.  We can render to a
			 * maximum of (2048*2048) / (32*32) = 4096
			 * tiles in a frame (until we do floating
			 * point rendering, at which point it would be
			 * 8192).  Tile state is 48b/tile (rounded to
			 * a page), and tile alloc is 32b/tile
			 * (rounded to a page), plus a page of extra,
			 * for a total of 320kb for our worst-case.
			 * We choose 512kb so that it divides evenly
			 * into our 16MB, and the rest of the 512kb
			 * will be used as storage for the overflow
			 * from the initial 32b CL per bin.
			 */
			vc4->bin_alloc_size = 512 * 1024;
			vc4->bin_alloc_used = 0;
			vc4->bin_alloc_overflow = 0;
			WARN_ON_ONCE(sizeof(vc4->bin_alloc_used) * 8 !=
				     bo->base.base.size / vc4->bin_alloc_size);

			kref_init(&vc4->bin_bo_kref);

			/* Enable the out-of-memory interrupt to set our
			 * newly-allocated binner BO, potentially from an
			 * already-pending-but-masked interrupt.
			 */
			V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);

			break;
		}

		/* Put it on the list to free later, and try again. */
		list_add(&bo->unref_head, &list);
	}

	/* Free all the BOs we allocated but didn't choose. */
	while (!list_empty(&list)) {
		struct vc4_bo *bo = list_last_entry(&list,
						    struct vc4_bo, unref_head);

		list_del(&bo->unref_head);
		drm_gem_object_put(&bo->base.base);
	}

	return ret;
}

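/*
 * Take a reference on the binner BO, allocating it on first use.  If
 * "used" is non-NULL it records whether this caller already holds a
 * reference, so repeated calls don't take extra ones.  Balanced by
 * vc4_v3d_bin_bo_put().
 */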
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
{
	int ret = 0;

	mutex_lock(&vc4->bin_bo_lock);

	if (used && *used)
		goto complete;

	if (vc4->bin_bo)
		kref_get(&vc4->bin_bo_kref);
	else
		ret = bin_bo_alloc(vc4);

	if (ret == 0 && used)
		*used = true;

complete:
	mutex_unlock(&vc4->bin_bo_lock);

	return ret;
}

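/* Called when the last reference to the binner BO is dropped. */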
static void bin_bo_release(struct kref *ref)
{
	struct vc4_dev *vc4 = container_of(ref, struct vc4_dev, bin_bo_kref);

	if (WARN_ON_ONCE(!vc4->bin_bo))
		return;

	drm_gem_object_put(&vc4->bin_bo->base.base);
	vc4->bin_bo = NULL;
}

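/* Drop a reference taken by vc4_v3d_bin_bo_get(). */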
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
{
	mutex_lock(&vc4->bin_bo_lock);
	kref_put(&vc4->bin_bo_kref, bin_bo_release);
	mutex_unlock(&vc4->bin_bo_lock);
}

#ifdef CONFIG_PM
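/*
 * Runtime PM callbacks: suspend quiesces the V3D interrupt and gates
 * the clock; resume re-enables the clock, reinitializes the hardware,
 * and restores the IRQ state torn down in suspend.
 */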
static int vc4_v3d_runtime_suspend(struct device *dev)
{
	struct vc4_v3d *v3d = dev_get_drvdata(dev);
	struct vc4_dev *vc4 = v3d->vc4;

	vc4_irq_uninstall(&vc4->base);

	clk_disable_unprepare(v3d->clk);

	return 0;
}

static int vc4_v3d_runtime_resume(struct device *dev)
{
	struct vc4_v3d *v3d = dev_get_drvdata(dev);
	struct vc4_dev *vc4 = v3d->vc4;
	int ret;

	ret = clk_prepare_enable(v3d->clk);
	if (ret != 0)
		return ret;

	vc4_v3d_init_hw(&vc4->base);

	/* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
	enable_irq(vc4->base.irq);
	vc4_irq_postinstall(&vc4->base);

	return 0;
}
#endif

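/*
 * Component bind: map registers, get the clock, verify V3D_IDENT0,
 * initialize the hardware, install the IRQ handler, and enable runtime
 * PM with autosuspend before registering the debugfs entries.
 */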
static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_v3d *v3d = NULL;
	int ret;

	v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
	if (!v3d)
		return -ENOMEM;

	dev_set_drvdata(dev, v3d);

	v3d->pdev = pdev;

	v3d->regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(v3d->regs))
		return PTR_ERR(v3d->regs);
	v3d->regset.base = v3d->regs;
	v3d->regset.regs = v3d_regs;
	v3d->regset.nregs = ARRAY_SIZE(v3d_regs);

	vc4->v3d = v3d;
	v3d->vc4 = vc4;

	v3d->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(v3d->clk)) {
		int ret = PTR_ERR(v3d->clk);

		if (ret == -ENOENT) {
			/* bcm2835 didn't have a clock reference in the DT. */
			ret = 0;
			v3d->clk = NULL;
		} else {
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get V3D clock: %d\n",
					ret);
			return ret;
		}
	}

	if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
		DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
			  V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
		return -EINVAL;
	}

	ret = clk_prepare_enable(v3d->clk);
	if (ret != 0)
		return ret;

	/* Reset the binner overflow address/size at setup, to be sure
	 * we don't reuse an old one.
	 */
	V3D_WRITE(V3D_BPOA, 0);
	V3D_WRITE(V3D_BPOS, 0);

	vc4_v3d_init_hw(drm);

	ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
	if (ret) {
		DRM_ERROR("Failed to install IRQ handler\n");
		return ret;
	}

	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
	pm_runtime_enable(dev);

	vc4_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);
	vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);

	return 0;
}

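/*
 * Component unbind: tear down runtime PM and the IRQ handler, clear the
 * binner overflow registers, and detach the V3D from the vc4 device.
 */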
static void vc4_v3d_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);

	pm_runtime_disable(dev);

	drm_irq_uninstall(drm);

	/* Disable the binner's overflow memory address, so the next
	 * driver probe (if any) doesn't try to reuse our old
	 * allocation.
	 */
	V3D_WRITE(V3D_BPOA, 0);
	V3D_WRITE(V3D_BPOS, 0);

	vc4->v3d = NULL;
}

static const struct dev_pm_ops vc4_v3d_pm_ops = {
	SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
};

static const struct component_ops vc4_v3d_ops = {
	.bind = vc4_v3d_bind,
	.unbind = vc4_v3d_unbind,
};

static int vc4_v3d_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_v3d_ops);
}

static int vc4_v3d_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_v3d_ops);
	return 0;
}

const struct of_device_id vc4_v3d_dt_match[] = {
	{ .compatible = "brcm,bcm2835-v3d" },
	{ .compatible = "brcm,cygnus-v3d" },
	{ .compatible = "brcm,vc4-v3d" },
	{}
};

struct platform_driver vc4_v3d_driver = {
	.probe = vc4_v3d_dev_probe,
	.remove = vc4_v3d_dev_remove,
	.driver = {
		.name = "vc4_v3d",
		.of_match_table = vc4_v3d_dt_match,
		.pm = &vc4_v3d_pm_ops,
	},
};