xref: /OK3568_Linux_fs/kernel/drivers/gpu/host1x/cdma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Tegra host1x Command DMA
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2010-2013, NVIDIA Corporation.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <asm/cacheflush.h>
10*4882a593Smuzhiyun #include <linux/device.h>
11*4882a593Smuzhiyun #include <linux/dma-mapping.h>
12*4882a593Smuzhiyun #include <linux/host1x.h>
13*4882a593Smuzhiyun #include <linux/interrupt.h>
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun #include <linux/kfifo.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <trace/events/host1x.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include "cdma.h"
20*4882a593Smuzhiyun #include "channel.h"
21*4882a593Smuzhiyun #include "dev.h"
22*4882a593Smuzhiyun #include "debug.h"
23*4882a593Smuzhiyun #include "job.h"
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun  * push_buffer
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  * The push buffer is a circular array of words to be fetched by command DMA.
29*4882a593Smuzhiyun  * Note that it works slightly differently to the sync queue; fence == pos
30*4882a593Smuzhiyun  * means that the push buffer is full, not empty.
31*4882a593Smuzhiyun  */
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun /*
34*4882a593Smuzhiyun  * Typically the commands written into the push buffer are a pair of words. We
35*4882a593Smuzhiyun  * use slots to represent each of these pairs and to simplify things. Note the
36*4882a593Smuzhiyun  * strange number of slots allocated here. 512 slots will fit exactly within a
37*4882a593Smuzhiyun  * single memory page. We also need one additional word at the end of the push
38*4882a593Smuzhiyun  * buffer for the RESTART opcode that will instruct the CDMA to jump back to
39*4882a593Smuzhiyun  * the beginning of the push buffer. With 512 slots, this means that we'll use
40*4882a593Smuzhiyun  * 2 memory pages and waste 4092 bytes of the second page that will never be
41*4882a593Smuzhiyun  * used.
42*4882a593Smuzhiyun  */
43*4882a593Smuzhiyun #define HOST1X_PUSHBUFFER_SLOTS	511
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun /*
46*4882a593Smuzhiyun  * Clean up push buffer resources
47*4882a593Smuzhiyun  */
host1x_pushbuffer_destroy(struct push_buffer * pb)48*4882a593Smuzhiyun static void host1x_pushbuffer_destroy(struct push_buffer *pb)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun 	struct host1x_cdma *cdma = pb_to_cdma(pb);
51*4882a593Smuzhiyun 	struct host1x *host1x = cdma_to_host1x(cdma);
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	if (!pb->mapped)
54*4882a593Smuzhiyun 		return;
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	if (host1x->domain) {
57*4882a593Smuzhiyun 		iommu_unmap(host1x->domain, pb->dma, pb->alloc_size);
58*4882a593Smuzhiyun 		free_iova(&host1x->iova, iova_pfn(&host1x->iova, pb->dma));
59*4882a593Smuzhiyun 	}
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	pb->mapped = NULL;
64*4882a593Smuzhiyun 	pb->phys = 0;
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun /*
68*4882a593Smuzhiyun  * Init push buffer resources
69*4882a593Smuzhiyun  */
static int host1x_pushbuffer_init(struct push_buffer *pb)
{
	struct host1x_cdma *cdma = pb_to_cdma(pb);
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct iova *alloc;
	u32 size;
	int err;

	pb->mapped = NULL;
	pb->phys = 0;
	/* each slot holds a pair of 32-bit words, i.e. 8 bytes */
	pb->size = HOST1X_PUSHBUFFER_SLOTS * 8;

	/* one extra word at the end for the RESTART opcode that wraps CDMA */
	size = pb->size + 4;

	/* initialize buffer pointers */
	pb->fence = pb->size - 8;
	pb->pos = 0;

	if (host1x->domain) {
		unsigned long shift;

		/* IOMMU path: allocate memory, carve out an IOVA, map it */
		size = iova_align(&host1x->iova, size);

		pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
					  GFP_KERNEL);
		if (!pb->mapped)
			return -ENOMEM;

		shift = iova_shift(&host1x->iova);
		alloc = alloc_iova(&host1x->iova, size >> shift,
				   host1x->iova_end >> shift, true);
		if (!alloc) {
			err = -ENOMEM;
			goto iommu_free_mem;
		}

		/* CDMA only ever reads the push buffer, hence IOMMU_READ */
		pb->dma = iova_dma_addr(&host1x->iova, alloc);
		err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
				IOMMU_READ);
		if (err)
			goto iommu_free_iova;
	} else {
		/* no IOMMU: the device address is the DMA address itself */
		pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
					  GFP_KERNEL);
		if (!pb->mapped)
			return -ENOMEM;

		pb->dma = pb->phys;
	}

	/* remember the (possibly IOVA-aligned) size for later teardown */
	pb->alloc_size = size;

	host1x_hw_pushbuffer_init(host1x, pb);

	return 0;

iommu_free_iova:
	__free_iova(&host1x->iova, alloc);
iommu_free_mem:
	dma_free_wc(host1x->dev, size, pb->mapped, pb->phys);

	return err;
}
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun /*
135*4882a593Smuzhiyun  * Push two words to the push buffer
136*4882a593Smuzhiyun  * Caller must ensure push buffer is not full
137*4882a593Smuzhiyun  */
/*
 * Write one two-word slot at the current position and advance it,
 * wrapping around at the end of the buffer. The caller guarantees
 * there is at least one free slot.
 */
static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
{
	/* pb->pos is a byte offset, hence the void * arithmetic */
	u32 *slot = (u32 *)((void *)pb->mapped + pb->pos);

	WARN_ON(pb->pos == pb->fence);

	slot[0] = op1;
	slot[1] = op2;

	pb->pos += 8;
	if (pb->pos >= pb->size)
		pb->pos -= pb->size;
}
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun /*
152*4882a593Smuzhiyun  * Pop a number of two word slots from the push buffer
153*4882a593Smuzhiyun  * Caller must ensure push buffer is not empty
154*4882a593Smuzhiyun  */
host1x_pushbuffer_pop(struct push_buffer * pb,unsigned int slots)155*4882a593Smuzhiyun static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
156*4882a593Smuzhiyun {
157*4882a593Smuzhiyun 	/* Advance the next write position */
158*4882a593Smuzhiyun 	pb->fence += slots * 8;
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	if (pb->fence >= pb->size)
161*4882a593Smuzhiyun 		pb->fence -= pb->size;
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun /*
165*4882a593Smuzhiyun  * Return the number of two word slots free in the push buffer
166*4882a593Smuzhiyun  */
host1x_pushbuffer_space(struct push_buffer * pb)167*4882a593Smuzhiyun static u32 host1x_pushbuffer_space(struct push_buffer *pb)
168*4882a593Smuzhiyun {
169*4882a593Smuzhiyun 	unsigned int fence = pb->fence;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	if (pb->fence < pb->pos)
172*4882a593Smuzhiyun 		fence += pb->size;
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	return (fence - pb->pos) / 8;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun /*
178*4882a593Smuzhiyun  * Sleep (if necessary) until the requested event happens
179*4882a593Smuzhiyun  *   - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
180*4882a593Smuzhiyun  *     - Returns 1
181*4882a593Smuzhiyun  *   - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
182*4882a593Smuzhiyun  *     - Return the amount of space (> 0)
183*4882a593Smuzhiyun  * Must be called with the cdma lock held.
184*4882a593Smuzhiyun  */
unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
				     enum cdma_event event)
{
	for (;;) {
		struct push_buffer *pb = &cdma->push_buffer;
		unsigned int space;

		/* evaluate the wait condition; non-zero means "satisfied" */
		switch (event) {
		case CDMA_EVENT_SYNC_QUEUE_EMPTY:
			space = list_empty(&cdma->sync_queue) ? 1 : 0;
			break;

		case CDMA_EVENT_PUSH_BUFFER_SPACE:
			space = host1x_pushbuffer_space(pb);
			break;

		default:
			/*
			 * NOTE(review): -EINVAL is returned through an
			 * unsigned return type, so callers see a huge
			 * positive value — confirm no caller relies on a
			 * negative error here.
			 */
			WARN_ON(1);
			return -EINVAL;
		}

		if (space)
			return space;

		trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
				       event);

		/* If somebody has managed to already start waiting, yield */
		if (cdma->event != CDMA_EVENT_NONE) {
			mutex_unlock(&cdma->lock);
			schedule();
			mutex_lock(&cdma->lock);
			continue;
		}

		/* register as the single waiter, then sleep unlocked */
		cdma->event = event;

		mutex_unlock(&cdma->lock);
		wait_for_completion(&cdma->complete);
		mutex_lock(&cdma->lock);
	}

	/* unreachable: the loop only exits via the returns above */
	return 0;
}
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun /*
231*4882a593Smuzhiyun  * Sleep (if necessary) until the push buffer has enough free space.
232*4882a593Smuzhiyun  *
233*4882a593Smuzhiyun  * Must be called with the cdma lock held.
234*4882a593Smuzhiyun  */
static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
					     struct host1x_cdma *cdma,
					     unsigned int needed)
{
	while (true) {
		struct push_buffer *pb = &cdma->push_buffer;
		unsigned int space;

		/* done as soon as enough contiguous slots are free */
		space = host1x_pushbuffer_space(pb);
		if (space >= needed)
			break;

		trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
				       CDMA_EVENT_PUSH_BUFFER_SPACE);

		/* kick the hardware so it drains slots while we wait */
		host1x_hw_cdma_flush(host1x, cdma);

		/* If somebody has managed to already start waiting, yield */
		if (cdma->event != CDMA_EVENT_NONE) {
			mutex_unlock(&cdma->lock);
			schedule();
			mutex_lock(&cdma->lock);
			continue;
		}

		/* register as the single waiter, then sleep unlocked */
		cdma->event = CDMA_EVENT_PUSH_BUFFER_SPACE;

		mutex_unlock(&cdma->lock);
		wait_for_completion(&cdma->complete);
		mutex_lock(&cdma->lock);
	}

	/* always succeeds; only ever returns 0 */
	return 0;
}
269*4882a593Smuzhiyun /*
270*4882a593Smuzhiyun  * Start timer that tracks the time spent by the job.
271*4882a593Smuzhiyun  * Must be called with the cdma lock held.
272*4882a593Smuzhiyun  */
cdma_start_timer_locked(struct host1x_cdma * cdma,struct host1x_job * job)273*4882a593Smuzhiyun static void cdma_start_timer_locked(struct host1x_cdma *cdma,
274*4882a593Smuzhiyun 				    struct host1x_job *job)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun 	struct host1x *host = cdma_to_host1x(cdma);
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	if (cdma->timeout.client) {
279*4882a593Smuzhiyun 		/* timer already started */
280*4882a593Smuzhiyun 		return;
281*4882a593Smuzhiyun 	}
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	cdma->timeout.client = job->client;
284*4882a593Smuzhiyun 	cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
285*4882a593Smuzhiyun 	cdma->timeout.syncpt_val = job->syncpt_end;
286*4882a593Smuzhiyun 	cdma->timeout.start_ktime = ktime_get();
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	schedule_delayed_work(&cdma->timeout.wq,
289*4882a593Smuzhiyun 			      msecs_to_jiffies(job->timeout));
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun /*
293*4882a593Smuzhiyun  * Stop timer when a buffer submission completes.
294*4882a593Smuzhiyun  * Must be called with the cdma lock held.
295*4882a593Smuzhiyun  */
stop_cdma_timer_locked(struct host1x_cdma * cdma)296*4882a593Smuzhiyun static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun 	cancel_delayed_work(&cdma->timeout.wq);
299*4882a593Smuzhiyun 	cdma->timeout.client = NULL;
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun /*
303*4882a593Smuzhiyun  * For all sync queue entries that have already finished according to the
304*4882a593Smuzhiyun  * current sync point registers:
305*4882a593Smuzhiyun  *  - unpin & unref their mems
306*4882a593Smuzhiyun  *  - pop their push buffer slots
307*4882a593Smuzhiyun  *  - remove them from the sync queue
308*4882a593Smuzhiyun  * This is normally called from the host code's worker thread, but can be
309*4882a593Smuzhiyun  * called manually if necessary.
310*4882a593Smuzhiyun  * Must be called with the cdma lock held.
311*4882a593Smuzhiyun  */
static void update_cdma_locked(struct host1x_cdma *cdma)
{
	bool signal = false;
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct host1x_job *job, *n;

	/* If CDMA is stopped, queue is cleared and we can return */
	if (!cdma->running)
		return;

	/*
	 * Walk the sync queue, reading the sync point registers as necessary,
	 * to consume as many sync queue entries as possible without blocking
	 */
	list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
		struct host1x_syncpt *sp =
			host1x_syncpt_get(host1x, job->syncpt_id);

		/* Check whether this syncpt has completed, and bail if not */
		if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
			/* Start timer on next pending syncpt */
			if (job->timeout)
				cdma_start_timer_locked(cdma, job);

			break;
		}

		/* Cancel timeout, when a buffer completes */
		if (cdma->timeout.client)
			stop_cdma_timer_locked(cdma);

		/* Unpin the memory */
		host1x_job_unpin(job);

		/* Pop push buffer slots */
		if (job->num_slots) {
			struct push_buffer *pb = &cdma->push_buffer;

			host1x_pushbuffer_pop(pb, job->num_slots);

			/* freed slots may satisfy a push-buffer waiter */
			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
				signal = true;
		}

		/* drop the reference taken at submit in host1x_cdma_end() */
		list_del(&job->list);
		host1x_job_put(job);
	}

	/* an empty queue may satisfy a sync-queue-empty waiter */
	if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
	    list_empty(&cdma->sync_queue))
		signal = true;

	/* clear the event before completing so a new waiter can register */
	if (signal) {
		cdma->event = CDMA_EVENT_NONE;
		complete(&cdma->complete);
	}
}
369*4882a593Smuzhiyun 
/*
 * Timeout recovery: resynchronize the sync queue with the hardware syncpt
 * state, CPU-increment the syncpts of the job that timed out, and restart
 * CDMA at the first unexecuted job (or idle if there is none).
 * Must be called with the cdma lock held.
 */
void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
				   struct device *dev)
{
	struct host1x *host1x = cdma_to_host1x(cdma);
	u32 restart_addr, syncpt_incrs, syncpt_val;
	struct host1x_job *job, *next_job = NULL;

	syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);

	dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
		__func__, syncpt_val);

	/*
	 * Move the sync_queue read pointer to the first entry that hasn't
	 * completed based on the current HW syncpt value. It's likely there
	 * won't be any (i.e. we're still at the head), but covers the case
	 * where a syncpt incr happens just prior/during the teardown.
	 */

	dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
		__func__);

	list_for_each_entry(job, &cdma->sync_queue, list) {
		/* first job whose end threshold the HW hasn't reached */
		if (syncpt_val < job->syncpt_end) {

			if (!list_is_last(&job->list, &cdma->sync_queue))
				next_job = list_next_entry(job, list);

			goto syncpt_incr;
		}

		host1x_job_dump(dev, job);
	}

	/* all jobs have been completed */
	job = NULL;

syncpt_incr:

	/*
	 * Increment with CPU the remaining syncpts of a partially executed job.
	 *
	 * CDMA will continue execution starting with the next job or will get
	 * into idle state.
	 */
	if (next_job)
		restart_addr = next_job->first_get;
	else
		restart_addr = cdma->last_pos;

	/* do CPU increments for the remaining syncpts */
	if (job) {
		dev_dbg(dev, "%s: perform CPU incr on pending buffers\n",
			__func__);

		/* won't need a timeout when replayed */
		job->timeout = 0;

		/* how many increments the HW never performed for this job */
		syncpt_incrs = job->syncpt_end - syncpt_val;
		dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);

		host1x_job_dump(dev, job);

		/* safe to use CPU to incr syncpts */
		host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
						syncpt_incrs, job->syncpt_end,
						job->num_slots);

		dev_dbg(dev, "%s: finished sync_queue modification\n",
			__func__);
	}

	/* roll back DMAGET and start up channel again */
	host1x_hw_cdma_resume(host1x, cdma, restart_addr);
}
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun /*
447*4882a593Smuzhiyun  * Create a cdma
448*4882a593Smuzhiyun  */
host1x_cdma_init(struct host1x_cdma * cdma)449*4882a593Smuzhiyun int host1x_cdma_init(struct host1x_cdma *cdma)
450*4882a593Smuzhiyun {
451*4882a593Smuzhiyun 	int err;
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	mutex_init(&cdma->lock);
454*4882a593Smuzhiyun 	init_completion(&cdma->complete);
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun 	INIT_LIST_HEAD(&cdma->sync_queue);
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 	cdma->event = CDMA_EVENT_NONE;
459*4882a593Smuzhiyun 	cdma->running = false;
460*4882a593Smuzhiyun 	cdma->torndown = false;
461*4882a593Smuzhiyun 
462*4882a593Smuzhiyun 	err = host1x_pushbuffer_init(&cdma->push_buffer);
463*4882a593Smuzhiyun 	if (err)
464*4882a593Smuzhiyun 		return err;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	return 0;
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun /*
470*4882a593Smuzhiyun  * Destroy a cdma
471*4882a593Smuzhiyun  */
host1x_cdma_deinit(struct host1x_cdma * cdma)472*4882a593Smuzhiyun int host1x_cdma_deinit(struct host1x_cdma *cdma)
473*4882a593Smuzhiyun {
474*4882a593Smuzhiyun 	struct push_buffer *pb = &cdma->push_buffer;
475*4882a593Smuzhiyun 	struct host1x *host1x = cdma_to_host1x(cdma);
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun 	if (cdma->running) {
478*4882a593Smuzhiyun 		pr_warn("%s: CDMA still running\n", __func__);
479*4882a593Smuzhiyun 		return -EBUSY;
480*4882a593Smuzhiyun 	}
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	host1x_pushbuffer_destroy(pb);
483*4882a593Smuzhiyun 	host1x_hw_cdma_timeout_destroy(host1x, cdma);
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun 	return 0;
486*4882a593Smuzhiyun }
487*4882a593Smuzhiyun 
488*4882a593Smuzhiyun /*
489*4882a593Smuzhiyun  * Begin a cdma submit
490*4882a593Smuzhiyun  */
int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
{
	struct host1x *host1x = cdma_to_host1x(cdma);

	/*
	 * On success the lock stays held across the whole submit and is
	 * released by host1x_cdma_end(); on error it is dropped here.
	 */
	mutex_lock(&cdma->lock);

	if (job->timeout) {
		/* init state on first submit with timeout value */
		if (!cdma->timeout.initialized) {
			int err;

			err = host1x_hw_cdma_timeout_init(host1x, cdma,
							  job->syncpt_id);
			if (err) {
				mutex_unlock(&cdma->lock);
				return err;
			}
		}
	}

	/* lazily start the channel on the first submission */
	if (!cdma->running)
		host1x_hw_cdma_start(host1x, cdma);

	/* reset per-submit accounting; slots are claimed by the pushes */
	cdma->slots_free = 0;
	cdma->slots_used = 0;
	cdma->first_get = cdma->push_buffer.pos;

	trace_host1x_cdma_begin(dev_name(job->channel->dev));
	return 0;
}
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun /*
523*4882a593Smuzhiyun  * Push two words into a push buffer slot
524*4882a593Smuzhiyun  * Blocks as necessary if the push buffer is full.
525*4882a593Smuzhiyun  */
void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
{
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct push_buffer *pb = &cdma->push_buffer;
	u32 space = cdma->slots_free;

	if (host1x_debug_trace_cmdbuf)
		trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
				       op1, op2);

	/* out of claimed slots: kick the DMA and block until space frees */
	if (!space) {
		host1x_hw_cdma_flush(host1x, cdma);
		space = host1x_cdma_wait_locked(cdma,
						CDMA_EVENT_PUSH_BUFFER_SPACE);
	}

	/* consume one slot and write the opcode pair */
	cdma->slots_free = space - 1;
	cdma->slots_used++;
	host1x_pushbuffer_push(pb, op1, op2);
}
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun /*
548*4882a593Smuzhiyun  * Push four words into two consecutive push buffer slots. Note that extra
549*4882a593Smuzhiyun  * care needs to be taken not to split the two slots across the end of the
550*4882a593Smuzhiyun  * push buffer. Otherwise the RESTART opcode at the end of the push buffer
551*4882a593Smuzhiyun  * that ensures processing will restart at the beginning will break up the
552*4882a593Smuzhiyun  * four words.
553*4882a593Smuzhiyun  *
554*4882a593Smuzhiyun  * Blocks as necessary if the push buffer is full.
555*4882a593Smuzhiyun  */
void host1x_cdma_push_wide(struct host1x_cdma *cdma, u32 op1, u32 op2,
			   u32 op3, u32 op4)
{
	struct host1x_channel *channel = cdma_to_channel(cdma);
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct push_buffer *pb = &cdma->push_buffer;
	unsigned int needed = 2, extra = 0, i;
	unsigned int space = cdma->slots_free;

	if (host1x_debug_trace_cmdbuf)
		trace_host1x_cdma_push_wide(dev_name(channel->dev), op1, op2,
					    op3, op4);

	/*
	 * compute number of extra slots needed for padding: if the four
	 * words (16 bytes) would straddle the end of the buffer, pad up to
	 * the wrap point so the pair of slots stays contiguous
	 */
	if (pb->pos + 16 > pb->size) {
		extra = (pb->size - pb->pos) / 8;
		needed += extra;
	}

	/* block until padding + payload slots are all available at once */
	host1x_cdma_wait_pushbuffer_space(host1x, cdma, needed);
	space = host1x_pushbuffer_space(pb);

	cdma->slots_free = space - needed;
	cdma->slots_used += needed;

	/*
	 * Note that we rely on the fact that this is only used to submit wide
	 * gather opcodes, which consist of 3 words, and they are padded with
	 * a NOP to avoid having to deal with fractional slots (a slot always
	 * represents 2 words). The fourth opcode passed to this function will
	 * therefore always be a NOP.
	 *
	 * This works around a slight ambiguity when it comes to opcodes. For
	 * all current host1x incarnations the NOP opcode uses the exact same
	 * encoding (0x20000000), so we could hard-code the value here, but a
	 * new incarnation may change it and break that assumption.
	 */
	for (i = 0; i < extra; i++)
		host1x_pushbuffer_push(pb, op4, op4);

	host1x_pushbuffer_push(pb, op1, op2);
	host1x_pushbuffer_push(pb, op3, op4);
}
599*4882a593Smuzhiyun 
600*4882a593Smuzhiyun /*
601*4882a593Smuzhiyun  * End a cdma submit
602*4882a593Smuzhiyun  * Kick off DMA, add job to the sync queue, and a number of slots to be freed
603*4882a593Smuzhiyun  * from the pushbuffer. The handles for a submit must all be pinned at the same
604*4882a593Smuzhiyun  * time, but they can be unpinned in smaller chunks.
605*4882a593Smuzhiyun  */
void host1x_cdma_end(struct host1x_cdma *cdma,
		     struct host1x_job *job)
{
	struct host1x *host1x = cdma_to_host1x(cdma);
	/* whether the channel was idle before this job was queued */
	bool idle = list_empty(&cdma->sync_queue);

	host1x_hw_cdma_flush(host1x, cdma);

	/* record the slot span so update_cdma_locked() can pop it later */
	job->first_get = cdma->first_get;
	job->num_slots = cdma->slots_used;
	/* reference held by the sync queue; dropped on completion */
	host1x_job_get(job);
	list_add_tail(&job->list, &cdma->sync_queue);

	/* start timer on idle -> active transitions */
	if (job->timeout && idle)
		cdma_start_timer_locked(cdma, job);

	trace_host1x_cdma_end(dev_name(job->channel->dev));
	/* releases the lock taken in host1x_cdma_begin() */
	mutex_unlock(&cdma->lock);
}
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun /*
628*4882a593Smuzhiyun  * Update cdma state according to current sync point values
629*4882a593Smuzhiyun  */
host1x_cdma_update(struct host1x_cdma * cdma)630*4882a593Smuzhiyun void host1x_cdma_update(struct host1x_cdma *cdma)
631*4882a593Smuzhiyun {
632*4882a593Smuzhiyun 	mutex_lock(&cdma->lock);
633*4882a593Smuzhiyun 	update_cdma_locked(cdma);
634*4882a593Smuzhiyun 	mutex_unlock(&cdma->lock);
635*4882a593Smuzhiyun }
636