xref: /OK3568_Linux_fs/kernel/drivers/media/pci/ivtv/ivtv-queue.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun     buffer queues.
4*4882a593Smuzhiyun     Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
5*4882a593Smuzhiyun     Copyright (C) 2004  Chris Kennedy <c@groovy.org>
6*4882a593Smuzhiyun     Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include "ivtv-driver.h"
11*4882a593Smuzhiyun #include "ivtv-queue.h"
12*4882a593Smuzhiyun 
/* Copy at most 'copybytes' bytes of userspace data into 'buf', appending
   at the current fill position. The request is clamped to the free space
   left in the buffer (s->buf_size - buf->bytesused).

   Returns the number of bytes actually copied, or -EFAULT if the
   userspace read faulted. On success buf->bytesused is advanced. */
int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
{
	/* clamp to the remaining room in the buffer */
	if (s->buf_size - buf->bytesused < copybytes)
		copybytes = s->buf_size - buf->bytesused;

	if (copy_from_user(buf->buf + buf->bytesused, src, copybytes))
		return -EFAULT;

	buf->bytesused += copybytes;
	return copybytes;
}
23*4882a593Smuzhiyun 
ivtv_buf_swap(struct ivtv_buffer * buf)24*4882a593Smuzhiyun void ivtv_buf_swap(struct ivtv_buffer *buf)
25*4882a593Smuzhiyun {
26*4882a593Smuzhiyun 	int i;
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun 	for (i = 0; i < buf->bytesused; i += 4)
29*4882a593Smuzhiyun 		swab32s((u32 *)(buf->buf + i));
30*4882a593Smuzhiyun }
31*4882a593Smuzhiyun 
ivtv_queue_init(struct ivtv_queue * q)32*4882a593Smuzhiyun void ivtv_queue_init(struct ivtv_queue *q)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun 	INIT_LIST_HEAD(&q->list);
35*4882a593Smuzhiyun 	q->buffers = 0;
36*4882a593Smuzhiyun 	q->length = 0;
37*4882a593Smuzhiyun 	q->bytesused = 0;
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun 
ivtv_enqueue(struct ivtv_stream * s,struct ivtv_buffer * buf,struct ivtv_queue * q)40*4882a593Smuzhiyun void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun 	unsigned long flags;
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 	/* clear the buffer if it is going to be enqueued to the free queue */
45*4882a593Smuzhiyun 	if (q == &s->q_free) {
46*4882a593Smuzhiyun 		buf->bytesused = 0;
47*4882a593Smuzhiyun 		buf->readpos = 0;
48*4882a593Smuzhiyun 		buf->b_flags = 0;
49*4882a593Smuzhiyun 		buf->dma_xfer_cnt = 0;
50*4882a593Smuzhiyun 	}
51*4882a593Smuzhiyun 	spin_lock_irqsave(&s->qlock, flags);
52*4882a593Smuzhiyun 	list_add_tail(&buf->list, &q->list);
53*4882a593Smuzhiyun 	q->buffers++;
54*4882a593Smuzhiyun 	q->length += s->buf_size;
55*4882a593Smuzhiyun 	q->bytesused += buf->bytesused - buf->readpos;
56*4882a593Smuzhiyun 	spin_unlock_irqrestore(&s->qlock, flags);
57*4882a593Smuzhiyun }
58*4882a593Smuzhiyun 
ivtv_dequeue(struct ivtv_stream * s,struct ivtv_queue * q)59*4882a593Smuzhiyun struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
60*4882a593Smuzhiyun {
61*4882a593Smuzhiyun 	struct ivtv_buffer *buf = NULL;
62*4882a593Smuzhiyun 	unsigned long flags;
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 	spin_lock_irqsave(&s->qlock, flags);
65*4882a593Smuzhiyun 	if (!list_empty(&q->list)) {
66*4882a593Smuzhiyun 		buf = list_entry(q->list.next, struct ivtv_buffer, list);
67*4882a593Smuzhiyun 		list_del_init(q->list.next);
68*4882a593Smuzhiyun 		q->buffers--;
69*4882a593Smuzhiyun 		q->length -= s->buf_size;
70*4882a593Smuzhiyun 		q->bytesused -= buf->bytesused - buf->readpos;
71*4882a593Smuzhiyun 	}
72*4882a593Smuzhiyun 	spin_unlock_irqrestore(&s->qlock, flags);
73*4882a593Smuzhiyun 	return buf;
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun 
ivtv_queue_move_buf(struct ivtv_stream * s,struct ivtv_queue * from,struct ivtv_queue * to,int clear)76*4882a593Smuzhiyun static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
77*4882a593Smuzhiyun 		struct ivtv_queue *to, int clear)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	list_move_tail(from->list.next, &to->list);
82*4882a593Smuzhiyun 	from->buffers--;
83*4882a593Smuzhiyun 	from->length -= s->buf_size;
84*4882a593Smuzhiyun 	from->bytesused -= buf->bytesused - buf->readpos;
85*4882a593Smuzhiyun 	/* special handling for q_free */
86*4882a593Smuzhiyun 	if (clear)
87*4882a593Smuzhiyun 		buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
88*4882a593Smuzhiyun 	to->buffers++;
89*4882a593Smuzhiyun 	to->length += s->buf_size;
90*4882a593Smuzhiyun 	to->bytesused += buf->bytesused - buf->readpos;
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun 
/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
   If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
   If 'steal' != NULL, then buffers may also taken from that queue if
   needed, but only if 'from' is the free queue.

   The buffer is automatically cleared if it goes to the free queue. It is
   also cleared if buffers need to be taken from the 'steal' queue and
   the 'from' queue is the free queue.

   When 'from' is q_free, then needed_bytes is compared to the total
   available buffer length, otherwise needed_bytes is compared to the
   bytesused value. For the 'steal' queue the total available buffer
   length is always used.

   -ENOMEM is returned if the buffers could not be obtained, 0 if all
   buffers where obtained from the 'from' list and if non-zero then
   the number of stolen buffers is returned. */
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
		    struct ivtv_queue *to, int needed_bytes)
{
	unsigned long flags;
	int rc = 0;
	int from_free = from == &s->q_free;
	int to_free = to == &s->q_free;
	int bytes_available, bytes_steal;

	spin_lock_irqsave(&s->qlock, flags);
	/* needed_bytes == 0 means "drain 'from' completely"; treat it like a
	   free-queue move so the length-based accounting below is used */
	if (needed_bytes == 0) {
		from_free = 1;
		needed_bytes = from->length;
	}

	bytes_available = from_free ? from->length : from->bytesused;
	bytes_steal = (from_free && steal) ? steal->length : 0;

	/* refuse outright if even stealing cannot satisfy the request;
	   nothing has been moved yet so the queues are untouched */
	if (bytes_available + bytes_steal < needed_bytes) {
		spin_unlock_irqrestore(&s->qlock, flags);
		return -ENOMEM;
	}
	while (steal && bytes_available < needed_bytes) {
		struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		u16 dma_xfer_cnt = buf->dma_xfer_cnt;

		/* move buffers from the tail of the 'steal' queue to the tail of the
		   'from' queue. Always copy all the buffers with the same dma_xfer_cnt
		   value, this ensures that you do not end up with partial frame data
		   if one frame is stored in multiple buffers. */
		while (dma_xfer_cnt == buf->dma_xfer_cnt) {
			list_move_tail(steal->list.prev, &from->list);
			rc++;	/* rc counts stolen buffers; it is the return value */
			steal->buffers--;
			steal->length -= s->buf_size;
			steal->bytesused -= buf->bytesused - buf->readpos;
			/* stolen buffers are recycled: wipe their state before they
			   join the free ('from') queue */
			buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
			from->buffers++;
			from->length += s->buf_size;
			bytes_available += s->buf_size;
			if (list_empty(&steal->list))
				break;
			buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		}
	}
	if (from_free) {
		/* free-queue source: progress is measured in buffer capacity */
		u32 old_length = to->length;

		while (to->length - old_length < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, 1);
		}
	}
	else {
		/* data-carrying source: progress is measured in payload bytes;
		   buffers are only cleared when the destination is q_free */
		u32 old_bytesused = to->bytesused;

		while (to->bytesused - old_bytesused < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, to_free);
		}
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return rc;
}
172*4882a593Smuzhiyun 
ivtv_flush_queues(struct ivtv_stream * s)173*4882a593Smuzhiyun void ivtv_flush_queues(struct ivtv_stream *s)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun 	ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
176*4882a593Smuzhiyun 	ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
177*4882a593Smuzhiyun 	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
178*4882a593Smuzhiyun 	ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun 
/* Allocate all per-stream DMA bookkeeping (pending/processing SG arrays,
   the single hardware-visible SG element) and the stream's data buffers,
   enqueueing each buffer on q_free.

   Returns 0 on success (also when the stream has no buffers configured)
   or -ENOMEM; on partial failure everything already allocated is
   released before returning. */
int ivtv_stream_alloc(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
	int i;

	/* streams without buffers (unused stream types) need no allocation */
	if (s->buffers == 0)
		return 0;

	IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
		s->dma != PCI_DMA_NONE ? "DMA " : "",
		s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);

	/* host-side scatter-gather arrays; __GFP_NOWARN because failure is
	   reported with a driver-specific message instead */
	s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_pending == NULL) {
		IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
		return -ENOMEM;
	}
	s->sg_pending_size = 0;

	s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_processing == NULL) {
		IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		return -ENOMEM;
	}
	s->sg_processing_size = 0;

	/* single SG element handed to the hardware for each transfer */
	s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
					GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_dma == NULL) {
		IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		kfree(s->sg_processing);
		s->sg_processing = NULL;
		return -ENOMEM;
	}
	if (ivtv_might_use_dma(s)) {
		s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
				sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
		ivtv_stream_sync_for_cpu(s);
	}

	/* allocate stream buffers. Initially all buffers are in q_free. */
	for (i = 0; i < s->buffers; i++) {
		struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
						GFP_KERNEL|__GFP_NOWARN);

		if (buf == NULL)
			break;
		/* +256 bytes of slack; mirrored in the pci_map/unmap calls —
		   presumably headroom for hardware overruns, confirm against
		   the rest of the driver */
		buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(buf);
			break;
		}
		INIT_LIST_HEAD(&buf->list);
		if (ivtv_might_use_dma(s)) {
			buf->dma_handle = pci_map_single(s->itv->pdev,
				buf->buf, s->buf_size + 256, s->dma);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_enqueue(s, buf, &s->q_free);
	}
	/* all buffers allocated: success */
	if (i == s->buffers)
		return 0;
	/* partial buffer allocation: release everything, including the
	   buffers already enqueued on q_free */
	IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	ivtv_stream_free(s);
	return -ENOMEM;
}
252*4882a593Smuzhiyun 
/* Release everything ivtv_stream_alloc() set up: gather all buffers back
   onto q_free, unmap and free each one, then tear down the SG arrays and
   the mapped SG element. Safe to call after a partial allocation. */
void ivtv_stream_free(struct ivtv_stream *s)
{
	struct ivtv_buffer *buf;

	/* move all buffers to q_free */
	ivtv_flush_queues(s);

	/* empty q_free */
	while ((buf = ivtv_dequeue(s, &s->q_free))) {
		/* unmap before freeing; size matches the +256 used at map time */
		if (ivtv_might_use_dma(s))
			pci_unmap_single(s->itv->pdev, buf->dma_handle,
				s->buf_size + 256, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}

	/* Free SG Array/Lists */
	if (s->sg_dma != NULL) {
		if (s->sg_handle != IVTV_DMA_UNMAPPED) {
			pci_unmap_single(s->itv->pdev, s->sg_handle,
				 sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
			s->sg_handle = IVTV_DMA_UNMAPPED;
		}
		kfree(s->sg_pending);
		kfree(s->sg_processing);
		kfree(s->sg_dma);
		/* NULL the pointers so a later free/alloc cycle starts clean */
		s->sg_pending = NULL;
		s->sg_processing = NULL;
		s->sg_dma = NULL;
		s->sg_pending_size = 0;
		s->sg_processing_size = 0;
	}
}
286