// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  cx18 buffer queues
 *
 *  Derived from ivtv-queue.c
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 */

#include "cx18-driver.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-scb.h"
#include "cx18-io.h"

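/* Byte-swap the payload of a single buffer, 32 bits at a time */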
void cx18_buf_swap(struct cx18_buffer *buf)
{
	int i;

	for (i = 0; i < buf->bytesused; i += 4)
		swab32s((u32 *)(buf->buf + i));
}

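/* Byte-swap every used buffer in an MDL; stop at the first empty buffer */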
void _cx18_mdl_swap(struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;
		cx18_buf_swap(buf);
	}
}

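/* Reset a queue to an empty state: no MDLs, zero depth, zero bytes used */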
void cx18_queue_init(struct cx18_queue *q)
{
	INIT_LIST_HEAD(&q->list);
	atomic_set(&q->depth, 0);
	q->bytesused = 0;
}

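/*
 * Add an MDL to a queue, either at the front (LIFO) or the back (FIFO).
 * MDLs headed anywhere other than q_full are cleared first, and an enqueue
 * to q_busy is redirected to q_free when the firmware's MDL limit is hit;
 * the queue actually used is returned to the caller.
 */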
struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
				 struct cx18_queue *q, int to_front)
{
	/* clear the mdl if it is not to be enqueued to the full queue */
	if (q != &s->q_full) {
		mdl->bytesused = 0;
		mdl->readpos = 0;
		mdl->m_flags = 0;
		mdl->skipped = 0;
		mdl->curr_buf = NULL;
	}

	/* q_busy is restricted to a max buffer count imposed by firmware */
	if (q == &s->q_busy &&
	    atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
		q = &s->q_free;

	spin_lock(&q->lock);

	if (to_front)
		list_add(&mdl->list, &q->list); /* LIFO */
	else
		list_add_tail(&mdl->list, &q->list); /* FIFO */
	q->bytesused += mdl->bytesused - mdl->readpos;
	atomic_inc(&q->depth);

	spin_unlock(&q->lock);
	return q;
}

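/* Remove and return the MDL at the front of a queue, or NULL if it is empty */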
struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
	struct cx18_mdl *mdl = NULL;

	spin_lock(&q->lock);
	if (!list_empty(&q->list)) {
		mdl = list_first_entry(&q->list, struct cx18_mdl, list);
		list_del_init(&mdl->list);
		q->bytesused -= mdl->bytesused - mdl->readpos;
		mdl->skipped = 0;
		atomic_dec(&q->depth);
	}
	spin_unlock(&q->lock);
	return mdl;
}

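/*
 * Distribute an MDL's bytesused count across its buffers and sync each
 * buffer for CPU access; buffers are filled in list order, buf_size at a time.
 */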
static void _cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
					  struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;
	u32 buf_size = s->buf_size;
	u32 bytesused = mdl->bytesused;

	list_for_each_entry(buf, &mdl->buf_list, list) {
		buf->readpos = 0;
		if (bytesused >= buf_size) {
			buf->bytesused = buf_size;
			bytesused -= buf_size;
		} else {
			buf->bytesused = bytesused;
			bytesused = 0;
		}
		cx18_buf_sync_for_cpu(s, buf);
	}
}

static inline void cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
						struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		buf->bytesused = mdl->bytesused;
		buf->readpos = 0;
		cx18_buf_sync_for_cpu(s, buf);
	} else {
		_cx18_mdl_update_bufs_for_cpu(s, mdl);
	}
}

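/*
 * Find the MDL the firmware reported as done (by id) on q_busy, remove it,
 * and prepare its buffers for the CPU.  MDLs that keep getting skipped are
 * assumed lost by the firmware and are swept back onto q_free.
 */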
struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
	u32 bytesused)
{
	struct cx18 *cx = s->cx;
	struct cx18_mdl *mdl;
	struct cx18_mdl *tmp;
	struct cx18_mdl *ret = NULL;
	LIST_HEAD(sweep_up);

	/*
	 * We don't have to acquire multiple q locks here, because we are
	 * serialized by the single threaded work handler.
	 * MDLs from the firmware will thus remain in order as
	 * they are moved from q_busy to q_full or to the dvb ring buffer.
	 */
	spin_lock(&s->q_busy.lock);
	list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) {
		/*
		 * We should find what the firmware told us is done,
		 * right at the front of the queue.  If we don't, we likely have
		 * missed an mdl done message from the firmware.
		 * Once we skip an mdl repeatedly, relative to the size of
		 * q_busy, we have high confidence we've missed it.
		 */
		if (mdl->id != id) {
			mdl->skipped++;
			if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {
				/* mdl must have fallen out of rotation */
				CX18_WARN("Skipped %s, MDL %d, %d times - it must have dropped out of rotation\n",
					  s->name, mdl->id,
					  mdl->skipped);
				/* Sweep it up to put it back into rotation */
				list_move_tail(&mdl->list, &sweep_up);
				atomic_dec(&s->q_busy.depth);
			}
			continue;
		}
		/*
		 * We pull the desired mdl off of the queue here.  Something
		 * will have to put it back on a queue later.
		 */
		list_del_init(&mdl->list);
		atomic_dec(&s->q_busy.depth);
		ret = mdl;
		break;
	}
	spin_unlock(&s->q_busy.lock);

	/*
	 * We found the mdl for which we were looking.  Get it ready for
	 * the caller to put on q_full or in the dvb ring buffer.
	 */
	if (ret != NULL) {
		ret->bytesused = bytesused;
		ret->skipped = 0;
		/* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */
		cx18_mdl_update_bufs_for_cpu(s, ret);
		if (s->type != CX18_ENC_STREAM_TYPE_TS)
			set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags);
	}

	/* Put any mdls the firmware is ignoring back into normal rotation */
	list_for_each_entry_safe(mdl, tmp, &sweep_up, list) {
		list_del_init(&mdl->list);
		cx18_enqueue(s, mdl, &s->q_free);
	}
	return ret;
}

/* Move all MDLs of a queue to another queue, clearing each MDL as it moves */
static void cx18_queue_flush(struct cx18_stream *s,
			     struct cx18_queue *q_src, struct cx18_queue *q_dst)
{
	struct cx18_mdl *mdl;

	/* It only makes sense to flush to q_free or q_idle */
	if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy)
		return;

	spin_lock(&q_src->lock);
	spin_lock(&q_dst->lock);
	while (!list_empty(&q_src->list)) {
		mdl = list_first_entry(&q_src->list, struct cx18_mdl, list);
		list_move_tail(&mdl->list, &q_dst->list);
		mdl->bytesused = 0;
		mdl->readpos = 0;
		mdl->m_flags = 0;
		mdl->skipped = 0;
		mdl->curr_buf = NULL;
		atomic_inc(&q_dst->depth);
	}
	cx18_queue_init(q_src);
	spin_unlock(&q_src->lock);
	spin_unlock(&q_dst->lock);
}

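/* Return all MDLs on q_busy and q_full to q_free, clearing them on the way */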
void cx18_flush_queues(struct cx18_stream *s)
{
	cx18_queue_flush(s, &s->q_busy, &s->q_free);
	cx18_queue_flush(s, &s->q_full, &s->q_free);
}

/*
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_unload_queues(struct cx18_stream *s)
{
	struct cx18_queue *q_idle = &s->q_idle;
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;

	/* Move all MDLs to q_idle */
	cx18_queue_flush(s, &s->q_busy, q_idle);
	cx18_queue_flush(s, &s->q_full, q_idle);
	cx18_queue_flush(s, &s->q_free, q_idle);

	/* Reset MDL id's and move all buffers back to the stream's buf_pool */
	spin_lock(&q_idle->lock);
	list_for_each_entry(mdl, &q_idle->list, list) {
		while (!list_empty(&mdl->buf_list)) {
			buf = list_first_entry(&mdl->buf_list,
					       struct cx18_buffer, list);
			list_move_tail(&buf->list, &s->buf_pool);
			buf->bytesused = 0;
			buf->readpos = 0;
		}
		mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */
		/* all other mdl fields were cleared by cx18_queue_flush() */
	}
	spin_unlock(&q_idle->lock);
}

/*
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_load_queues(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;
	int mdl_id;
	int i;
	u32 partial_buf_size;

	/*
	 * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free
	 * Excess MDLs are left on q_idle
	 * Excess buffers are left in buf_pool and/or on an MDL in q_idle
	 */
	mdl_id = s->mdl_base_idx;
	for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl;
	     mdl != NULL && i == s->bufs_per_mdl;
	     mdl = cx18_dequeue(s, &s->q_idle)) {

		mdl->id = mdl_id;

		for (i = 0; i < s->bufs_per_mdl; i++) {
			if (list_empty(&s->buf_pool))
				break;

			buf = list_first_entry(&s->buf_pool, struct cx18_buffer,
					       list);
			list_move_tail(&buf->list, &mdl->buf_list);

			/* update the firmware's MDL array with this buffer */
			cx18_writel(cx, buf->dma_handle,
				    &cx->scb->cpu_mdl[mdl_id + i].paddr);
			cx18_writel(cx, s->buf_size,
				    &cx->scb->cpu_mdl[mdl_id + i].length);
		}

		if (i == s->bufs_per_mdl) {
			/*
			 * The encoder doesn't honor s->mdl_size.  So in the
			 * case of a non-integral number of buffers to meet
			 * mdl_size, we lie about the size of the last buffer
			 * in the MDL to get the encoder to really only send
			 * us mdl_size bytes per MDL transfer.
			 */
			partial_buf_size = s->mdl_size % s->buf_size;
			if (partial_buf_size) {
				cx18_writel(cx, partial_buf_size,
				      &cx->scb->cpu_mdl[mdl_id + i - 1].length);
			}
			cx18_enqueue(s, mdl, &s->q_free);
		} else {
			/* Not enough buffers for this MDL; we won't use it */
			cx18_push(s, mdl, &s->q_idle);
		}
		mdl_id += i;
	}
}

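/* Sync each buffer of an MDL for DMA (device) access */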
void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
{
	int dma = s->dma;
	u32 buf_size = s->buf_size;
	struct pci_dev *pci_dev = s->cx->pci_dev;
	struct cx18_buffer *buf;

	list_for_each_entry(buf, &mdl->buf_list, list)
		pci_dma_sync_single_for_device(pci_dev, buf->dma_handle,
					       buf_size, dma);
}

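/*
 * Allocate a stream's buffers and MDLs (one MDL per buffer), map the buffers
 * for DMA, and reserve the stream's range of MDL entries in the SCB.
 */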
int cx18_stream_alloc(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	int i;

	if (s->buffers == 0)
		return 0;

	CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%d.%02d kB total)\n",
		s->name, s->buffers, s->buf_size,
		s->buffers * s->buf_size / 1024,
		(s->buffers * s->buf_size * 100 / 1024) % 100);

	if (((char __iomem *)&cx->scb->cpu_mdl[cx->free_mdl_idx + s->buffers] -
				(char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
		unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
					((char __iomem *)cx->scb->cpu_mdl));

		CX18_ERR("Too many buffers, cannot fit in SCB area\n");
		CX18_ERR("Max buffers = %zu\n",
			bufsz / sizeof(struct cx18_mdl_ent));
		return -ENOMEM;
	}

	s->mdl_base_idx = cx->free_mdl_idx;

	/* allocate stream buffers and MDLs */
	for (i = 0; i < s->buffers; i++) {
		struct cx18_mdl *mdl;
		struct cx18_buffer *buf;

		/* 1 MDL per buffer to handle the worst & also default case */
		mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN);
		if (mdl == NULL)
			break;

		buf = kzalloc(sizeof(struct cx18_buffer),
				GFP_KERNEL|__GFP_NOWARN);
		if (buf == NULL) {
			kfree(mdl);
			break;
		}

		buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(mdl);
			kfree(buf);
			break;
		}

		INIT_LIST_HEAD(&mdl->list);
		INIT_LIST_HEAD(&mdl->buf_list);
		mdl->id = s->mdl_base_idx; /* a somewhat safe value */
		cx18_enqueue(s, mdl, &s->q_idle);

		INIT_LIST_HEAD(&buf->list);
		buf->dma_handle = pci_map_single(s->cx->pci_dev,
				buf->buf, s->buf_size, s->dma);
		cx18_buf_sync_for_cpu(s, buf);
		list_add_tail(&buf->list, &s->buf_pool);
	}
	if (i == s->buffers) {
		cx->free_mdl_idx += s->buffers;
		return 0;
	}
	CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	cx18_stream_free(s);
	return -ENOMEM;
}

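/*
 * Free everything cx18_stream_alloc() set up: unload the queues, free the
 * MDLs left on q_idle, and unmap and free every buffer in buf_pool.
 */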
void cx18_stream_free(struct cx18_stream *s)
{
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;
	struct cx18 *cx = s->cx;

	CX18_DEBUG_INFO("Deallocating buffers for %s stream\n", s->name);

	/* move all buffers to buf_pool and all MDLs to q_idle */
	cx18_unload_queues(s);

	/* empty q_idle */
	while ((mdl = cx18_dequeue(s, &s->q_idle)))
		kfree(mdl);

	/* empty buf_pool */
	while (!list_empty(&s->buf_pool)) {
		buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
		list_del_init(&buf->list);

		pci_unmap_single(s->cx->pci_dev, buf->dma_handle,
				s->buf_size, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}
}