xref: /OK3568_Linux_fs/kernel/drivers/usb/gadget/function/uvc_video.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0+
/*
 *	uvc_video.c  --  USB Video Class Gadget driver
 *
 *	Copyright (C) 2009-2010
 *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
#include <linux/pm_qos.h>

#include <media/v4l2-dev.h>

#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"
#include "u_uvc.h"

#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
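/*
 * Rockchip vendor extension: when the uvc_zero_copy option is set and the
 * current format is not uncompressed YUYV, payload data is sent straight out
 * of the videobuf2 buffer instead of being copied into the per-request
 * bounce buffers.
 */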
static bool uvc_using_zero_copy(struct uvc_video *video)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct f_uvc_opts *opts = fi_to_f_uvc_opts(uvc->func.fi);

	return opts && opts->uvc_zero_copy && video->fcc != V4L2_PIX_FMT_YUYV;
}

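/*
 * Wait until the given request has completed and been returned to the free
 * list. If it is still owned by the controller, sleep on its completion for
 * up to 500 ms so the request buffer isn't freed from under the UDC.
 */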
static void uvc_wait_req_complete(struct uvc_video *video, struct uvc_request *ureq)
{
	unsigned long flags;
	struct usb_request *req;
	int ret;

	spin_lock_irqsave(&video->req_lock, flags);

	list_for_each_entry(req, &video->req_free, list) {
		if (req == ureq->req)
			break;
	}

	if (req != ureq->req) {
		reinit_completion(&ureq->req_done);

		spin_unlock_irqrestore(&video->req_lock, flags);
		ret = wait_for_completion_timeout(&ureq->req_done,
						  msecs_to_jiffies(500));
		if (ret == 0)
			uvcg_warn(&video->uvc->func,
				  "timed out waiting for req done\n");
		return;
	}

	spin_unlock_irqrestore(&video->req_lock, flags);
}
#else
static inline bool uvc_using_zero_copy(struct uvc_video *video)
{
	return false;
}

static inline void uvc_wait_req_complete(struct uvc_video *video, struct uvc_request *ureq)
{ }
#endif

/* --------------------------------------------------------------------------
 * Video codecs
 */

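/*
 * Write the 2-byte UVC payload header: byte 0 is the header length, byte 1
 * carries the End Of Header flag, the current Frame ID and, on the last
 * chunk of a frame, the End Of Frame flag. Returns the header size.
 */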
static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	if (uvc_using_zero_copy(video)) {
		u8 *mem;

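		/*
		 * In zero-copy mode the header lives inside the frame buffer
		 * itself: each request carries req_size - 2 bytes of image
		 * data plus a 2-byte header, so skip 2 extra bytes for every
		 * payload chunk already consumed.
		 */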
		mem = buf->mem + video->queue.buf_used +
		      (video->queue.buf_used / (video->req_size - 2)) * 2;

		mem[0] = 2;
		mem[1] = UVC_STREAM_EOH | video->fid;
		if (buf->bytesused - video->queue.buf_used <= len - 2)
			mem[1] |= UVC_STREAM_EOF;

		return 2;
	}

	data[0] = 2;
	data[1] = UVC_STREAM_EOH | video->fid;

	if (buf->bytesused - video->queue.buf_used <= len - 2)
		data[1] |= UVC_STREAM_EOF;

	return 2;
}

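/*
 * Copy up to len bytes of frame data into the request buffer (no copy is
 * done in zero-copy mode) and advance the queue's buf_used counter.
 * Returns the number of bytes consumed.
 */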
static int
uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_video_queue *queue = &video->queue;
	unsigned int nbytes;
	void *mem;

	/* Copy video data to the USB buffer. */
	mem = buf->mem + queue->buf_used;
	nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);

	if (!uvc_using_zero_copy(video))
		memcpy(data, mem, nbytes);
	queue->buf_used += nbytes;

	return nbytes;
}

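/*
 * Fill a bulk request. A frame is split into payloads of at most
 * max_payload_size bytes; only the first request of each payload carries a
 * header. req->zero is set at payload and frame boundaries so the payload
 * always ends with a short or zero-length packet.
 */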
static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	int len = video->req_size;
	int ret;

	/* Add a header at the beginning of the payload. */
	if (video->payload_size == 0) {
		ret = uvc_video_encode_header(video, buf, mem, len);
		video->payload_size += ret;
		mem += ret;
		len -= ret;
	}

	/* Process video data. */
	len = min((int)(video->max_payload_size - video->payload_size), len);
	ret = uvc_video_encode_data(video, buf, mem, len);

	video->payload_size += ret;
	len -= ret;

	req->length = video->req_size - len;
	req->zero = video->payload_size == video->max_payload_size;

	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		uvcg_queue_next_buffer(&video->queue, buf);
		video->fid ^= UVC_STREAM_FID;

		video->payload_size = 0;
		req->zero = 1;
	}

	if (video->payload_size == video->max_payload_size ||
	    buf->bytesused == video->queue.buf_used)
		video->payload_size = 0;
}

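/*
 * Fill an isochronous request. Every request is a self-contained payload
 * (header plus data). In zero-copy mode req->buf is pointed directly at the
 * right offset inside the frame buffer instead of the bounce buffer.
 */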
static void
uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	int len = video->req_size;
	int ret;

	if (uvc_using_zero_copy(video))
		req->buf = buf->mem + video->queue.buf_used +
			   (video->queue.buf_used / (video->req_size - 2)) * 2;

	/* Add the header. */
	ret = uvc_video_encode_header(video, buf, mem, len);
	mem += ret;
	len -= ret;

	/* Process video data. */
	ret = uvc_video_encode_data(video, buf, mem, len);
	len -= ret;

	req->length = video->req_size - len;

	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		uvcg_queue_next_buffer(&video->queue, buf);
		video->fid ^= UVC_STREAM_FID;
	}
}

/* --------------------------------------------------------------------------
 * Request handling
 */

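/*
 * Queue a request to the streaming endpoint. On failure a bulk endpoint is
 * halted so the host sees the stall; isochronous endpoints cannot be halted,
 * so the error is only logged.
 */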
static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
	int ret;

	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
	if (ret < 0) {
		uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
			 ret);

		/* If the endpoint is disabled the descriptor may be NULL. */
		if (video->ep->desc) {
			/* Isochronous endpoints can't be halted. */
			if (usb_endpoint_xfer_bulk(video->ep->desc))
				usb_ep_set_halt(video->ep);
		}
	}

	return ret;
}

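/*
 * Request completion handler, called in interrupt context when the UDC is
 * done with a request: cancel the queue on errors or disconnect, return the
 * request to the free list (signalling any waiter in the Rockchip build),
 * then kick the pump worker if the device is still streaming.
 */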
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_request *ureq = req->context;
	struct uvc_video *video = ureq->video;
	struct uvc_video_queue *queue = &video->queue;
	struct uvc_device *uvc = video->uvc;
	unsigned long flags;

	switch (req->status) {
	case 0:
		break;

	case -ESHUTDOWN:	/* disconnect from host. */
		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
		uvcg_queue_cancel(queue, 1);
		break;

	default:
		uvcg_warn(&video->uvc->func,
			  "VS request completed with status %d.\n",
			  req->status);
		uvcg_queue_cancel(queue, 0);
	}

	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
	complete(&ureq->req_done);
#endif
	spin_unlock_irqrestore(&video->req_lock, flags);

	if (uvc->state == UVC_STATE_STREAMING)
		schedule_work(&video->pump);
}

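/*
 * Free all USB requests and their bounce buffers. Each in-flight request is
 * given a chance to complete first (uvc_wait_req_complete() is a no-op
 * outside the Rockchip build) before its memory is released.
 */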
static int
uvc_video_free_requests(struct uvc_video *video)
{
	unsigned int i;

	if (video->ureq) {
		for (i = 0; i < video->uvc_num_requests; ++i) {
			if (video->ureq[i].req) {
				uvc_wait_req_complete(video, &video->ureq[i]);
				usb_ep_free_request(video->ep, video->ureq[i].req);
				video->ureq[i].req = NULL;
			}

			if (video->ureq[i].req_buffer) {
				kfree(video->ureq[i].req_buffer);
				video->ureq[i].req_buffer = NULL;
			}
		}

		kfree(video->ureq);
		video->ureq = NULL;
	}

	INIT_LIST_HEAD(&video->req_free);
	video->req_size = 0;
	return 0;
}

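/*
 * Allocate uvc_num_requests requests and their bounce buffers. The request
 * size is the endpoint maxpacket size scaled by maxburst (at least 1) and,
 * for isochronous endpoints, by the packet multiplier.
 */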
static int
uvc_video_alloc_requests(struct uvc_video *video)
{
	unsigned int req_size;
	unsigned int i;
	int ret = -ENOMEM;

	BUG_ON(video->req_size);

	if (!usb_endpoint_xfer_bulk(video->ep->desc)) {
		req_size = video->ep->maxpacket
			 * max_t(unsigned int, video->ep->maxburst, 1)
			 * (video->ep->mult);
	} else {
		req_size = video->ep->maxpacket
			 * max_t(unsigned int, video->ep->maxburst, 1);
	}

	video->ureq = kcalloc(video->uvc_num_requests, sizeof(struct uvc_request), GFP_KERNEL);
	if (video->ureq == NULL)
		return -ENOMEM;

	for (i = 0; i < video->uvc_num_requests; ++i) {
		video->ureq[i].req_buffer = kmalloc(req_size, GFP_KERNEL);
		if (video->ureq[i].req_buffer == NULL)
			goto error;

		video->ureq[i].req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
		if (video->ureq[i].req == NULL)
			goto error;

		video->ureq[i].req->buf = video->ureq[i].req_buffer;
		video->ureq[i].req->length = 0;
		video->ureq[i].req->complete = uvc_video_complete;
		video->ureq[i].req->context = &video->ureq[i];
		video->ureq[i].video = video;

#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
		init_completion(&video->ureq[i].req_done);
#endif
		list_add_tail(&video->ureq[i].req->list, &video->req_free);
	}

	video->req_size = req_size;

	return 0;

error:
	uvc_video_free_requests(video);
	return ret;
}

/* --------------------------------------------------------------------------
 * Video streaming
 */

/*
 * uvcg_video_pump - Pump video data into the USB requests
 *
 * This function fills the available USB requests (listed in req_free) with
 * video data from the queued buffers.
 */
static void uvcg_video_pump(struct work_struct *work)
{
	struct uvc_video *video = container_of(work, struct uvc_video, pump);
	struct uvc_video_queue *queue = &video->queue;
	struct usb_request *req = NULL;
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret;

	while (video->ep->enabled) {
		/* Retrieve the first available USB request, protected by the
		 * request lock.
		 */
		spin_lock_irqsave(&video->req_lock, flags);
		if (list_empty(&video->req_free)) {
			spin_unlock_irqrestore(&video->req_lock, flags);
			return;
		}
		req = list_first_entry(&video->req_free, struct usb_request,
					list);
		list_del(&req->list);
		spin_unlock_irqrestore(&video->req_lock, flags);

		/* Retrieve the first available video buffer and fill the
		 * request, protected by the video queue irqlock.
		 */
		spin_lock_irqsave(&queue->irqlock, flags);
		buf = uvcg_queue_head(queue);
		if (buf == NULL) {
			spin_unlock_irqrestore(&queue->irqlock, flags);
			break;
		}

		video->encode(req, video, buf);

		/* Queue the USB request. */
		ret = uvcg_video_ep_queue(video, req);
		spin_unlock_irqrestore(&queue->irqlock, flags);

		if (ret < 0) {
			uvcg_queue_cancel(queue, 0);
			break;
		}

		/* Endpoint now owns the request. */
		req = NULL;
	}

	if (!req)
		return;

	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
	spin_unlock_irqrestore(&video->req_lock, flags);
	return;
}

/*
 * Enable or disable the video stream. Disabling cancels the pump worker and
 * any queued requests, frees the requests and drops the CPU latency QoS
 * request; enabling adds the QoS request, allocates the USB requests, picks
 * the bulk or isochronous encoder and kicks the pump worker.
 */
int uvcg_video_enable(struct uvc_video *video, int enable)
{
	unsigned int i;
	int ret;
	struct uvc_device *uvc;
	struct f_uvc_opts *opts;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video enable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	uvc = container_of(video, struct uvc_device, video);
	opts = fi_to_f_uvc_opts(uvc->func.fi);

	if (!enable) {
		cancel_work_sync(&video->pump);
		uvcg_queue_cancel(&video->queue, 0);

		for (i = 0; i < video->uvc_num_requests; ++i)
			if (video->ureq && video->ureq[i].req)
				usb_ep_dequeue(video->ep, video->ureq[i].req);

		uvc_video_free_requests(video);
		uvcg_queue_enable(&video->queue, 0);
		if (cpu_latency_qos_request_active(&uvc->pm_qos))
			cpu_latency_qos_remove_request(&uvc->pm_qos);
		return 0;
	}

	cpu_latency_qos_add_request(&uvc->pm_qos, opts->pm_qos_latency);
	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
		return ret;

	if ((ret = uvc_video_alloc_requests(video)) < 0)
		return ret;

	if (video->max_payload_size) {
		video->encode = uvc_video_encode_bulk;
		video->payload_size = 0;
	} else
		video->encode = uvc_video_encode_isoc;

	schedule_work(&video->pump);

	return ret;
}

/*
 * Initialize the UVC video stream.
 */
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
	INIT_LIST_HEAD(&video->req_free);
	spin_lock_init(&video->req_lock);
	INIT_WORK(&video->pump, uvcg_video_pump);

	video->uvc = uvc;
	video->fcc = V4L2_PIX_FMT_YUYV;
	video->bpp = 16;
	video->width = 320;
	video->height = 240;
	video->imagesize = 320 * 240 * 2;

	/* Initialize the video buffers queue. */
	uvcg_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT,
			&video->mutex);
	return 0;
}