// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *     Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and of the underlying
 * videobuf2 queue.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

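/* Return the uvc_buffer embedding the given videobuf2 buffer. */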
static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
{
        return container_of(buf, struct uvc_buffer, buf);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void __uvc_queue_return_buffers(struct uvc_video_queue *queue,
                                       enum uvc_buffer_state state)
{
        enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
                                        ? VB2_BUF_STATE_ERROR
                                        : VB2_BUF_STATE_QUEUED;

        lockdep_assert_held(&queue->irqlock);

        while (!list_empty(&queue->irqqueue)) {
                struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
                                                          struct uvc_buffer,
                                                          queue);
                list_del(&buf->queue);
                buf->state = state;
                vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
        }
}

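/*
 * Locked variant of __uvc_queue_return_buffers(), usable when the queue
 * spinlock isn't already held.
 */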
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
                                     enum uvc_buffer_state state)
{
        spin_lock_irq(&queue->irqlock);
        __uvc_queue_return_buffers(queue, state);
        spin_unlock_irq(&queue->irqlock);
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

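/*
 * Compute the minimum buffer size from the negotiated frame size (or the
 * fixed metadata buffer size) and report a single plane of at least that
 * size.
 */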
static int uvc_queue_setup(struct vb2_queue *vq,
                           unsigned int *nbuffers, unsigned int *nplanes,
                           unsigned int sizes[], struct device *alloc_devs[])
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream;
        unsigned int size;

        switch (vq->type) {
        case V4L2_BUF_TYPE_META_CAPTURE:
                size = UVC_METADATA_BUF_SIZE;
                break;

        default:
                stream = uvc_queue_to_stream(queue);
                size = stream->ctrl.dwMaxVideoFrameSize;
                break;
        }

        /*
         * When called with plane sizes, validate them. The driver supports
         * single planar formats only, and requires buffers to be large enough
         * to store a complete frame.
         */
        if (*nplanes)
                return *nplanes != 1 || sizes[0] < size ? -EINVAL : 0;

        *nplanes = 1;
        sizes[0] = size;
        return 0;
}

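/*
 * Validate the payload of output buffers and cache the plane address and
 * size for use by the URB completion handlers.
 */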
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

        if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
                uvc_dbg(uvc_queue_to_stream(queue)->dev, CAPTURE,
                        "[E] Bytes used out of bounds\n");
                return -EINVAL;
        }

        if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
                return -ENODEV;

        buf->state = UVC_BUF_STATE_QUEUED;
        buf->error = 0;
        buf->mem = vb2_plane_vaddr(vb, 0);
        buf->length = vb2_plane_size(vb, 0);
        if (vb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
                buf->bytesused = 0;
        else
                buf->bytesused = vb2_get_plane_payload(vb, 0);

        return 0;
}

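/*
 * Add the buffer to the IRQ queue, or complete it immediately with an error
 * if the device has been disconnected.
 */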
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
                kref_init(&buf->ref);
                list_add_tail(&buf->queue, &queue->irqqueue);
        } else {
                /*
                 * If the device is disconnected return the buffer to userspace
                 * directly. The next QBUF call will fail with -ENODEV.
                 */
                buf->state = UVC_BUF_STATE_ERROR;
                vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
        }

        spin_unlock_irqrestore(&queue->irqlock, flags);
}

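/*
 * Update the buffer timestamp from the device clock samples for buffers that
 * completed successfully.
 */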
static void uvc_buffer_finish(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

        if (vb->state == VB2_BUF_STATE_DONE)
                uvc_video_clock_update(stream, vbuf, buf);
}

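/*
 * Resume the device and start the video stream. On failure, release the PM
 * reference and hand all queued buffers back to videobuf2.
 */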
static int uvc_start_streaming_video(struct vb2_queue *vq, unsigned int count)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        int ret;

        lockdep_assert_irqs_enabled();

        ret = uvc_pm_get(stream->dev);
        if (ret)
                return ret;

        queue->buf_used = 0;

        ret = uvc_video_start_streaming(stream);
        if (ret == 0)
                return 0;

        uvc_pm_put(stream->dev);

        uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);

        return ret;
}

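/*
 * Stop the video stream, release the PM reference and return all buffers to
 * videobuf2 in the error state.
 */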
static void uvc_stop_streaming_video(struct vb2_queue *vq)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);

        lockdep_assert_irqs_enabled();

        uvc_video_stop_streaming(stream);

        uvc_pm_put(stream->dev);

        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
}

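/*
 * Metadata capture is driven by the video stream, so stopping only needs to
 * return the queued metadata buffers.
 */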
static void uvc_stop_streaming_meta(struct vb2_queue *vq)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);

        lockdep_assert_irqs_enabled();

        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
}

static const struct vb2_ops uvc_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .buf_finish = uvc_buffer_finish,
        .start_streaming = uvc_start_streaming_video,
        .stop_streaming = uvc_stop_streaming_video,
};

static const struct vb2_ops uvc_meta_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        /*
         * .start_streaming is not provided here. Metadata relies on video
         * streaming being active. If video isn't streaming, then no metadata
         * will arrive either.
         */
        .stop_streaming = uvc_stop_streaming_meta,
};

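/*
 * Initialize a video or metadata queue and register it with videobuf2.
 */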
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        int ret;

        queue->queue.type = type;
        queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
        queue->queue.drv_priv = queue;
        queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
        queue->queue.mem_ops = &vb2_vmalloc_memops;
        queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
                                     | V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
        queue->queue.lock = &queue->mutex;

        switch (type) {
        case V4L2_BUF_TYPE_META_CAPTURE:
                queue->queue.ops = &uvc_meta_queue_qops;
                break;
        default:
                queue->queue.io_modes |= VB2_DMABUF;
                queue->queue.ops = &uvc_queue_qops;
                break;
        }

        ret = vb2_queue_init(&queue->queue);
        if (ret)
                return ret;

        mutex_init(&queue->mutex);
        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->irqqueue);

        return 0;
}

/* -----------------------------------------------------------------------------
 * Queue management
 */

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further attempts to queue buffers will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        __uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
        /*
         * This must be protected by the irqlock spinlock to avoid race
         * conditions between uvc_buffer_queue and the disconnection event that
         * could result in an interruptible wait in uvc_dequeue_buffer. Do not
         * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
         * state outside the queue code.
         */
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}

/*
 * uvc_queue_get_current_buffer: Obtain the current working output buffer
 *
 * Buffers may span multiple packets, and even multiple URBs, so the active
 * buffer remains on the queue until the EOF marker is received.
 */
static struct uvc_buffer *
__uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
        if (list_empty(&queue->irqqueue))
                return NULL;

        return list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
}

struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        nextbuf = __uvc_queue_get_current_buffer(queue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        return nextbuf;
}

/*
 * uvc_queue_buffer_requeue: Requeue a buffer on our internal irqqueue
 *
 * Reuse a buffer through our internal queue without the need to 'prepare'.
 * The buffer will be returned to userspace through the uvc_buffer_queue call
 * if the device has been disconnected.
 */
static void uvc_queue_buffer_requeue(struct uvc_video_queue *queue,
                                     struct uvc_buffer *buf)
{
        buf->error = 0;
        buf->state = UVC_BUF_STATE_QUEUED;
        buf->bytesused = 0;
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);

        uvc_buffer_queue(&buf->buf.vb2_buf);
}

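/*
 * kref release callback. Requeue erroneous buffers unless the nodrop module
 * parameter is set, otherwise hand the buffer back to videobuf2 in its final
 * state.
 */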
static void uvc_queue_buffer_complete(struct kref *ref)
{
        struct uvc_buffer *buf = container_of(ref, struct uvc_buffer, ref);
        struct vb2_buffer *vb = &buf->buf.vb2_buf;
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);

        if (buf->error && !uvc_no_drop_param) {
                uvc_queue_buffer_requeue(queue, buf);
                return;
        }

        buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
        vb2_buffer_done(&buf->buf.vb2_buf, buf->error ? VB2_BUF_STATE_ERROR :
                                                        VB2_BUF_STATE_DONE);
}

/*
 * Release a reference on the buffer. Complete the buffer when the last
 * reference is released.
 */
void uvc_queue_buffer_release(struct uvc_buffer *buf)
{
        kref_put(&buf->ref, uvc_queue_buffer_complete);
}


/*
 * Remove this buffer from the queue. The buffer stays alive while
 * asynchronous users still hold a reference (if any); uvc_queue_buffer_release
 * will hand it back to videobuf2 once the last reference is dropped.
 */
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
                                         struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        list_del(&buf->queue);
        nextbuf = __uvc_queue_get_current_buffer(queue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        uvc_queue_buffer_release(buf);

        return nextbuf;
}