// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 sub-device
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	    Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below.
 */

static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
 * of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
 * restricts the total number of streams in a pad, although the stream ID is
 * not restricted.
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63
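
/*
 * Illustrative note: a set of streams is a u64 bitmask, so a set holding
 * streams 0 and 5 is BIT_ULL(0) | BIT_ULL(5) == 0x21, and stream IDs above
 * V4L2_SUBDEV_MAX_STREAM_ID cannot be represented.
 */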

#include "v4l2-subdev-priv.h"

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;
	static struct lock_class_key key;

	state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	fh->state = state;

	return 0;
}

static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
	__v4l2_subdev_state_free(fh->state);
	fh->state = NULL;
}

static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
	int ret;

	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;

	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
		struct module *owner;

		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
		if (!try_module_get(owner)) {
			ret = -EBUSY;
			goto err;
		}
		subdev_fh->owner = owner;
	}

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	module_put(subdev_fh->owner);
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}

static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
	module_put(subdev_fh->owner);
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static int subdev_open(struct file *file)
{
	return -ENODEV;
}

static int subdev_close(struct file *file)
{
	return -ENODEV;
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static inline int check_which(u32 which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY &&
	    which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	return 0;
}

static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->entity.num_pads) {
		if (pad >= sd->entity.num_pads)
			return -EINVAL;
		return 0;
	}
#endif
	/* allow pad 0 on subdevices not registered as media entities */
	if (pad > 0)
		return -EINVAL;
	return 0;
}

static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
		       u32 which, u32 pad, u32 stream)
{
	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
		if (!v4l2_subdev_state_get_stream_format(state, pad, stream))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif
	}

	if (stream != 0)
		return -EINVAL;

	if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
		return -EINVAL;

	return 0;
}

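/*
 * check_format() and the call_*() helpers below chain the checks above with
 * the GNU "a ?: b" extension, which evaluates to 'a' when 'a' is non-zero and
 * to 'b' otherwise. For example, "check_which(w) ?: check_pad(sd, p)" returns
 * the first non-zero error code, or 0 when every check passes.
 */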
static inline int check_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_format *format)
{
	if (!format)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		format->stream = 0;

	return check_which(format->which) ? : check_pad(sd, format->pad) ? :
	       check_state(sd, state, format->which, format->pad, format->stream);
}

static int call_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->get_fmt(sd, state, format);
}

static int call_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->set_fmt(sd, state, format);
}

static int call_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	if (!code)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		code->stream = 0;

	return check_which(code->which) ? : check_pad(sd, code->pad) ? :
	       check_state(sd, state, code->which, code->pad, code->stream) ? :
	       sd->ops->pad->enum_mbus_code(sd, state, code);
}

static int call_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	if (!fse)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		fse->stream = 0;

	return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
	       check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
	       sd->ops->pad->enum_frame_size(sd, state, fse);
}

static inline int check_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_frame_interval *fi)
{
	if (!fi)
		return -EINVAL;

	return check_pad(sd, fi->pad);
}

static int call_g_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, fi) ? :
	       sd->ops->video->g_frame_interval(sd, fi);
}

static int call_s_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, fi) ? :
	       sd->ops->video->s_frame_interval(sd, fi);
}

static int call_enum_frame_interval(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_frame_interval_enum *fie)
{
	if (!fie)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		fie->stream = 0;

	return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
	       check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
	       sd->ops->pad->enum_frame_interval(sd, state, fie);
}

static inline int check_selection(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	if (!sel)
		return -EINVAL;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		sel->stream = 0;

	return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
	       check_state(sd, state, sel->which, sel->pad, sel->stream);
}

static int call_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->get_selection(sd, state, sel);
}

static int call_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->set_selection(sd, state, sel);
}

static inline int check_edid(struct v4l2_subdev *sd,
			     struct v4l2_subdev_edid *edid)
{
	if (!edid)
		return -EINVAL;

	if (edid->blocks && edid->edid == NULL)
		return -EINVAL;

	return check_pad(sd, edid->pad);
}

static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
}

static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
}

static int call_dv_timings_cap(struct v4l2_subdev *sd,
			       struct v4l2_dv_timings_cap *cap)
{
	if (!cap)
		return -EINVAL;

	return check_pad(sd, cap->pad) ? :
	       sd->ops->pad->dv_timings_cap(sd, cap);
}

static int call_enum_dv_timings(struct v4l2_subdev *sd,
				struct v4l2_enum_dv_timings *dvt)
{
	if (!dvt)
		return -EINVAL;

	return check_pad(sd, dvt->pad) ? :
	       sd->ops->pad->enum_dv_timings(sd, dvt);
}

static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				struct v4l2_mbus_config *config)
{
	return check_pad(sd, pad) ? :
	       sd->ops->pad->get_mbus_config(sd, pad, config);
}

static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
	int ret;

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		if (enable)
			led_set_brightness(sd->privacy_led,
					   sd->privacy_led->max_brightness);
		else
			led_set_brightness(sd->privacy_led, 0);
	}
#endif
	ret = sd->ops->video->s_stream(sd, enable);

	if (!enable && ret < 0) {
		dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
		return 0;
	}

	return ret;
}

#ifdef CONFIG_MEDIA_CONTROLLER
/*
 * Create state-management wrapper for pad ops dealing with subdev state. The
 * wrapper handles the case where the caller does not provide the called
 * subdev's state. This should be removed when all the callers are fixed.
 */
#define DEFINE_STATE_WRAPPER(f, arg_type)                                  \
	static int call_##f##_state(struct v4l2_subdev *sd,                \
				    struct v4l2_subdev_state *_state,      \
				    arg_type *arg)                         \
	{                                                                  \
		struct v4l2_subdev_state *state = _state;                  \
		int ret;                                                   \
		if (!_state)                                               \
			state = v4l2_subdev_lock_and_get_active_state(sd); \
		ret = call_##f(sd, state, arg);                            \
		if (!_state && state)                                      \
			v4l2_subdev_unlock_state(state);                   \
		return ret;                                                \
	}

#else /* CONFIG_MEDIA_CONTROLLER */

#define DEFINE_STATE_WRAPPER(f, arg_type)                            \
	static int call_##f##_state(struct v4l2_subdev *sd,          \
				    struct v4l2_subdev_state *state, \
				    arg_type *arg)                   \
	{                                                            \
		return call_##f(sd, state, arg);                     \
	}

#endif /* CONFIG_MEDIA_CONTROLLER */
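
/*
 * For example, the invocation below of DEFINE_STATE_WRAPPER(get_fmt, struct
 * v4l2_subdev_format) generates call_get_fmt_state(), which falls back to the
 * subdev's locked active state when the caller passes a NULL state before
 * delegating to call_get_fmt().
 */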

DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);

static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
	.get_fmt = call_get_fmt_state,
	.set_fmt = call_set_fmt_state,
	.enum_mbus_code = call_enum_mbus_code_state,
	.enum_frame_size = call_enum_frame_size_state,
	.enum_frame_interval = call_enum_frame_interval_state,
	.get_selection = call_get_selection_state,
	.set_selection = call_set_selection_state,
	.get_edid = call_get_edid,
	.set_edid = call_set_edid,
	.dv_timings_cap = call_dv_timings_cap,
	.enum_dv_timings = call_enum_dv_timings,
	.get_mbus_config = call_get_mbus_config,
};

static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
	.g_frame_interval = call_g_frame_interval,
	.s_frame_interval = call_s_frame_interval,
	.s_stream = call_s_stream,
};

const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
	.pad = &v4l2_subdev_call_pad_wrappers,
	.video = &v4l2_subdev_call_video_wrappers,
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
		       unsigned int cmd, void *arg)
{
	u32 which;

	switch (cmd) {
	default:
		return NULL;
	case VIDIOC_SUBDEV_G_FMT:
	case VIDIOC_SUBDEV_S_FMT:
		which = ((struct v4l2_subdev_format *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_CROP:
	case VIDIOC_SUBDEV_S_CROP:
		which = ((struct v4l2_subdev_crop *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
		which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
		which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
		which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_SELECTION:
	case VIDIOC_SUBDEV_S_SELECTION:
		which = ((struct v4l2_subdev_selection *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_ROUTING:
	case VIDIOC_SUBDEV_S_ROUTING:
		which = ((struct v4l2_subdev_routing *)arg)->which;
		break;
	}

	return which == V4L2_SUBDEV_FORMAT_TRY ?
	       subdev_fh->state :
	       v4l2_subdev_get_unlocked_active_state(sd);
}

static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
			    struct v4l2_subdev_state *state)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
	bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
	int rval;

	switch (cmd) {
	case VIDIOC_SUBDEV_QUERYCAP: {
		struct v4l2_subdev_capability *cap = arg;

		memset(cap->reserved, 0, sizeof(cap->reserved));
		cap->version = LINUX_VERSION_CODE;
		cap->capabilities =
			(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
			(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);

		return 0;
	}

	case VIDIOC_QUERYCTRL:
		/*
		 * TODO: this really should be folded into v4l2_queryctrl (this
		 * currently returns -EINVAL for NULL control handlers).
		 * However, v4l2_queryctrl() is still called directly by
		 * drivers as well and until that has been addressed I believe
		 * it is safer to do the check here. The same is true for the
		 * other control ioctls below.
		 */
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ext_ctrls(vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_S_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_try_ext_ctrls(vfh->ctrl_handler,
					  vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);

	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
	case VIDIOC_DBG_G_CHIP_INFO:
	{
		struct v4l2_dbg_chip_info *p = arg;

		if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
			return -EINVAL;
		if (sd->ops->core && sd->ops->core->s_register)
			p->flags |= V4L2_CHIP_FL_WRITABLE;
		if (sd->ops->core && sd->ops->core->g_register)
			p->flags |= V4L2_CHIP_FL_READABLE;
		strscpy(p->name, sd->name, sizeof(p->name));
		return 0;
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: ================= START STATUS =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ================== END STATUS ==================\n",
			sd->name);
		return ret;
	}

	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, get_fmt, state, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, set_fmt, state, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		memset(code->reserved, 0, sizeof(code->reserved));
		return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		memset(fse->reserved, 0, sizeof(fse->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_size, state,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, video, g_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (ro_subdev)
			return -EPERM;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, video, s_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		memset(fie->reserved, 0, sizeof(fie->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, get_selection, state, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, set_selection, state, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, query_dv_timings, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, g_dv_timings, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_dv_timings, arg);

	case VIDIOC_SUBDEV_G_STD:
		return v4l2_subdev_call(sd, video, g_std, arg);

	case VIDIOC_SUBDEV_S_STD: {
		v4l2_std_id *std = arg;

		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_std, *std);
	}

	case VIDIOC_SUBDEV_ENUMSTD: {
		struct v4l2_standard *p = arg;
		v4l2_std_id id;

		if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
			return -EINVAL;

		return v4l_video_std_enumstd(p, id);
	}

	case VIDIOC_SUBDEV_QUERYSTD:
		return v4l2_subdev_call(sd, video, querystd, arg);

	case VIDIOC_SUBDEV_G_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_krouting *krouting;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		krouting = &state->routing;

		if (routing->num_routes < krouting->num_routes) {
			routing->num_routes = krouting->num_routes;
			return -ENOSPC;
		}

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       krouting->routes,
		       krouting->num_routes * sizeof(*krouting->routes));
		routing->num_routes = krouting->num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_S_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_route *routes =
			(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
		struct v4l2_subdev_krouting krouting = {};
		unsigned int i;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		for (i = 0; i < routing->num_routes; ++i) {
			const struct v4l2_subdev_route *route = &routes[i];
			const struct media_pad *pads = sd->entity.pads;

			if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
			    route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
				return -EINVAL;

			if (route->sink_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->sink_pad].flags &
			      MEDIA_PAD_FL_SINK))
				return -EINVAL;

			if (route->source_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->source_pad].flags &
			      MEDIA_PAD_FL_SOURCE))
				return -EINVAL;
		}

		krouting.num_routes = routing->num_routes;
		krouting.routes = routes;

		return v4l2_subdev_call(sd, pad, set_routing, state,
					routing->which, &krouting);
	}

	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}

static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->lock;
	long ret = -ENODEV;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	if (video_is_registered(vdev)) {
		struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
		struct v4l2_fh *vfh = file->private_data;
		struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
		struct v4l2_subdev_state *state;

		state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);

		if (state)
			v4l2_subdev_lock_state(state);

		ret = subdev_do_ioctl(file, cmd, arg, state);

		if (state)
			v4l2_subdev_unlock_state(state);
	}

	if (lock)
		mutex_unlock(lock);
	return ret;
}

static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif

#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return -ENODEV;
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -ENODEV;
}
#endif
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *fh = file->private_data;

	if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
		return EPOLLERR;

	poll_wait(file, &fh->wait, wait);

	if (v4l2_event_pending(fh))
		return EPOLLPRI;

	return 0;
}

const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.release = subdev_close,
	.poll = subdev_poll,
};
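
/*
 * Note: these file operations back the /dev/v4l-subdevX device nodes that
 * v4l2_device_register_subdev_nodes() creates for subdevs flagged with
 * V4L2_SUBDEV_FL_HAS_DEVNODE.
 */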

#ifdef CONFIG_MEDIA_CONTROLLER

int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
				      struct fwnode_endpoint *endpoint)
{
	struct fwnode_handle *fwnode;
	struct v4l2_subdev *sd;

	if (!is_media_entity_v4l2_subdev(entity))
		return -EINVAL;

	sd = media_entity_to_v4l2_subdev(entity);

	fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
	fwnode_handle_put(fwnode);

	if (device_match_fwnode(sd->dev, fwnode))
		return endpoint->port;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);

int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	bool pass = true;

	/* The width, height and code must match. */
	if (source_fmt->format.width != sink_fmt->format.width) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: width does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.width, sink_fmt->format.width);
		pass = false;
	}

	if (source_fmt->format.height != sink_fmt->format.height) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: height does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.height, sink_fmt->format.height);
		pass = false;
	}

	if (source_fmt->format.code != sink_fmt->format.code) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
			__func__,
			source_fmt->format.code, sink_fmt->format.code);
		pass = false;
	}

	/* The field order must match, or the sink field order must be NONE
	 * to support interlaced hardware connected to bridges that support
	 * progressive formats only.
	 */
	if (source_fmt->format.field != sink_fmt->format.field &&
	    sink_fmt->format.field != V4L2_FIELD_NONE) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: field does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.field, sink_fmt->format.field);
		pass = false;
	}

	if (pass)
		return 0;

	dev_dbg(sd->entity.graph_obj.mdev->dev,
		"%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	return -EPIPE;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
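
/*
 * Illustrative usage (a sketch, not from this file): a subdev driver would
 * typically wire the link validation helpers up as
 *
 *	static const struct media_entity_operations ops = {
 *		.link_validate = v4l2_subdev_link_validate,
 *	};
 *
 * and rely on v4l2_subdev_link_validate_default() as the fallback format
 * check when its pad ops do not implement .link_validate.
 */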

static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
				     struct v4l2_subdev_format *fmt)
{
	if (is_media_entity_v4l2_subdev(pad->entity)) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(pad->entity);

		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		fmt->stream = stream;

		return v4l2_subdev_call(sd, pad, get_fmt,
					v4l2_subdev_get_locked_active_state(sd),
					fmt);
	}

	WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
	     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
	     pad->entity->function, pad->entity->name);

	return -EINVAL;
}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static void __v4l2_link_validate_get_streams(struct media_pad *pad,
					     u64 *streams_mask)
{
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *subdev;

	subdev = media_entity_to_v4l2_subdev(pad->entity);

	*streams_mask = 0;

	state = v4l2_subdev_get_locked_active_state(subdev);
	if (WARN_ON(!state))
		return;

	for_each_active_route(&state->routing, route) {
		u32 route_pad;
		u32 route_stream;

		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			route_pad = route->source_pad;
			route_stream = route->source_stream;
		} else {
			route_pad = route->sink_pad;
			route_stream = route->sink_stream;
		}

		if (route_pad != pad->index)
			continue;

		*streams_mask |= BIT_ULL(route_stream);
	}
}

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static void v4l2_link_validate_get_streams(struct media_pad *pad,
					   u64 *streams_mask)
{
	struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);

	if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
		/* Non-streams subdevs have an implicit stream 0 */
		*streams_mask = BIT_ULL(0);
		return;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	__v4l2_link_validate_get_streams(pad, streams_mask);
#else
	/* This shouldn't happen */
	*streams_mask = 0;
#endif
}

static int v4l2_subdev_link_validate_locked(struct media_link *link)
{
	struct v4l2_subdev *sink_subdev =
		media_entity_to_v4l2_subdev(link->sink->entity);
	struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
	u64 source_streams_mask;
	u64 sink_streams_mask;
	u64 dangling_sink_streams;
	u32 stream;
	int ret;

	dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	v4l2_link_validate_get_streams(link->source, &source_streams_mask);
	v4l2_link_validate_get_streams(link->sink, &sink_streams_mask);

	/*
	 * It is ok to have more source streams than sink streams as extra
	 * source streams can just be ignored by the receiver, but having extra
	 * sink streams is an error as streams must have a source.
	 */
	dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
				sink_streams_mask;
	if (dangling_sink_streams) {
		dev_err(dev, "Dangling sink streams: mask %#llx\n",
			dangling_sink_streams);
		return -EINVAL;
	}

	/* Validate source and sink stream formats */

	for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
		struct v4l2_subdev_format sink_fmt, source_fmt;

		if (!(sink_streams_mask & BIT_ULL(stream)))
			continue;

		dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
			link->source->entity->name, link->source->index, stream,
			link->sink->entity->name, link->sink->index, stream);

		ret = v4l2_subdev_link_validate_get_format(link->source, stream,
							   &source_fmt);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->source->entity->name, link->source->index,
				stream);
			continue;
		}

		ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
							   &sink_fmt);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->sink->entity->name, link->sink->index,
				stream);
			continue;
		}

		/* TODO: add stream number to link_validate() */
		ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
				       &source_fmt, &sink_fmt);
		if (!ret)
			continue;

		if (ret != -ENOIOCTLCMD)
			return ret;

		ret = v4l2_subdev_link_validate_default(sink_subdev, link,
							&source_fmt, &sink_fmt);

		if (ret)
			return ret;
	}

	return 0;
}

int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *source_sd, *sink_sd;
	struct v4l2_subdev_state *source_state, *sink_state;
	int ret;

	sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
	source_state = v4l2_subdev_get_unlocked_active_state(source_sd);

	if (sink_state)
		v4l2_subdev_lock_state(sink_state);

	if (source_state)
		v4l2_subdev_lock_state(source_state);

	ret = v4l2_subdev_link_validate_locked(link);

	if (sink_state)
		v4l2_subdev_unlock_state(sink_state);

	if (source_state)
		v4l2_subdev_unlock_state(source_state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);

bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
				  unsigned int pad0, unsigned int pad1)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct v4l2_subdev_krouting *routing;
	struct v4l2_subdev_state *state;
	unsigned int i;

	state = v4l2_subdev_lock_and_get_active_state(sd);

	routing = &state->routing;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
		    (route->source_pad == pad0 && route->sink_pad == pad1)) {
			v4l2_subdev_unlock_state(state);
			return true;
		}
	}

	v4l2_subdev_unlock_state(state);

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);

struct v4l2_subdev_state *
__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
			  struct lock_class_key *lock_key)
{
	struct v4l2_subdev_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	__mutex_init(&state->_lock, lock_name, lock_key);
	if (sd->state_lock)
		state->lock = sd->state_lock;
	else
		state->lock = &state->_lock;

	/* Drivers that support streams do not need the legacy pad config */
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
		state->pads = kvcalloc(sd->entity.num_pads,
				       sizeof(*state->pads), GFP_KERNEL);
		if (!state->pads) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/*
	 * There can be no race at this point, but we lock the state anyway to
	 * satisfy lockdep checks.
	 */
	v4l2_subdev_lock_state(state);
	ret = v4l2_subdev_call(sd, pad, init_cfg, state);
	v4l2_subdev_unlock_state(state);

	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto err;

	return state;

err:
	if (state && state->pads)
		kvfree(state->pads);

	kfree(state);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);

void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
	if (!state)
		return;

	mutex_destroy(&state->_lock);

	kfree(state->routing.routes);
	kvfree(state->stream_configs.configs);
	kvfree(state->pads);
	kfree(state);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);

int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
				struct lock_class_key *key)
{
	struct v4l2_subdev_state *state;

	state = __v4l2_subdev_state_alloc(sd, name, key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	sd->active_state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
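
/*
 * Illustrative usage (a sketch): drivers normally reach this through the
 * v4l2_subdev_init_finalize() macro from <media/v4l2-subdev.h>, which supplies
 * the lock name and lockdep class key, e.g.
 *
 *	ret = v4l2_subdev_init_finalize(sd);
 *	if (ret)
 *		return ret;
 *	...
 *	v4l2_subdev_cleanup(sd);	// on unbind, frees the active state
 */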

void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
	__v4l2_subdev_state_free(sd->active_state);
	sd->active_state = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static int
v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
				const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_stream_configs new_configs = { 0 };
	struct v4l2_subdev_route *route;
	u32 idx;

	/* Count number of formats needed */
	for_each_active_route(routing, route) {
		/*
		 * Each route needs a format on both ends of the route.
		 */
		new_configs.num_configs += 2;
	}

	if (new_configs.num_configs) {
		new_configs.configs = kvcalloc(new_configs.num_configs,
					       sizeof(*new_configs.configs),
					       GFP_KERNEL);

		if (!new_configs.configs)
			return -ENOMEM;
	}

	/*
	 * Fill in the 'pad' and 'stream' value for each item in the array from
	 * the routing table.
	 */
	idx = 0;

	for_each_active_route(routing, route) {
		new_configs.configs[idx].pad = route->sink_pad;
		new_configs.configs[idx].stream = route->sink_stream;

		idx++;

		new_configs.configs[idx].pad = route->source_pad;
		new_configs.configs[idx].stream = route->source_stream;

		idx++;
	}

	kvfree(stream_configs->configs);
	*stream_configs = new_configs;

	return 0;
}

int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;

	if (sd->flags & V4L2_SUBDEV_FL_STREAMS)
		fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
							  format->stream);
	else if (format->pad < sd->entity.num_pads && format->stream == 0)
		fmt = v4l2_subdev_get_pad_format(sd, state, format->pad);
	else
		fmt = NULL;

	if (!fmt)
		return -EINVAL;

	format->format = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
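
/*
 * Note: drivers that use the subdev active state can plug this helper
 * directly into their pad ops as .get_fmt = v4l2_subdev_get_fmt.
 */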

int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state,
			    const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_krouting *dst = &state->routing;
	const struct v4l2_subdev_krouting *src = routing;
	struct v4l2_subdev_krouting new_routing = { 0 };
	size_t bytes;
	int r;

	if (unlikely(check_mul_overflow((size_t)src->num_routes,
					sizeof(*src->routes), &bytes)))
		return -EOVERFLOW;

	lockdep_assert_held(state->lock);

	if (src->num_routes > 0) {
		new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
		if (!new_routing.routes)
			return -ENOMEM;
	}

	new_routing.num_routes = src->num_routes;

	r = v4l2_subdev_init_stream_configs(&state->stream_configs,
					    &new_routing);
	if (r) {
		kfree(new_routing.routes);
		return r;
	}

	kfree(dst->routes);
	*dst = new_routing;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);

struct v4l2_subdev_route *
__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
				struct v4l2_subdev_route *route)
{
	if (route)
		++route;
	else
		route = &routing->routes[0];

	for (; route < routing->routes + routing->num_routes; ++route) {
		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		return route;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
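
/*
 * Note: this helper is the backing iterator for the for_each_active_route()
 * macro used throughout this file, which walks only the routes that have
 * V4L2_SUBDEV_ROUTE_FL_ACTIVE set.
 */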

int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *state,
				     struct v4l2_subdev_krouting *routing,
				     const struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;
	int ret;

	ret = v4l2_subdev_set_routing(sd, state, routing);
	if (ret)
		return ret;

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i)
		stream_configs->configs[i].fmt = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
				    unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].fmt;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_format);

struct v4l2_rect *
v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
				  unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].crop;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_crop);

struct v4l2_rect *
v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
				     unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].compose;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_compose);

int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
					  u32 pad, u32 stream, u32 *other_pad,
					  u32 *other_stream)
{
	unsigned int i;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (route->source_pad == pad &&
		    route->source_stream == stream) {
			if (other_pad)
				*other_pad = route->sink_pad;
			if (other_stream)
				*other_stream = route->sink_stream;
			return 0;
		}

		if (route->sink_pad == pad && route->sink_stream == stream) {
			if (other_pad)
				*other_pad = route->source_pad;
			if (other_stream)
				*other_stream = route->source_stream;
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
					     u32 pad, u32 stream)
{
	u32 other_pad, other_stream;
	int ret;

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
						    pad, stream,
						    &other_pad, &other_stream);
	if (ret)
		return NULL;

	return v4l2_subdev_state_get_stream_format(state, other_pad,
						   other_stream);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);

u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
				    u32 pad0, u32 pad1, u64 *streams)
{
	const struct v4l2_subdev_krouting *routing = &state->routing;
	struct v4l2_subdev_route *route;
	u64 streams0 = 0;
	u64 streams1 = 0;

	for_each_active_route(routing, route) {
		if (route->sink_pad == pad0 && route->source_pad == pad1 &&
		    (*streams & BIT_ULL(route->sink_stream))) {
			streams0 |= BIT_ULL(route->sink_stream);
			streams1 |= BIT_ULL(route->source_stream);
		}
		if (route->source_pad == pad0 && route->sink_pad == pad1 &&
		    (*streams & BIT_ULL(route->source_stream))) {
			streams0 |= BIT_ULL(route->source_stream);
			streams1 |= BIT_ULL(route->sink_stream);
		}
	}

	*streams = streams0;
	return streams1;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
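
/*
 * Worked example (illustrative): with a single active route from sink pad 0,
 * stream 2 to source pad 1, stream 0, and pad0 == 0, pad1 == 1,
 * *streams == BIT_ULL(2), the function rewrites *streams to BIT_ULL(2) (the
 * matched streams on pad0) and returns BIT_ULL(0) (the corresponding streams
 * on pad1).
 */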

int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
				 const struct v4l2_subdev_krouting *routing,
				 enum v4l2_subdev_routing_restriction disallow)
{
	u32 *remote_pads = NULL;
	unsigned int i, j;
	int ret = -EINVAL;

	if (disallow & V4L2_SUBDEV_ROUTING_NO_STREAM_MIX) {
		remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
				      GFP_KERNEL);
		if (!remote_pads)
			return -ENOMEM;

		for (i = 0; i < sd->entity.num_pads; ++i)
			remote_pads[i] = U32_MAX;
	}

	for (i = 0; i < routing->num_routes; ++i) {
		const struct v4l2_subdev_route *route = &routing->routes[i];

		/* Validate the sink and source pad numbers. */
		if (route->sink_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
			dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
				i, route->sink_pad);
			goto out;
		}

		if (route->source_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
			dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
				i, route->source_pad);
			goto out;
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_STREAM_MIX: Streams on the same pad
		 * may not be routed to streams on different pads.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_STREAM_MIX) {
			if (remote_pads[route->sink_pad] != U32_MAX &&
			    remote_pads[route->sink_pad] != route->source_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "sink");
				goto out;
			}

			if (remote_pads[route->source_pad] != U32_MAX &&
			    remote_pads[route->source_pad] != route->sink_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "source");
				goto out;
			}

			remote_pads[route->sink_pad] = route->source_pad;
			remote_pads[route->source_pad] = route->sink_pad;
		}

		for (j = i + 1; j < routing->num_routes; ++j) {
			const struct v4l2_subdev_route *r = &routing->routes[j];

			/*
			 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
			 * originate from the same (sink) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
			    route->sink_pad == r->sink_pad &&
			    route->sink_stream == r->sink_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u originate from same sink (%u/%u)\n",
					i, j, route->sink_pad,
					route->sink_stream);
				goto out;
			}

			/*
			 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
			 * at the same (source) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
			    route->source_pad == r->source_pad &&
			    route->source_stream == r->source_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u end at same source (%u/%u)\n",
					i, j, route->source_pad,
					route->source_stream);
				goto out;
			}
		}
	}

	ret = 0;

out:
	kfree(remote_pads);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);

static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
					       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * The subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if (sd->enabled_streams & streams_mask) {
		dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Start streaming when the first streams are enabled. */
	if (!sd->enabled_streams) {
		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		if (ret)
			return ret;
	}

	sd->enabled_streams |= streams_mask;

	return 0;
}

int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
			       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .enable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->enable_streams)
		return v4l2_subdev_enable_streams_fallback(sd, pad,
							   streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already enabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (cfg->enabled) {
			dev_dbg(dev, "stream %u already enabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	/* Call the .enable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
			       streams_mask);
	if (ret)
		goto done;

	/* Mark the streams as enabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = true;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
1884
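/*
 * Usage sketch (hypothetical): a bridge or host driver typically enables
 * the routed streams on the remote subdev's source pad when it starts
 * capture. The names below (mydrv, source_sd, source_pad) and the choice
 * of mask are illustrative, not part of this file.
 *
 *	static int mydrv_start_streaming(struct mydrv *priv)
 *	{
 *		u64 mask = BIT_ULL(0) | BIT_ULL(1);
 *
 *		return v4l2_subdev_enable_streams(priv->source_sd,
 *						  priv->source_pad, mask);
 *	}
 */
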
static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
						u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if ((sd->enabled_streams & streams_mask) != streams_mask) {
		dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Stop streaming when the last streams are disabled. */
	if (!(sd->enabled_streams & ~streams_mask)) {
		ret = v4l2_subdev_call(sd, video, s_stream, 0);
		if (ret)
			return ret;
	}

	sd->enabled_streams &= ~streams_mask;

	return 0;
}

int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
				u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .disable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->disable_streams)
		return v4l2_subdev_disable_streams_fallback(sd, pad,
							    streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already disabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (!cfg->enabled) {
			dev_dbg(dev, "stream %u already disabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	/* Call the .disable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
			       streams_mask);
	if (ret)
		goto done;

	/* Mark the streams as disabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = false;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);

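/*
 * Usage sketch (hypothetical): the stop path mirrors the start path shown
 * above, passing the same mask that was given to
 * v4l2_subdev_enable_streams(). The mydrv_* names are illustrative.
 *
 *	static void mydrv_stop_streaming(struct mydrv *priv)
 *	{
 *		u64 mask = BIT_ULL(0) | BIT_ULL(1);
 *
 *		v4l2_subdev_disable_streams(priv->source_sd,
 *					    priv->source_pad, mask);
 *	}
 */
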
int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev_route *route;
	struct media_pad *pad;
	u64 source_mask = 0;
	int pad_index = -1;

	/*
	 * Find the source pad. This helper is meant for subdevs that have a
	 * single source pad, so failures shouldn't happen, but catch them
	 * loudly nonetheless as they indicate a driver bug.
	 */
	media_entity_for_each_pad(&sd->entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			pad_index = pad->index;
			break;
		}
	}

	if (WARN_ON(pad_index == -1))
		return -EINVAL;

	/*
	 * As there's a single source pad, just collect all the source streams.
	 */
	state = v4l2_subdev_lock_and_get_active_state(sd);

	for_each_active_route(&state->routing, route)
		source_mask |= BIT_ULL(route->source_stream);

	v4l2_subdev_unlock_state(state);

	if (enable)
		return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
	else
		return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);

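/*
 * Usage sketch (hypothetical): since the helper's signature matches the
 * .s_stream() video operation, a streams-aware subdev driver can plug it
 * in directly so that legacy .s_stream() callers keep working. The
 * mydrv_* name is illustrative.
 *
 *	static const struct v4l2_subdev_video_ops mydrv_video_ops = {
 *		.s_stream = v4l2_subdev_s_stream_helper,
 *	};
 */
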
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

#endif /* CONFIG_MEDIA_CONTROLLER */

void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
	INIT_LIST_HEAD(&sd->list);
	BUG_ON(!ops);
	sd->ops = ops;
	sd->v4l2_dev = NULL;
	sd->flags = 0;
	sd->name[0] = '\0';
	sd->grp_id = 0;
	sd->dev_priv = NULL;
	sd->host_priv = NULL;
	sd->privacy_led = NULL;
#if defined(CONFIG_MEDIA_CONTROLLER)
	sd->entity.name = sd->name;
	sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
	sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);

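/*
 * Usage sketch (hypothetical): a sensor driver initializes its subdev in
 * probe before registering it. The priv/mydrv_subdev_ops names are
 * illustrative.
 *
 *	v4l2_subdev_init(&priv->sd, &mydrv_subdev_ops);
 *	priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 *	snprintf(priv->sd.name, sizeof(priv->sd.name), "mydrv %s",
 *		 dev_name(dev));
 */
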
void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
			      const struct v4l2_event *ev)
{
	v4l2_event_queue(sd->devnode, ev);
	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);

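/*
 * Usage sketch (hypothetical): a sensor driver can signal a resolution
 * change both to userspace (via the devnode) and to the bridge driver
 * with a single call:
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_SOURCE_CHANGE,
 *		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *	};
 *
 *	v4l2_subdev_notify_event(sd, &ev);
 */
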
int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	sd->privacy_led = led_get(sd->dev, "privacy-led");
	if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
		return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
				     "getting privacy LED\n");

	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_disable(sd->privacy_led);
		led_trigger_remove(sd->privacy_led);
		led_set_brightness(sd->privacy_led, 0);
		mutex_unlock(&sd->privacy_led->led_access);
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);

void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_enable(sd->privacy_led);
		mutex_unlock(&sd->privacy_led->led_access);
		led_put(sd->privacy_led);
	}
#endif
}
EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
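/*
 * Usage sketch (hypothetical): these two calls pair up in a sensor
 * driver's probe and remove paths; priv is illustrative.
 *
 *	ret = v4l2_subdev_get_privacy_led(&priv->sd);
 *	if (ret)
 *		return ret;
 *	...
 *	v4l2_subdev_put_privacy_led(&priv->sd);
 */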