// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 sub-device
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	    Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below.
 */

static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * Maximum stream ID is 63 for now, as we use a u64 bitmask to represent a
 * set of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
 * restricts the total number of streams in a pad, although the stream ID is
 * not restricted.
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63
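
/*
 * Illustrative note (not part of the driver API): because a set of streams
 * is a u64 bitmask, membership tests and unions are single bit operations,
 * which is what caps stream IDs at 63 above. For example:
 *
 *	u64 mask = BIT_ULL(0) | BIT_ULL(5);	// streams 0 and 5
 *	bool has5 = mask & BIT_ULL(5);		// true
 */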

#include "v4l2-subdev-priv.h"

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;
	static struct lock_class_key key;

	state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	fh->state = state;

	return 0;
}

static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
	__v4l2_subdev_state_free(fh->state);
	fh->state = NULL;
}

static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
	int ret;

	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;

	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
		struct module *owner;

		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
		if (!try_module_get(owner)) {
			ret = -EBUSY;
			goto err;
		}
		subdev_fh->owner = owner;
	}

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	module_put(subdev_fh->owner);
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}

static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
	module_put(subdev_fh->owner);
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static int subdev_open(struct file *file)
{
	return -ENODEV;
}

static int subdev_close(struct file *file)
{
	return -ENODEV;
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static inline int check_which(u32 which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY &&
	    which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	return 0;
}

static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->entity.num_pads) {
		if (pad >= sd->entity.num_pads)
			return -EINVAL;
		return 0;
	}
#endif
	/* allow pad 0 on subdevices not registered as media entities */
	if (pad > 0)
		return -EINVAL;
	return 0;
}

static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
		       u32 which, u32 pad, u32 stream)
{
	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
		if (!v4l2_subdev_state_get_format(state, pad, stream))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif
	}

	if (stream != 0)
		return -EINVAL;

	if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
		return -EINVAL;

	return 0;
}

static inline int check_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_format *format)
{
	if (!format)
		return -EINVAL;

	return check_which(format->which) ? : check_pad(sd, format->pad) ? :
	       check_state(sd, state, format->which, format->pad, format->stream);
}
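
/*
 * Note: check_format() and the call_*() helpers below chain their checks
 * with the GNU "a ?: b" extension: the expression evaluates to the first
 * non-zero error code, and the driver operation only runs once every check
 * has returned 0.
 */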

static int call_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->get_fmt(sd, state, format);
}

static int call_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->set_fmt(sd, state, format);
}

static int call_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	if (!code)
		return -EINVAL;

	return check_which(code->which) ? : check_pad(sd, code->pad) ? :
	       check_state(sd, state, code->which, code->pad, code->stream) ? :
	       sd->ops->pad->enum_mbus_code(sd, state, code);
}

static int call_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	if (!fse)
		return -EINVAL;

	return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
	       check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
	       sd->ops->pad->enum_frame_size(sd, state, fse);
}

static int call_enum_frame_interval(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_frame_interval_enum *fie)
{
	if (!fie)
		return -EINVAL;

	return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
	       check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
	       sd->ops->pad->enum_frame_interval(sd, state, fie);
}

static inline int check_selection(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	if (!sel)
		return -EINVAL;

	return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
	       check_state(sd, state, sel->which, sel->pad, sel->stream);
}

static int call_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->get_selection(sd, state, sel);
}

static int call_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->set_selection(sd, state, sel);
}

static inline int check_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       struct v4l2_subdev_frame_interval *fi)
{
	if (!fi)
		return -EINVAL;

	return check_which(fi->which) ? : check_pad(sd, fi->pad) ? :
	       check_state(sd, state, fi->which, fi->pad, fi->stream);
}

static int call_get_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, state, fi) ? :
	       sd->ops->pad->get_frame_interval(sd, state, fi);
}

static int call_set_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, state, fi) ? :
	       sd->ops->pad->set_frame_interval(sd, state, fi);
}

static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
			       struct v4l2_mbus_frame_desc *fd)
{
	unsigned int i;
	int ret;

	memset(fd, 0, sizeof(*fd));

	ret = sd->ops->pad->get_frame_desc(sd, pad, fd);
	if (ret)
		return ret;

	dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad,
		fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" :
		fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" :
		"unknown");

	for (i = 0; i < fd->num_entries; i++) {
		struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i];
		char buf[20] = "";

		if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
			WARN_ON(snprintf(buf, sizeof(buf),
					 ", vc %u, dt 0x%02x",
					 entry->bus.csi2.vc,
					 entry->bus.csi2.dt) >= sizeof(buf));

		dev_dbg(sd->dev,
			"\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n",
			entry->stream, entry->pixelcode, entry->length,
			entry->flags, buf);
	}

	return 0;
}

static inline int check_edid(struct v4l2_subdev *sd,
			     struct v4l2_subdev_edid *edid)
{
	if (!edid)
		return -EINVAL;

	if (edid->blocks && edid->edid == NULL)
		return -EINVAL;

	return check_pad(sd, edid->pad);
}

static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
}

static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
}

static int call_dv_timings_cap(struct v4l2_subdev *sd,
			       struct v4l2_dv_timings_cap *cap)
{
	if (!cap)
		return -EINVAL;

	return check_pad(sd, cap->pad) ? :
	       sd->ops->pad->dv_timings_cap(sd, cap);
}

static int call_enum_dv_timings(struct v4l2_subdev *sd,
				struct v4l2_enum_dv_timings *dvt)
{
	if (!dvt)
		return -EINVAL;

	return check_pad(sd, dvt->pad) ? :
	       sd->ops->pad->enum_dv_timings(sd, dvt);
}

static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				struct v4l2_mbus_config *config)
{
	return check_pad(sd, pad) ? :
	       sd->ops->pad->get_mbus_config(sd, pad, config);
}

static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
	int ret;

	/*
	 * The .s_stream() operation must never be called to start or stop an
	 * already started or stopped subdev. Catch offenders but don't return
	 * an error yet to avoid regressions.
	 *
	 * As .s_stream() is mutually exclusive with the .enable_streams() and
	 * .disable_streams() operation, we can use the enabled_streams field
	 * to store the subdev streaming state.
	 */
	if (WARN_ON(!!sd->enabled_streams == !!enable))
		return 0;

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		if (enable)
			led_set_brightness(sd->privacy_led,
					   sd->privacy_led->max_brightness);
		else
			led_set_brightness(sd->privacy_led, 0);
	}
#endif
	ret = sd->ops->video->s_stream(sd, enable);

	if (!enable && ret < 0) {
		dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
		ret = 0;
	}

	if (!ret)
		sd->enabled_streams = enable ? BIT(0) : 0;

	return ret;
}

#ifdef CONFIG_MEDIA_CONTROLLER
/*
 * Create state-management wrapper for pad ops dealing with subdev state. The
 * wrapper handles the case where the caller does not provide the called
 * subdev's state. This should be removed when all the callers are fixed.
 */
#define DEFINE_STATE_WRAPPER(f, arg_type)                                  \
	static int call_##f##_state(struct v4l2_subdev *sd,                \
				    struct v4l2_subdev_state *_state,      \
				    arg_type *arg)                         \
	{                                                                  \
		struct v4l2_subdev_state *state = _state;                  \
		int ret;                                                   \
		if (!_state)                                               \
			state = v4l2_subdev_lock_and_get_active_state(sd); \
		ret = call_##f(sd, state, arg);                            \
		if (!_state && state)                                      \
			v4l2_subdev_unlock_state(state);                   \
		return ret;                                                \
	}

#else /* CONFIG_MEDIA_CONTROLLER */

#define DEFINE_STATE_WRAPPER(f, arg_type)                            \
	static int call_##f##_state(struct v4l2_subdev *sd,          \
				    struct v4l2_subdev_state *state, \
				    arg_type *arg)                   \
	{                                                            \
		return call_##f(sd, state, arg);                     \
	}

#endif /* CONFIG_MEDIA_CONTROLLER */

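/*
 * For reference, DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format)
 * below expands (in the CONFIG_MEDIA_CONTROLLER case) to roughly:
 *
 *	static int call_get_fmt_state(struct v4l2_subdev *sd,
 *				      struct v4l2_subdev_state *_state,
 *				      struct v4l2_subdev_format *arg)
 *	{
 *		struct v4l2_subdev_state *state = _state;
 *		int ret;
 *
 *		if (!_state)
 *			state = v4l2_subdev_lock_and_get_active_state(sd);
 *		ret = call_get_fmt(sd, state, arg);
 *		if (!_state && state)
 *			v4l2_subdev_unlock_state(state);
 *		return ret;
 *	}
 */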
DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);

static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
	.get_fmt		= call_get_fmt_state,
	.set_fmt		= call_set_fmt_state,
	.enum_mbus_code		= call_enum_mbus_code_state,
	.enum_frame_size	= call_enum_frame_size_state,
	.enum_frame_interval	= call_enum_frame_interval_state,
	.get_selection		= call_get_selection_state,
	.set_selection		= call_set_selection_state,
	.get_frame_interval	= call_get_frame_interval,
	.set_frame_interval	= call_set_frame_interval,
	.get_edid		= call_get_edid,
	.set_edid		= call_set_edid,
	.dv_timings_cap		= call_dv_timings_cap,
	.enum_dv_timings	= call_enum_dv_timings,
	.get_frame_desc		= call_get_frame_desc,
	.get_mbus_config	= call_get_mbus_config,
};

static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
	.s_stream		= call_s_stream,
};

const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
	.pad	= &v4l2_subdev_call_pad_wrappers,
	.video	= &v4l2_subdev_call_video_wrappers,
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

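/*
 * Return the sub-device state an ioctl operates on: the per-file-handle TRY
 * state for V4L2_SUBDEV_FORMAT_TRY requests, the (unlocked) active state for
 * ACTIVE requests, and NULL for ioctls that carry no 'which' field. The
 * caller is responsible for locking the returned state.
 */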
static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
		       unsigned int cmd, void *arg)
{
	u32 which;

	switch (cmd) {
	default:
		return NULL;
	case VIDIOC_SUBDEV_G_FMT:
	case VIDIOC_SUBDEV_S_FMT:
		which = ((struct v4l2_subdev_format *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_CROP:
	case VIDIOC_SUBDEV_S_CROP:
		which = ((struct v4l2_subdev_crop *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
		which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
		which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
		which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_SELECTION:
	case VIDIOC_SUBDEV_S_SELECTION:
		which = ((struct v4l2_subdev_selection *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!(subdev_fh->client_caps &
		      V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH))
			fi->which = V4L2_SUBDEV_FORMAT_ACTIVE;

		which = fi->which;
		break;
	}
	case VIDIOC_SUBDEV_G_ROUTING:
	case VIDIOC_SUBDEV_S_ROUTING:
		which = ((struct v4l2_subdev_routing *)arg)->which;
		break;
	}

	return which == V4L2_SUBDEV_FORMAT_TRY ?
			     subdev_fh->state :
			     v4l2_subdev_get_unlocked_active_state(sd);
}

static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
			    struct v4l2_subdev_state *state)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
	bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
	bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
	bool client_supports_streams = subdev_fh->client_caps &
				       V4L2_SUBDEV_CLIENT_CAP_STREAMS;
	int rval;

	/*
	 * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
	 * Remove this when the API is no longer experimental.
	 */
	if (!v4l2_subdev_enable_streams_api)
		streams_subdev = false;

	switch (cmd) {
	case VIDIOC_SUBDEV_QUERYCAP: {
		struct v4l2_subdev_capability *cap = arg;

		memset(cap->reserved, 0, sizeof(cap->reserved));
		cap->version = LINUX_VERSION_CODE;
		cap->capabilities =
			(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
			(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);

		return 0;
	}

	case VIDIOC_QUERYCTRL:
		/*
		 * TODO: this really should be folded into v4l2_queryctrl (this
		 * currently returns -EINVAL for NULL control handlers).
		 * However, v4l2_queryctrl() is still called directly by
		 * drivers as well and until that has been addressed I believe
		 * it is safer to do the check here. The same is true for the
		 * other control ioctls below.
		 */
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ext_ctrls(vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_S_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_try_ext_ctrls(vfh->ctrl_handler,
					  vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);

	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
	case VIDIOC_DBG_G_CHIP_INFO:
	{
		struct v4l2_dbg_chip_info *p = arg;

		if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
			return -EINVAL;
		if (sd->ops->core && sd->ops->core->s_register)
			p->flags |= V4L2_CHIP_FL_WRITABLE;
		if (sd->ops->core && sd->ops->core->g_register)
			p->flags |= V4L2_CHIP_FL_READABLE;
		strscpy(p->name, sd->name, sizeof(p->name));
		return 0;
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: =================  START STATUS  =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ==================  END STATUS  ==================\n",
			sd->name);
		return ret;
	}

	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, get_fmt, state, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, set_fmt, state, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		if (!client_supports_streams)
			code->stream = 0;

		memset(code->reserved, 0, sizeof(code->reserved));
		return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		if (!client_supports_streams)
			fse->stream = 0;

		memset(fse->reserved, 0, sizeof(fse->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_size, state,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		if (!client_supports_streams)
			fie->stream = 0;

		memset(fie->reserved, 0, sizeof(fie->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, get_selection, state, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, set_selection, state, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, query_dv_timings, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, g_dv_timings, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_dv_timings, arg);

	case VIDIOC_SUBDEV_G_STD:
		return v4l2_subdev_call(sd, video, g_std, arg);

	case VIDIOC_SUBDEV_S_STD: {
		v4l2_std_id *std = arg;

		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_std, *std);
	}

	case VIDIOC_SUBDEV_ENUMSTD: {
		struct v4l2_standard *p = arg;
		v4l2_std_id id;

		if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
			return -EINVAL;

		return v4l_video_std_enumstd(p, id);
	}

	case VIDIOC_SUBDEV_QUERYSTD:
		return v4l2_subdev_call(sd, video, querystd, arg);

	case VIDIOC_SUBDEV_G_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_krouting *krouting;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		krouting = &state->routing;

		if (routing->num_routes < krouting->num_routes) {
			routing->num_routes = krouting->num_routes;
			return -ENOSPC;
		}

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       krouting->routes,
		       krouting->num_routes * sizeof(*krouting->routes));
		routing->num_routes = krouting->num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_S_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_route *routes =
			(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
		struct v4l2_subdev_krouting krouting = {};
		unsigned int i;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		for (i = 0; i < routing->num_routes; ++i) {
			const struct v4l2_subdev_route *route = &routes[i];
			const struct media_pad *pads = sd->entity.pads;

			if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
			    route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
				return -EINVAL;

			if (route->sink_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->sink_pad].flags &
			      MEDIA_PAD_FL_SINK))
				return -EINVAL;

			if (route->source_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->source_pad].flags &
			      MEDIA_PAD_FL_SOURCE))
				return -EINVAL;
		}

		krouting.num_routes = routing->num_routes;
		krouting.routes = routes;

		return v4l2_subdev_call(sd, pad, set_routing, state,
					routing->which, &krouting);
	}

	case VIDIOC_SUBDEV_G_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		client_cap->capabilities = subdev_fh->client_caps;

		return 0;
	}

	case VIDIOC_SUBDEV_S_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		/*
		 * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
		 * enabled. Remove this when streams API is no longer
		 * experimental.
		 */
		if (!v4l2_subdev_enable_streams_api)
			client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;

		/* Filter out unsupported capabilities */
		client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS |
					     V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH);

		subdev_fh->client_caps = client_cap->capabilities;

		return 0;
	}

	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}

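/*
 * Lock the video device (if it has a lock), resolve and lock the state the
 * ioctl operates on, and dispatch to subdev_do_ioctl(). Returns -ENODEV
 * once the device node has been unregistered.
 */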
static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->lock;
	long ret = -ENODEV;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	if (video_is_registered(vdev)) {
		struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
		struct v4l2_fh *vfh = file->private_data;
		struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
		struct v4l2_subdev_state *state;

		state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);

		if (state)
			v4l2_subdev_lock_state(state);

		ret = subdev_do_ioctl(file, cmd, arg, state);

		if (state)
			v4l2_subdev_unlock_state(state);
	}

	if (lock)
		mutex_unlock(lock);
	return ret;
}

static long subdev_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif

#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return -ENODEV;
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -ENODEV;
}
#endif
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *fh = file->private_data;

	if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
		return EPOLLERR;

	poll_wait(file, &fh->wait, wait);

	if (v4l2_event_pending(fh))
		return EPOLLPRI;

	return 0;
}

const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.release = subdev_close,
	.poll = subdev_poll,
};

#ifdef CONFIG_MEDIA_CONTROLLER

int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
				      struct fwnode_endpoint *endpoint)
{
	struct fwnode_handle *fwnode;
	struct v4l2_subdev *sd;

	if (!is_media_entity_v4l2_subdev(entity))
		return -EINVAL;

	sd = media_entity_to_v4l2_subdev(entity);

	fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
	fwnode_handle_put(fwnode);

	if (device_match_fwnode(sd->dev, fwnode))
		return endpoint->port;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);

int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	bool pass = true;

	/* The width, height and code must match. */
	if (source_fmt->format.width != sink_fmt->format.width) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: width does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.width, sink_fmt->format.width);
		pass = false;
	}

	if (source_fmt->format.height != sink_fmt->format.height) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: height does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.height, sink_fmt->format.height);
		pass = false;
	}

	if (source_fmt->format.code != sink_fmt->format.code) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
			__func__,
			source_fmt->format.code, sink_fmt->format.code);
		pass = false;
	}

	/* The field order must match, or the sink field order must be NONE
	 * to support interlaced hardware connected to bridges that support
	 * progressive formats only.
	 */
	if (source_fmt->format.field != sink_fmt->format.field &&
	    sink_fmt->format.field != V4L2_FIELD_NONE) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: field does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.field, sink_fmt->format.field);
		pass = false;
	}

	if (pass)
		return 0;

	dev_dbg(sd->entity.graph_obj.mdev->dev,
		"%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	return -EPIPE;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);

static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
				     struct v4l2_subdev_format *fmt,
				     bool states_locked)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *sd;
	int ret;

	if (!is_media_entity_v4l2_subdev(pad->entity)) {
		WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
		     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
		     pad->entity->function, pad->entity->name);

		return -EINVAL;
	}

	sd = media_entity_to_v4l2_subdev(pad->entity);

	fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt->pad = pad->index;
	fmt->stream = stream;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(sd);
	else
		state = v4l2_subdev_lock_and_get_active_state(sd);

	ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);

	if (!states_locked && state)
		v4l2_subdev_unlock_state(state);

	return ret;
}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static void __v4l2_link_validate_get_streams(struct media_pad *pad,
					     u64 *streams_mask,
					     bool states_locked)
{
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *subdev;

	subdev = media_entity_to_v4l2_subdev(pad->entity);

	*streams_mask = 0;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(subdev);
	else
		state = v4l2_subdev_lock_and_get_active_state(subdev);

	if (WARN_ON(!state))
		return;

	for_each_active_route(&state->routing, route) {
		u32 route_pad;
		u32 route_stream;

		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			route_pad = route->source_pad;
			route_stream = route->source_stream;
		} else {
			route_pad = route->sink_pad;
			route_stream = route->sink_stream;
		}

		if (route_pad != pad->index)
			continue;

		*streams_mask |= BIT_ULL(route_stream);
	}

	if (!states_locked)
		v4l2_subdev_unlock_state(state);
}

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static void v4l2_link_validate_get_streams(struct media_pad *pad,
					   u64 *streams_mask,
					   bool states_locked)
{
	struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);

	if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
		/* Non-streams subdevs have an implicit stream 0 */
		*streams_mask = BIT_ULL(0);
		return;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	__v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
#else
	/* This shouldn't happen */
	*streams_mask = 0;
#endif
}

static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked)
{
	struct v4l2_subdev *sink_subdev =
		media_entity_to_v4l2_subdev(link->sink->entity);
	struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
	u64 source_streams_mask;
	u64 sink_streams_mask;
	u64 dangling_sink_streams;
	u32 stream;
	int ret;

	dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked);
	v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked);

	/*
	 * It is ok to have more source streams than sink streams as extra
	 * source streams can just be ignored by the receiver, but having extra
	 * sink streams is an error as streams must have a source.
	 */
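	/*
	 * For example, source mask 0x7 (streams 0-2) and sink mask 0xd
	 * (streams 0, 2 and 3) give (0x7 ^ 0xd) & 0xd = 0x8: sink stream 3
	 * has no source, so the link is invalid.
	 */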
	dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
				sink_streams_mask;
	if (dangling_sink_streams) {
		dev_err(dev, "Dangling sink streams: mask %#llx\n",
			dangling_sink_streams);
		return -EINVAL;
	}

	/* Validate source and sink stream formats */

	for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
		struct v4l2_subdev_format sink_fmt, source_fmt;

		if (!(sink_streams_mask & BIT_ULL(stream)))
			continue;

		dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
			link->source->entity->name, link->source->index, stream,
			link->sink->entity->name, link->sink->index, stream);

		ret = v4l2_subdev_link_validate_get_format(link->source, stream,
							   &source_fmt, states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->source->entity->name, link->source->index,
				stream);
			continue;
		}

		ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
							   &sink_fmt, states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->sink->entity->name, link->sink->index,
				stream);
			continue;
		}

		/* TODO: add stream number to link_validate() */
		ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
				       &source_fmt, &sink_fmt);
		if (!ret)
			continue;

		if (ret != -ENOIOCTLCMD)
			return ret;

		ret = v4l2_subdev_link_validate_default(sink_subdev, link,
							&source_fmt, &sink_fmt);

		if (ret)
			return ret;
	}

	return 0;
}

int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *source_sd, *sink_sd;
	struct v4l2_subdev_state *source_state, *sink_state;
	bool states_locked;
	int ret;

	if (!is_media_entity_v4l2_subdev(link->sink->entity) ||
	    !is_media_entity_v4l2_subdev(link->source->entity)) {
		pr_warn_once("%s of link '%s':%u->'%s':%u is not a V4L2 sub-device, driver bug!\n",
			     !is_media_entity_v4l2_subdev(link->sink->entity) ?
			     "sink" : "source",
			     link->source->entity->name, link->source->index,
			     link->sink->entity->name, link->sink->index);
		return 0;
	}

	sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
	source_state = v4l2_subdev_get_unlocked_active_state(source_sd);

	states_locked = sink_state && source_state;

	if (states_locked) {
		v4l2_subdev_lock_state(sink_state);
		v4l2_subdev_lock_state(source_state);
	}

	ret = v4l2_subdev_link_validate_locked(link, states_locked);

	if (states_locked) {
		v4l2_subdev_unlock_state(sink_state);
		v4l2_subdev_unlock_state(source_state);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);

bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
				  unsigned int pad0, unsigned int pad1)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct v4l2_subdev_krouting *routing;
	struct v4l2_subdev_state *state;
	unsigned int i;

	state = v4l2_subdev_lock_and_get_active_state(sd);

	routing = &state->routing;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
		    (route->source_pad == pad0 && route->sink_pad == pad1)) {
			v4l2_subdev_unlock_state(state);
			return true;
		}
	}

	v4l2_subdev_unlock_state(state);

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);

struct v4l2_subdev_state *
__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
			  struct lock_class_key *lock_key)
{
	struct v4l2_subdev_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	__mutex_init(&state->_lock, lock_name, lock_key);
	if (sd->state_lock)
		state->lock = sd->state_lock;
	else
		state->lock = &state->_lock;

	state->sd = sd;

	/* Drivers that support streams do not need the legacy pad config */
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
		state->pads = kvcalloc(sd->entity.num_pads,
				       sizeof(*state->pads), GFP_KERNEL);
		if (!state->pads) {
			ret = -ENOMEM;
			goto err;
		}
	}

	if (sd->internal_ops && sd->internal_ops->init_state) {
		/*
		 * There can be no race at this point, but we lock the state
		 * anyway to satisfy lockdep checks.
		 */
		v4l2_subdev_lock_state(state);
		ret = sd->internal_ops->init_state(sd, state);
		v4l2_subdev_unlock_state(state);

		if (ret)
			goto err;
	}

	return state;

err:
	if (state && state->pads)
		kvfree(state->pads);

	kfree(state);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);

void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
	if (!state)
		return;

	mutex_destroy(&state->_lock);

	kfree(state->routing.routes);
	kvfree(state->stream_configs.configs);
	kvfree(state->pads);
	kfree(state);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);

int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
				struct lock_class_key *key)
{
	struct v4l2_subdev_state *state;

	state = __v4l2_subdev_state_alloc(sd, name, key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	sd->active_state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
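
/*
 * Drivers normally reach __v4l2_subdev_init_finalize() through the
 * v4l2_subdev_init_finalize() wrapper macro in <media/v4l2-subdev.h>,
 * which supplies a per-call-site lock name and lock_class_key, along the
 * lines of this sketch (see the header for the exact definition):
 *
 *	#define v4l2_subdev_init_finalize(sd)
 *	({
 *		static struct lock_class_key __key;
 *		const char *name = KBUILD_BASENAME
 *			":" __stringify(__LINE__) ":sd->active_state->lock";
 *		__v4l2_subdev_init_finalize(sd, name, &__key);
 *	})
 */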

void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
	struct v4l2_async_subdev_endpoint *ase, *ase_tmp;

	__v4l2_subdev_state_free(sd->active_state);
	sd->active_state = NULL;

	/* Uninitialised sub-device, bail out here. */
	if (!sd->async_subdev_endpoint_list.next)
		return;

	list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
				 async_subdev_endpoint_entry) {
		list_del(&ase->async_subdev_endpoint_entry);

		kfree(ase);
	}
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);

struct v4l2_mbus_framefmt *
__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
			       unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].format;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].fmt;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format);

struct v4l2_rect *
__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
			     u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].crop;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].crop;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop);

struct v4l2_rect *
__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
				unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].compose;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].compose;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose);

struct v4l2_fract *
__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
				 unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON(!state))
		return NULL;

	lockdep_assert_held(state->lock);

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].interval;
	}

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].interval;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static int
v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
				const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_stream_configs new_configs = { 0 };
	struct v4l2_subdev_route *route;
	u32 idx;

	/* Count number of formats needed */
	for_each_active_route(routing, route) {
		/*
		 * Each route needs a format on both ends of the route.
		 */
		new_configs.num_configs += 2;
	}

	if (new_configs.num_configs) {
		new_configs.configs = kvcalloc(new_configs.num_configs,
					       sizeof(*new_configs.configs),
					       GFP_KERNEL);

		if (!new_configs.configs)
			return -ENOMEM;
	}

	/*
	 * Fill in the 'pad' and 'stream' values for each item in the array
	 * from the routing table.
	 */
	idx = 0;

	for_each_active_route(routing, route) {
		new_configs.configs[idx].pad = route->sink_pad;
		new_configs.configs[idx].stream = route->sink_stream;

		idx++;

		new_configs.configs[idx].pad = route->source_pad;
		new_configs.configs[idx].stream = route->source_stream;

		idx++;
	}

	kvfree(stream_configs->configs);
	*stream_configs = new_configs;

	return 0;
}

int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;

	fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
	if (!fmt)
		return -EINVAL;

	format->format = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);

int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	struct v4l2_fract *interval;

	interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream);
	if (!interval)
		return -EINVAL;

	fi->interval = *interval;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval);

int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state,
			    const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_krouting *dst = &state->routing;
	const struct v4l2_subdev_krouting *src = routing;
	struct v4l2_subdev_krouting new_routing = { 0 };
	size_t bytes;
	int r;

	if (unlikely(check_mul_overflow((size_t)src->num_routes,
					sizeof(*src->routes), &bytes)))
		return -EOVERFLOW;

	lockdep_assert_held(state->lock);

	if (src->num_routes > 0) {
		new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
		if (!new_routing.routes)
			return -ENOMEM;
	}

	new_routing.num_routes = src->num_routes;

	r = v4l2_subdev_init_stream_configs(&state->stream_configs,
					    &new_routing);
	if (r) {
		kfree(new_routing.routes);
		return r;
	}

	kfree(dst->routes);
	*dst = new_routing;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);

struct v4l2_subdev_route *
__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
				struct v4l2_subdev_route *route)
{
	if (route)
		++route;
	else
		route = &routing->routes[0];

	for (; route < routing->routes + routing->num_routes; ++route) {
		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		return route;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
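
/*
 * __v4l2_subdev_next_active_route() backs the for_each_active_route()
 * iterator used throughout this file; a sketch of that macro (defined in
 * <media/v4l2-subdev.h>):
 *
 *	#define for_each_active_route(routing, route)
 *		for ((route) = NULL;
 *		     ((route) = __v4l2_subdev_next_active_route((routing),
 *								(route)));)
 */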

int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *state,
				     const struct v4l2_subdev_krouting *routing,
				     const struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;
	int ret;

	ret = v4l2_subdev_set_routing(sd, state, routing);
	if (ret)
		return ret;

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i)
		stream_configs->configs[i].fmt = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);

int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
					  u32 pad, u32 stream, u32 *other_pad,
					  u32 *other_stream)
{
	unsigned int i;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (route->source_pad == pad &&
		    route->source_stream == stream) {
			if (other_pad)
				*other_pad = route->sink_pad;
			if (other_stream)
				*other_stream = route->sink_stream;
			return 0;
		}

		if (route->sink_pad == pad && route->sink_stream == stream) {
			if (other_pad)
				*other_pad = route->source_pad;
			if (other_stream)
				*other_stream = route->source_stream;
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
					     u32 pad, u32 stream)
{
	u32 other_pad, other_stream;
	int ret;

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
						    pad, stream,
						    &other_pad, &other_stream);
	if (ret)
		return NULL;

	return v4l2_subdev_state_get_format(state, other_pad, other_stream);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);

u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
				    u32 pad0, u32 pad1, u64 *streams)
{
	const struct v4l2_subdev_krouting *routing = &state->routing;
	struct v4l2_subdev_route *route;
	u64 streams0 = 0;
	u64 streams1 = 0;

	for_each_active_route(routing, route) {
		if (route->sink_pad == pad0 && route->source_pad == pad1 &&
		    (*streams & BIT_ULL(route->sink_stream))) {
			streams0 |= BIT_ULL(route->sink_stream);
			streams1 |= BIT_ULL(route->source_stream);
		}
		if (route->source_pad == pad0 && route->sink_pad == pad1 &&
		    (*streams & BIT_ULL(route->source_stream))) {
			streams0 |= BIT_ULL(route->source_stream);
			streams1 |= BIT_ULL(route->sink_stream);
		}
	}

	*streams = streams0;
	return streams1;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
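
/*
 * Usage sketch for v4l2_subdev_state_xlate_streams() (pad0, pad1 and the
 * masks below are illustrative): translate a set of streams on pad0 to the
 * matching streams on pad1, trimming the input mask to the streams that
 * actually route between the two pads:
 *
 *	u64 streams_on_pad0 = BIT_ULL(0) | BIT_ULL(1);
 *	u64 streams_on_pad1;
 *
 *	streams_on_pad1 = v4l2_subdev_state_xlate_streams(state, pad0, pad1,
 *							  &streams_on_pad0);
 */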

int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
				 const struct v4l2_subdev_krouting *routing,
				 enum v4l2_subdev_routing_restriction disallow)
{
	u32 *remote_pads = NULL;
	unsigned int i, j;
	int ret = -EINVAL;

	if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
			V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
		remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
				      GFP_KERNEL);
		if (!remote_pads)
			return -ENOMEM;

		for (i = 0; i < sd->entity.num_pads; ++i)
			remote_pads[i] = U32_MAX;
	}

	for (i = 0; i < routing->num_routes; ++i) {
		const struct v4l2_subdev_route *route = &routing->routes[i];

		/* Validate the sink and source pad numbers. */
		if (route->sink_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
			dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
				i, route->sink_pad);
			goto out;
		}

		if (route->source_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
			dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
				i, route->source_pad);
			goto out;
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a
		 * sink pad must be routed to a single source pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) {
			if (remote_pads[route->sink_pad] != U32_MAX &&
			    remote_pads[route->sink_pad] != route->source_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "sink");
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a
		 * source pad must originate from a single sink pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
			if (remote_pads[route->source_pad] != U32_MAX &&
			    remote_pads[route->source_pad] != route->sink_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "source");
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink
		 * side cannot do stream multiplexing, i.e. there can be only
		 * a single stream on a sink pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
			if (remote_pads[route->sink_pad] != U32_MAX) {
				dev_dbg(sd->dev,
					"route %u attempts to multiplex on %s pad %u\n",
					i, "sink", route->sink_pad);
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the
		 * source side cannot do stream multiplexing, i.e. there can
		 * be only a single stream on a source pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
			if (remote_pads[route->source_pad] != U32_MAX) {
				dev_dbg(sd->dev,
					"route %u attempts to multiplex on %s pad %u\n",
					i, "source", route->source_pad);
				goto out;
			}
		}

		if (remote_pads) {
			remote_pads[route->sink_pad] = route->source_pad;
			remote_pads[route->source_pad] = route->sink_pad;
		}

		for (j = i + 1; j < routing->num_routes; ++j) {
			const struct v4l2_subdev_route *r = &routing->routes[j];

			/*
			 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
			 * originate from the same (sink) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
			    route->sink_pad == r->sink_pad &&
			    route->sink_stream == r->sink_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u originate from same sink (%u/%u)\n",
					i, j, route->sink_pad,
					route->sink_stream);
				goto out;
			}

			/*
			 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
			 * at the same (source) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
			    route->source_pad == r->source_pad &&
			    route->source_stream == r->source_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u end at same source (%u/%u)\n",
					i, j, route->source_pad,
					route->source_stream);
				goto out;
			}
		}
	}

	ret = 0;

out:
	kfree(remote_pads);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
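
/*
 * Hypothetical usage sketch (illustration only): the restrictions are bit
 * flags and may be combined. A subdev that can neither split nor merge
 * streams, and that supports at most one stream per source pad, could
 * validate a routing table with:
 *
 *	ret = v4l2_subdev_routing_validate(sd, routing,
 *					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
 *					   V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING);
 */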

static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
					       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if (sd->enabled_streams & streams_mask) {
		dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Start streaming when the first streams are enabled. */
	if (!sd->enabled_streams) {
		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		if (ret)
			return ret;
	}

	sd->enabled_streams |= streams_mask;

	return 0;
}

int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
			       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fall back on .s_stream() if .enable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->enable_streams)
		return v4l2_subdev_enable_streams_fallback(sd, pad,
							   streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already enabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (cfg->enabled) {
			dev_dbg(dev, "stream %u already enabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask);

	/* Call the .enable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
			       streams_mask);
	if (ret) {
		dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	/* Mark the streams as enabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = true;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
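
/*
 * Hypothetical usage sketch (illustration only): a video device driver
 * typically calls this at stream-on time to start the subdev feeding it,
 * enabling only the streams it actually captures. remote_sd, remote_pad and
 * the single-stream mask BIT_ULL(0) are assumptions for this sketch.
 *
 *	ret = v4l2_subdev_enable_streams(remote_sd, remote_pad, BIT_ULL(0));
 *	if (ret)
 *		goto err_stop_queue;
 */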

static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
						u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if ((sd->enabled_streams & streams_mask) != streams_mask) {
		dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Stop streaming when the last streams are disabled. */
	if (!(sd->enabled_streams & ~streams_mask)) {
		ret = v4l2_subdev_call(sd, video, s_stream, 0);
		if (ret)
			return ret;
	}

	sd->enabled_streams &= ~streams_mask;

	return 0;
}

int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
				u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fall back on .s_stream() if .disable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->disable_streams)
		return v4l2_subdev_disable_streams_fallback(sd, pad,
							    streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already disabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (!cfg->enabled) {
			dev_dbg(dev, "stream %u already disabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask);

	/* Call the .disable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
			       streams_mask);
	if (ret) {
		dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	/* Mark the streams as disabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = false;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);
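
/*
 * Hypothetical usage sketch (illustration only): the matching stream-off
 * path (or the error path of stream-on) disables the same mask that was
 * previously enabled. remote_sd and remote_pad are assumed names.
 *
 *	v4l2_subdev_disable_streams(remote_sd, remote_pad, BIT_ULL(0));
 */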

int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev_route *route;
	struct media_pad *pad;
	u64 source_mask = 0;
	int pad_index = -1;

	/*
	 * Find the source pad. This helper is meant for subdevs that have a
	 * single source pad, so failures shouldn't happen, but catch them
	 * loudly nonetheless as they indicate a driver bug.
	 */
	media_entity_for_each_pad(&sd->entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			pad_index = pad->index;
			break;
		}
	}

	if (WARN_ON(pad_index == -1))
		return -EINVAL;

	/*
	 * As there's a single source pad, just collect all the source streams.
	 */
	state = v4l2_subdev_lock_and_get_active_state(sd);

	for_each_active_route(&state->routing, route)
		source_mask |= BIT_ULL(route->source_stream);

	v4l2_subdev_unlock_state(state);

	if (enable)
		return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
	else
		return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
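
/*
 * Hypothetical usage sketch (illustration only): a subdev driver with a
 * single source pad that implements .enable_streams() and .disable_streams()
 * can plug this helper in as its .s_stream() video operation instead of
 * writing its own. mydrv_video_ops, mydrv_pad_ops, mydrv_enable_streams and
 * mydrv_disable_streams are assumed names.
 *
 *	static const struct v4l2_subdev_video_ops mydrv_video_ops = {
 *		.s_stream = v4l2_subdev_s_stream_helper,
 *	};
 *
 *	static const struct v4l2_subdev_pad_ops mydrv_pad_ops = {
 *		.enable_streams = mydrv_enable_streams,
 *		.disable_streams = mydrv_disable_streams,
 *	};
 */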

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

#endif /* CONFIG_MEDIA_CONTROLLER */

void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
	INIT_LIST_HEAD(&sd->list);
	BUG_ON(!ops);
	sd->ops = ops;
	sd->v4l2_dev = NULL;
	sd->flags = 0;
	sd->name[0] = '\0';
	sd->grp_id = 0;
	sd->dev_priv = NULL;
	sd->host_priv = NULL;
	sd->privacy_led = NULL;
	INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
#if defined(CONFIG_MEDIA_CONTROLLER)
	sd->entity.name = sd->name;
	sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
	sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);
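
/*
 * Hypothetical usage sketch (illustration only): a typical probe() sequence
 * initializes the subdev with the driver's ops table and then fills in the
 * fields that v4l2_subdev_init() does not set. mydrv_subdev_ops and the
 * name format are assumptions for this sketch.
 *
 *	v4l2_subdev_init(sd, &mydrv_subdev_ops);
 *	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 *	snprintf(sd->name, sizeof(sd->name), "mydrv %s", dev_name(dev));
 */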

void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
			      const struct v4l2_event *ev)
{
	v4l2_event_queue(sd->devnode, ev);
	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
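
/*
 * Hypothetical usage sketch (illustration only): a sensor driver detecting a
 * resolution change can queue the event to userspace and notify the bridge
 * driver in a single call:
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_SOURCE_CHANGE,
 *		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *	};
 *
 *	v4l2_subdev_notify_event(sd, &ev);
 */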

int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	sd->privacy_led = led_get(sd->dev, "privacy-led");
	if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
		return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
				     "getting privacy LED\n");

	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_disable(sd->privacy_led);
		led_trigger_remove(sd->privacy_led);
		led_set_brightness(sd->privacy_led, 0);
		mutex_unlock(&sd->privacy_led->led_access);
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);

void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_enable(sd->privacy_led);
		mutex_unlock(&sd->privacy_led->led_access);
		led_put(sd->privacy_led);
	}
#endif
}
EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
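
/*
 * Hypothetical usage sketch (illustration only): the two privacy LED helpers
 * are meant to be paired, with the get in probe() and the put in remove()
 * or on the probe error path:
 *
 *	ret = v4l2_subdev_get_privacy_led(sd);
 *	if (ret)
 *		return ret;
 *
 *	v4l2_subdev_put_privacy_led(sd);
 */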