// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */
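
/*
 * Rough usage sketch (not lifted from any particular driver; the bridge-side
 * names below are placeholders): a bridge driver builds a notifier from
 * firmware-described connections and registers it, while sub-device drivers
 * announce themselves independently. The matching code in this file binds the
 * two sides and invokes the notifier's bound/unbind/complete operations.
 *
 *	v4l2_async_notifier_init(&priv->notifier);
 *	asd = __v4l2_async_notifier_add_fwnode_remote_subdev(&priv->notifier,
 *							      ep, sizeof(*asd));
 *	priv->notifier.ops = &my_notifier_ops;
 *	ret = v4l2_async_notifier_register(&priv->v4l2_dev, &priv->notifier);
 *
 * Sub-device drivers call v4l2_async_register_subdev() (typically from their
 * probe function) to make themselves available for matching.
 */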

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

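/*
 * Wrappers around the notifier operations; each callback is optional, so a
 * missing op is treated as success (or as a no-op for unbind).
 */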
static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
					  struct v4l2_subdev *subdev,
					  struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
					    struct v4l2_subdev *subdev,
					    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

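/* Match a sub-device against an I2C descriptor: adapter number and address. */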
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

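/*
 * Match a sub-device against a fwnode descriptor, accepting both direct
 * fwnode matches and endpoint-to-device ("heterogeneous") matches.
 */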
static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd->fwnode == asd->match.fwnode)
		return true;

	/*
	 * Check the same situation for any possible secondary assigned to the
	 * subdev's fwnode
	 */
	if (!IS_ERR_OR_NULL(sd->fwnode->secondary) &&
	    sd->fwnode->secondary == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd->fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd->fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd->fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}

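/*
 * Global bookkeeping: sub-devices not yet bound to a notifier, all registered
 * notifiers, and the lock protecting both lists.
 */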
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

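/*
 * Find the first descriptor on the notifier's waiting list that matches the
 * given sub-device, or NULL if none does.
 */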
static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_notifier_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_notifier_can_complete(notifier))
		return 0;

	return v4l2_async_notifier_call_complete(notifier);
}

static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);

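/*
 * Bind a matched sub-device: register it with the v4l2_device, invoke the
 * notifier's bound op, move the descriptor off the waiting list and the
 * sub-device onto the done list, then recurse into the sub-device's own
 * notifier, if it registered one.
 */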
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

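/* Undo the binding of a single sub-device. */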
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				       struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd,
				     int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
			return true;

	return false;
}

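/*
 * Validate an async sub-device descriptor: the match type must be known and
 * the descriptor must not already be present in this or any other notifier.
 */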
static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_subdev *asd,
					 int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_notifier_has_async_subdev(notifier, asd,
							 this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);

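/*
 * Common part of notifier registration: validate the descriptors, put them on
 * the waiting list, try to match already-registered sub-devices and, if
 * possible, complete the notifier.
 */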
static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
					struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);

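/* Tear down a registered notifier. Called with list_lock held. */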
static void
__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

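/*
 * Release every descriptor added to the notifier, dropping the fwnode
 * references taken when they were added. Called with list_lock held.
 */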
static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);

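/*
 * Add a pre-initialized descriptor to the notifier, after checking that an
 * equivalent descriptor is not already known to this or any other notifier.
 */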
int __v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_subdev);

struct v4l2_async_subdev *
__v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
					struct fwnode_handle *fwnode,
					unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = __v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_subdev);

struct v4l2_async_subdev *
__v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
					       struct fwnode_handle *endpoint,
					       unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_port_parent(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = __v4l2_async_notifier_add_fwnode_subdev(notif, remote,
						      asd_struct_size);
	/*
	 * Calling __v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
	 * so drop the one we got in fwnode_graph_get_remote_port_parent.
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_remote_subdev);

struct v4l2_async_subdev *
__v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
				     int adapter_id, unsigned short address,
				     unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = __v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_i2c_subdev);

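/*
 * Register a sub-device for async matching: try every registered notifier
 * and, if none matches, park the sub-device on the global list until a
 * matching notifier shows up.
 */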
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_notifier_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_notifier_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

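/*
 * Undo v4l2_async_register_subdev(): tear down the sub-device's own notifier
 * (if any), return its descriptor to the owning notifier's waiting list and
 * unbind it.
 */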
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(sd->subdev_notifier);
	__v4l2_async_notifier_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);

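/* debugfs: describe one descriptor still waiting for a match. */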
static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

static const char *
v4l2_async_notifier_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_notifier_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_LICENSE("GPL");