// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

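/*
 * Wrappers around the optional notifier callbacks: call .bound(), .unbind()
 * or .complete() only when the driver actually provided the corresponding op.
 */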
static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n, struct v4l2_subdev *subdev,
                                          struct v4l2_async_subdev *asd)
{
    if (!n->ops || !n->ops->bound) {
        return 0;
    }

    return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n, struct v4l2_subdev *subdev,
                                            struct v4l2_async_subdev *asd)
{
    if (!n->ops || !n->ops->unbind) {
        return;
    }

    n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
    if (!n->ops || !n->ops->complete) {
        return 0;
    }

    return n->ops->complete(n);
}

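/*
 * Match on I2C adapter number and client address; never matches when I2C
 * support is not enabled.
 */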
static bool match_i2c(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
    struct i2c_client *client = i2c_verify_client(sd->dev);

    return client && asd->match.i2c.adapter_id == client->adapter->nr && asd->match.i2c.address == client->addr;
#else
    return false;
#endif
}

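/* Match on the device name of the sub-device's struct device. */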
static bool match_devname(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
    return !strcmp(asd->match.device_name, dev_name(sd->dev));
}

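/*
 * Match on firmware node: either the two fwnodes match directly, or an
 * endpoint node on one side matches the parent device node on the other.
 */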
static bool match_fwnode(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
    struct fwnode_handle *other_fwnode;
    struct fwnode_handle *dev_fwnode;
    bool asd_fwnode_is_ep;
    bool sd_fwnode_is_ep;
    struct device *dev;

    /*
     * Both the subdev and the async subdev can provide either an endpoint
     * fwnode or a device fwnode. Start with the simple case of direct
     * fwnode matching.
     */
    if (sd->fwnode == asd->match.fwnode) {
        return true;
    }

    /*
     * Otherwise, check if the sd fwnode and the asd fwnode refer to an
     * endpoint or a device. If they're of the same type, there's no match.
     * Technically speaking this checks if the nodes refer to a connected
     * endpoint, which is the simplest check that works for both OF and
     * ACPI. This won't make a difference, as drivers should not try to
     * match unconnected endpoints.
     */
    sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd->fwnode);
    asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);
    if (sd_fwnode_is_ep == asd_fwnode_is_ep) {
        return false;
    }

    /*
     * The sd and asd fwnodes are of different types. Get the device fwnode
     * parent of the endpoint fwnode, and compare it with the other fwnode.
     */
    if (sd_fwnode_is_ep) {
        dev_fwnode = fwnode_graph_get_port_parent(sd->fwnode);
        other_fwnode = asd->match.fwnode;
    } else {
        dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
        other_fwnode = sd->fwnode;
    }

    fwnode_handle_put(dev_fwnode);

    if (dev_fwnode != other_fwnode) {
        return false;
    }

    /*
     * We have a heterogeneous match. Retrieve the struct device of the side
     * that matched on a device fwnode to print its driver name.
     */
    if (sd_fwnode_is_ep) {
        dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev : notifier->sd->dev;
    } else {
        dev = sd->dev;
    }

    if (dev && dev->driver) {
        if (sd_fwnode_is_ep) {
            dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n", dev->driver->name);
        }
        dev_notice(dev, "Consider updating driver %s to match on endpoints\n", dev->driver->name);
    }

    return true;
}

static bool match_custom(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
    if (!asd->match.custom.match) {
        /* Match always */
        return true;
    }

    return asd->match.custom.match(sd->dev, asd);
}

static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

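/*
 * Find the first descriptor on the notifier's waiting list that matches the
 * given sub-device, or NULL if none does.
 */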
static struct v4l2_async_subdev *v4l2_async_find_match(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd)
{
    bool (*match)(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
    struct v4l2_async_subdev *asd;

    list_for_each_entry(asd, &notifier->waiting, list)
    {
        /* bus_type has been verified valid before */
        switch (asd->match_type) {
            case V4L2_ASYNC_MATCH_CUSTOM:
                match = match_custom;
                break;
            case V4L2_ASYNC_MATCH_DEVNAME:
                match = match_devname;
                break;
            case V4L2_ASYNC_MATCH_I2C:
                match = match_i2c;
                break;
            case V4L2_ASYNC_MATCH_FWNODE:
                match = match_fwnode;
                break;
            default:
                /* Cannot happen, unless someone breaks us */
                WARN_ON(true);
                return NULL;
        }

        /* match cannot be NULL here */
        if (match(notifier, sd, asd)) {
            return asd;
        }
    }

    return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x, struct v4l2_async_subdev *asd_y)
{
    if (asd_x->match_type != asd_y->match_type) {
        return false;
    }

    switch (asd_x->match_type) {
        case V4L2_ASYNC_MATCH_DEVNAME:
            return strcmp(asd_x->match.device_name, asd_y->match.device_name) == 0;
        case V4L2_ASYNC_MATCH_I2C:
            return asd_x->match.i2c.adapter_id == asd_y->match.i2c.adapter_id &&
                   asd_x->match.i2c.address == asd_y->match.i2c.address;
        case V4L2_ASYNC_MATCH_FWNODE:
            return asd_x->match.fwnode == asd_y->match.fwnode;
        default:
            break;
    }

    return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
    struct v4l2_async_notifier *n;

    list_for_each_entry(n, &notifier_list, list) if (n->sd == sd) return n;

    return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
    while (notifier->parent) {
        notifier = notifier->parent;
    }

    return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
    struct v4l2_subdev *sd;

    if (!list_empty(&notifier->waiting)) {
        return false;
    }

    list_for_each_entry(sd, &notifier->done, async_list)
    {
        struct v4l2_async_notifier *subdev_notifier = v4l2_async_find_subdev_notifier(sd);

        if (subdev_notifier && !v4l2_async_notifier_can_complete(subdev_notifier)) {
            return false;
        }
    }

    return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
    /* Quick check whether there are still more sub-devices here. */
    if (!list_empty(&notifier->waiting)) {
        return 0;
    }

    /* Check the entire notifier tree; find the root notifier first. */
    while (notifier->parent) {
        notifier = notifier->parent;
    }

    /* This is root if it has v4l2_dev. */
    if (!notifier->v4l2_dev) {
        return 0;
    }

    /* Is everything ready? */
    if (!v4l2_async_notifier_can_complete(notifier)) {
        return 0;
    }

    return v4l2_async_notifier_call_complete(notifier);
}

static int v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);

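/*
 * A descriptor on the notifier's waiting list matched @sd: register the
 * sub-device with the v4l2_device, invoke the .bound() callback, move the
 * sub-device to the notifier's done list and, if the sub-device registered a
 * notifier of its own, try to bind its async sub-devices as well.
 */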
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier, struct v4l2_device *v4l2_dev,
                                   struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
    struct v4l2_async_notifier *subdev_notifier;
    int ret;

    ret = v4l2_device_register_subdev(v4l2_dev, sd);
    if (ret < 0) {
        return ret;
    }

    ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
    if (ret < 0) {
        v4l2_device_unregister_subdev(sd);
        return ret;
    }

    /* Remove from the waiting list */
    list_del(&asd->list);
    sd->asd = asd;
    sd->notifier = notifier;

    /* Move from the global subdevice list to notifier's done */
    list_move(&sd->async_list, &notifier->done);

    /*
     * See if the sub-device has a notifier. If not, return here.
     */
    subdev_notifier = v4l2_async_find_subdev_notifier(sd);
    if (!subdev_notifier || subdev_notifier->parent) {
        return 0;
    }

    /*
     * Proceed with checking for the sub-device notifier's async
     * sub-devices, and return the result. The error will be handled by the
     * caller.
     */
    subdev_notifier->parent = notifier;

    return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
    struct v4l2_device *v4l2_dev = v4l2_async_notifier_find_v4l2_dev(notifier);
    struct v4l2_subdev *sd;

    if (!v4l2_dev) {
        return 0;
    }

again:
    list_for_each_entry(sd, &subdev_list, async_list)
    {
        struct v4l2_async_subdev *asd;
        int ret;

        asd = v4l2_async_find_match(notifier, sd);
        if (!asd) {
            continue;
        }

        ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
        if (ret < 0) {
            return ret;
        }

        /*
         * v4l2_async_match_notify() may lead to registering a
         * new notifier and thus changing the async subdevs
         * list. In order to proceed safely from here, restart
         * parsing the list from the beginning.
         */
        goto again;
    }

    return 0;
}

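/* Unregister the sub-device and detach it from the async framework state. */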
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
    v4l2_device_unregister_subdev(sd);
    /*
     * Subdevice driver will reprobe and put the subdev back
     * onto the list
     */
    list_del_init(&sd->async_list);
    sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
    struct v4l2_subdev *sd, *tmp;

    list_for_each_entry_safe(sd, tmp, &notifier->done, async_list)
    {
        struct v4l2_async_notifier *subdev_notifier = v4l2_async_find_subdev_notifier(sd);

        if (subdev_notifier) {
            v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
        }

        v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
        v4l2_async_cleanup(sd);

        list_move(&sd->async_list, &subdev_list);
    }

    notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool v4l2_async_notifier_has_async_subdev_ext(struct v4l2_async_notifier *notifier,
                                                     struct v4l2_async_subdev *asd)
{
    struct v4l2_async_subdev *asd_y;
    struct v4l2_subdev *sd;

    list_for_each_entry(asd_y, &notifier->waiting, list) if (asd_equal(asd, asd_y)) return true;

    list_for_each_entry(sd, &notifier->done, async_list)
    {
        if (WARN_ON(!sd->asd)) {
            continue;
        }

        if (asd_equal(asd, sd->asd)) {
            return true;
        }
    }

    return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier, struct v4l2_async_subdev *asd,
                                                 int this_index)
{
    struct v4l2_async_subdev *asd_y;
    int j = 0;

    lockdep_assert_held(&list_lock);

    /* Check that an asd is not being added more than once. */
    list_for_each_entry(asd_y, &notifier->asd_list, asd_list)
    {
        if (this_index >= 0 && j++ >= this_index) {
            break;
        }
        if (asd_equal(asd, asd_y)) {
            return true;
        }
    }

    /* Check that an asd does not exist in other notifiers. */
    list_for_each_entry(notifier, &notifier_list,
                        list) if (v4l2_async_notifier_has_async_subdev_ext(notifier, asd)) return true;

    return false;
}

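/*
 * Validate an async sub-device descriptor before it is added: reject NULL
 * descriptors, unknown match types and duplicates already known to this or
 * any other notifier.
 */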
static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier, struct v4l2_async_subdev *asd,
                                         int this_index)
{
    struct device *dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

    if (!asd) {
        return -EINVAL;
    }

    switch (asd->match_type) {
        case V4L2_ASYNC_MATCH_CUSTOM:
        case V4L2_ASYNC_MATCH_DEVNAME:
        case V4L2_ASYNC_MATCH_I2C:
        case V4L2_ASYNC_MATCH_FWNODE:
            if (v4l2_async_notifier_has_async_subdev(notifier, asd, this_index)) {
                dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
                return -EEXIST;
            }
            break;
        default:
            dev_err(dev, "Invalid match type %u on %p\n", asd->match_type, asd);
            return -EINVAL;
    }

    return 0;
}

void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
    INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);

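/*
 * Common registration path shared by v4l2_async_notifier_register() and
 * v4l2_async_subdev_notifier_register(): validate the descriptors, try to
 * match already registered sub-devices and add the notifier to the global
 * list.
 */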
static int v4l2_async_notifier_register_ext(struct v4l2_async_notifier *notifier)
{
    struct v4l2_async_subdev *asd;
    int ret, i = 0;

    INIT_LIST_HEAD(&notifier->waiting);
    INIT_LIST_HEAD(&notifier->done);

    mutex_lock(&list_lock);

    list_for_each_entry(asd, &notifier->asd_list, asd_list)
    {
        ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
        if (ret) {
            goto err_unlock;
        }

        list_add_tail(&asd->list, &notifier->waiting);
    }

    ret = v4l2_async_notifier_try_all_subdevs(notifier);
    if (ret < 0) {
        goto err_unbind;
    }

    ret = v4l2_async_notifier_try_complete(notifier);
    if (ret < 0) {
        goto err_unbind;
    }

    /* Keep also completed notifiers on the list */
    list_add(&notifier->list, &notifier_list);

    mutex_unlock(&list_lock);

    return 0;

err_unbind:
    /*
     * On failure, unbind all sub-devices registered through this notifier.
     */
    v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
    mutex_unlock(&list_lock);

    return ret;
}

int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev, struct v4l2_async_notifier *notifier)
{
    int ret;

    if (WARN_ON(!v4l2_dev || notifier->sd)) {
        return -EINVAL;
    }

    notifier->v4l2_dev = v4l2_dev;

    ret = v4l2_async_notifier_register_ext(notifier);
    if (ret) {
        notifier->v4l2_dev = NULL;
    }

    return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

#if IS_ENABLED(CONFIG_NO_GKI)
static int v4l2_async_notifier_clr_unready_dev_ext(struct v4l2_async_notifier *notifier)
{
    struct v4l2_subdev *sd, *tmp;
    int clr_num = 0;

    list_for_each_entry_safe(sd, tmp, &notifier->done, async_list)
    {
        struct v4l2_async_notifier *subdev_notifier = v4l2_async_find_subdev_notifier(sd);

        if (subdev_notifier) {
            clr_num += v4l2_async_notifier_clr_unready_dev_ext(subdev_notifier);
        }
    }

    list_for_each_entry_safe(sd, tmp, &notifier->waiting, async_list)
    {
        list_del_init(&sd->async_list);
        sd->asd = NULL;
        sd->dev = NULL;
        clr_num++;
    }

    return clr_num;
}

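/*
 * Clear async sub-devices that never became ready from the root notifier's
 * tree and, if anything was cleared, try to complete the root notifier
 * anyway. Only built when CONFIG_NO_GKI is enabled.
 */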
int v4l2_async_notifier_clr_unready_dev(struct v4l2_async_notifier *notifier)
{
    int ret = 0;
    int clr_num = 0;

    mutex_lock(&list_lock);

    while (notifier->parent) {
        notifier = notifier->parent;
    }

    if (!notifier->v4l2_dev) {
        goto out;
    }

    clr_num = v4l2_async_notifier_clr_unready_dev_ext(notifier);
    dev_info(notifier->v4l2_dev->dev, "clear unready subdev num: %d\n", clr_num);

    if (clr_num > 0) {
        ret = v4l2_async_notifier_try_complete(notifier);
    }

out:
    mutex_unlock(&list_lock);

    return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_clr_unready_dev);
#endif

int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd, struct v4l2_async_notifier *notifier)
{
    int ret;

    if (WARN_ON(!sd || notifier->v4l2_dev)) {
        return -EINVAL;
    }

    notifier->sd = sd;

    ret = v4l2_async_notifier_register_ext(notifier);
    if (ret) {
        notifier->sd = NULL;
    }

    return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);

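/*
 * Tear down a registered notifier: unbind all of its sub-devices and remove
 * it from the global notifier list. Callers must hold list_lock.
 */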
static void v4l2_async_notifier_unregister_ext(struct v4l2_async_notifier *notifier)
{
    if (!notifier || (!notifier->v4l2_dev && !notifier->sd)) {
        return;
    }

    v4l2_async_notifier_unbind_all_subdevs(notifier);

    notifier->sd = NULL;
    notifier->v4l2_dev = NULL;

    list_del(&notifier->list);
}

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
    mutex_lock(&list_lock);

    v4l2_async_notifier_unregister_ext(notifier);

    mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

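/*
 * Release all async sub-device descriptors owned by the notifier, dropping
 * fwnode references where needed. Callers must hold list_lock.
 */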
static void v4l2_async_notifier_cleanup_ext(struct v4l2_async_notifier *notifier)
{
    struct v4l2_async_subdev *asd, *tmp;

    if (!notifier || !notifier->asd_list.next) {
        return;
    }

    list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list)
    {
        switch (asd->match_type) {
            case V4L2_ASYNC_MATCH_FWNODE:
                fwnode_handle_put(asd->match.fwnode);
                break;
            default:
                break;
        }

        list_del(&asd->asd_list);
        kfree(asd);
    }
}

void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
    mutex_lock(&list_lock);

    v4l2_async_notifier_cleanup_ext(notifier);

    mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);

int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier, struct v4l2_async_subdev *asd)
{
    int ret;

    mutex_lock(&list_lock);

    ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
    if (ret) {
        goto unlock;
    }

    list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
    mutex_unlock(&list_lock);
    return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_subdev);

struct v4l2_async_subdev *v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
                                                                struct fwnode_handle *fwnode,
                                                                unsigned int asd_struct_size)
{
    struct v4l2_async_subdev *asd;
    int ret;

    asd = kzalloc(asd_struct_size, GFP_KERNEL);
    if (!asd) {
        return ERR_PTR(-ENOMEM);
    }

    asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
    asd->match.fwnode = fwnode_handle_get(fwnode);

    ret = v4l2_async_notifier_add_subdev(notifier, asd);
    if (ret) {
        fwnode_handle_put(fwnode);
        kfree(asd);
        return ERR_PTR(ret);
    }

    return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);

struct v4l2_async_subdev *v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
                                                                       struct fwnode_handle *endpoint,
                                                                       unsigned int asd_struct_size)
{
    struct v4l2_async_subdev *asd;
    struct fwnode_handle *remote;

    remote = fwnode_graph_get_remote_port_parent(endpoint);
    if (!remote) {
        return ERR_PTR(-ENOTCONN);
    }

    asd = v4l2_async_notifier_add_fwnode_subdev(notif, remote, asd_struct_size);
    /*
     * Calling v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
     * so drop the one we got in fwnode_graph_get_remote_port_parent.
     */
    fwnode_handle_put(remote);
    return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_remote_subdev);

struct v4l2_async_subdev *v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier, int adapter_id,
                                                             unsigned short address, unsigned int asd_struct_size)
{
    struct v4l2_async_subdev *asd;
    int ret;

    asd = kzalloc(asd_struct_size, GFP_KERNEL);
    if (!asd) {
        return ERR_PTR(-ENOMEM);
    }

    asd->match_type = V4L2_ASYNC_MATCH_I2C;
    asd->match.i2c.adapter_id = adapter_id;
    asd->match.i2c.address = address;

    ret = v4l2_async_notifier_add_subdev(notifier, asd);
    if (ret) {
        kfree(asd);
        return ERR_PTR(ret);
    }

    return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_i2c_subdev);

struct v4l2_async_subdev *v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
                                                                 const char *device_name, unsigned int asd_struct_size)
{
    struct v4l2_async_subdev *asd;
    int ret;

    asd = kzalloc(asd_struct_size, GFP_KERNEL);
    if (!asd) {
        return ERR_PTR(-ENOMEM);
    }

    asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
    asd->match.device_name = device_name;

    ret = v4l2_async_notifier_add_subdev(notifier, asd);
    if (ret) {
        kfree(asd);
        return ERR_PTR(ret);
    }

    return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_devname_subdev);

int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
    struct v4l2_async_notifier *subdev_notifier;
    struct v4l2_async_notifier *notifier;
    int ret;

    /*
     * No reference taken. The reference is held by the device
     * (struct v4l2_subdev.dev), and async sub-device does not
     * exist independently of the device at any point of time.
     */
    if (!sd->fwnode && sd->dev) {
        sd->fwnode = dev_fwnode(sd->dev);
    }

    mutex_lock(&list_lock);

    INIT_LIST_HEAD(&sd->async_list);

    list_for_each_entry(notifier, &notifier_list, list)
    {
        struct v4l2_device *v4l2_dev = v4l2_async_notifier_find_v4l2_dev(notifier);
        struct v4l2_async_subdev *asd;

        if (!v4l2_dev) {
            continue;
        }

        asd = v4l2_async_find_match(notifier, sd);
        if (!asd) {
            continue;
        }

        ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
        if (ret) {
            goto err_unbind;
        }

        ret = v4l2_async_notifier_try_complete(notifier);
        if (ret) {
            goto err_unbind;
        }

        goto out_unlock;
    }

    /* None matched, wait for hot-plugging */
    list_add(&sd->async_list, &subdev_list);

out_unlock:
    mutex_unlock(&list_lock);

    return 0;

err_unbind:
    /*
     * Complete failed. Unbind the sub-devices bound through registering
     * this async sub-device.
     */
    subdev_notifier = v4l2_async_find_subdev_notifier(sd);
    if (subdev_notifier) {
        v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
    }

    if (sd->asd) {
        v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
    }
    v4l2_async_cleanup(sd);

    mutex_unlock(&list_lock);

    return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
    mutex_lock(&list_lock);

    v4l2_async_notifier_unregister_ext(sd->subdev_notifier);
    v4l2_async_notifier_cleanup_ext(sd->subdev_notifier);
    kfree(sd->subdev_notifier);
    sd->subdev_notifier = NULL;

    if (sd->asd) {
        struct v4l2_async_notifier *notifier = sd->notifier;

        list_add(&sd->asd->list, &notifier->waiting);

        v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
    }

    v4l2_async_cleanup(sd);

    mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);