/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

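/*
 * Each subscription stores its events in a ring buffer of sev->elems
 * entries. Map a logical index, counted from the oldest event
 * (sev->first), to its physical position in sev->events[].
 */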
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

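/*
 * Take the oldest pending event off the file handle's available list
 * and copy it to the caller. Returns -ENOENT if nothing is pending.
 */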
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

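/*
 * Dequeue an event, optionally blocking until one arrives. The video
 * device's serialization lock, if present, is dropped while sleeping so
 * other file operations can still make progress and queue events.
 */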
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

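/*
 * Queue an event on a single file handle. If the subscription's ring
 * buffer is full, the oldest event is dropped to make room; the
 * optional replace/merge ops let an event type fold the dropped
 * event's payload into a surviving event (see the
 * V4L2_EVENT_SOURCE_CHANGE ops at the bottom of this file).
 */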
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
				  const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

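/*
 * Queue an event on every file handle of @vdev that subscribed to it.
 * The timestamp is taken once, so all file handles see the same value.
 */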
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

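/* Queue an event on one specific file handle only. */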
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

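/* Number of pending events; drivers use this when implementing poll(). */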
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

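/*
 * Drop a subscription together with any of its events still sitting on
 * the available list. As asserted below, both fh->subscribe_lock and
 * fh->vdev->fh_lock must be held by the caller.
 */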
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

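/*
 * Subscribe a file handle to events of the given type and id. @elems
 * sets the depth of the per-subscription ring buffer (clamped to at
 * least 1); @ops is optional. Allocation and the ops->add() callback
 * happen outside fh_lock because they may sleep.
 *
 * Drivers typically call this from their vidioc_subscribe_event
 * handler; a minimal sketch (handler name and event choice are
 * illustrative, not part of this file):
 *
 *	static int my_subscribe_event(struct v4l2_fh *fh,
 *				      const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 0, NULL);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */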
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

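/*
 * Drop every remaining subscription on the file handle, e.g. when the
 * file handle is being released.
 */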
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					       struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

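/*
 * Remove a single subscription, or every subscription if @sub->type is
 * V4L2_EVENT_ALL. Unsubscribing from an event that was never
 * subscribed is not an error.
 */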
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);
	mutex_unlock(&fh->subscribe_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

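/*
 * V4L2_EVENT_SOURCE_CHANGE must not lose information when its ring
 * buffer overflows: the "changes" bitmask of a dropped event is OR'ed
 * into the event that replaces or follows it.
 */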
static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);