• Home
  • Raw
  • Download

Lines matching references to `fh` (the struct v4l2_fh file-handle pointer) — cross-reference search results; apparently extracted from the V4L2 event handling code (likely drivers/media/v4l2-core/v4l2-event.c). Only matching lines are shown; intervening source lines are elided.

39 static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)  in __v4l2_event_dequeue()  argument
44 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in __v4l2_event_dequeue()
46 if (list_empty(&fh->available)) { in __v4l2_event_dequeue()
47 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in __v4l2_event_dequeue()
51 WARN_ON(fh->navailable == 0); in __v4l2_event_dequeue()
53 kev = list_first_entry(&fh->available, struct v4l2_kevent, list); in __v4l2_event_dequeue()
55 fh->navailable--; in __v4l2_event_dequeue()
57 kev->event.pending = fh->navailable; in __v4l2_event_dequeue()
62 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in __v4l2_event_dequeue()
67 int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, in v4l2_event_dequeue() argument
73 return __v4l2_event_dequeue(fh, event); in v4l2_event_dequeue()
76 if (fh->vdev->lock) in v4l2_event_dequeue()
77 mutex_unlock(fh->vdev->lock); in v4l2_event_dequeue()
80 ret = wait_event_interruptible(fh->wait, in v4l2_event_dequeue()
81 fh->navailable != 0); in v4l2_event_dequeue()
85 ret = __v4l2_event_dequeue(fh, event); in v4l2_event_dequeue()
88 if (fh->vdev->lock) in v4l2_event_dequeue()
89 mutex_lock(fh->vdev->lock); in v4l2_event_dequeue()
97 struct v4l2_fh *fh, u32 type, u32 id) in v4l2_event_subscribed() argument
101 assert_spin_locked(&fh->vdev->fh_lock); in v4l2_event_subscribed()
103 list_for_each_entry(sev, &fh->subscribed, list) in v4l2_event_subscribed()
110 static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev, in __v4l2_event_queue_fh() argument
118 sev = v4l2_event_subscribed(fh, ev->type, ev->id); in __v4l2_event_queue_fh()
131 fh->sequence++; in __v4l2_event_queue_fh()
140 fh->navailable--; in __v4l2_event_queue_fh()
160 kev->event.sequence = fh->sequence; in __v4l2_event_queue_fh()
162 list_add_tail(&kev->list, &fh->available); in __v4l2_event_queue_fh()
164 fh->navailable++; in __v4l2_event_queue_fh()
166 wake_up_all(&fh->wait); in __v4l2_event_queue_fh()
171 struct v4l2_fh *fh; in v4l2_event_queue() local
179 list_for_each_entry(fh, &vdev->fh_list, list) in v4l2_event_queue()
180 __v4l2_event_queue_fh(fh, ev, &timestamp); in v4l2_event_queue()
186 void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev) in v4l2_event_queue_fh() argument
193 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in v4l2_event_queue_fh()
194 __v4l2_event_queue_fh(fh, ev, &timestamp); in v4l2_event_queue_fh()
195 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in v4l2_event_queue_fh()
199 int v4l2_event_pending(struct v4l2_fh *fh) in v4l2_event_pending() argument
201 return fh->navailable; in v4l2_event_pending()
205 int v4l2_event_subscribe(struct v4l2_fh *fh, in v4l2_event_subscribe() argument
227 sev->fh = fh; in v4l2_event_subscribe()
230 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in v4l2_event_subscribe()
231 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); in v4l2_event_subscribe()
233 list_add(&sev->list, &fh->subscribed); in v4l2_event_subscribe()
234 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in v4l2_event_subscribe()
245 v4l2_event_unsubscribe(fh, sub); in v4l2_event_subscribe()
257 void v4l2_event_unsubscribe_all(struct v4l2_fh *fh) in v4l2_event_unsubscribe_all() argument
266 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in v4l2_event_unsubscribe_all()
267 if (!list_empty(&fh->subscribed)) { in v4l2_event_unsubscribe_all()
268 sev = list_first_entry(&fh->subscribed, in v4l2_event_unsubscribe_all()
273 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in v4l2_event_unsubscribe_all()
275 v4l2_event_unsubscribe(fh, &sub); in v4l2_event_unsubscribe_all()
280 int v4l2_event_unsubscribe(struct v4l2_fh *fh, in v4l2_event_unsubscribe() argument
288 v4l2_event_unsubscribe_all(fh); in v4l2_event_unsubscribe()
292 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in v4l2_event_unsubscribe()
294 sev = v4l2_event_subscribed(fh, sub->type, sub->id); in v4l2_event_unsubscribe()
299 fh->navailable--; in v4l2_event_unsubscribe()
304 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in v4l2_event_unsubscribe()
315 int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh, in v4l2_event_subdev_unsubscribe() argument
318 return v4l2_event_unsubscribe(fh, sub); in v4l2_event_subdev_unsubscribe()