
Lines Matching refs:fiq

196 u64 fuse_get_unique(struct fuse_iqueue *fiq)  in fuse_get_unique()  argument
198 fiq->reqctr += FUSE_REQ_ID_STEP; in fuse_get_unique()
199 return fiq->reqctr; in fuse_get_unique()
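
The two matches above are the whole body of fuse_get_unique(): the input queue's request counter is bumped by FUSE_REQ_ID_STEP and returned, always with fiq->lock held by the caller (note the spin_lock()/fuse_get_unique() pairs at lines 275-276 and 428-433, and in the forget readers further down). A minimal user-space model of that counter, under the assumption that FUSE_REQ_ID_STEP is 2 as defined in fuse_i.h, which keeps request IDs even and leaves bit 0 free for tagging interrupt replies:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumption: FUSE_REQ_ID_STEP == 2 (fuse_i.h), so IDs are always even. */
    #define FUSE_REQ_ID_STEP 2ULL

    struct iqueue_model {
        uint64_t reqctr;                /* models fiq->reqctr */
    };

    /* The caller is expected to hold the queue lock, as in the kernel. */
    static uint64_t model_get_unique(struct iqueue_model *iq)
    {
        iq->reqctr += FUSE_REQ_ID_STEP;
        return iq->reqctr;
    }

    int main(void)
    {
        struct iqueue_model iq = { 0 };
        for (int i = 0; i < 3; i++)     /* prints 2, 4, 6 */
            printf("unique = %llu\n", (unsigned long long)model_get_unique(&iq));
        return 0;
    }
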
211 static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq, bool sync) in fuse_dev_wake_and_unlock() argument
212 __releases(fiq->lock) in fuse_dev_wake_and_unlock()
215 wake_up_sync(&fiq->waitq); in fuse_dev_wake_and_unlock()
217 wake_up(&fiq->waitq); in fuse_dev_wake_and_unlock()
218 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); in fuse_dev_wake_and_unlock()
219 spin_unlock(&fiq->lock); in fuse_dev_wake_and_unlock()
229 static void queue_request_and_unlock(struct fuse_iqueue *fiq, in queue_request_and_unlock() argument
231 __releases(fiq->lock) in queue_request_and_unlock()
236 list_add_tail(&req->list, &fiq->pending); in queue_request_and_unlock()
237 fiq->ops->wake_pending_and_unlock(fiq, sync); in queue_request_and_unlock()
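
queue_request_and_unlock() is entered with fiq->lock held (its callers at lines 275, 428 and 611 take the lock first): it appends the request to fiq->pending and hands off to the transport's wake_pending_and_unlock callback, which for the /dev/fuse transport is fuse_dev_wake_and_unlock() above: wake a reader (synchronously or not), raise SIGIO via kill_fasync(), and drop the lock. A rough user-space sketch of that "append under the lock, wake and unlock in one step" shape, with pthreads standing in for the spinlock and wait queue; the names are illustrative, not the kernel API:

    #include <pthread.h>
    #include <stddef.h>

    struct model_req { struct model_req *next; };

    struct model_iq {
        pthread_mutex_t   lock;          /* fiq->lock */
        pthread_cond_t    waitq;         /* fiq->waitq */
        int               connected;     /* fiq->connected */
        struct model_req *pending;       /* fiq->pending (head) */
        struct model_req *pending_tail;
    };

    /* Entered with iq->lock held and returns with it released; the kernel
     * annotates this contract as __releases(fiq->lock). */
    static void model_wake_pending_and_unlock(struct model_iq *iq)
    {
        pthread_cond_signal(&iq->waitq);     /* wake_up()/wake_up_sync(&fiq->waitq) */
        pthread_mutex_unlock(&iq->lock);     /* spin_unlock(&fiq->lock) */
    }

    /* Also entered with iq->lock held. */
    static void model_queue_request_and_unlock(struct model_iq *iq,
                                               struct model_req *req)
    {
        req->next = NULL;                    /* list_add_tail(&req->list, &fiq->pending) */
        if (iq->pending_tail)
            iq->pending_tail->next = req;
        else
            iq->pending = req;
        iq->pending_tail = req;
        model_wake_pending_and_unlock(iq);   /* fiq->ops->wake_pending_and_unlock() */
    }

    /* Caller side, mirroring __fuse_request_send(): lock, bail out if the
     * connection is gone (the kernel fails the request with -ENOTCONN),
     * otherwise queue and unlock in one step. */
    static int model_send(struct model_iq *iq, struct model_req *req)
    {
        pthread_mutex_lock(&iq->lock);
        if (!iq->connected) {
            pthread_mutex_unlock(&iq->lock);
            return -1;
        }
        model_queue_request_and_unlock(iq, req);
        return 0;
    }

    int main(void)
    {
        struct model_iq iq = {
            .lock      = PTHREAD_MUTEX_INITIALIZER,
            .waitq     = PTHREAD_COND_INITIALIZER,
            .connected = 1,
        };
        struct model_req req = { NULL };
        return model_send(&iq, &req) ? 1 : 0;
    }
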
243 struct fuse_iqueue *fiq = &fc->iq; in fuse_queue_forget() local
253 spin_lock(&fiq->lock); in fuse_queue_forget()
254 if (fiq->connected) { in fuse_queue_forget()
255 fiq->forget_list_tail->next = forget; in fuse_queue_forget()
256 fiq->forget_list_tail = forget; in fuse_queue_forget()
257 fiq->ops->wake_forget_and_unlock(fiq, false); in fuse_queue_forget()
260 spin_unlock(&fiq->lock); in fuse_queue_forget()
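
fuse_queue_forget() shows the forget queue's shape: unlike fiq->pending it is not a list_head list but a NULL-terminated singly linked chain of fuse_forget_link nodes, appended at fiq->forget_list_tail while fiq->lock is held, and only while the queue is still connected (line 260 is the disconnected branch, which discards the forget and unlocks). A small stand-alone model of that structure, with illustrative names:

    #include <stddef.h>

    /* Models struct fuse_forget_link: a one-pointer chain (payload omitted). */
    struct forget_link {
        struct forget_link *next;
    };

    /* Models the forget side of struct fuse_iqueue. */
    struct forget_queue {
        struct forget_link  head;    /* fiq->forget_list_head (dummy node) */
        struct forget_link *tail;    /* fiq->forget_list_tail */
    };

    static void forget_queue_init(struct forget_queue *q)
    {
        q->head.next = NULL;
        q->tail = &q->head;          /* empty: tail points at the dummy head */
    }

    /* Same test as forget_pending() at line 1048 below. */
    static int forget_queue_pending(const struct forget_queue *q)
    {
        return q->head.next != NULL;
    }

    /* Append with the queue lock held, as fuse_queue_forget() does. */
    static void forget_queue_add(struct forget_queue *q, struct forget_link *forget)
    {
        forget->next = NULL;         /* a fresh link is already NULL-terminated */
        q->tail->next = forget;      /* fiq->forget_list_tail->next = forget */
        q->tail = forget;            /* fiq->forget_list_tail = forget */
    }
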
266 struct fuse_iqueue *fiq = &fc->iq; in flush_bg_queue() local
275 spin_lock(&fiq->lock); in flush_bg_queue()
276 req->in.h.unique = fuse_get_unique(fiq); in flush_bg_queue()
277 queue_request_and_unlock(fiq, req, false); in flush_bg_queue()
293 struct fuse_iqueue *fiq = &fc->iq; in fuse_request_end() local
304 spin_lock(&fiq->lock); in fuse_request_end()
306 spin_unlock(&fiq->lock); in fuse_request_end()
349 struct fuse_iqueue *fiq = &req->fm->fc->iq; in queue_interrupt() local
351 spin_lock(&fiq->lock); in queue_interrupt()
354 spin_unlock(&fiq->lock); in queue_interrupt()
359 list_add_tail(&req->intr_entry, &fiq->interrupts); in queue_interrupt()
367 spin_unlock(&fiq->lock); in queue_interrupt()
370 fiq->ops->wake_interrupt_and_unlock(fiq, false); in queue_interrupt()
372 spin_unlock(&fiq->lock); in queue_interrupt()
380 struct fuse_iqueue *fiq = &fc->iq; in request_wait_answer() local
404 spin_lock(&fiq->lock); in request_wait_answer()
408 spin_unlock(&fiq->lock); in request_wait_answer()
413 spin_unlock(&fiq->lock); in request_wait_answer()
425 struct fuse_iqueue *fiq = &req->fm->fc->iq; in __fuse_request_send() local
428 spin_lock(&fiq->lock); in __fuse_request_send()
429 if (!fiq->connected) { in __fuse_request_send()
430 spin_unlock(&fiq->lock); in __fuse_request_send()
433 req->in.h.unique = fuse_get_unique(fiq); in __fuse_request_send()
437 queue_request_and_unlock(fiq, req, true); in __fuse_request_send()
599 struct fuse_iqueue *fiq = &fm->fc->iq; in fuse_simple_notify_reply() local
611 spin_lock(&fiq->lock); in fuse_simple_notify_reply()
612 if (fiq->connected) { in fuse_simple_notify_reply()
613 queue_request_and_unlock(fiq, req, false); in fuse_simple_notify_reply()
616 spin_unlock(&fiq->lock); in fuse_simple_notify_reply()
1046 static int forget_pending(struct fuse_iqueue *fiq) in forget_pending() argument
1048 return fiq->forget_list_head.next != NULL; in forget_pending()
1051 static int request_pending(struct fuse_iqueue *fiq) in request_pending() argument
1053 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) || in request_pending()
1054 forget_pending(fiq); in request_pending()
1065 static int fuse_read_interrupt(struct fuse_iqueue *fiq, in fuse_read_interrupt() argument
1068 __releases(fiq->lock) in fuse_read_interrupt()
1083 spin_unlock(&fiq->lock); in fuse_read_interrupt()
1095 struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, in fuse_dequeue_forget() argument
1099 struct fuse_forget_link *head = fiq->forget_list_head.next; in fuse_dequeue_forget()
1106 fiq->forget_list_head.next = *newhead; in fuse_dequeue_forget()
1108 if (fiq->forget_list_head.next == NULL) in fuse_dequeue_forget()
1109 fiq->forget_list_tail = &fiq->forget_list_head; in fuse_dequeue_forget()
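
fuse_dequeue_forget() detaches up to max links from the head of that chain and, once the chain is empty, points forget_list_tail back at the dummy forget_list_head node so forget_pending() (line 1048) reads as false again. Continuing the forget_queue model sketched after fuse_queue_forget() above (so this fragment is not stand-alone):

    /* Detach up to 'max' links from the head of the chain; '*countp', if
     * non-NULL, receives how many were taken. Queue lock held by the caller. */
    static struct forget_link *forget_queue_dequeue(struct forget_queue *q,
                                                    unsigned int max,
                                                    unsigned int *countp)
    {
        struct forget_link *head = q->head.next;
        struct forget_link **newhead = &head;
        unsigned int count;

        for (count = 0; *newhead != NULL && count < max; count++)
            newhead = &(*newhead)->next;

        q->head.next = *newhead;     /* fiq->forget_list_head.next = *newhead */
        *newhead = NULL;             /* terminate the detached sublist */
        if (q->head.next == NULL)
            q->tail = &q->head;      /* chain is empty again: reset the tail */

        if (countp)
            *countp = count;
        return head;
    }
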
1118 static int fuse_read_single_forget(struct fuse_iqueue *fiq, in fuse_read_single_forget() argument
1121 __releases(fiq->lock) in fuse_read_single_forget()
1124 struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL); in fuse_read_single_forget()
1131 .unique = fuse_get_unique(fiq), in fuse_read_single_forget()
1135 spin_unlock(&fiq->lock); in fuse_read_single_forget()
1151 static int fuse_read_batch_forget(struct fuse_iqueue *fiq, in fuse_read_batch_forget() argument
1153 __releases(fiq->lock) in fuse_read_batch_forget()
1162 .unique = fuse_get_unique(fiq), in fuse_read_batch_forget()
1167 spin_unlock(&fiq->lock); in fuse_read_batch_forget()
1172 head = fuse_dequeue_forget(fiq, max_forgets, &count); in fuse_read_batch_forget()
1173 spin_unlock(&fiq->lock); in fuse_read_batch_forget()
1200 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, in fuse_read_forget() argument
1203 __releases(fiq->lock) in fuse_read_forget()
1205 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) in fuse_read_forget()
1206 return fuse_read_single_forget(fiq, cs, nbytes); in fuse_read_forget()
1208 return fuse_read_batch_forget(fiq, cs, nbytes); in fuse_read_forget()
1225 struct fuse_iqueue *fiq = &fc->iq; in fuse_dev_do_read() local
1252 spin_lock(&fiq->lock); in fuse_dev_do_read()
1253 if (!fiq->connected || request_pending(fiq)) in fuse_dev_do_read()
1255 spin_unlock(&fiq->lock); in fuse_dev_do_read()
1259 err = wait_event_interruptible_exclusive(fiq->waitq, in fuse_dev_do_read()
1260 !fiq->connected || request_pending(fiq)); in fuse_dev_do_read()
1265 if (!fiq->connected) { in fuse_dev_do_read()
1270 if (!list_empty(&fiq->interrupts)) { in fuse_dev_do_read()
1271 req = list_entry(fiq->interrupts.next, struct fuse_req, in fuse_dev_do_read()
1273 return fuse_read_interrupt(fiq, cs, nbytes, req); in fuse_dev_do_read()
1276 if (forget_pending(fiq)) { in fuse_dev_do_read()
1277 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0) in fuse_dev_do_read()
1278 return fuse_read_forget(fc, fiq, cs, nbytes); in fuse_dev_do_read()
1280 if (fiq->forget_batch <= -8) in fuse_dev_do_read()
1281 fiq->forget_batch = 16; in fuse_dev_do_read()
1284 req = list_entry(fiq->pending.next, struct fuse_req, list); in fuse_dev_do_read()
1287 spin_unlock(&fiq->lock); in fuse_dev_do_read()
1354 spin_unlock(&fiq->lock); in fuse_dev_do_read()
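
The fuse_dev_do_read() matches show the dispatch order on the read side: sleep on fiq->waitq until the queue is dead or request_pending() holds, then serve fiq->interrupts first, then queued forgets, then ordinary fiq->pending requests, with fiq->forget_batch interleaving the last two (roughly 16 forgets, then up to 8 regular requests, when both stay backed up) so neither side starves. A compact, purely illustrative restatement of that priority logic:

    /* Dispatch order of fuse_dev_do_read(), restated over plain flags;
     * 'forget_batch' plays the role of fiq->forget_batch. */
    enum read_kind { READ_INTERRUPT, READ_FORGET, READ_REQUEST, READ_NOTHING };

    enum read_kind pick_next(int have_interrupt, int have_forget,
                             int have_request, int *forget_batch)
    {
        if (have_interrupt)
            return READ_INTERRUPT;          /* fiq->interrupts go first */
        if (have_forget) {
            if (!have_request || (*forget_batch)-- > 0)
                return READ_FORGET;         /* prefer forgets while the batch lasts */
            if (*forget_batch <= -8)
                *forget_batch = 16;         /* after ~8 requests, restart the batch */
        }
        if (have_request)
            return READ_REQUEST;            /* head of fiq->pending */
        return READ_NOTHING;                /* caller blocks on fiq->waitq */
    }
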
2097 struct fuse_iqueue *fiq; in fuse_dev_poll() local
2103 fiq = &fud->fc->iq; in fuse_dev_poll()
2104 poll_wait(file, &fiq->waitq, wait); in fuse_dev_poll()
2106 spin_lock(&fiq->lock); in fuse_dev_poll()
2107 if (!fiq->connected) in fuse_dev_poll()
2109 else if (request_pending(fiq)) in fuse_dev_poll()
2111 spin_unlock(&fiq->lock); in fuse_dev_poll()
2164 struct fuse_iqueue *fiq = &fc->iq; in fuse_abort_conn() local
2206 spin_lock(&fiq->lock); in fuse_abort_conn()
2207 fiq->connected = 0; in fuse_abort_conn()
2208 list_for_each_entry(req, &fiq->pending, list) in fuse_abort_conn()
2210 list_splice_tail_init(&fiq->pending, &to_end); in fuse_abort_conn()
2211 while (forget_pending(fiq)) in fuse_abort_conn()
2212 kfree(fuse_dequeue_forget(fiq, 1, NULL)); in fuse_abort_conn()
2213 wake_up_all(&fiq->waitq); in fuse_abort_conn()
2214 spin_unlock(&fiq->lock); in fuse_abort_conn()
2215 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); in fuse_abort_conn()
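
fuse_abort_conn() tears the input queue down under fiq->lock: mark it disconnected, walk the still-pending requests (the body of that loop does not mention fiq, so it is not listed here), splice fiq->pending onto a local list to be ended outside the lock, free every queued forget, wake all readers, and finally send SIGIO after unlocking. The same sequence over the user-space models above, reusing model_iq from the queue_request sketch and forget_queue from the forget sketches, so again not stand-alone:

    #include <stdlib.h>   /* free(); plus the model_iq / forget_queue sketches above */

    static void model_abort(struct model_iq *iq, struct forget_queue *fq,
                            struct model_req **to_end)
    {
        pthread_mutex_lock(&iq->lock);
        iq->connected = 0;                       /* fiq->connected = 0 */

        /* Hand unread requests to the caller, to be ended outside the lock. */
        *to_end = iq->pending;                   /* list_splice_tail_init(&fiq->pending, &to_end) */
        iq->pending = NULL;
        iq->pending_tail = NULL;

        while (forget_queue_pending(fq))         /* while (forget_pending(fiq)) */
            free(forget_queue_dequeue(fq, 1, NULL));  /* kfree(fuse_dequeue_forget(...)) */

        pthread_cond_broadcast(&iq->waitq);      /* wake_up_all(&fiq->waitq) */
        pthread_mutex_unlock(&iq->lock);
        /* kill_fasync(&fiq->fasync, SIGIO, POLL_IN) follows the unlock in the
         * kernel; it has no analogue in this user-space model. */
    }
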