Lines Matching +full:fiq +full:- +full:device (fs/fuse/virtio_fs.c)
1 // SPDX-License-Identifier: GPL-2.0
3 * virtio-fs: Virtio Filesystem
27 /* List of virtio-fs device instances and a lock for the list. Also provides
28 * mutual exclusion in device removal and mounting path
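/* A minimal sketch of the globals this comment appears to describe, assumed
 * from the list/mutex usage in the fragments below (not copied verbatim
 * from the file):
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);	/* protected by virtio_fs_mutex */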
40 /* Per-virtqueue state */
43 struct virtqueue *vq; /* protected by ->lock */
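/* Hedged sketch of the per-virtqueue state used throughout this listing.
 * Every field below is inferred from its uses in the fragments (lock, vq,
 * fud, in_flight, in_flight_zero, done_work, dispatch_work, queued_reqs,
 * end_reqs, connected, name); the in-tree layout and annotations may differ.
 */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq;		/* protected by ->lock */
	struct work_struct done_work;
	struct list_head queued_reqs;	/* requeued on -ENOMEM/-ENOSPC */
	struct list_head end_reqs;	/* requests to be ended */
	struct delayed_work dispatch_work;
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* completed when in_flight hits 0 */
	char name[VQ_NAME_LEN];
};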
55 /* A virtio-fs device instance */
104 struct fuse_fs_context *ctx = fc->fs_private; in virtio_fs_parse_param()
113 ctx->dax = 1; in virtio_fs_parse_param()
116 return -EINVAL; in virtio_fs_parse_param()
124 struct fuse_fs_context *ctx = fc->fs_private; in virtio_fs_free_fc()
131 struct virtio_fs *fs = vq->vdev->priv; in vq_to_fsvq()
133 return &fs->vqs[vq->index]; in vq_to_fsvq()
138 return &vq_to_fsvq(vq)->fud->pq; in vq_to_fpq()
141 /* Should be called with fsvq->lock held. */
144 fsvq->in_flight++; in inc_in_flight_req()
147 /* Should be called with fsvq->lock held. */
150 WARN_ON(fsvq->in_flight <= 0); in dec_in_flight_req()
151 fsvq->in_flight--; in dec_in_flight_req()
152 if (!fsvq->in_flight) in dec_in_flight_req()
153 complete(&fsvq->in_flight_zero); in dec_in_flight_req()
160 kfree(vfs->vqs); in release_virtio_fs_obj()
167 kref_put(&fs->refcount, release_virtio_fs_obj); in virtio_fs_put()
170 static void virtio_fs_fiq_release(struct fuse_iqueue *fiq) in virtio_fs_fiq_release() argument
172 struct virtio_fs *vfs = fiq->priv; in virtio_fs_fiq_release()
181 WARN_ON(fsvq->in_flight < 0); in virtio_fs_drain_queue()
184 spin_lock(&fsvq->lock); in virtio_fs_drain_queue()
185 if (fsvq->in_flight) { in virtio_fs_drain_queue()
189 reinit_completion(&fsvq->in_flight_zero); in virtio_fs_drain_queue()
190 spin_unlock(&fsvq->lock); in virtio_fs_drain_queue()
191 wait_for_completion(&fsvq->in_flight_zero); in virtio_fs_drain_queue()
193 spin_unlock(&fsvq->lock); in virtio_fs_drain_queue()
196 flush_work(&fsvq->done_work); in virtio_fs_drain_queue()
197 flush_delayed_work(&fsvq->dispatch_work); in virtio_fs_drain_queue()
205 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_drain_all_queues_locked()
206 fsvq = &fs->vqs[i]; in virtio_fs_drain_all_queues_locked()
213 /* Provides mutual exclusion between ->remove and ->kill_sb in virtio_fs_drain_all_queues()
229 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_start_all_queues()
230 fsvq = &fs->vqs[i]; in virtio_fs_start_all_queues()
231 spin_lock(&fsvq->lock); in virtio_fs_start_all_queues()
232 fsvq->connected = true; in virtio_fs_start_all_queues()
233 spin_unlock(&fsvq->lock); in virtio_fs_start_all_queues()
237 /* Add a new instance to the list or return -EEXIST if tag name exists */
246 if (strcmp(fs->tag, fs2->tag) == 0) in virtio_fs_add_instance()
251 list_add_tail(&fs->list, &virtio_fs_instances); in virtio_fs_add_instance()
256 return -EEXIST; in virtio_fs_add_instance()
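/* Illustrative reconstruction of the add-instance path from the fragments
 * above: walk the global list under virtio_fs_mutex, refuse duplicate tags,
 * otherwise append. The in-tree control flow may differ (e.g. a duplicate
 * flag with a single unlock), hence the _sketch suffix.
 */
static int virtio_fs_add_instance_sketch(struct virtio_fs *fs)
{
	struct virtio_fs *fs2;

	mutex_lock(&virtio_fs_mutex);
	list_for_each_entry(fs2, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, fs2->tag) == 0) {
			mutex_unlock(&virtio_fs_mutex);
			return -EEXIST;
		}
	}
	list_add_tail(&fs->list, &virtio_fs_instances);
	mutex_unlock(&virtio_fs_mutex);
	return 0;
}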
268 if (strcmp(fs->tag, tag) == 0) { in virtio_fs_find_instance()
269 kref_get(&fs->refcount); in virtio_fs_find_instance()
286 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_free_devs()
287 struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_free_devs()
289 if (!fsvq->fud) in virtio_fs_free_devs()
292 fuse_dev_free(fsvq->fud); in virtio_fs_free_devs()
293 fsvq->fud = NULL; in virtio_fs_free_devs()
297 /* Read filesystem name from virtio config into fs->tag (devm-allocated, freed with the device). */
308 return -EINVAL; /* empty tag */ in virtio_fs_read_tag()
312 len = end - tag_buf; in virtio_fs_read_tag()
313 fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL); in virtio_fs_read_tag()
314 if (!fs->tag) in virtio_fs_read_tag()
315 return -ENOMEM; in virtio_fs_read_tag()
316 memcpy(fs->tag, tag_buf, len); in virtio_fs_read_tag()
317 fs->tag[len] = '\0'; in virtio_fs_read_tag()
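	/* Hedged sketch of the config-space read that precedes the fragments
	 * above; virtio_cread_bytes() and struct virtio_fs_config are the
	 * standard virtio interfaces, and tag_buf is the local staging buffer
	 * the length handling implies ('end' points at the first NUL, or one
	 * past the array when the tag fills the whole field).
	 */
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));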
326 struct virtqueue *vq = fsvq->vq; in virtio_fs_hiprio_done_work()
329 spin_lock(&fsvq->lock); in virtio_fs_hiprio_done_work()
341 spin_unlock(&fsvq->lock); in virtio_fs_hiprio_done_work()
351 pr_debug("virtio-fs: worker %s called.\n", __func__); in virtio_fs_request_dispatch_work()
353 spin_lock(&fsvq->lock); in virtio_fs_request_dispatch_work()
354 req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req, in virtio_fs_request_dispatch_work()
357 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
361 list_del_init(&req->list); in virtio_fs_request_dispatch_work()
362 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
368 spin_lock(&fsvq->lock); in virtio_fs_request_dispatch_work()
369 req = list_first_entry_or_null(&fsvq->queued_reqs, in virtio_fs_request_dispatch_work()
372 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
375 list_del_init(&req->list); in virtio_fs_request_dispatch_work()
376 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
380 if (ret == -ENOMEM || ret == -ENOSPC) { in virtio_fs_request_dispatch_work()
381 spin_lock(&fsvq->lock); in virtio_fs_request_dispatch_work()
382 list_add_tail(&req->list, &fsvq->queued_reqs); in virtio_fs_request_dispatch_work()
383 schedule_delayed_work(&fsvq->dispatch_work, in virtio_fs_request_dispatch_work()
385 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
388 req->out.h.error = ret; in virtio_fs_request_dispatch_work()
389 spin_lock(&fsvq->lock); in virtio_fs_request_dispatch_work()
391 spin_unlock(&fsvq->lock); in virtio_fs_request_dispatch_work()
392 pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", in virtio_fs_request_dispatch_work()
411 struct virtio_fs_forget_req *req = &forget->req; in send_forget_request()
413 spin_lock(&fsvq->lock); in send_forget_request()
414 if (!fsvq->connected) { in send_forget_request()
422 vq = fsvq->vq; in send_forget_request()
423 dev_dbg(&vq->vdev->dev, "%s\n", __func__); in send_forget_request()
427 if (ret == -ENOMEM || ret == -ENOSPC) { in send_forget_request()
428 pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n", in send_forget_request()
430 list_add_tail(&forget->list, &fsvq->queued_reqs); in send_forget_request()
431 schedule_delayed_work(&fsvq->dispatch_work, in send_forget_request()
438 pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n", in send_forget_request()
450 spin_unlock(&fsvq->lock); in send_forget_request()
456 spin_unlock(&fsvq->lock); in send_forget_request()
465 pr_debug("virtio-fs: worker %s called.\n", __func__); in virtio_fs_hiprio_dispatch_work()
467 spin_lock(&fsvq->lock); in virtio_fs_hiprio_dispatch_work()
468 forget = list_first_entry_or_null(&fsvq->queued_reqs, in virtio_fs_hiprio_dispatch_work()
471 spin_unlock(&fsvq->lock); in virtio_fs_hiprio_dispatch_work()
475 list_del(&forget->list); in virtio_fs_hiprio_dispatch_work()
476 spin_unlock(&fsvq->lock); in virtio_fs_hiprio_dispatch_work()
482 /* Allocate and copy args into req->argbuf */
485 struct fuse_args *args = req->args; in copy_args_to_argbuf()
492 num_in = args->in_numargs - args->in_pages; in copy_args_to_argbuf()
493 num_out = args->out_numargs - args->out_pages; in copy_args_to_argbuf()
494 len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) + in copy_args_to_argbuf()
495 fuse_len_args(num_out, args->out_args); in copy_args_to_argbuf()
497 req->argbuf = kmalloc(len, GFP_ATOMIC); in copy_args_to_argbuf()
498 if (!req->argbuf) in copy_args_to_argbuf()
499 return -ENOMEM; in copy_args_to_argbuf()
502 memcpy(req->argbuf + offset, in copy_args_to_argbuf()
503 args->in_args[i].value, in copy_args_to_argbuf()
504 args->in_args[i].size); in copy_args_to_argbuf()
505 offset += args->in_args[i].size; in copy_args_to_argbuf()
511 /* Copy args out of and free req->argbuf */
520 remaining = req->out.h.len - sizeof(req->out.h); in copy_args_from_argbuf()
521 num_in = args->in_numargs - args->in_pages; in copy_args_from_argbuf()
522 num_out = args->out_numargs - args->out_pages; in copy_args_from_argbuf()
523 offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args); in copy_args_from_argbuf()
526 unsigned int argsize = args->out_args[i].size; in copy_args_from_argbuf()
528 if (args->out_argvar && in copy_args_from_argbuf()
529 i == args->out_numargs - 1 && in copy_args_from_argbuf()
534 memcpy(args->out_args[i].value, req->argbuf + offset, argsize); in copy_args_from_argbuf()
537 if (i != args->out_numargs - 1) in copy_args_from_argbuf()
538 remaining -= argsize; in copy_args_from_argbuf()
541 /* Store the actual size of the variable-length arg */ in copy_args_from_argbuf()
542 if (args->out_argvar) in copy_args_from_argbuf()
543 args->out_args[args->out_numargs - 1].size = remaining; in copy_args_from_argbuf()
545 kfree(req->argbuf); in copy_args_from_argbuf()
546 req->argbuf = NULL; in copy_args_from_argbuf()
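/*
 * Illustrative layout of req->argbuf implied by the two helpers above: in
 * args are packed first, out args follow, and the out offset is recomputed
 * on completion with fuse_len_args():
 *
 *   argbuf: | in_args[0] | ... | in_args[num_in-1] | out_args[0] | ...
 *           ^ offset 0                             ^ offset = fuse_len_args(num_in, in_args)
 */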
553 struct fuse_pqueue *fpq = &fsvq->fud->pq; in virtio_fs_request_complete()
563 args = req->args; in virtio_fs_request_complete()
566 if (args->out_pages && args->page_zeroing) { in virtio_fs_request_complete()
567 len = args->out_args[args->out_numargs - 1].size; in virtio_fs_request_complete()
569 for (i = 0; i < ap->num_pages; i++) { in virtio_fs_request_complete()
570 thislen = ap->descs[i].length; in virtio_fs_request_complete()
572 WARN_ON(ap->descs[i].offset); in virtio_fs_request_complete()
573 page = ap->pages[i]; in virtio_fs_request_complete()
577 len -= thislen; in virtio_fs_request_complete()
582 spin_lock(&fpq->lock); in virtio_fs_request_complete()
583 clear_bit(FR_SENT, &req->flags); in virtio_fs_request_complete()
584 spin_unlock(&fpq->lock); in virtio_fs_request_complete()
587 spin_lock(&fsvq->lock); in virtio_fs_request_complete()
589 spin_unlock(&fsvq->lock); in virtio_fs_request_complete()
597 virtio_fs_request_complete(w->req, w->fsvq); in virtio_fs_complete_req_work()
605 struct fuse_pqueue *fpq = &fsvq->fud->pq; in virtio_fs_requests_done_work()
606 struct virtqueue *vq = fsvq->vq; in virtio_fs_requests_done_work()
613 spin_lock(&fsvq->lock); in virtio_fs_requests_done_work()
618 spin_lock(&fpq->lock); in virtio_fs_requests_done_work()
619 list_move_tail(&req->list, &reqs); in virtio_fs_requests_done_work()
620 spin_unlock(&fpq->lock); in virtio_fs_requests_done_work()
623 spin_unlock(&fsvq->lock); in virtio_fs_requests_done_work()
627 list_del_init(&req->list); in virtio_fs_requests_done_work()
630 if (req->args->may_block) { in virtio_fs_requests_done_work()
634 INIT_WORK(&w->done_work, virtio_fs_complete_req_work); in virtio_fs_requests_done_work()
635 w->fsvq = fsvq; in virtio_fs_requests_done_work()
636 w->req = req; in virtio_fs_requests_done_work()
637 schedule_work(&w->done_work); in virtio_fs_requests_done_work()
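/* Hedged sketch of the deferred-completion carrier used just above; the
 * fields follow w->req / w->fsvq / w->done_work in the fragments, though
 * the in-tree definition may differ.
 */
struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};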
649 dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name); in virtio_fs_vq_done()
651 schedule_work(&fsvq->done_work); in virtio_fs_vq_done()
657 strncpy(fsvq->name, name, VQ_NAME_LEN); in virtio_fs_init_vq()
658 spin_lock_init(&fsvq->lock); in virtio_fs_init_vq()
659 INIT_LIST_HEAD(&fsvq->queued_reqs); in virtio_fs_init_vq()
660 INIT_LIST_HEAD(&fsvq->end_reqs); in virtio_fs_init_vq()
661 init_completion(&fsvq->in_flight_zero); in virtio_fs_init_vq()
664 INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work); in virtio_fs_init_vq()
665 INIT_DELAYED_WORK(&fsvq->dispatch_work, in virtio_fs_init_vq()
668 INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work); in virtio_fs_init_vq()
669 INIT_DELAYED_WORK(&fsvq->dispatch_work, in virtio_fs_init_vq()
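/* The queue indices used below (VQ_HIPRIO for forgets, VQ_REQUEST onward
 * for the request queues) are presumably defined by a small enum like this:
 */
enum {
	VQ_HIPRIO,
	VQ_REQUEST
};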
685 &fs->num_request_queues); in virtio_fs_setup_vqs()
686 if (fs->num_request_queues == 0) in virtio_fs_setup_vqs()
687 return -EINVAL; in virtio_fs_setup_vqs()
689 fs->nvqs = VQ_REQUEST + fs->num_request_queues; in virtio_fs_setup_vqs()
690 fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
691 if (!fs->vqs) in virtio_fs_setup_vqs()
692 return -ENOMEM; in virtio_fs_setup_vqs()
694 vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
695 callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]), in virtio_fs_setup_vqs()
697 names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
699 ret = -ENOMEM; in virtio_fs_setup_vqs()
705 virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO); in virtio_fs_setup_vqs()
706 names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name; in virtio_fs_setup_vqs()
709 for (i = VQ_REQUEST; i < fs->nvqs; i++) { in virtio_fs_setup_vqs()
712 snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST); in virtio_fs_setup_vqs()
713 virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST); in virtio_fs_setup_vqs()
715 names[i] = fs->vqs[i].name; in virtio_fs_setup_vqs()
718 ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL); in virtio_fs_setup_vqs()
722 for (i = 0; i < fs->nvqs; i++) in virtio_fs_setup_vqs()
723 fs->vqs[i].vq = vqs[i]; in virtio_fs_setup_vqs()
731 kfree(fs->vqs); in virtio_fs_setup_vqs()
735 /* Free virtqueues (device must already be reset) */
739 vdev->config->del_vqs(vdev); in virtio_fs_cleanup_vqs()
751 size_t max_nr_pages = fs->window_len/PAGE_SIZE - pgoff; in virtio_fs_direct_access()
754 *kaddr = fs->window_kaddr + offset; in virtio_fs_direct_access()
756 *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, in virtio_fs_direct_access()
817 dev_notice(&vdev->dev, "%s: No cache capability\n", __func__); in virtio_fs_setup_dax()
821 if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len, in virtio_fs_setup_dax()
822 dev_name(&vdev->dev))) { in virtio_fs_setup_dax()
823 dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n", in virtio_fs_setup_dax()
825 return -EBUSY; in virtio_fs_setup_dax()
828 dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len, in virtio_fs_setup_dax()
831 pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL); in virtio_fs_setup_dax()
833 return -ENOMEM; in virtio_fs_setup_dax()
835 pgmap->type = MEMORY_DEVICE_FS_DAX; in virtio_fs_setup_dax()
842 pgmap->range = (struct range) { in virtio_fs_setup_dax()
844 .end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1, in virtio_fs_setup_dax()
846 pgmap->nr_range = 1; in virtio_fs_setup_dax()
848 fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap); in virtio_fs_setup_dax()
849 if (IS_ERR(fs->window_kaddr)) in virtio_fs_setup_dax()
850 return PTR_ERR(fs->window_kaddr); in virtio_fs_setup_dax()
852 fs->window_phys_addr = (phys_addr_t) cache_reg.addr; in virtio_fs_setup_dax()
853 fs->window_len = (phys_addr_t) cache_reg.len; in virtio_fs_setup_dax()
855 dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n", in virtio_fs_setup_dax()
856 __func__, fs->window_kaddr, cache_reg.addr, cache_reg.len); in virtio_fs_setup_dax()
858 fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0); in virtio_fs_setup_dax()
859 if (IS_ERR(fs->dax_dev)) in virtio_fs_setup_dax()
860 return PTR_ERR(fs->dax_dev); in virtio_fs_setup_dax()
862 return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax, in virtio_fs_setup_dax()
863 fs->dax_dev); in virtio_fs_setup_dax()
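/* Hedged sketch of the ops table handed to alloc_dax() above: it at least
 * wires up virtio_fs_direct_access() from the earlier fragments; kernels of
 * this vintage also provide copy_from_iter/copy_to_iter/zero_page_range
 * callbacks, omitted here.
 */
static const struct dax_operations virtio_fs_dax_ops = {
	.direct_access = virtio_fs_direct_access,
};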
873 return -ENOMEM; in virtio_fs_probe()
874 kref_init(&fs->refcount); in virtio_fs_probe()
875 vdev->priv = fs; in virtio_fs_probe()
891 /* Bring the device online in case the filesystem is mounted and in virtio_fs_probe()
903 vdev->config->reset(vdev); in virtio_fs_probe()
905 kfree(fs->vqs); in virtio_fs_probe()
908 vdev->priv = NULL; in virtio_fs_probe()
918 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_stop_all_queues()
919 fsvq = &fs->vqs[i]; in virtio_fs_stop_all_queues()
920 spin_lock(&fsvq->lock); in virtio_fs_stop_all_queues()
921 fsvq->connected = false; in virtio_fs_stop_all_queues()
922 spin_unlock(&fsvq->lock); in virtio_fs_stop_all_queues()
928 struct virtio_fs *fs = vdev->priv; in virtio_fs_remove()
931 /* This device is going away. No one should get a new reference */ in virtio_fs_remove()
932 list_del_init(&fs->list); in virtio_fs_remove()
935 vdev->config->reset(vdev); in virtio_fs_remove()
938 vdev->priv = NULL; in virtio_fs_remove()
939 /* Put device reference on virtio_fs object */ in virtio_fs_remove()
948 pr_warn("virtio-fs: suspend/resume not yet supported\n"); in virtio_fs_freeze()
949 return -EOPNOTSUPP; in virtio_fs_freeze()
980 static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq) in virtio_fs_wake_forget_and_unlock() argument
981 __releases(fiq->lock) in virtio_fs_wake_forget_and_unlock()
990 link = fuse_dequeue_forget(fiq, 1, NULL); in virtio_fs_wake_forget_and_unlock()
991 unique = fuse_get_unique(fiq); in virtio_fs_wake_forget_and_unlock()
993 fs = fiq->priv; in virtio_fs_wake_forget_and_unlock()
994 fsvq = &fs->vqs[VQ_HIPRIO]; in virtio_fs_wake_forget_and_unlock()
995 spin_unlock(&fiq->lock); in virtio_fs_wake_forget_and_unlock()
999 req = &forget->req; in virtio_fs_wake_forget_and_unlock()
1001 req->ih = (struct fuse_in_header){ in virtio_fs_wake_forget_and_unlock()
1003 .nodeid = link->forget_one.nodeid, in virtio_fs_wake_forget_and_unlock()
1007 req->arg = (struct fuse_forget_in){ in virtio_fs_wake_forget_and_unlock()
1008 .nlookup = link->forget_one.nlookup, in virtio_fs_wake_forget_and_unlock()
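/* Hedged sketch of the forget carrier built above; the layout follows
 * req->ih, req->arg and forget->list in the fragments, and the exact
 * in-tree definition may differ.
 */
struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	struct list_head list;	/* parked on fsvq->queued_reqs when the vq is full */
	struct virtio_fs_forget_req req;
};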
1015 static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq) in virtio_fs_wake_interrupt_and_unlock() argument
1016 __releases(fiq->lock) in virtio_fs_wake_interrupt_and_unlock()
1025 spin_unlock(&fiq->lock); in virtio_fs_wake_interrupt_and_unlock()
1028 /* Count number of scatter-gather elements required */
1038 total_len -= this_len; in sg_count_fuse_pages()
1044 /* Return the number of scatter-gather list elements required */
1047 struct fuse_args *args = req->args; in sg_count_fuse_req()
1051 if (args->in_numargs - args->in_pages) in sg_count_fuse_req()
1054 if (args->in_pages) { in sg_count_fuse_req()
1055 size = args->in_args[args->in_numargs - 1].size; in sg_count_fuse_req()
1056 total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages, in sg_count_fuse_req()
1060 if (!test_bit(FR_ISREPLY, &req->flags)) in sg_count_fuse_req()
1065 if (args->out_numargs - args->out_pages) in sg_count_fuse_req()
1068 if (args->out_pages) { in sg_count_fuse_req()
1069 size = args->out_args[args->out_numargs - 1].size; in sg_count_fuse_req()
1070 total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages, in sg_count_fuse_req()
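/*
 * Worked example of the counting above (illustrative): a FUSE_READ with an
 * N-page reply needs 1 sg for the in header, 1 for the packed in args,
 * 1 for the out header and N for the reply pages, so sg_count_fuse_req()
 * returns 3 + N for such a request.
 */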
1077 /* Add pages to scatter-gather list and return number of elements used */
1091 total_len -= this_len; in sg_init_fuse_pages()
1097 /* Add args to scatter-gather list and return number of elements used */
1106 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args); in sg_init_fuse_args()
1110 len = fuse_len_args(numargs - argpages, args); in sg_init_fuse_args()
1116 ap->pages, ap->descs, in sg_init_fuse_args()
1117 ap->num_pages, in sg_init_fuse_args()
1118 args[numargs - 1].size); in sg_init_fuse_args()
1126 /* Add a request to a virtqueue and kick the device */
1136 struct fuse_args *args = req->args; in virtio_fs_enqueue_req()
1152 ret = -ENOMEM; in virtio_fs_enqueue_req()
1163 sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h)); in virtio_fs_enqueue_req()
1165 (struct fuse_arg *)args->in_args, in virtio_fs_enqueue_req()
1166 args->in_numargs, args->in_pages, in virtio_fs_enqueue_req()
1167 req->argbuf, &argbuf_used); in virtio_fs_enqueue_req()
1170 if (test_bit(FR_ISREPLY, &req->flags)) { in virtio_fs_enqueue_req()
1172 &req->out.h, sizeof(req->out.h)); in virtio_fs_enqueue_req()
1174 args->out_args, args->out_numargs, in virtio_fs_enqueue_req()
1175 args->out_pages, in virtio_fs_enqueue_req()
1176 req->argbuf + argbuf_used, NULL); in virtio_fs_enqueue_req()
1184 spin_lock(&fsvq->lock); in virtio_fs_enqueue_req()
1186 if (!fsvq->connected) { in virtio_fs_enqueue_req()
1187 spin_unlock(&fsvq->lock); in virtio_fs_enqueue_req()
1188 ret = -ENOTCONN; in virtio_fs_enqueue_req()
1192 vq = fsvq->vq; in virtio_fs_enqueue_req()
1195 spin_unlock(&fsvq->lock); in virtio_fs_enqueue_req()
1200 fpq = &fsvq->fud->pq; in virtio_fs_enqueue_req()
1201 spin_lock(&fpq->lock); in virtio_fs_enqueue_req()
1202 list_add_tail(&req->list, fpq->processing); in virtio_fs_enqueue_req()
1203 spin_unlock(&fpq->lock); in virtio_fs_enqueue_req()
1204 set_bit(FR_SENT, &req->flags); in virtio_fs_enqueue_req()
1212 spin_unlock(&fsvq->lock); in virtio_fs_enqueue_req()
1218 if (ret < 0 && req->argbuf) { in virtio_fs_enqueue_req()
1219 kfree(req->argbuf); in virtio_fs_enqueue_req()
1220 req->argbuf = NULL; in virtio_fs_enqueue_req()
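	/* The lines elided between the scatterlist setup and the bookkeeping
	 * above presumably boil down to the standard virtio submit-and-kick
	 * sequence; a hedged sketch using names from the fragments
	 * (sgs, out_sgs, in_sgs, vq, req):
	 */
	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
	/* ... error handling as in the callers shown above ... */
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);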
1230 static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq) in virtio_fs_wake_pending_and_unlock() argument
1231 __releases(fiq->lock) in virtio_fs_wake_pending_and_unlock()
1239 WARN_ON(list_empty(&fiq->pending)); in virtio_fs_wake_pending_and_unlock()
1240 req = list_last_entry(&fiq->pending, struct fuse_req, list); in virtio_fs_wake_pending_and_unlock()
1241 clear_bit(FR_PENDING, &req->flags); in virtio_fs_wake_pending_and_unlock()
1242 list_del_init(&req->list); in virtio_fs_wake_pending_and_unlock()
1243 WARN_ON(!list_empty(&fiq->pending)); in virtio_fs_wake_pending_and_unlock()
1244 spin_unlock(&fiq->lock); in virtio_fs_wake_pending_and_unlock()
1246 fs = fiq->priv; in virtio_fs_wake_pending_and_unlock()
1249 __func__, req->in.h.opcode, req->in.h.unique, in virtio_fs_wake_pending_and_unlock()
1250 req->in.h.nodeid, req->in.h.len, in virtio_fs_wake_pending_and_unlock()
1251 fuse_len_args(req->args->out_numargs, req->args->out_args)); in virtio_fs_wake_pending_and_unlock()
1253 fsvq = &fs->vqs[queue_id]; in virtio_fs_wake_pending_and_unlock()
1256 if (ret == -ENOMEM || ret == -ENOSPC) { in virtio_fs_wake_pending_and_unlock()
1259 * context as we might be holding fc->bg_lock. in virtio_fs_wake_pending_and_unlock()
1261 spin_lock(&fsvq->lock); in virtio_fs_wake_pending_and_unlock()
1262 list_add_tail(&req->list, &fsvq->queued_reqs); in virtio_fs_wake_pending_and_unlock()
1264 schedule_delayed_work(&fsvq->dispatch_work, in virtio_fs_wake_pending_and_unlock()
1266 spin_unlock(&fsvq->lock); in virtio_fs_wake_pending_and_unlock()
1269 req->out.h.error = ret; in virtio_fs_wake_pending_and_unlock()
1270 pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret); in virtio_fs_wake_pending_and_unlock()
1273 spin_lock(&fsvq->lock); in virtio_fs_wake_pending_and_unlock()
1274 list_add_tail(&req->list, &fsvq->end_reqs); in virtio_fs_wake_pending_and_unlock()
1275 schedule_delayed_work(&fsvq->dispatch_work, 0); in virtio_fs_wake_pending_and_unlock()
1276 spin_unlock(&fsvq->lock); in virtio_fs_wake_pending_and_unlock()
1290 ctx->rootmode = S_IFDIR; in virtio_fs_ctx_set_defaults()
1291 ctx->default_permissions = 1; in virtio_fs_ctx_set_defaults()
1292 ctx->allow_other = 1; in virtio_fs_ctx_set_defaults()
1293 ctx->max_read = UINT_MAX; in virtio_fs_ctx_set_defaults()
1294 ctx->blksize = 512; in virtio_fs_ctx_set_defaults()
1295 ctx->destroy = true; in virtio_fs_ctx_set_defaults()
1296 ctx->no_control = true; in virtio_fs_ctx_set_defaults()
1297 ctx->no_force_umount = true; in virtio_fs_ctx_set_defaults()
1303 struct fuse_conn *fc = fm->fc; in virtio_fs_fill_super()
1304 struct virtio_fs *fs = fc->iq.priv; in virtio_fs_fill_super()
1305 struct fuse_fs_context *ctx = fsc->fs_private; in virtio_fs_fill_super()
1312 /* After holding mutex, make sure virtiofs device is still there. in virtio_fs_fill_super()
1313 * Though we are holding a reference to it, driver ->remove might in virtio_fs_fill_super()
1316 err = -EINVAL; in virtio_fs_fill_super()
1317 if (list_empty(&fs->list)) { in virtio_fs_fill_super()
1318 pr_info("virtio-fs: tag <%s> not found\n", fs->tag); in virtio_fs_fill_super()
1322 err = -ENOMEM; in virtio_fs_fill_super()
1324 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_fill_super()
1325 struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_fill_super()
1327 fsvq->fud = fuse_dev_alloc(); in virtio_fs_fill_super()
1328 if (!fsvq->fud) in virtio_fs_fill_super()
1333 ctx->fudptr = NULL; in virtio_fs_fill_super()
1334 if (ctx->dax) { in virtio_fs_fill_super()
1335 if (!fs->dax_dev) { in virtio_fs_fill_super()
1336 err = -EINVAL; in virtio_fs_fill_super()
1337 pr_err("virtio-fs: dax can't be enabled as filesystem" in virtio_fs_fill_super()
1338 " device does not support it.\n"); in virtio_fs_fill_super()
1341 ctx->dax_dev = fs->dax_dev; in virtio_fs_fill_super()
1347 for (i = 0; i < fs->nvqs; i++) { in virtio_fs_fill_super()
1348 struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_fill_super()
1350 fuse_dev_install(fsvq->fud, fc); in virtio_fs_fill_super()
1368 struct fuse_conn *fc = fm->fc; in virtio_fs_conn_destroy()
1369 struct virtio_fs *vfs = fc->iq.priv; in virtio_fs_conn_destroy()
1370 struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO]; in virtio_fs_conn_destroy()
1379 spin_lock(&fsvq->lock); in virtio_fs_conn_destroy()
1380 fsvq->connected = false; in virtio_fs_conn_destroy()
1381 spin_unlock(&fsvq->lock); in virtio_fs_conn_destroy()
1402 if (sb->s_root) { in virtio_kill_sb()
1413 struct fuse_mount *fsc_fm = fsc->s_fs_info; in virtio_fs_test_super()
1416 return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv; in virtio_fs_test_super()
1424 err = get_anon_bdev(&sb->s_dev); in virtio_fs_set_super()
1426 fuse_mount_get(fsc->s_fs_info); in virtio_fs_set_super()
1438 int err = -EIO; in virtio_fs_get_tree()
1441 * in fc->iq->priv. Once fuse_conn is going away, it calls ->put() in virtio_fs_get_tree()
1444 fs = virtio_fs_find_instance(fsc->source); in virtio_fs_get_tree()
1446 pr_info("virtio-fs: tag <%s> not found\n", fsc->source); in virtio_fs_get_tree()
1447 return -EINVAL; in virtio_fs_get_tree()
1450 virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq); in virtio_fs_get_tree()
1454 err = -ENOMEM; in virtio_fs_get_tree()
1463 fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs); in virtio_fs_get_tree()
1464 fc->release = fuse_free_conn; in virtio_fs_get_tree()
1465 fc->delete_stale = true; in virtio_fs_get_tree()
1466 fc->auto_submounts = true; in virtio_fs_get_tree()
1469 fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit, in virtio_fs_get_tree()
1470 virtqueue_size - FUSE_HEADER_OVERHEAD); in virtio_fs_get_tree()
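	/* FUSE_HEADER_OVERHEAD above reserves descriptor slots for the parts
	 * of a request other than its data pages (in header, packed in args,
	 * out header, packed out args), so it is presumably a small constant
	 * along the lines of:
	 *
	 *	#define FUSE_HEADER_OVERHEAD	4
	 */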
1472 fsc->s_fs_info = fm; in virtio_fs_get_tree()
1478 if (!sb->s_root) { in virtio_fs_get_tree()
1482 sb->s_fs_info = NULL; in virtio_fs_get_tree()
1487 sb->s_flags |= SB_ACTIVE; in virtio_fs_get_tree()
1490 WARN_ON(fsc->root); in virtio_fs_get_tree()
1491 fsc->root = dget(sb->s_root); in virtio_fs_get_tree()
1514 return -ENOMEM; in virtio_fs_init_fs_context()
1515 fsc->fs_private = ctx; in virtio_fs_init_fs_context()
1516 fsc->ops = &virtio_fs_context_ops; in virtio_fs_init_fs_context()