Lines Matching +full:sda +full:- +full:open +full:- +full:drain

1 // SPDX-License-Identifier: GPL-2.0-only
3 * fs/kernfs/file.c - kernfs file implementation
5 * Copyright (c) 2001-3 Patrick Mochel
19 #include "kernfs-internal.h"
22 * There's one kernfs_open_file for each open file and one kernfs_open_node
23 * for each kernfs_node with one or more open files.
25 * kernfs_node->attr.open points to kernfs_open_node. attr.open is
28 * filp->private_data points to seq_file whose ->private points to
30 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
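
Taken together, the elided comment above describes this pointer chain (a condensed restatement, not the verbatim kernel comment):

	file->private_data  ->  struct seq_file
	seq_file->private   ->  struct kernfs_open_file   (one per open file)
	open_file->kn       ->  struct kernfs_node
	kn->attr.open       ->  struct kernfs_open_node   (one per node with openers)
	open_node->files    ->  list linking every kernfs_open_file of the node,
	                        protected by kernfs_open_file_mutex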
57 return ((struct seq_file *)file->private_data)->private; in kernfs_of()
66 if (kn->flags & KERNFS_LOCKDEP) in kernfs_ops()
68 return kn->attr.ops; in kernfs_ops()
78 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
82 * operations which may return ERR_PTR(-ENODEV) too. kernfs_seq_stop()
83 * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
85 * should be performed or not only on ERR_PTR(-ENODEV).
89 * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
90 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
95 struct kernfs_open_file *of = sf->private; in kernfs_seq_stop_active()
96 const struct kernfs_ops *ops = kernfs_ops(of->kn); in kernfs_seq_stop_active()
98 if (ops->seq_stop) in kernfs_seq_stop_active()
99 ops->seq_stop(sf, v); in kernfs_seq_stop_active()
100 kernfs_put_active(of->kn); in kernfs_seq_stop_active()
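
The ERR_PTR(-ENODEV) convention above matters to consumers that supply their own seq_file iterators through kernfs_ops. A hedged sketch of such a consumer follows; foo_list, foo_entry, and their locking are hypothetical, while the seq_list_* helpers and the kernfs_ops hooks are real kernel APIs. The custom ops only need their own locking, because kernfs_seq_start()/kernfs_seq_stop() below take @of->mutex and an active reference around them.

#include <linux/kernfs.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

struct foo_entry {			/* hypothetical list element */
	struct list_head node;
	int val;
};

struct foo_list {			/* hypothetical object behind kn->priv */
	struct mutex lock;
	struct list_head entries;
};

static void *foo_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	struct foo_list *fl = of->kn->priv;

	mutex_lock(&fl->lock);
	return seq_list_start(&fl->entries, *ppos);
}

static void *foo_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	struct foo_list *fl = of->kn->priv;

	return seq_list_next(v, &fl->entries, ppos);
}

static void foo_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	struct foo_list *fl = of->kn->priv;

	/* not reached when kernfs_seq_start() itself failed with -ENODEV */
	mutex_unlock(&fl->lock);
}

static int foo_entry_show(struct seq_file *sf, void *v)
{
	struct foo_entry *e = list_entry(v, struct foo_entry, node);

	seq_printf(sf, "%d\n", e->val);
	return 0;
}

static const struct kernfs_ops foo_iter_ops = {
	.seq_start = foo_seq_start,
	.seq_next  = foo_seq_next,
	.seq_stop  = foo_seq_stop,
	.seq_show  = foo_entry_show,
};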
105 struct kernfs_open_file *of = sf->private; in kernfs_seq_start()
109 * @of->mutex nests outside active ref and is primarily to ensure that in kernfs_seq_start()
110 * the ops aren't called concurrently for the same open file. in kernfs_seq_start()
112 mutex_lock(&of->mutex); in kernfs_seq_start()
113 if (!kernfs_get_active(of->kn)) in kernfs_seq_start()
114 return ERR_PTR(-ENODEV); in kernfs_seq_start()
116 ops = kernfs_ops(of->kn); in kernfs_seq_start()
117 if (ops->seq_start) { in kernfs_seq_start()
118 void *next = ops->seq_start(sf, ppos); in kernfs_seq_start()
120 if (next == ERR_PTR(-ENODEV)) in kernfs_seq_start()
134 struct kernfs_open_file *of = sf->private; in kernfs_seq_next()
135 const struct kernfs_ops *ops = kernfs_ops(of->kn); in kernfs_seq_next()
137 if (ops->seq_next) { in kernfs_seq_next()
138 void *next = ops->seq_next(sf, v, ppos); in kernfs_seq_next()
140 if (next == ERR_PTR(-ENODEV)) in kernfs_seq_next()
155 struct kernfs_open_file *of = sf->private; in kernfs_seq_stop()
157 if (v != ERR_PTR(-ENODEV)) in kernfs_seq_stop()
159 mutex_unlock(&of->mutex); in kernfs_seq_stop()
164 struct kernfs_open_file *of = sf->private; in kernfs_seq_show()
166 of->event = atomic_read(&of->kn->attr.open->event); in kernfs_seq_show()
168 return of->kn->attr.ops->seq_show(sf, v); in kernfs_seq_show()
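
For the common case, a consumer provides only ->seq_show and lets the wrappers above drive the seq_file machinery. A minimal, hypothetical sketch (the node's ->priv is assumed to hold a NUL-terminated string):

#include <linux/kernfs.h>
#include <linux/seq_file.h>

static int foo_label_show(struct seq_file *sf, void *v)
{
	/* sf->private is the kernfs_open_file, wired up in kernfs_fop_open() */
	struct kernfs_open_file *of = sf->private;

	seq_printf(sf, "%s\n", (const char *)of->kn->priv);
	return 0;
}

static const struct kernfs_ops foo_label_ops = {
	.seq_show = foo_label_show,
};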
179 * As reading a bin file can have side-effects, the exact offset and bytes
186 struct kernfs_open_file *of = kernfs_of(iocb->ki_filp); in kernfs_file_read_iter()
191 buf = of->prealloc_buf; in kernfs_file_read_iter()
193 mutex_lock(&of->prealloc_mutex); in kernfs_file_read_iter()
197 return -ENOMEM; in kernfs_file_read_iter()
200 * @of->mutex nests outside active ref and is used to ensure that in kernfs_file_read_iter()
201 * the ops aren't called concurrently for the same open file. in kernfs_file_read_iter()
203 mutex_lock(&of->mutex); in kernfs_file_read_iter()
204 if (!kernfs_get_active(of->kn)) { in kernfs_file_read_iter()
205 len = -ENODEV; in kernfs_file_read_iter()
206 mutex_unlock(&of->mutex); in kernfs_file_read_iter()
210 of->event = atomic_read(&of->kn->attr.open->event); in kernfs_file_read_iter()
211 ops = kernfs_ops(of->kn); in kernfs_file_read_iter()
212 if (ops->read) in kernfs_file_read_iter()
213 len = ops->read(of, buf, len, iocb->ki_pos); in kernfs_file_read_iter()
215 len = -EINVAL; in kernfs_file_read_iter()
217 kernfs_put_active(of->kn); in kernfs_file_read_iter()
218 mutex_unlock(&of->mutex); in kernfs_file_read_iter()
224 len = -EFAULT; in kernfs_file_read_iter()
228 iocb->ki_pos += len; in kernfs_file_read_iter()
231 if (buf == of->prealloc_buf) in kernfs_file_read_iter()
232 mutex_unlock(&of->prealloc_mutex); in kernfs_file_read_iter()
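
Attributes that cannot be rendered through seq_file implement ->read instead and receive the kernel-side buffer shown above. A hedged sketch (struct foo_blob is hypothetical); setting .prealloc makes kernfs reuse of->prealloc_buf instead of allocating per call and, as noted later in kernfs_fop_open(), .prealloc cannot be combined with ->seq_show:

#include <linux/kernel.h>
#include <linux/kernfs.h>
#include <linux/string.h>

struct foo_blob {			/* hypothetical backing object */
	const u8 *data;
	size_t size;
};

static ssize_t foo_blob_read(struct kernfs_open_file *of, char *buf,
			     size_t count, loff_t off)
{
	const struct foo_blob *blob = of->kn->priv;

	if (off >= blob->size)
		return 0;
	count = min_t(size_t, count, blob->size - off);
	memcpy(buf, blob->data + off, count);
	return count;
}

static const struct kernfs_ops foo_blob_ops = {
	.read	  = foo_blob_read,
	.prealloc = true,
};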
240 if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW) in kernfs_fop_read_iter()
257 struct kernfs_open_file *of = kernfs_of(iocb->ki_filp); in kernfs_fop_write_iter()
262 if (of->atomic_write_len) { in kernfs_fop_write_iter()
263 if (len > of->atomic_write_len) in kernfs_fop_write_iter()
264 return -E2BIG; in kernfs_fop_write_iter()
269 buf = of->prealloc_buf; in kernfs_fop_write_iter()
271 mutex_lock(&of->prealloc_mutex); in kernfs_fop_write_iter()
275 return -ENOMEM; in kernfs_fop_write_iter()
278 len = -EFAULT; in kernfs_fop_write_iter()
284 * @of->mutex nests outside active ref and is used to ensure that in kernfs_fop_write_iter()
285 * the ops aren't called concurrently for the same open file. in kernfs_fop_write_iter()
287 mutex_lock(&of->mutex); in kernfs_fop_write_iter()
288 if (!kernfs_get_active(of->kn)) { in kernfs_fop_write_iter()
289 mutex_unlock(&of->mutex); in kernfs_fop_write_iter()
290 len = -ENODEV; in kernfs_fop_write_iter()
294 ops = kernfs_ops(of->kn); in kernfs_fop_write_iter()
295 if (ops->write) in kernfs_fop_write_iter()
296 len = ops->write(of, buf, len, iocb->ki_pos); in kernfs_fop_write_iter()
298 len = -EINVAL; in kernfs_fop_write_iter()
300 kernfs_put_active(of->kn); in kernfs_fop_write_iter()
301 mutex_unlock(&of->mutex); in kernfs_fop_write_iter()
304 iocb->ki_pos += len; in kernfs_fop_write_iter()
307 if (buf == of->prealloc_buf) in kernfs_fop_write_iter()
308 mutex_unlock(&of->prealloc_mutex); in kernfs_fop_write_iter()
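
On the write side, kernfs copies the user data into a NUL-terminated kernel buffer before calling ->write, and .atomic_write_len both caps the size (larger writes fail with -E2BIG above) and guarantees the data arrives in a single call. A hedged sketch with hypothetical names (foo_attr and its "threshold" semantics are made up):

#include <linux/kernel.h>
#include <linux/kernfs.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>

struct foo_attr {			/* hypothetical attribute owner */
	unsigned long threshold;
};

static int foo_threshold_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	struct foo_attr *fa = of->kn->priv;

	seq_printf(sf, "%lu\n", fa->threshold);
	return 0;
}

static ssize_t foo_threshold_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct foo_attr *fa = of->kn->priv;
	unsigned long val;
	int ret;

	/* @buf is a kernel copy of the user data, already NUL-terminated */
	ret = kstrtoul(strstrip(buf), 0, &val);
	if (ret)
		return ret;
	fa->threshold = val;
	return nbytes;
}

static const struct kernfs_ops foo_attr_ops = {
	.seq_show	  = foo_threshold_show,
	.write		  = foo_threshold_write,
	.atomic_write_len = PAGE_SIZE,
};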
316 struct file *file = vma->vm_file; in kernfs_vma_open()
319 if (!of->vm_ops) in kernfs_vma_open()
322 if (!kernfs_get_active(of->kn)) in kernfs_vma_open()
325 if (of->vm_ops->open) in kernfs_vma_open()
326 of->vm_ops->open(vma); in kernfs_vma_open()
328 kernfs_put_active(of->kn); in kernfs_vma_open()
333 struct file *file = vmf->vma->vm_file; in kernfs_vma_fault()
337 if (!of->vm_ops) in kernfs_vma_fault()
340 if (!kernfs_get_active(of->kn)) in kernfs_vma_fault()
344 if (of->vm_ops->fault) in kernfs_vma_fault()
345 ret = of->vm_ops->fault(vmf); in kernfs_vma_fault()
347 kernfs_put_active(of->kn); in kernfs_vma_fault()
353 struct file *file = vmf->vma->vm_file; in kernfs_vma_page_mkwrite()
357 if (!of->vm_ops) in kernfs_vma_page_mkwrite()
360 if (!kernfs_get_active(of->kn)) in kernfs_vma_page_mkwrite()
364 if (of->vm_ops->page_mkwrite) in kernfs_vma_page_mkwrite()
365 ret = of->vm_ops->page_mkwrite(vmf); in kernfs_vma_page_mkwrite()
369 kernfs_put_active(of->kn); in kernfs_vma_page_mkwrite()
376 struct file *file = vma->vm_file; in kernfs_vma_access()
380 if (!of->vm_ops) in kernfs_vma_access()
381 return -EINVAL; in kernfs_vma_access()
383 if (!kernfs_get_active(of->kn)) in kernfs_vma_access()
384 return -EINVAL; in kernfs_vma_access()
386 ret = -EINVAL; in kernfs_vma_access()
387 if (of->vm_ops->access) in kernfs_vma_access()
388 ret = of->vm_ops->access(vma, addr, buf, len, write); in kernfs_vma_access()
390 kernfs_put_active(of->kn); in kernfs_vma_access()
398 struct file *file = vma->vm_file; in kernfs_vma_set_policy()
402 if (!of->vm_ops) in kernfs_vma_set_policy()
405 if (!kernfs_get_active(of->kn)) in kernfs_vma_set_policy()
406 return -EINVAL; in kernfs_vma_set_policy()
409 if (of->vm_ops->set_policy) in kernfs_vma_set_policy()
410 ret = of->vm_ops->set_policy(vma, new); in kernfs_vma_set_policy()
412 kernfs_put_active(of->kn); in kernfs_vma_set_policy()
419 struct file *file = vma->vm_file; in kernfs_vma_get_policy()
423 if (!of->vm_ops) in kernfs_vma_get_policy()
424 return vma->vm_policy; in kernfs_vma_get_policy()
426 if (!kernfs_get_active(of->kn)) in kernfs_vma_get_policy()
427 return vma->vm_policy; in kernfs_vma_get_policy()
429 pol = vma->vm_policy; in kernfs_vma_get_policy()
430 if (of->vm_ops->get_policy) in kernfs_vma_get_policy()
431 pol = of->vm_ops->get_policy(vma, addr); in kernfs_vma_get_policy()
433 kernfs_put_active(of->kn); in kernfs_vma_get_policy()
440 .open = kernfs_vma_open,
457 * mmap path and of->mutex are prone to triggering spurious lockdep in kernfs_fop_mmap()
460 * without grabbing @of->mutex by testing HAS_MMAP flag. See the in kernfs_fop_mmap()
463 if (!(of->kn->flags & KERNFS_HAS_MMAP)) in kernfs_fop_mmap()
464 return -ENODEV; in kernfs_fop_mmap()
466 mutex_lock(&of->mutex); in kernfs_fop_mmap()
468 rc = -ENODEV; in kernfs_fop_mmap()
469 if (!kernfs_get_active(of->kn)) in kernfs_fop_mmap()
472 ops = kernfs_ops(of->kn); in kernfs_fop_mmap()
473 rc = ops->mmap(of, vma); in kernfs_fop_mmap()
482 if (vma->vm_file != file) in kernfs_fop_mmap()
485 rc = -EINVAL; in kernfs_fop_mmap()
486 if (of->mmapped && of->vm_ops != vma->vm_ops) in kernfs_fop_mmap()
493 rc = -EINVAL; in kernfs_fop_mmap()
494 if (vma->vm_ops && vma->vm_ops->close) in kernfs_fop_mmap()
498 of->mmapped = true; in kernfs_fop_mmap()
499 of->vm_ops = vma->vm_ops; in kernfs_fop_mmap()
500 vma->vm_ops = &kernfs_vm_ops; in kernfs_fop_mmap()
502 kernfs_put_active(of->kn); in kernfs_fop_mmap()
504 mutex_unlock(&of->mutex); in kernfs_fop_mmap()
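
An ->mmap implementation only sets up the mapping; kernfs_fop_mmap() above then saves whatever vm_ops the consumer installed as of->vm_ops and substitutes kernfs_vm_ops, which is also why vm_ops with a ->close are rejected. A hedged sketch mapping a vmalloc'ed buffer (struct foo_shared is hypothetical; remap_vmalloc_range() is a real helper):

#include <linux/kernfs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

struct foo_shared {			/* hypothetical vmalloc'ed backing buffer */
	void *vbuf;
	size_t size;
};

static int foo_shared_mmap(struct kernfs_open_file *of,
			   struct vm_area_struct *vma)
{
	struct foo_shared *sh = of->kn->priv;

	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(sh->size))
		return -EINVAL;

	return remap_vmalloc_range(vma, sh->vbuf, vma->vm_pgoff);
}

static const struct kernfs_ops foo_shared_ops = {
	.mmap = foo_shared_mmap,
};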
510 * kernfs_get_open_node - get or create kernfs_open_node
512 * @of: kernfs_open_file for this instance of open
514 * If @kn->attr.open exists, increment its reference count; otherwise,
521 * 0 on success, -errno on failure.
532 if (!kn->attr.open && new_on) { in kernfs_get_open_node()
533 kn->attr.open = new_on; in kernfs_get_open_node()
537 on = kn->attr.open; in kernfs_get_open_node()
539 atomic_inc(&on->refcnt); in kernfs_get_open_node()
540 list_add_tail(&of->list, &on->files); in kernfs_get_open_node()
554 return -ENOMEM; in kernfs_get_open_node()
556 atomic_set(&new_on->refcnt, 0); in kernfs_get_open_node()
557 atomic_set(&new_on->event, 1); in kernfs_get_open_node()
558 init_waitqueue_head(&new_on->poll); in kernfs_get_open_node()
559 INIT_LIST_HEAD(&new_on->files); in kernfs_get_open_node()
564 * kernfs_put_open_node - put kernfs_open_node
568 * Put @kn->attr.open and unlink @of from the files list. If
577 struct kernfs_open_node *on = kn->attr.open; in kernfs_put_open_node()
584 list_del(&of->list); in kernfs_put_open_node()
586 if (atomic_dec_and_test(&on->refcnt)) in kernfs_put_open_node()
587 kn->attr.open = NULL; in kernfs_put_open_node()
599 struct kernfs_node *kn = inode->i_private; in kernfs_fop_open()
604 int error = -EACCES; in kernfs_fop_open()
607 return -ENODEV; in kernfs_fop_open()
611 has_read = ops->seq_show || ops->read || ops->mmap; in kernfs_fop_open()
612 has_write = ops->write || ops->mmap; in kernfs_fop_open()
613 has_mmap = ops->mmap; in kernfs_fop_open()
616 if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) { in kernfs_fop_open()
617 if ((file->f_mode & FMODE_WRITE) && in kernfs_fop_open()
618 (!(inode->i_mode & S_IWUGO) || !has_write)) in kernfs_fop_open()
621 if ((file->f_mode & FMODE_READ) && in kernfs_fop_open()
622 (!(inode->i_mode & S_IRUGO) || !has_read)) in kernfs_fop_open()
627 error = -ENOMEM; in kernfs_fop_open()
634 * @of->mutex for files which implement mmap. This is a rather in kernfs_fop_open()
636 * mm->mmap_lock - mmap nests @of->mutex under mm->mmap_lock and in kernfs_fop_open()
637 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under in kernfs_fop_open()
638 * which mm->mmap_lock nests, while holding @of->mutex. As each in kernfs_fop_open()
639 * open file has a separate mutex, it's okay as long as those don't in kernfs_fop_open()
645 * look that way and give @of->mutex different static lockdep keys. in kernfs_fop_open()
648 mutex_init(&of->mutex); in kernfs_fop_open()
650 mutex_init(&of->mutex); in kernfs_fop_open()
652 of->kn = kn; in kernfs_fop_open()
653 of->file = file; in kernfs_fop_open()
659 of->atomic_write_len = ops->atomic_write_len; in kernfs_fop_open()
661 error = -EINVAL; in kernfs_fop_open()
663 * ->seq_show is incompatible with ->prealloc, in kernfs_fop_open()
665 * ->read must be used instead. in kernfs_fop_open()
667 if (ops->prealloc && ops->seq_show) in kernfs_fop_open()
669 if (ops->prealloc) { in kernfs_fop_open()
670 int len = of->atomic_write_len ?: PAGE_SIZE; in kernfs_fop_open()
671 of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL); in kernfs_fop_open()
672 error = -ENOMEM; in kernfs_fop_open()
673 if (!of->prealloc_buf) in kernfs_fop_open()
675 mutex_init(&of->prealloc_mutex); in kernfs_fop_open()
683 if (ops->seq_show) in kernfs_fop_open()
690 of->seq_file = file->private_data; in kernfs_fop_open()
691 of->seq_file->private = of; in kernfs_fop_open()
694 if (file->f_mode & FMODE_WRITE) in kernfs_fop_open()
695 file->f_mode |= FMODE_PWRITE; in kernfs_fop_open()
697 /* make sure we have open node struct */ in kernfs_fop_open()
702 if (ops->open) { in kernfs_fop_open()
703 /* nobody has access to @of yet, skip @of->mutex */ in kernfs_fop_open()
704 error = ops->open(of); in kernfs_fop_open()
709 /* open succeeded, put active references */ in kernfs_fop_open()
718 kfree(of->prealloc_buf); in kernfs_fop_open()
725 /* used from release/drain to ensure that ->release() is called exactly once */
731 * we just want to synchronize release and drain paths. in kernfs_release_file()
732 * @kernfs_open_file_mutex is enough. @of->mutex can't be used in kernfs_release_file()
733 * here because drain path may be called from places which can in kernfs_release_file()
738 if (!of->released) { in kernfs_release_file()
744 kn->attr.ops->release(of); in kernfs_release_file()
745 of->released = true; in kernfs_release_file()
751 struct kernfs_node *kn = inode->i_private; in kernfs_fop_release()
754 if (kn->flags & KERNFS_HAS_RELEASE) { in kernfs_fop_release()
762 kfree(of->prealloc_buf); in kernfs_fop_release()
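
The ->release path above guarantees the callback runs exactly once, either from the last close or from kernfs_drain_open_files(), which makes the ->open/->release pair a safe home for per-open state. A hedged sketch (struct foo_cursor and its field are hypothetical; of->priv is the real per-open hook):

#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

struct foo_cursor {			/* hypothetical per-open state */
	u64 last_seq;
};

static int foo_open(struct kernfs_open_file *of)
{
	of->priv = kzalloc(sizeof(struct foo_cursor), GFP_KERNEL);
	return of->priv ? 0 : -ENOMEM;
}

static void foo_release(struct kernfs_open_file *of)
{
	kfree(of->priv);		/* called exactly once, per the comment above */
}

static int foo_cursor_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	struct foo_cursor *cur = of->priv;

	seq_printf(sf, "%llu\n", (unsigned long long)cur->last_seq);
	return 0;
}

static const struct kernfs_ops foo_cursor_ops = {
	.open	  = foo_open,
	.release  = foo_release,
	.seq_show = foo_cursor_show,
};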
773 if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE))) in kernfs_drain_open_files()
777 on = kn->attr.open; in kernfs_drain_open_files()
779 atomic_inc(&on->refcnt); in kernfs_drain_open_files()
786 list_for_each_entry(of, &on->files, list) { in kernfs_drain_open_files()
787 struct inode *inode = file_inode(of->file); in kernfs_drain_open_files()
789 if (kn->flags & KERNFS_HAS_MMAP) in kernfs_drain_open_files()
790 unmap_mapping_range(inode->i_mapping, 0, 0, 1); in kernfs_drain_open_files()
792 if (kn->flags & KERNFS_HAS_RELEASE) in kernfs_drain_open_files()
809 * need to close and re-open the file, or seek to 0 and read again.
817 struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry); in kernfs_generic_poll()
818 struct kernfs_open_node *on = kn->attr.open; in kernfs_generic_poll()
820 poll_wait(of->file, &on->poll, wait); in kernfs_generic_poll()
822 if (of->event != atomic_read(&on->event)) in kernfs_generic_poll()
831 struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry); in kernfs_fop_poll()
837 if (kn->attr.ops->poll) in kernfs_fop_poll()
838 ret = kn->attr.ops->poll(of, wait); in kernfs_fop_poll()
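
From userspace, the semantics described above kernfs_generic_poll() translate into the usual sysfs-notification pattern: read once to consume the current contents, poll for POLLPRI/POLLERR, then rewind and re-read. A hedged userspace sketch (path and buffer handling are illustrative only):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static ssize_t wait_and_reread(const char *path, char *buf, size_t len)
{
	struct pollfd pfd = { .events = POLLPRI | POLLERR };
	ssize_t n;

	pfd.fd = open(path, O_RDONLY);
	if (pfd.fd < 0)
		return -1;

	if (read(pfd.fd, buf, len) < 0 ||	/* arm: consume current contents */
	    poll(&pfd, 1, -1) < 0 ||		/* sleeps until kernfs_notify() */
	    lseek(pfd.fd, 0, SEEK_SET) < 0) {	/* rewind, per the comment above */
		close(pfd.fd);
		return -1;
	}
	n = read(pfd.fd, buf, len);		/* fetch the updated contents */
	close(pfd.fd);
	return n;
}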
858 kernfs_notify_list = kn->attr.notify_next; in kernfs_notify_workfn()
859 kn->attr.notify_next = NULL; in kernfs_notify_workfn()
865 list_for_each_entry(info, &kernfs_root(kn)->supers, node) { in kernfs_notify_workfn()
877 inode = ilookup(info->sb, kernfs_ino(kn)); in kernfs_notify_workfn()
881 name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name)); in kernfs_notify_workfn()
884 p_inode = ilookup(info->sb, kernfs_ino(parent)); in kernfs_notify_workfn()
907 * kernfs_notify - notify a kernfs file
924 on = kn->attr.open; in kernfs_notify()
926 atomic_inc(&on->event); in kernfs_notify()
927 wake_up_interruptible(&on->poll); in kernfs_notify()
933 if (!kn->attr.notify_next) { in kernfs_notify()
935 kn->attr.notify_next = kernfs_notify_list; in kernfs_notify()
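
The producer side is a single call: after updating whatever backs the attribute, kernfs_notify() bumps the open node's ->event, wakes the poll queue, and queues the fsnotify work shown above (sysfs users typically reach it through sysfs_notify()). A hedged kernel-side sketch (struct foo_dev is hypothetical):

#include <linux/kernfs.h>

struct foo_dev {			/* hypothetical object owning the file */
	struct kernfs_node *kn;
	unsigned long value;
};

static void foo_dev_set_value(struct foo_dev *fd, unsigned long new_value)
{
	fd->value = new_value;
	kernfs_notify(fd->kn);		/* wakes pollers, sends fsnotify events */
}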
948 .open = kernfs_fop_open,
957 * __kernfs_create_file - kernfs internal function to create a file
988 return ERR_PTR(-ENOMEM); in __kernfs_create_file()
990 kn->attr.ops = ops; in __kernfs_create_file()
991 kn->attr.size = size; in __kernfs_create_file()
992 kn->ns = ns; in __kernfs_create_file()
993 kn->priv = priv; in __kernfs_create_file()
997 lockdep_init_map(&kn->dep_map, "kn->active", key, 0); in __kernfs_create_file()
998 kn->flags |= KERNFS_LOCKDEP; in __kernfs_create_file()
1003 * kn->attr.ops is accessible only while holding active ref. We in __kernfs_create_file()
1007 if (ops->seq_show) in __kernfs_create_file()
1008 kn->flags |= KERNFS_HAS_SEQ_SHOW; in __kernfs_create_file()
1009 if (ops->mmap) in __kernfs_create_file()
1010 kn->flags |= KERNFS_HAS_MMAP; in __kernfs_create_file()
1011 if (ops->release) in __kernfs_create_file()
1012 kn->flags |= KERNFS_HAS_RELEASE; in __kernfs_create_file()
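
Consumers normally reach __kernfs_create_file() through wrappers in <linux/kernfs.h> such as kernfs_create_file(), which supply the uid/gid and lockdep key; the KERNFS_HAS_* flags above are then derived from the ops. A hedged sketch reusing the hypothetical foo_attr_ops from the earlier write example (wrapper signatures can differ between kernel versions):

#include <linux/err.h>
#include <linux/kernfs.h>

static struct kernfs_node *foo_kn;	/* hypothetical; kept for later kernfs_notify() */

static int foo_add_file(struct kernfs_node *parent, void *priv)
{
	struct kernfs_node *kn;

	kn = kernfs_create_file(parent, "threshold", 0644, 0,
				&foo_attr_ops, priv);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	foo_kn = kn;
	return 0;
}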