
Lines matching full:files in fs/file.c (Linux kernel source). Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" flag matches where files is a function parameter or a local variable.

51  * space if any.  This does not copy the file pointers.  Called with the files
74 * clear the extra space. Called with the files spinlock held for write.
167 * The files->file_lock should be held on entry, and will be held on exit.
169 static int expand_fdtable(struct files_struct *files, unsigned int nr) in expand_fdtable() argument
170 __releases(files->file_lock) in expand_fdtable()
171 __acquires(files->file_lock) in expand_fdtable()
175 spin_unlock(&files->file_lock); in expand_fdtable()
181 if (atomic_read(&files->count) > 1) in expand_fdtable()
184 spin_lock(&files->file_lock); in expand_fdtable()
195 cur_fdt = files_fdtable(files); in expand_fdtable()
198 rcu_assign_pointer(files->fdt, new_fdt); in expand_fdtable()
199 if (cur_fdt != &files->fdtab) in expand_fdtable()
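The hits above are the heart of expand_fdtable(), which grows a task's descriptor table. A condensed reconstruction of the function (from fs/file.c of roughly this vintage; the EMFILE recheck and memory barriers are omitted) shows why the annotations mark the lock being released and reacquired:

    static int expand_fdtable(struct files_struct *files, unsigned int nr)
            __releases(files->file_lock)
            __acquires(files->file_lock)
    {
            struct fdtable *new_fdt, *cur_fdt;

            spin_unlock(&files->file_lock);          /* alloc_fdtable() may sleep */
            new_fdt = alloc_fdtable(nr);

            /* let lockless fd walkers drain before the table is swapped */
            if (atomic_read(&files->count) > 1)
                    synchronize_rcu();

            spin_lock(&files->file_lock);
            if (!new_fdt)
                    return -ENOMEM;
            cur_fdt = files_fdtable(files);
            copy_fdtable(new_fdt, cur_fdt);          /* copy old entries across */
            rcu_assign_pointer(files->fdt, new_fdt); /* publish the new table */
            if (cur_fdt != &files->fdtab)            /* old table wasn't the embedded one */
                    call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
            return 1;
    }
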
207 * Expand files.
210 * Return <0 error code on error; 0 when nothing done; 1 when files were
212 * The files->file_lock should be held on entry, and will be held on exit.
214 static int expand_files(struct files_struct *files, unsigned int nr) in expand_files() argument
215 __releases(files->file_lock) in expand_files()
216 __acquires(files->file_lock) in expand_files()
222 fdt = files_fdtable(files); in expand_files()
232 if (unlikely(files->resize_in_progress)) { in expand_files()
233 spin_unlock(&files->file_lock); in expand_files()
235 wait_event(files->resize_wait, !files->resize_in_progress); in expand_files()
236 spin_lock(&files->file_lock); in expand_files()
241 files->resize_in_progress = true; in expand_files()
242 expanded = expand_fdtable(files, nr); in expand_files()
243 files->resize_in_progress = false; in expand_files()
245 wake_up_all(&files->resize_wait); in expand_files()
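Per the comment at lines 207-212, expand_files() returns a negative errno on failure, 0 when the table was already large enough, and 1 when it actually grew it. Since growing drops files->file_lock (see expand_fdtable() above), a return of 1 means the caller must rescan; condensed from the caller side in __alloc_fd() in this same file:

    repeat:
            fdt = files_fdtable(files);
            /* ... choose a candidate fd from next_fd and the open bitmap ... */

            error = expand_files(files, fd);
            if (error < 0)          /* -EMFILE or -ENOMEM: give up */
                    goto out;
            if (error)              /* table grew, lock was dropped meanwhile: rescan */
                    goto repeat;

            /* fd now fits in the (possibly new) table and can be claimed */
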
312 * Allocate a new files structure and copy contents from the
313 * passed in files structure.
389 * instantiated in the files array if a sibling thread in dup_fd()
412 static struct fdtable *close_files(struct files_struct * files) in close_files() argument
417 * files structure. in close_files()
419 struct fdtable *fdt = rcu_dereference_raw(files->fdt); in close_files()
432 filp_close(file, files); in close_files()
446 struct files_struct *files; in get_files_struct() local
449 files = task->files; in get_files_struct()
450 if (files) in get_files_struct()
451 atomic_inc(&files->count); in get_files_struct()
454 return files; in get_files_struct()
457 void put_files_struct(struct files_struct *files) in put_files_struct() argument
459 if (atomic_dec_and_test(&files->count)) { in put_files_struct()
460 struct fdtable *fdt = close_files(files); in put_files_struct()
463 if (fdt != &files->fdtab) in put_files_struct()
465 kmem_cache_free(files_cachep, files); in put_files_struct()
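get_files_struct() and put_files_struct() are the reference-counting pair that keeps a table alive while someone other than the owning task looks at it. A minimal sketch of the pattern, as callers such as older procfs code used it (task is assumed to be a valid struct task_struct pointer the caller already holds):

    struct files_struct *files;

    files = get_files_struct(task);     /* atomic_inc(&files->count) under task_lock() */
    if (files) {
            /* table is pinned: safe to inspect it here */
            put_files_struct(files);    /* on the last reference this closes every
                                         * file and frees a non-embedded fdtable */
    }
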
469 void reset_files_struct(struct files_struct *files) in reset_files_struct() argument
474 old = tsk->files; in reset_files_struct()
476 tsk->files = files; in reset_files_struct()
483 struct files_struct * files = tsk->files; in exit_files() local
485 if (files) { in exit_files()
487 tsk->files = NULL; in exit_files()
489 put_files_struct(files); in exit_files()
524 int __alloc_fd(struct files_struct *files, in __alloc_fd() argument
531 spin_lock(&files->file_lock); in __alloc_fd()
533 fdt = files_fdtable(files); in __alloc_fd()
535 if (fd < files->next_fd) in __alloc_fd()
536 fd = files->next_fd; in __alloc_fd()
542 * N.B. For clone tasks sharing a files structure, this test in __alloc_fd()
543 * will limit the total number of files that can be opened. in __alloc_fd()
549 error = expand_files(files, fd); in __alloc_fd()
560 if (start <= files->next_fd) in __alloc_fd()
561 files->next_fd = fd + 1; in __alloc_fd()
578 spin_unlock(&files->file_lock); in __alloc_fd()
584 return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags); in alloc_fd()
589 return __alloc_fd(current->files, 0, nofile, flags); in __get_unused_fd_flags()
598 static void __put_unused_fd(struct files_struct *files, unsigned int fd) in __put_unused_fd() argument
600 struct fdtable *fdt = files_fdtable(files); in __put_unused_fd()
602 if (fd < files->next_fd) in __put_unused_fd()
603 files->next_fd = fd; in __put_unused_fd()
608 struct files_struct *files = current->files; in put_unused_fd() local
609 spin_lock(&files->file_lock); in put_unused_fd()
610 __put_unused_fd(files, fd); in put_unused_fd()
611 spin_unlock(&files->file_lock); in put_unused_fd()
619 * The VFS is full of places where we drop the files lock between
630 * your throat. 'files' *MUST* be either current->files or obtained
636 void __fd_install(struct files_struct *files, unsigned int fd, in __fd_install() argument
643 if (unlikely(files->resize_in_progress)) { in __fd_install()
645 spin_lock(&files->file_lock); in __fd_install()
646 fdt = files_fdtable(files); in __fd_install()
649 spin_unlock(&files->file_lock); in __fd_install()
654 fdt = rcu_dereference_sched(files->fdt); in __fd_install()
666 __fd_install(current->files, fd, file); in fd_install()
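__alloc_fd() reserves a slot and __fd_install() publishes a struct file into it; the warning excerpted at lines 619-630 exists because between those two steps the slot is reserved but empty. A minimal sketch of the usual sequence through the public wrappers (example_fops and priv are invented placeholders, not names from this file):

    #include <linux/file.h>           /* get_unused_fd_flags(), fd_install() */
    #include <linux/anon_inodes.h>    /* anon_inode_getfile() */

    int fd = get_unused_fd_flags(O_CLOEXEC);    /* reserve a slot (__alloc_fd) */
    if (fd < 0)
            return fd;

    struct file *file = anon_inode_getfile("[example]", &example_fops,
                                           priv, O_RDWR);
    if (IS_ERR(file)) {
            put_unused_fd(fd);                  /* hand the unused slot back */
            return PTR_ERR(file);
    }

    fd_install(fd, file);   /* publish (__fd_install): the fd is live from here,
                             * so nothing after this step may fail */
    return fd;
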
671 static struct file *pick_file(struct files_struct *files, unsigned fd) in pick_file() argument
676 spin_lock(&files->file_lock); in pick_file()
677 fdt = files_fdtable(files); in pick_file()
685 __put_unused_fd(files, fd); in pick_file()
688 spin_unlock(&files->file_lock); in pick_file()
695 int __close_fd(struct files_struct *files, unsigned fd) in __close_fd() argument
699 file = pick_file(files, fd); in __close_fd()
703 return filp_close(file, files); in __close_fd()
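pick_file() detaches the struct file from the table under file_lock, and __close_fd() calls filp_close() only after the lock is dropped. In kernels of this vintage the close(2) syscall in fs/open.c is a thin wrapper over __close_fd() that also squashes restart codes, since the table entry is already gone and the call cannot be restarted:

    SYSCALL_DEFINE1(close, unsigned int, fd)
    {
            int retval = __close_fd(current->files, fd);

            /* can't restart close syscall because file table entry was cleared */
            if (unlikely(retval == -ERESTARTSYS ||
                         retval == -ERESTARTNOINTR ||
                         retval == -ERESTARTNOHAND ||
                         retval == -ERESTART_RESTARTBLOCK))
                    retval = -EINTR;

            return retval;
    }
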
720 struct files_struct *cur_fds = me->files, *fds = NULL; in __close_range()
773 * We're done closing the files we were supposed to. Time to install in __close_range()
777 me->files = cur_fds; in __close_range()
786 * See close_fd_get_file() below, this variant assumes current->files->file_lock
791 struct files_struct *files = current->files; in __close_fd_get_file() local
795 fdt = files_fdtable(files); in __close_fd_get_file()
802 __put_unused_fd(files, fd); in __close_fd_get_file()
818 struct files_struct *files = current->files; in close_fd_get_file() local
821 spin_lock(&files->file_lock); in close_fd_get_file()
823 spin_unlock(&files->file_lock); in close_fd_get_file()
828 void do_close_on_exec(struct files_struct *files) in do_close_on_exec() argument
834 spin_lock(&files->file_lock); in do_close_on_exec()
838 fdt = files_fdtable(files); in do_close_on_exec()
853 __put_unused_fd(files, fd); in do_close_on_exec()
854 spin_unlock(&files->file_lock); in do_close_on_exec()
855 filp_close(file, files); in do_close_on_exec()
857 spin_lock(&files->file_lock); in do_close_on_exec()
861 spin_unlock(&files->file_lock); in do_close_on_exec()
864 static inline struct file *__fget_files_rcu(struct files_struct *files, in __fget_files_rcu() argument
869 struct fdtable *fdt = rcu_dereference_raw(files->fdt); in __fget_files_rcu()
905 if (unlikely(rcu_dereference_raw(files->fdt) != fdt) || in __fget_files_rcu()
919 static struct file *__fget_files(struct files_struct *files, unsigned int fd, in __fget_files() argument
925 file = __fget_files_rcu(files, fd, mask, refs); in __fget_files()
934 return __fget_files(current->files, fd, mask, refs); in __fget()
959 if (task->files) in fget_task()
960 file = __fget_files(task->files, fd, 0, 1); in fget_task()
984 struct files_struct *files = current->files; in __fget_light() local
987 if (atomic_read(&files->count) == 1) { in __fget_light()
988 file = files_lookup_fd_raw(files, fd); in __fget_light()
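__fget() takes a reference on the file under RCU; __fget_light(), which backs fdget(), skips the atomic refcount entirely when files->count == 1, because a single-threaded task cannot have the descriptor closed out from under it. The two caller-facing idioms, sketched:

    /* long-lived reference: may be stored or used across sleeps */
    struct file *file = fget(fd);
    if (!file)
            return -EBADF;
    /* ... */
    fput(file);

    /* syscall-scoped access: fdget() can skip refcounting on the fast path */
    struct fd f = fdget(fd);
    if (!f.file)
            return -EBADF;
    /* ... use f.file only within this scope ... */
    fdput(f);
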
1051 struct files_struct *files = current->files; in set_close_on_exec() local
1053 spin_lock(&files->file_lock); in set_close_on_exec()
1054 fdt = files_fdtable(files); in set_close_on_exec()
1059 spin_unlock(&files->file_lock); in set_close_on_exec()
1064 struct files_struct *files = current->files; in get_close_on_exec() local
1068 fdt = files_fdtable(files); in get_close_on_exec()
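set_close_on_exec() and get_close_on_exec() flip and test the fd's bit in fdt->close_on_exec, which do_close_on_exec() above consumes at execve() time. Userspace reaches them through fcntl(2), roughly as in do_fcntl() in fs/fcntl.c:

    case F_GETFD:
            err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
            break;
    case F_SETFD:
            err = 0;
            set_close_on_exec(fd, arg & FD_CLOEXEC);
            break;
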
1074 static int do_dup2(struct files_struct *files, in do_dup2() argument
1076 __releases(&files->file_lock) in do_dup2()
1087 * fget() treats larval files as absent. Potentially interesting, in do_dup2()
1095 fdt = files_fdtable(files); in do_dup2()
1107 spin_unlock(&files->file_lock); in do_dup2()
1110 filp_close(tofree, files); in do_dup2()
1115 spin_unlock(&files->file_lock); in do_dup2()
1122 struct files_struct *files = current->files; in replace_fd() local
1125 return __close_fd(files, fd); in replace_fd()
1130 spin_lock(&files->file_lock); in replace_fd()
1131 err = expand_files(files, fd); in replace_fd()
1134 return do_dup2(files, file, fd, flags); in replace_fd()
1137 spin_unlock(&files->file_lock); in replace_fd()
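replace_fd() is the in-kernel cousin of dup2(): given a NULL file it simply closes fd, otherwise it expands the table as needed and hands off to do_dup2(), which takes its own reference on the file. Condensed from the core-dump pipe helper (fs/coredump.c), which redirects the usermode helper's stdin:

    struct file *files[2];
    int err = create_pipe_files(files, 0);     /* files[0] is the read end */
    if (err)
            return err;
    /* ... */
    err = replace_fd(0, files[0], 0);          /* install the read end as fd 0 */
    fput(files[0]);                            /* do_dup2() took its own reference */
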
1201 struct files_struct *files = current->files; in ksys_dup3() local
1212 spin_lock(&files->file_lock); in ksys_dup3()
1213 err = expand_files(files, newfd); in ksys_dup3()
1214 file = files_lookup_fd_locked(files, oldfd); in ksys_dup3()
1222 return do_dup2(files, file, newfd, flags); in ksys_dup3()
1227 spin_unlock(&files->file_lock); in ksys_dup3()
1239 struct files_struct *files = current->files; in SYSCALL_DEFINE2() local
1243 if (!fcheck_files(files, oldfd)) in SYSCALL_DEFINE2()
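dup2(2) is dup3(2) plus one corner case: ksys_dup3() rejects oldfd == newfd with -EINVAL, so the dup2 entry point shown above instead just validates oldfd under RCU and returns it unchanged. Condensed from the syscall body in this file:

    if (unlikely(newfd == oldfd)) {              /* corner case */
            struct files_struct *files = current->files;
            int retval = oldfd;

            rcu_read_lock();
            if (!fcheck_files(files, oldfd))     /* oldfd must itself be open */
                    retval = -EBADF;
            rcu_read_unlock();
            return retval;
    }
    return ksys_dup3(oldfd, newfd, 0);
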
1279 int iterate_fd(struct files_struct *files, unsigned n, in iterate_fd() argument
1285 if (!files) in iterate_fd()
1287 spin_lock(&files->file_lock); in iterate_fd()
1288 for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { in iterate_fd()
1290 file = rcu_dereference_check_fdtable(files, fdt->fd[n]); in iterate_fd()
1297 spin_unlock(&files->file_lock); in iterate_fd()
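iterate_fd() walks the table from slot n under files->file_lock, calls f() on every installed file, stops at the first non-zero return, and passes that value back (0 means the walk finished). Because the callback runs under a spinlock it must not sleep. A hypothetical use, locating a descriptor that refers to a known inode (match_inode is an invented name, not from this file):

    static int match_inode(const void *p, struct file *file, unsigned fd)
    {
            /* runs under files->file_lock: no sleeping allowed */
            return file_inode(file) == p ? (int)fd + 1 : 0;
    }

    /* returns fd + 1 of the first matching descriptor, or 0 if none */
    int n = iterate_fd(files, 0, match_inode, inode);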