Lines matching full:files (identifier search hits; each entry is the source line number, the matching code line, and, where shown, the enclosing function)
46 * space if any. This does not copy the file pointers. Called with the files spinlock held for read.
69 * clear the extra space. Called with the files spinlock held for write.
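The two hits above come from the table-copy helpers (copy_fd_bitmaps() and copy_fdtable() in kernels of this vintage). The pattern they implement: copy the live prefix of each bitmap, then zero the tail so freshly added slots read as closed. A condensed sketch, assuming that era's fdtable layout; the full_fds_bits handling and the fd-array copy are elided, and the _sketch suffix is mine:

    /* Duplicate 'count' bits of per-fd state, then clear whatever
     * extra room the new, larger table has. */
    static void copy_fd_bitmaps_sketch(struct fdtable *nfdt,
                                       struct fdtable *ofdt,
                                       unsigned int count)
    {
            unsigned int cpy = count / BITS_PER_BYTE;  /* bytes in use */
            unsigned int set = (nfdt->max_fds - count) / BITS_PER_BYTE;

            memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
            memset((char *)nfdt->open_fds + cpy, 0, set);  /* new slots start closed */
            memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
            memset((char *)nfdt->close_on_exec + cpy, 0, set);
    }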
146 * The files->file_lock should be held on entry, and will be held on exit.
148 static int expand_fdtable(struct files_struct *files, unsigned int nr) in expand_fdtable() argument
149 __releases(files->file_lock) in expand_fdtable()
150 __acquires(files->file_lock) in expand_fdtable()
154 spin_unlock(&files->file_lock); in expand_fdtable()
160 if (atomic_read(&files->count) > 1) in expand_fdtable()
163 spin_lock(&files->file_lock); in expand_fdtable()
174 cur_fdt = files_fdtable(files); in expand_fdtable()
177 rcu_assign_pointer(files->fdt, new_fdt); in expand_fdtable()
178 if (cur_fdt != &files->fdtab) in expand_fdtable()
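Read together, these hits show the core trick of expand_fdtable(): the spinlock is dropped so the potentially large allocation can sleep; if the table is shared (count > 1) the resizer waits for lockless __fd_install() readers to drain; the new table is then filled and published with rcu_assign_pointer(), and the old one is freed through RCU unless it is the small table embedded in files_struct itself (&files->fdtab). A condensed sketch of the sequence, assuming a ~v4.x/v5.x fs/file.c (older trees used synchronize_sched() rather than synchronize_rcu()); error paths and sanity checks are trimmed:

    spin_unlock(&files->file_lock);          /* allocation may sleep */
    new_fdt = alloc_fdtable(nr);
    if (atomic_read(&files->count) > 1)      /* shared table: let lockless   */
            synchronize_rcu();               /* __fd_install() readers drain */
    spin_lock(&files->file_lock);
    if (!new_fdt)
            return -ENOMEM;
    cur_fdt = files_fdtable(files);
    copy_fdtable(new_fdt, cur_fdt);          /* bitmaps + file pointers */
    rcu_assign_pointer(files->fdt, new_fdt); /* publish to readers */
    if (cur_fdt != &files->fdtab)            /* old table was heap-allocated */
            call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
    return 1;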
186 * Expand files.
189 * Return <0 error code on error; 0 when nothing done; 1 when files were expanded and execution may have blocked.
191 * The files->file_lock should be held on entry, and will be held on exit.
193 static int expand_files(struct files_struct *files, unsigned int nr) in expand_files() argument
194 __releases(files->file_lock) in expand_files()
195 __acquires(files->file_lock) in expand_files()
201 fdt = files_fdtable(files); in expand_files()
211 if (unlikely(files->resize_in_progress)) { in expand_files()
212 spin_unlock(&files->file_lock); in expand_files()
214 wait_event(files->resize_wait, !files->resize_in_progress); in expand_files()
215 spin_lock(&files->file_lock); in expand_files()
220 files->resize_in_progress = true; in expand_files()
221 expanded = expand_fdtable(files, nr); in expand_files()
222 files->resize_in_progress = false; in expand_files()
224 wake_up_all(&files->resize_wait); in expand_files()
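expand_files() wraps the resize in a single-writer handshake: whoever sets resize_in_progress owns the resize, latecomers drop the lock and sleep on resize_wait, and __fd_install() checks the same flag so it never stores into a table that is about to be swapped out. The shape, condensed from the hits above as a sketch rather than the verbatim function:

    repeat:
            fdt = files_fdtable(files);
            if (nr < fdt->max_fds)
                    return 0;                    /* already big enough */
            if (nr >= sysctl_nr_open)
                    return -EMFILE;              /* system-wide hard cap */
            if (unlikely(files->resize_in_progress)) {
                    spin_unlock(&files->file_lock);
                    wait_event(files->resize_wait, !files->resize_in_progress);
                    spin_lock(&files->file_lock);
                    goto repeat;                 /* table may have grown meanwhile */
            }
            files->resize_in_progress = true;
            expanded = expand_fdtable(files, nr); /* drops and retakes the lock */
            files->resize_in_progress = false;
            wake_up_all(&files->resize_wait);
            return expanded;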
268 * Allocate a new files structure and copy contents from the
269 * passed in files structure.
345 * instantiated in the files array if a sibling thread is partway through open(). in dup_fd()
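dup_fd() is the fork()-side consumer of this file: copy_files() in kernel/fork.c either shares the table (CLONE_FILES just bumps the same ->count that get_files_struct()/put_files_struct() below manipulate) or calls dup_fd() for a private copy. Roughly, condensed from kernel/fork.c of the same era:

    static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
    {
            struct files_struct *oldf = current->files;
            int error = 0;

            if (!oldf)
                    return 0;                    /* kernel threads have no table */
            if (clone_flags & CLONE_FILES) {
                    atomic_inc(&oldf->count);    /* share one table */
                    return 0;
            }
            tsk->files = dup_fd(oldf, &error);   /* private duplicate */
            return tsk->files ? 0 : error;
    }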
368 static struct fdtable *close_files(struct files_struct * files) in close_files() argument
373 * It is safe to dereference the fd table without RCU or ->file_lock because this is the last reference to the files structure. in close_files()
375 struct fdtable *fdt = rcu_dereference_raw(files->fdt); in close_files()
388 filp_close(file, files); in close_files()
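close_files() can get away with rcu_dereference_raw() because, as the comment says, it runs on the last reference: nobody can resize or look up the table any more. The walk itself goes word by word through the open_fds bitmap, yielding between files because tables can be huge. A condensed sketch of that loop, under the same assumptions:

    for (i = 0; i < fdt->max_fds; i += BITS_PER_LONG) {
            unsigned long set = fdt->open_fds[i / BITS_PER_LONG];
            unsigned int fd = i;

            for (; set; fd++, set >>= 1) {
                    if (set & 1) {
                            struct file *file = xchg(&fdt->fd[fd], NULL);
                            if (file) {
                                    filp_close(file, files);
                                    cond_resched();  /* long tables: be polite */
                            }
                    }
            }
    }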
402 struct files_struct *files; in get_files_struct() local
405 files = task->files; in get_files_struct()
406 if (files) in get_files_struct()
407 atomic_inc(&files->count); in get_files_struct()
410 return files; in get_files_struct()
413 void put_files_struct(struct files_struct *files) in put_files_struct() argument
415 if (atomic_dec_and_test(&files->count)) { in put_files_struct()
416 struct fdtable *fdt = close_files(files); in put_files_struct()
419 if (fdt != &files->fdtab) in put_files_struct()
421 kmem_cache_free(files_cachep, files); in put_files_struct()
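get_files_struct()/put_files_struct() hand other subsystems a counted reference to a task's table (procfs was the classic user before these helpers were removed in later kernels). A sketch of the pairing; task and fd are assumed inputs, and get_file_rcu() is the usual "take a reference unless f_count already hit zero" step:

    struct files_struct *files = get_files_struct(task);
    struct file *file = NULL;

    if (files) {
            rcu_read_lock();
            file = fcheck_files(files, fd);      /* lockless fd -> file */
            if (file && !get_file_rcu(file))
                    file = NULL;                 /* lost a race with fput() */
            rcu_read_unlock();
            put_files_struct(files);             /* may free table + struct */
    }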
425 void reset_files_struct(struct files_struct *files) in reset_files_struct() argument
430 old = tsk->files; in reset_files_struct()
432 tsk->files = files; in reset_files_struct()
439 struct files_struct * files = tsk->files; in exit_files() local
441 if (files) { in exit_files()
443 tsk->files = NULL; in exit_files()
445 put_files_struct(files); in exit_files()
480 int __alloc_fd(struct files_struct *files, in __alloc_fd() argument
487 spin_lock(&files->file_lock); in __alloc_fd()
489 fdt = files_fdtable(files); in __alloc_fd()
491 if (fd < files->next_fd) in __alloc_fd()
492 fd = files->next_fd; in __alloc_fd()
498 * N.B. For clone tasks sharing a files structure, this test in __alloc_fd()
499 * will limit the total number of files that can be opened. in __alloc_fd()
505 error = expand_files(files, fd); in __alloc_fd()
516 if (start <= files->next_fd) in __alloc_fd()
517 files->next_fd = fd + 1; in __alloc_fd()
534 spin_unlock(&files->file_lock); in __alloc_fd()
540 return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags); in alloc_fd()
545 return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags); in get_unused_fd_flags()
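On the allocation path, the 'end' bound is normally rlimit(RLIMIT_NOFILE), and next_fd is only a search hint; the bitmap stays authoritative. The rlimit check is directly observable from userspace, as in this small runnable example:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            /* Leave room for fds 0-2 only; the next fd would be 3. */
            struct rlimit rl = { .rlim_cur = 3, .rlim_max = 3 };

            setrlimit(RLIMIT_NOFILE, &rl);
            if (open("/dev/null", O_RDONLY) < 0 && errno == EMFILE)
                    puts("EMFILE: fd >= RLIMIT_NOFILE rejected by the allocator");
            return 0;
    }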
549 static void __put_unused_fd(struct files_struct *files, unsigned int fd) in __put_unused_fd() argument
551 struct fdtable *fdt = files_fdtable(files); in __put_unused_fd()
553 if (fd < files->next_fd) in __put_unused_fd()
554 files->next_fd = fd; in __put_unused_fd()
559 struct files_struct *files = current->files; in put_unused_fd() local
560 spin_lock(&files->file_lock); in put_unused_fd()
561 __put_unused_fd(files, fd); in put_unused_fd()
562 spin_unlock(&files->file_lock); in put_unused_fd()
570 * The VFS is full of places where we drop the files lock between setting the open_fds bitmap and installing the file in the file array.
581 * use it unless you are forced to by truly lousy API shoved down your throat. 'files' *MUST* be either current->files or obtained by get_files_struct(current).
587 void __fd_install(struct files_struct *files, unsigned int fd, in __fd_install() argument
594 if (unlikely(files->resize_in_progress)) { in __fd_install()
596 spin_lock(&files->file_lock); in __fd_install()
597 fdt = files_fdtable(files); in __fd_install()
600 spin_unlock(&files->file_lock); in __fd_install()
605 fdt = rcu_dereference_sched(files->fdt); in __fd_install()
613 __fd_install(current->files, fd, file); in fd_install()
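fd_install() is the publication point: until the store into ->fd[fd], the descriptor is reserved (its bitmap bit is set) but invisible to fget(); __fd_install() itself takes the lockless rcu_read_lock_sched() path unless a resize is in flight. The canonical consumer sequence, as used by anon-inode style drivers; my_fops and priv are placeholder names:

    int fd = get_unused_fd_flags(O_CLOEXEC);     /* reserve a slot */
    struct file *file;

    if (fd < 0)
            return fd;
    file = anon_inode_getfile("[example]", &my_fops, priv, O_RDWR);
    if (IS_ERR(file)) {
            put_unused_fd(fd);                   /* undo the reservation */
            return PTR_ERR(file);
    }
    fd_install(fd, file);                        /* fd becomes live here */
    return fd;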
621 int __close_fd(struct files_struct *files, unsigned fd) in __close_fd() argument
626 spin_lock(&files->file_lock); in __close_fd()
627 fdt = files_fdtable(files); in __close_fd()
634 __put_unused_fd(files, fd); in __close_fd()
635 spin_unlock(&files->file_lock); in __close_fd()
636 return filp_close(file, files); in __close_fd()
639 spin_unlock(&files->file_lock); in __close_fd()
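__close_fd(), the close(2) backend, splits the work in two: unhook the file and recycle the fd number under file_lock, then call filp_close() only after unlocking, since flushing may sleep. Condensed as a sketch:

    spin_lock(&files->file_lock);
    fdt = files_fdtable(files);
    if (fd >= fdt->max_fds)
            goto out_unlock;                     /* -EBADF */
    file = fdt->fd[fd];
    if (!file)
            goto out_unlock;                     /* -EBADF */
    rcu_assign_pointer(fdt->fd[fd], NULL);       /* unhook from the table */
    __put_unused_fd(files, fd);                  /* bitmap + next_fd hint */
    spin_unlock(&files->file_lock);
    return filp_close(file, files);              /* may sleep, lock dropped */

    out_unlock:
            spin_unlock(&files->file_lock);
            return -EBADF;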
644 void do_close_on_exec(struct files_struct *files) in do_close_on_exec() argument
650 spin_lock(&files->file_lock); in do_close_on_exec()
654 fdt = files_fdtable(files); in do_close_on_exec()
669 __put_unused_fd(files, fd); in do_close_on_exec()
670 spin_unlock(&files->file_lock); in do_close_on_exec()
671 filp_close(file, files); in do_close_on_exec()
673 spin_lock(&files->file_lock); in do_close_on_exec()
677 spin_unlock(&files->file_lock); in do_close_on_exec()
682 struct files_struct *files = current->files; in __fget() local
687 file = fcheck_files(files, fd); in __fget()
733 struct files_struct *files = current->files; in __fget_light() local
736 if (atomic_read(&files->count) == 1) { in __fget_light()
737 file = __fcheck_files(files, fd); in __fget_light()
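__fget() is the hot lookup: RCU-protected, no file_lock, with a retry loop because the refcount grab can race with the final fput(). __fget_light() goes further: when count == 1 no other thread can change the table, so it returns a borrowed pointer without touching the refcount at all. The __fget() core, condensed as a sketch:

    rcu_read_lock();
    loop:
            file = fcheck_files(files, fd);      /* may return NULL */
            if (file) {
                    if (file->f_mode & mask)     /* e.g. FMODE_PATH filtering */
                            file = NULL;
                    else if (!get_file_rcu(file))
                            goto loop;           /* raced with last fput(); dup2()
                                                  * atomicity: re-read the slot */
            }
    rcu_read_unlock();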
786 struct files_struct *files = current->files; in set_close_on_exec() local
788 spin_lock(&files->file_lock); in set_close_on_exec()
789 fdt = files_fdtable(files); in set_close_on_exec()
794 spin_unlock(&files->file_lock); in set_close_on_exec()
799 struct files_struct *files = current->files; in get_close_on_exec() local
803 fdt = files_fdtable(files); in get_close_on_exec()
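set_close_on_exec()/get_close_on_exec() service fcntl(F_SETFD/F_GETFD) (and the O_CLOEXEC open flag), and do_close_on_exec() sweeps the resulting bitmap at execve() time, dropping file_lock around each filp_close() exactly as the hits above show. From userspace the bitmap is driven like this:

    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            int fd = open("/dev/null", O_RDONLY);
            int flags = fcntl(fd, F_GETFD);              /* get_close_on_exec() */

            fcntl(fd, F_SETFD, flags | FD_CLOEXEC);      /* set_close_on_exec(fd, 1) */
            printf("cloexec set: %d\n",
                   !!(fcntl(fd, F_GETFD) & FD_CLOEXEC)); /* fd dies at execve() */
            return 0;
    }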
809 static int do_dup2(struct files_struct *files, in do_dup2() argument
811 __releases(&files->file_lock) in do_dup2()
822 * fget() treats larval files as absent. Potentially interesting, in do_dup2()
830 fdt = files_fdtable(files); in do_dup2()
841 spin_unlock(&files->file_lock); in do_dup2()
844 filp_close(tofree, files); in do_dup2()
849 spin_unlock(&files->file_lock); in do_dup2()
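do_dup2() is the common tail of dup2()/dup3() and replace_fd(): it refuses fds that are reserved but not yet installed (the "larval" case the truncated comment above refers to, returning -EBUSY), publishes the new file, and only then, after unlocking, closes whatever used to occupy the slot. Condensed as a sketch:

    fdt = files_fdtable(files);
    tofree = fdt->fd[fd];
    if (!tofree && fd_is_open(fd, fdt))
            goto Ebusy;                          /* larval fd: open() in flight */
    get_file(file);                              /* the slot owns a reference */
    rcu_assign_pointer(fdt->fd[fd], file);
    __set_open_fd(fd, fdt);
    if (flags & O_CLOEXEC)
            __set_close_on_exec(fd, fdt);
    else
            __clear_close_on_exec(fd, fdt);
    spin_unlock(&files->file_lock);
    if (tofree)
            filp_close(tofree, files);           /* old occupant, lock dropped */
    return fd;

    Ebusy:
            spin_unlock(&files->file_lock);
            return -EBUSY;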
856 struct files_struct *files = current->files; in replace_fd() local
859 return __close_fd(files, fd); in replace_fd()
864 spin_lock(&files->file_lock); in replace_fd()
865 err = expand_files(files, fd); in replace_fd()
868 return do_dup2(files, file, fd, flags); in replace_fd()
871 spin_unlock(&files->file_lock); in replace_fd()
879 struct files_struct *files = current->files; in ksys_dup3() local
890 spin_lock(&files->file_lock); in ksys_dup3()
891 err = expand_files(files, newfd); in ksys_dup3()
900 return do_dup2(files, file, newfd, flags); in ksys_dup3()
905 spin_unlock(&files->file_lock); in ksys_dup3()
917 struct files_struct *files = current->files; in SYSCALL_DEFINE2() local
921 if (!fcheck_files(files, oldfd)) in SYSCALL_DEFINE2()
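The dup2() wrapper handles oldfd == newfd itself: it only validates oldfd with fcheck_files() and returns it, without touching close-on-exec, whereas dup3() makes the same situation an error. Both are easy to check from userspace:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/null", O_RDONLY);
            int r;

            printf("dup2(fd, fd) = %d\n", dup2(fd, fd));  /* returns fd */
            r = dup3(fd, fd, 0);                          /* fails: -1 */
            printf("dup3(fd, fd) = %d (EINVAL: %d)\n", r, errno == EINVAL);
            r = dup3(fd, fd + 1, O_CLOEXEC);              /* cloexec copy */
            printf("dup3 new fd  = %d\n", r);
            return 0;
    }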
962 int iterate_fd(struct files_struct *files, unsigned n, in iterate_fd() argument
968 if (!files) in iterate_fd()
970 spin_lock(&files->file_lock); in iterate_fd()
971 for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { in iterate_fd()
973 file = rcu_dereference_check_fdtable(files, fdt->fd[n]); in iterate_fd()
980 spin_unlock(&files->file_lock); in iterate_fd()
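iterate_fd() walks the table under file_lock and hands each non-NULL file to the callback, stopping at the first non-zero return value, which it passes back to the caller. A hypothetical consumer, with my_fops standing in for some known file_operations; returning fd + 1 keeps fd 0 distinguishable from "not found":

    /* Find the first fd (plus one) whose file uses the given fops. */
    static int match_fops(const void *p, struct file *file, unsigned fd)
    {
            return file->f_op == p ? (int)fd + 1 : 0;
    }

    /* ... somewhere with a valid files_struct in scope ... */
    int res = iterate_fd(current->files, 0, match_fops, &my_fops);
    if (res)
            pr_info("fd %d uses my_fops\n", res - 1);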