// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>
#include <linux/init_task.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
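
/*
 * Worked arithmetic (illustrative only; these values follow from the
 * expression above, not from anything else in this file): on 64-bit,
 * ~(size_t)0/sizeof(void *) far exceeds INT_MAX, so the cap is
 * INT_MAX & -64 == 0x7fffffc0 (2147483584). On 32-bit,
 * ~(size_t)0/sizeof(void *) == 0x3fffffff, which is below INT_MAX,
 * so the cap is 0x3fffffff & -32 == 0x3fffffe0 (1073741792).
 */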

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
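
/*
 * Worked example (illustrative): with BITS_PER_LONG == 64 and nr == 1024
 * fds, ->open_fds needs BITS_TO_LONGS(1024) == 16 words, and
 * ->full_fds_bits needs one bit per such word, hence
 * BITBIT_NR(1024) == BITS_TO_LONGS(16) == 1 long, i.e.
 * BITBIT_SIZE(1024) == 8 bytes.
 */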

#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
				   unsigned int copy_words)
{
	unsigned int nwords = fdt_words(nfdt);

	bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
			copy_words, nwords);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 */
static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
{
	struct fdtable *fdt;
	unsigned int nr;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on. Since we are called
	 * only with slots_wanted > BITS_PER_LONG (the embedded instance in
	 * files->fdtab already gives BITS_PER_LONG slots), the above boils
	 * down to
	 * 1. use the smallest power of two large enough to give us that many
	 * slots.
	 * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is
	 * 256 slots (i.e. 1Kb fd array).
	 * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there
	 * and we are never going to be asked for 64 or less.
	 */
	if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256)
		nr = 256;
	else
		nr = roundup_pow_of_two(slots_wanted);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open)) {
		nr = round_down(sysctl_nr_open, BITS_PER_LONG);
		if (nr < slots_wanted)
			return ERR_PTR(-EMFILE);
	}

	/*
	 * Check if the allocation size would exceed INT_MAX. kvmalloc_array()
	 * and kvmalloc() will warn if the allocation size is greater than
	 * INT_MAX, as these allocations are not marked __GFP_NOWARN.
	 *
	 * This can happen when sysctl_nr_open is set to a very high value and
	 * a process tries to use a file descriptor near that limit. For example,
	 * if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what
	 * systemd typically sets it to - then trying to use a file descriptor
	 * close to that value will require allocating a file descriptor table
	 * that exceeds 8GB in size.
	 */
	if (unlikely(nr > INT_MAX / sizeof(struct file *)))
		return ERR_PTR(-EMFILE);

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return ERR_PTR(-ENOMEM);
}
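
/*
 * Sizing example (illustrative): a request for slots_wanted == 300 on a
 * 64-bit kernel rounds up to nr == 512, i.e. a 4KiB fd array. The
 * kvmalloc() above then carves three bitmaps out of one allocation:
 * ->open_fds and ->close_on_exec get nr / BITS_PER_BYTE == 64 bytes
 * each, followed by BITBIT_SIZE(512) == 8 bytes for ->full_fds_bits,
 * 136 bytes in total.
 */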

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr + 1);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (IS_ERR(new_fdt))
		return PTR_ERR(new_fdt);
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}
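
/*
 * Illustration (not load-bearing): ->full_fds_bits is a summary bitmap
 * with one bit per word of ->open_fds. Once all BITS_PER_LONG fds in a
 * word are open (the !~fdt->open_fds[fd] test above), the word's summary
 * bit is set, letting find_next_fd() below skip the whole word. Clearing
 * any fd in the word clears the summary bit again.
 */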

static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
{
	return test_bit(fd, fdt->open_fds);
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * punch_hole is optional - when close_range() is asked to unshare
 * and close, we don't need to copy descriptors in that range, so
 * a smaller cloned descriptor table might suffice if the last
 * currently opened descriptor falls into that range.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
{
	unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);

	if (last == fdt->max_fds)
		return NR_OPEN_DEFAULT;
	if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
		last = find_last_bit(fdt->open_fds, punch_hole->from);
		if (last == punch_hole->from)
			return NR_OPEN_DEFAULT;
	}
	return ALIGN(last + 1, BITS_PER_LONG);
}
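
/*
 * Example (illustrative): if the highest open descriptor is fd 100, the
 * clone needs ALIGN(101, 64) == 128 slots on a 64-bit kernel. If instead
 * everything from fd 64 upwards is being punched out and the next open
 * fd below that is 10, ALIGN(11, 64) == 64 slots suffice.
 */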

/*
 * Allocate a new descriptor table and copy contents from the passed in
 * instance. Returns a pointer to cloned table on success, ERR_PTR()
 * on failure. For 'punch_hole' see sane_fdtable_size().
 */
struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		return ERR_PTR(-ENOMEM);

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, punch_hole);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files);
		if (IS_ERR(new_fdt)) {
			kmem_cache_free(files_cachep, newf);
			return ERR_CAST(new_fdt);
		}

		/*
		 * Reacquire the oldf lock and a fresh pointer to its fd
		 * table; it may have grown a new, bigger fd table in the
		 * meantime, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, punch_hole);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	/*
	 * We may be racing against fd allocation from other threads using this
	 * files_struct, despite holding ->file_lock.
	 *
	 * alloc_fd() might have already claimed a slot, while fd_install()
	 * did not populate it yet. Note the latter operates locklessly, so
	 * the file can show up as we are walking the array below.
	 *
	 * At the same time we know no files will disappear as all other
	 * operations take the lock.
	 *
	 * Instead of trying to placate userspace racing with itself, we
	 * ref the file if we see it and mark the fd slot as unused otherwise.
	 */
	for (i = open_files; i != 0; i--) {
		struct file *f = rcu_dereference_raw(*old_fds++);
		if (f) {
			get_file(f);
		} else {
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
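
/*
 * Worked example (illustrative): with fds 0..127 all open and fd 128
 * free, a search from start == 3 first scans ->full_fds_bits, skips the
 * two fully-populated words, jumps straight to bit 128, and only then
 * scans ->open_fds from there - two word-sized probes instead of
 * walking 128 occupied bits.
 */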

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);
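
/*
 * Typical usage (illustrative sketch only; "example_fops" and "priv" are
 * placeholders, not names from this file): reserve a descriptor first,
 * create the file, and only publish it with fd_install() once nothing
 * can fail, undoing the reservation with put_unused_fd() on the error
 * path:
 *
 *	struct file *file;
 *	int fd;
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */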

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we let dup2() do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
		return;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

/**
 * file_close_fd_locked - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	lockdep_assert_held(&files->file_lock);

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = rcu_dereference_raw(fdt->fd[fd]);
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd);

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = file_close_fd_locked(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd: starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags: CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
		struct fd_range range = {fd, max_fd}, *punch_hole = &range;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (flags & CLOSE_RANGE_CLOEXEC)
			punch_hole = NULL;

		fds = dup_fd(cur_fds, punch_hole);
		if (IS_ERR(fds))
			return PTR_ERR(fds);
		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
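
/*
 * Userspace-facing example (illustrative): a typical post-fork cleanup
 * closes everything above stderr without disturbing a table still shared
 * with the parent:
 *
 *	close_range(3, ~0U, CLOSE_RANGE_UNSHARE);
 *
 * With CLOSE_RANGE_UNSHARE, dup_fd() above clones the table with the
 * to-be-closed range punched out, instead of copying it all and then
 * closing each descriptor.
 */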

/**
 * file_close_fd - return file associated with fd
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}
EXPORT_SYMBOL_GPL(file_close_fd);

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__get_file_rcu(struct file __rcu **f)
{
	struct file __rcu *file;
	struct file __rcu *file_reloaded;
	struct file __rcu *file_reloaded_cmp;

	file = rcu_dereference_raw(*f);
	if (!file)
		return NULL;

	if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
		return ERR_PTR(-EAGAIN);

	file_reloaded = rcu_dereference_raw(*f);

	/*
	 * Ensure that all accesses have a dependency on the load from
	 * rcu_dereference_raw() above so we get correct ordering
	 * between reuse/allocation and the pointer check below.
	 */
	file_reloaded_cmp = file_reloaded;
	OPTIMIZER_HIDE_VAR(file_reloaded_cmp);

	/*
	 * atomic_long_inc_not_zero() above provided a full memory
	 * barrier when we acquired a reference.
	 *
	 * This is paired with the write barrier from assigning to the
	 * __rcu protected file pointer so that if that pointer still
	 * matches the current file, we know we have successfully
	 * acquired a reference to the right file.
	 *
	 * If the pointers don't match the file has been reallocated by
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	if (file == file_reloaded_cmp)
		return file_reloaded;

	fput(file);
	return ERR_PTR(-EAGAIN);
}

/**
 * get_file_rcu - try to get a reference to a file under rcu
 * @f: the file to get a reference on
 *
 * This function tries to get a reference on @f carefully verifying that
 * @f hasn't been reused.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_rcu(struct file __rcu **f)
{
	for (;;) {
		struct file __rcu *file;

		file = __get_file_rcu(f);
		if (!IS_ERR(file))
			return file;
	}
}
EXPORT_SYMBOL_GPL(get_file_rcu);

/**
 * get_file_active - try to get a reference to a file
 * @f: the file to get a reference on
 *
 * In contrast to get_file_rcu() the pointer itself isn't part of the
 * reference counting.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_active(struct file **f)
{
	struct file __rcu *file;

	rcu_read_lock();
	file = __get_file_rcu(f);
	rcu_read_unlock();
	if (IS_ERR(file))
		file = NULL;
	return file;
}
EXPORT_SYMBOL_GPL(get_file_active);

static inline struct file *__fget_files_rcu(struct files_struct *files,
					    unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;
		unsigned long nospec_mask;

		/* Mask is a 0 for invalid fd's, ~0 for valid ones */
		nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);

		/*
		 * fdentry points to the 'fd' offset, or fdt->fd[0].
		 * Loading from fdt->fd[0] is always safe, because the
		 * array always exists.
		 */
		fdentry = fdt->fd + (fd & nospec_mask);

		/* Do the load, then mask any invalid result */
		file = rcu_dereference_raw(*fdentry);
		file = (void *)(nospec_mask & (unsigned long)file);
		if (unlikely(!file))
			return NULL;

		/*
		 * Ok, we have a file pointer that was valid at
		 * some point, but it might have become stale since.
		 *
		 * We need to confirm it by incrementing the refcount
		 * and then check the lookup again.
		 *
		 * atomic_long_inc_not_zero() gives us a full memory
		 * barrier. We only really need an 'acquire' one to
		 * protect the loads below, but we don't have that.
		 */
		if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
			continue;

		/*
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero and the
		 *      file hasn't been reused yet or the file count
		 *      isn't zero but the file has already been reused.
		 *
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
		    unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
			fput(file);
			continue;
		}

		/*
		 * This isn't the file we're looking for or we're not
		 * allowed to get a reference to it.
		 */
		if (unlikely(file->f_mode & mask)) {
			fput(file);
			return NULL;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *lookup_fdget_rcu(unsigned int fd)
{
	return __fget_files_rcu(current->files, fd, 0);
}
EXPORT_SYMBOL_GPL(lookup_fdget_rcu);

struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = __fget_files_rcu(files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = __fget_files_rcu(files, fd, 0);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fdget_rcu);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 *
 * (As an exception to rule 2, you can call filp_close between fget_light and
 * fput_light provided that you capture a real refcount with get_file before
 * the call to filp_close, and ensure that this real refcount is fput *after*
 * the fput_light call.)
 *
 * See also the documentation in rust/kernel/file.rs.
 */
static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (likely(atomic_read_acquire(&files->count) == 1)) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return EMPTY_FD;
		return BORROWED_FD(file);
	} else {
		file = __fget_files(files, fd, mask);
		if (!file)
			return EMPTY_FD;
		return CLONED_FD(file);
	}
}
struct fd fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fdget);

struct fd fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
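
/*
 * Usage sketch (illustrative; "do_something" is a placeholder): pair
 * fdget() with fdput() around syscall-local use of a descriptor.
 * fd_file() extracts the struct file, and fdput() only drops a
 * reference when __fget_light() actually took one:
 *
 *	struct fd f = fdget(fd);
 *
 *	if (!fd_file(f))
 *		return -EBADF;
 *	ret = do_something(fd_file(f));
 *	fdput(f);
 *	return ret;
 */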

/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}

struct fd fdget_pos(unsigned int fd)
{
	struct fd f = fdget(fd);
	struct file *file = fd_file(f);

	if (file && file_needs_f_pos_lock(file)) {
		f.word |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return f;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	bool res;
	rcu_read_lock();
	res = close_on_exec(fd, current->files);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
		   struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor. NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent. Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS. All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	fd = array_index_nospec(fd, fdt->max_fds);
	tofree = rcu_dereference_raw(fdt->fd[fd]);
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns: the newly installed fd number, or a negative error code on failure.
 */
int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}
EXPORT_SYMBOL_GPL(receive_fd);
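
/*
 * Example (illustrative): a caller that only needs the descriptor
 * in-kernel passes a NULL user pointer and just consumes the return
 * value:
 *
 *	int fd = receive_fd(file, NULL, O_CLOEXEC);
 *
 *	if (fd < 0)
 *		return fd;
 */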

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		struct file *f;
		int retval = oldfd;

		rcu_read_lock();
		f = __fget_files_rcu(files, oldfd, 0);
		if (!f)
			retval = -EBADF;
		rcu_read_unlock();
		if (f)
			fput(f);
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;

	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
	       int (*f)(const void *, struct file *, unsigned),
	       const void *p)
{
	struct fdtable *fdt;
	int res = 0;

	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);