// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

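/*
 * Added commentary: the expression above caps nr_open at the smaller of
 * INT_MAX and the number of pointers that fit in a size_t, then rounds
 * down to a multiple of BITS_PER_LONG (& -BITS_PER_LONG clears the low
 * bits), keeping fdtable sizes bitmap-friendly.
 */
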
static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

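/*
 * Added commentary: full_fds_bits needs one bit per word of ->open_fds,
 * so its size is BITS_TO_LONGS applied twice - the longs needed to hold
 * one bit for each of the BITS_TO_LONGS(nr) bitmap words.
 */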
#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
/*
 * Copy 'copy_words' words of fd bitmap from the old table to the new table
 * and clear the extra space if any.  This does not copy the file pointers.
 * Called with the files spinlock held for write.
 */
static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int copy_words)
{
	unsigned int nwords = fdt_words(nfdt);

	bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
			copy_words, nwords);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

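	/*
	 * Added commentary: the three bitmaps share a single allocation,
	 * laid out back to back - nr/BITS_PER_BYTE bytes each for open_fds
	 * and close_on_exec, then BITBIT_SIZE(nr) bytes for full_fds_bits -
	 * with the total padded up to at least L1_CACHE_BYTES.
	 */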
	data = kvmalloc(max_t(size_t,
				 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
				 GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

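/*
 * Added commentary: full_fds_bits is a second-level bitmap - bit N is set
 * once word N of ->open_fds is entirely ones - which lets find_next_fd()
 * skip fully populated stretches a word at a time.
 */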
static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return ALIGN(min(count, max_fds), BITS_PER_LONG);
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a fresh pointer to its fd
		 * table: it may have been expanded while we dropped the
		 * lock, so we need the latest pointer and size.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

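/*
 * Added commentary: two-level search for a free fd. First consult
 * full_fds_bits to find the first open_fds word that still has a zero
 * bit, then scan open_fds from there (or from 'start', whichever is
 * higher) for the actual free slot.
 */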
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

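/*
 * Added commentary - the canonical caller pattern (a sketch, not taken
 * from this file): reserve a slot, build the struct file, then publish
 * it; on failure release the slot instead of installing anything:
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = some_file_constructor();   // hypothetical helper
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);             // consumes the file reference
 *	return fd;
 */
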
/**
 * pick_file - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL_NS(close_fd, ANDROID_GKI_VFS_EXPORT_ONLY); /* for ksys_close() */

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = pick_file(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC)) {
			/*
			 * If the requested range is greater than the current
			 * maximum, we're closing everything so only copy all
			 * file descriptors beneath the lowest file descriptor.
			 */
			rcu_read_lock();
			if (max_fd >= last_fd(files_fdtable(cur_fds)))
				max_unshare_fds = fd;
			rcu_read_unlock();
		}

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
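
/*
 * Added commentary - userspace reaches this through the close_range(2)
 * syscall. For example, a typical "close everything above stderr before
 * exec" sequence (a sketch):
 *
 *	close_range(3, ~0U, 0);                    // close fds 3..max
 *	close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);  // or just mark them cloexec
 */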

/*
 * See close_fd_get_file() below, this variant assumes current->files->file_lock
 * is held.
 */
struct file *__close_fd_get_file(unsigned int fd)
{
	return pick_file(current->files, fd);
}

/*
 * variant of close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file.
 */
struct file *close_fd_get_file(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = pick_file(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
		file = rcu_dereference_raw(*fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(file->f_mode & mask))
			return NULL;

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero,
		 *      and get_file_rcu() fails. Just try again:
		 */
		if (unlikely(!get_file_rcu(file)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *       Note that we don't need to re-check the 'fdt->fd'
		 *       pointer having changed, because it always goes
		 *       hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
			fput(file);
			continue;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

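/*
 * Added commentary: fget() masks with FMODE_PATH, so it refuses files
 * opened with O_PATH; fget_raw() below passes a zero mask and returns
 * those as well.
 */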
struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = files_lookup_fd_rcu(files, fd);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = files_lookup_fd_rcu(files, fd);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fd_rcu);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (atomic_read_acquire(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
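
/*
 * Added commentary: the unsigned long returned above packs the struct
 * file pointer together with flag bits in the two low bits (pointers
 * are at least word aligned): FDPUT_FPUT when the caller owes an fput(),
 * and FDPUT_POS_UNLOCK when __fdget_pos() took f_pos_lock. Callers
 * recover the pointer with (struct file *)(v & ~3).
 */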
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && file_needs_f_pos_lock(file)) {
		v |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	fd = array_index_nospec(fd, fdt->max_fds);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns newly installed fd or -ve on error.
 */
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

int receive_fd(struct file *file, unsigned int o_flags)
{
	return __receive_fd(file, NULL, o_flags);
}
EXPORT_SYMBOL_GPL(receive_fd);

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!files_lookup_fd_rcu(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

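/*
 * Added commentary: f_dupfd() backs fcntl(fd, F_DUPFD, from) and its
 * F_DUPFD_CLOEXEC variant - it allocates the lowest free descriptor
 * greater than or equal to 'from' and installs another reference to
 * 'file' there.
 */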
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);