/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DEFINE_STATIC_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
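
/*
 * For reference, proc_nr_files() is wired up through an fs sysctl table
 * entry along these lines (a sketch; the exact entry lives in
 * kernel/sysctl.c and may differ between kernel versions):
 *
 *	{
 *		.procname	= "file-nr",
 *		.data		= &files_stat,
 *		.maxlen		= sizeof(files_stat),
 *		.mode		= 0444,
 *		.proc_handler	= proc_nr_files,
 *	},
 *
 * Reading /proc/sys/fs/file-nr then reports the allocated, free and
 * maximum file structure counts from files_stat.
 */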

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, ran out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free(f);
		return ERR_PTR(error);
	}

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
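
/*
 * A minimal sketch of how a caller checks the result (hypothetical
 * example, not taken from this file): the return value is never NULL,
 * so it must be tested with IS_ERR()/PTR_ERR() rather than against NULL.
 *
 *	struct file *f = get_empty_filp();
 *
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);	/- -ENFILE, -ENOMEM or an LSM error
 *	/- ... initialize f and install it, or release it with put_filp()
 *
 * ("/-" stands in for a nested comment marker in this sketch.)
 */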

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the path (vfsmount and dentry) on which the file will reside
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file', because of the same initialization
 * pitfalls listed for init_file().  This is the
 * preferred interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (IS_ERR(file))
		return file;

	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);
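
/*
 * A hypothetical usage sketch (not from this file): in-kernel filesystems
 * such as pipes or anon inodes build a path and hand it to alloc_file().
 * The names my_dentry, my_mnt and my_fops below are illustrative only.
 *
 *	struct path path;
 *	struct file *f;
 *
 *	path.dentry = my_dentry;
 *	path.mnt = mntget(my_mnt);
 *	f = alloc_file(&path, FMODE_READ | FMODE_WRITE, &my_fops);
 *	if (IS_ERR(f)) {
 *		path_put(&path);
 *		return PTR_ERR(f);
 *	}
 *
 * Note that alloc_file() copies *path, so on success the caller's
 * dentry and vfsmount references are transferred to the new file.
 */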

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	__mnt_drop_write(mnt);
	file_release_write(file);
}

/* the real guts of fput() - releasing the last reference to the file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	ima_file_free(file);
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file->f_inode = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

static DEFINE_SPINLOCK(delayed_fput_lock);
static LIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	LIST_HEAD(head);
	spin_lock_irq(&delayed_fput_lock);
	list_splice_init(&delayed_fput_list, &head);
	spin_unlock_irq(&delayed_fput_lock);
	while (!list_empty(&head)) {
		struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
		list_del_init(&f->f_u.fu_list);
		__fput(f);
	}
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct files waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

static DECLARE_WORK(delayed_fput_work, delayed_fput);

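/*
 * Note on the deferral below: __fput() can sleep, but fput() may be
 * called from contexts that must not (interrupt context, or kernel
 * threads that could deadlock on umount-related work).  Ordinary
 * process context therefore queues the final __fput() as task_work,
 * to run on return to userspace; everything else falls back to the
 * delayed_fput_list drained by the workqueue above.
 */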
void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		unsigned long flags;

		file_sb_list_del(file);
		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
		}
		spin_lock_irqsave(&delayed_fput_lock, flags);
		list_add(&file->f_u.fu_list, &delayed_fput_list);
		schedule_work(&delayed_fput_work);
		spin_unlock_irqrestore(&delayed_fput_lock, flags);
	}
}
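
/*
 * Typical pairing (a hypothetical sketch, not from this file): a
 * reference obtained with fget() is dropped with fput() once the
 * caller is done with the file.
 *
 *	struct file *f = fget(fd);
 *
 *	if (!f)
 *		return -EBADF;
 *	... operate on f ...
 *	fput(f);
 */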

/*
 * Synchronous analog of fput(): for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(&files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(&files_lglock);
}
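
/*
 * A sketch of the intended call site (an assumption about callers
 * outside this file): open-time code adds the file to the superblock's
 * list once f_path is set, so that mark_files_ro() below can find it.
 *
 *	file->f_path = *path;
 *	file_sb_list_add(file, inode->i_sb);
 */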

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from its superblock's file list.
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
	}
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	int i;							\
	for_each_possible_cpu(i) {				\
		struct list_head *list;				\
		list = per_cpu_ptr((__sb)->s_files, i);		\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
	}							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	struct list_head *list;					\
	list = &(__sb)->s_files;				\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif

/**
 *	mark_files_ro - mark all files read-only
 *	@sb: superblock in question
 *
 *	All files are marked read-only.  We don't care about files pending
 *	deletion, so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

	lg_global_lock(&files_lglock);
	do_file_list_for_each_entry(sb, f) {
		if (!S_ISREG(file_inode(f)->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
		__mnt_drop_write(f->f_path.mnt);
		file_release_write(f);
	} while_file_list_for_each_entry;
	lg_global_unlock(&files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * By default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	files_defer_init();
	lg_lock_init(&files_lglock, "files_lglock");
	percpu_counter_init(&nr_files, 0);
}

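/*
 * Worked example of the sizing heuristic above (illustrative numbers):
 * on a machine with 4 GiB of RAM and 4 KiB pages, mempages is about
 * 1048576, so n = (1048576 * 4) / 10 ~= 419430, i.e. roughly 400k
 * files at ~1K each consume about 10% of memory.  files_stat.max_files
 * then becomes the larger of that and the NR_FILE floor (8192 in
 * kernels of this era).
 */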