1 /*
2 * proc/fs/generic.c --- generic routines for the proc-fs
3 *
4 * This file contains generic proc-fs routines for handling
5 * directories and files.
6 *
7 * Copyright (C) 1991, 1992 Linus Torvalds.
8 * Copyright (C) 1997 Theodore Ts'o
9 */
10
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>

#include "internal.h"
28
29 DEFINE_SPINLOCK(proc_subdir_lock);
30
proc_match(unsigned int len,const char * name,struct proc_dir_entry * de)31 static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
32 {
33 if (de->namelen != len)
34 return 0;
35 return !memcmp(name, de->name, len);
36 }
37
/*
 * Buffer size handed to ->read_proc() callbacks is one page minus some
 * slack, because some legacy output routines overrun the count they
 * were given by a little.
 */
#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)
40
/*
 * Read from a legacy procfs file backed by a ->read_proc() callback.
 *
 * Loops calling the callback into a single scratch page, copying the
 * produced bytes to user space and advancing *ppos according to which
 * of the three "start" conventions (documented in full below) the
 * callback used, until @nbytes are delivered, EOF is signalled, or an
 * error occurs.
 *
 * Returns the number of bytes copied to @buf, or a negative errno.
 */
static ssize_t
__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
		 loff_t *ppos)
{
	struct inode * inode = file->f_path.dentry->d_inode;
	char *page;
	ssize_t retval=0;
	int eof=0;
	ssize_t n, count;
	char *start;
	struct proc_dir_entry * dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		/* Never ask for more than PROC_BLOCK_SIZE: the page
		 * keeps slack for callbacks that overrun the count. */
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)   /* end of file */
			break;
		if (n < 0) {  /* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			/* Convention 0: data sits at the file offset
			 * within the page. */
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			/* Convention 1: "start" is a magic offset-advance
			 * value, data sits at the start of the page. */
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				printk(KERN_WARNING
				       "proc_file_read: Read count exceeded\n");
			}
		} else /* start >= page */ {
			/* Convention 2: data sits at *start inside the page. */
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff)) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE - startoff;
			}
			if (n > count)
				n = count;
		}

		/* copy_to_user() returns the number of bytes NOT copied. */
		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		/* Convention 1 advances the offset by the *start value,
		 * the other two by the number of bytes consumed. */
		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}
186
187 static ssize_t
proc_file_read(struct file * file,char __user * buf,size_t nbytes,loff_t * ppos)188 proc_file_read(struct file *file, char __user *buf, size_t nbytes,
189 loff_t *ppos)
190 {
191 struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
192 ssize_t rv = -EIO;
193
194 spin_lock(&pde->pde_unload_lock);
195 if (!pde->proc_fops) {
196 spin_unlock(&pde->pde_unload_lock);
197 return rv;
198 }
199 pde->pde_users++;
200 spin_unlock(&pde->pde_unload_lock);
201
202 rv = __proc_file_read(file, buf, nbytes, ppos);
203
204 pde_users_dec(pde);
205 return rv;
206 }
207
208 static ssize_t
proc_file_write(struct file * file,const char __user * buffer,size_t count,loff_t * ppos)209 proc_file_write(struct file *file, const char __user *buffer,
210 size_t count, loff_t *ppos)
211 {
212 struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
213 ssize_t rv = -EIO;
214
215 if (pde->write_proc) {
216 spin_lock(&pde->pde_unload_lock);
217 if (!pde->proc_fops) {
218 spin_unlock(&pde->pde_unload_lock);
219 return rv;
220 }
221 pde->pde_users++;
222 spin_unlock(&pde->pde_unload_lock);
223
224 /* FIXME: does this routine need ppos? probably... */
225 rv = pde->write_proc(file, buffer, count, pde->data);
226 pde_users_dec(pde);
227 }
228 return rv;
229 }
230
231
232 static loff_t
proc_file_lseek(struct file * file,loff_t offset,int orig)233 proc_file_lseek(struct file *file, loff_t offset, int orig)
234 {
235 loff_t retval = -EINVAL;
236 switch (orig) {
237 case 1:
238 offset += file->f_pos;
239 /* fallthrough */
240 case 0:
241 if (offset < 0 || offset > MAX_NON_LFS)
242 break;
243 file->f_pos = retval = offset;
244 }
245 return retval;
246 }
247
/*
 * Default file operations for legacy entries created without explicit
 * proc_fops (those using read_proc/write_proc callbacks).
 */
static const struct file_operations proc_file_operations = {
	.llseek = proc_file_lseek,
	.read = proc_file_read,
	.write = proc_file_write,
};
253
/*
 * ->setattr for proc inodes: apply chmod/chown/truncate to the inode
 * and mirror the new owner/group/mode back into the proc_dir_entry so
 * that future inodes instantiated from it pick up the change.
 */
static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PDE(inode);
	int error;

	/* Standard VFS validity/permission check for the request. */
	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = vmtruncate(inode, iattr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	/* Persist the new attributes in the pde itself. */
	de->uid = inode->i_uid;
	de->gid = inode->i_gid;
	de->mode = inode->i_mode;
	return 0;
}
279
proc_getattr(struct vfsmount * mnt,struct dentry * dentry,struct kstat * stat)280 static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
281 struct kstat *stat)
282 {
283 struct inode *inode = dentry->d_inode;
284 struct proc_dir_entry *de = PROC_I(inode)->pde;
285 if (de && de->nlink)
286 set_nlink(inode, de->nlink);
287
288 generic_fillattr(inode, stat);
289 return 0;
290 }
291
/* Regular proc files support attribute changes only. */
static const struct inode_operations proc_file_inode_operations = {
	.setattr = proc_notify_change,
};
295
296 /*
297 * This function parses a name such as "tty/driver/serial", and
298 * returns the struct proc_dir_entry for "/proc/tty/driver", and
299 * returns "serial" in residual.
300 */
__xlate_proc_name(const char * name,struct proc_dir_entry ** ret,const char ** residual)301 static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
302 const char **residual)
303 {
304 const char *cp = name, *next;
305 struct proc_dir_entry *de;
306 unsigned int len;
307
308 de = *ret;
309 if (!de)
310 de = &proc_root;
311
312 while (1) {
313 next = strchr(cp, '/');
314 if (!next)
315 break;
316
317 len = next - cp;
318 for (de = de->subdir; de ; de = de->next) {
319 if (proc_match(len, cp, de))
320 break;
321 }
322 if (!de) {
323 WARN(1, "name '%s'\n", name);
324 return -ENOENT;
325 }
326 cp += len + 1;
327 }
328 *residual = cp;
329 *ret = de;
330 return 0;
331 }
332
xlate_proc_name(const char * name,struct proc_dir_entry ** ret,const char ** residual)333 static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
334 const char **residual)
335 {
336 int rv;
337
338 spin_lock(&proc_subdir_lock);
339 rv = __xlate_proc_name(name, ret, residual);
340 spin_unlock(&proc_subdir_lock);
341 return rv;
342 }
343
/* Allocator state for dynamically assigned /proc inode numbers. */
static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

/* IDA ids are biased by this to form the actual inode number. */
#define PROC_DYNAMIC_FIRST 0xF0000000U
348
/*
 * Allocate an inode number between PROC_DYNAMIC_FIRST and 0xffffffff
 * into *inum.  Returns 0 on success or a negative errno.
 */
int proc_alloc_inum(unsigned int *inum)
{
	unsigned int i;
	int error;

retry:
	/* Preload the IDA so the allocation under the lock cannot sleep. */
	if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock_irq(&proc_inum_lock);
	error = ida_get_new(&proc_inum_ida, &i);
	spin_unlock_irq(&proc_inum_lock);
	if (error == -EAGAIN)
		goto retry;	/* preload was consumed by a racing allocator */
	else if (error)
		return error;

	/* Reject ids that would overflow once biased by PROC_DYNAMIC_FIRST. */
	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
		spin_lock_irq(&proc_inum_lock);
		ida_remove(&proc_inum_ida, i);
		spin_unlock_irq(&proc_inum_lock);
		return -ENOSPC;
	}
	*inum = PROC_DYNAMIC_FIRST + i;
	return 0;
}
379
/* Return a dynamically allocated proc inode number to the IDA. */
void proc_free_inum(unsigned int inum)
{
	unsigned long flags;
	spin_lock_irqsave(&proc_inum_lock, flags);
	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
	spin_unlock_irqrestore(&proc_inum_lock, flags);
}
387
/* ->follow_link: the symlink target string lives in pde ->data. */
static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, PDE(dentry->d_inode)->data);
	return NULL;	/* no cookie needed for put_link */
}
393
/* Inode operations for proc symlinks (see proc_symlink()). */
static const struct inode_operations proc_link_inode_operations = {
	.readlink = generic_readlink,
	.follow_link = proc_follow_link,
};
398
399 /*
400 * As some entries in /proc are volatile, we want to
401 * get rid of unused dentries. This could be made
402 * smarter: we could keep a "volatile" flag in the
403 * inode to indicate which ones to keep.
404 */
/* Always drop proc dentries as soon as their use count reaches zero. */
static int proc_delete_dentry(const struct dentry * dentry)
{
	return 1;
}
409
/* Dentry operations shared by all generic proc entries. */
static const struct dentry_operations proc_dentry_operations =
{
	.d_delete = proc_delete_dentry,
};
414
415 /*
416 * Don't create negative dentries here, return -ENOENT by hand
417 * instead.
418 */
/*
 * Core lookup: scan @de's children for @dentry's name.  On a match the
 * entry is pinned with pde_get() before proc_subdir_lock is dropped,
 * because proc_get_inode() may sleep.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = NULL;
	int error = -ENOENT;

	spin_lock(&proc_subdir_lock);
	for (de = de->subdir; de ; de = de->next) {
		if (de->namelen != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
			/* Pin the entry before dropping the lock. */
			pde_get(de);
			spin_unlock(&proc_subdir_lock);
			error = -EINVAL;	/* reported if inode creation fails */
			inode = proc_get_inode(dir->i_sb, de);
			goto out_unlock;
		}
	}
	spin_unlock(&proc_subdir_lock);
out_unlock:

	if (inode) {
		d_set_d_op(dentry, &proc_dentry_operations);
		d_add(dentry, inode);
		return NULL;
	}
	/* Either no match (de == NULL) or proc_get_inode() failed;
	 * in the latter case drop the reference taken above. */
	if (de)
		pde_put(de);
	return ERR_PTR(error);
}
449
/* ->lookup for generic proc directories: delegate to proc_lookup_de(). */
struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
		struct nameidata *nd)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
}
455
456 /*
457 * This returns non-zero if at EOF, so that the /proc
458 * root directory can use this and check if it should
459 * continue with the <pid> entries..
460 *
461 * Note that the VFS-layer doesn't care about the return
462 * value of the readdir() call, as long as it's non-negative
463 * for success..
464 */
/*
 * Walk @de's children and feed them to @filldir.  f_pos 0 and 1 are
 * "." and ".."; positions >= 2 index into the subdir list.  The subdir
 * lock is dropped around each filldir call (it copies to user space
 * and may fault), so the current entry is pinned with pde_get() across
 * the call.  Returns 1 at EOF, 0 if filldir stopped us early.
 */
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
		filldir_t filldir)
{
	unsigned int ino;
	int i;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int ret = 0;

	ino = inode->i_ino;
	i = filp->f_pos;
	switch (i) {
		case 0:
			/* f_pos 0: "." (this directory's own inode) */
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		case 1:
			/* f_pos 1: ".." (the parent's inode) */
			if (filldir(dirent, "..", 2, i,
				    parent_ino(filp->f_path.dentry),
				    DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		default:
			spin_lock(&proc_subdir_lock);
			de = de->subdir;
			i -= 2;
			/* Skip the entries already consumed by earlier calls. */
			for (;;) {
				if (!de) {
					ret = 1;
					spin_unlock(&proc_subdir_lock);
					goto out;
				}
				if (!i)
					break;
				de = de->next;
				i--;
			}

			do {
				struct proc_dir_entry *next;

				/* filldir passes info to user space */
				pde_get(de);
				spin_unlock(&proc_subdir_lock);
				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
					    de->low_ino, de->mode >> 12) < 0) {
					pde_put(de);
					goto out;
				}
				spin_lock(&proc_subdir_lock);
				filp->f_pos++;
				/* Grab ->next before releasing our pin on de. */
				next = de->next;
				pde_put(de);
				de = next;
			} while (de);
			spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:	return ret;
}
529
/* ->readdir for generic proc directories. */
int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	return proc_readdir_de(PDE(filp->f_path.dentry->d_inode), filp,
			       dirent, filldir);
}
536
537 /*
538 * These are the generic /proc directory operations. They
539 * use the in-memory "struct proc_dir_entry" tree to parse
540 * the /proc directory.
541 */
/* File operations installed on generic proc directories. */
static const struct file_operations proc_dir_operations = {
	.llseek			= generic_file_llseek,
	.read			= generic_read_dir,
	.readdir		= proc_readdir,
};
547
548 /*
549 * proc directories can do almost nothing..
550 */
/* Directories support lookup, getattr and setattr only. */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};
556
proc_register(struct proc_dir_entry * dir,struct proc_dir_entry * dp)557 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
558 {
559 struct proc_dir_entry *tmp;
560 int ret;
561
562 ret = proc_alloc_inum(&dp->low_ino);
563 if (ret)
564 return ret;
565
566 if (S_ISDIR(dp->mode)) {
567 if (dp->proc_iops == NULL) {
568 dp->proc_fops = &proc_dir_operations;
569 dp->proc_iops = &proc_dir_inode_operations;
570 }
571 dir->nlink++;
572 } else if (S_ISLNK(dp->mode)) {
573 if (dp->proc_iops == NULL)
574 dp->proc_iops = &proc_link_inode_operations;
575 } else if (S_ISREG(dp->mode)) {
576 if (dp->proc_fops == NULL)
577 dp->proc_fops = &proc_file_operations;
578 if (dp->proc_iops == NULL)
579 dp->proc_iops = &proc_file_inode_operations;
580 }
581
582 spin_lock(&proc_subdir_lock);
583
584 for (tmp = dir->subdir; tmp; tmp = tmp->next)
585 if (strcmp(tmp->name, dp->name) == 0) {
586 WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
587 dir->name, dp->name);
588 break;
589 }
590
591 dp->next = dir->subdir;
592 dp->parent = dir;
593 dir->subdir = dp;
594 spin_unlock(&proc_subdir_lock);
595
596 return 0;
597 }
598
/*
 * Allocate and partially initialise a proc_dir_entry for the final
 * component of @name, resolving any leading directory components into
 * *parent.  The entry is NOT yet linked into the tree (see
 * proc_register()).  Returns the new entry, or NULL on bad name /
 * missing parent / allocation failure.
 */
static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
					    const char *name,
					    umode_t mode,
					    nlink_t nlink)
{
	struct proc_dir_entry *ent = NULL;
	const char *fn = name;
	unsigned int len;

	/* make sure name is valid (cheaper than !strlen(name)) */
	if (!name || !*name)
		goto out;

	if (xlate_proc_name(name, parent, &fn) != 0)
		goto out;

	/* At this point there must not be any '/' characters beyond *fn */
	if (strchr(fn, '/'))
		goto out;

	len = strlen(fn);

	/*
	 * kzalloc() replaces the old kmalloc()+memset() pair and makes
	 * the explicit pde_users/pde_unload_completion zeroing redundant.
	 * The name is stored inline after the struct.
	 */
	ent = kzalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
	if (!ent)
		goto out;

	memcpy(ent->name, fn, len + 1);
	ent->namelen = len;
	ent->mode = mode;
	ent->nlink = nlink;
	atomic_set(&ent->count, 1);
	spin_lock_init(&ent->pde_unload_lock);
	INIT_LIST_HEAD(&ent->pde_openers);
out:
	return ent;
}
636
proc_symlink(const char * name,struct proc_dir_entry * parent,const char * dest)637 struct proc_dir_entry *proc_symlink(const char *name,
638 struct proc_dir_entry *parent, const char *dest)
639 {
640 struct proc_dir_entry *ent;
641
642 ent = __proc_create(&parent, name,
643 (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);
644
645 if (ent) {
646 ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
647 if (ent->data) {
648 strcpy((char*)ent->data,dest);
649 if (proc_register(parent, ent) < 0) {
650 kfree(ent->data);
651 kfree(ent);
652 ent = NULL;
653 }
654 } else {
655 kfree(ent);
656 ent = NULL;
657 }
658 }
659 return ent;
660 }
661 EXPORT_SYMBOL(proc_symlink);
662
/*
 * Create a proc directory named @name with permission bits @mode under
 * @parent (NULL means the proc root).  Returns the entry or NULL.
 */
struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
	if (!ent)
		return NULL;

	if (proc_register(parent, ent) < 0) {
		kfree(ent);
		return NULL;
	}
	return ent;
}
EXPORT_SYMBOL(proc_mkdir_mode);
678
proc_net_mkdir(struct net * net,const char * name,struct proc_dir_entry * parent)679 struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
680 struct proc_dir_entry *parent)
681 {
682 struct proc_dir_entry *ent;
683
684 ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
685 if (ent) {
686 ent->data = net;
687 if (proc_register(parent, ent) < 0) {
688 kfree(ent);
689 ent = NULL;
690 }
691 }
692 return ent;
693 }
694 EXPORT_SYMBOL_GPL(proc_net_mkdir);
695
/* Create a proc directory with default r-xr-xr-x permissions. */
struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
EXPORT_SYMBOL(proc_mkdir);
702
/*
 * Legacy entry-creation interface: allocate and register an entry,
 * filling in default type and permission bits when @mode omits them.
 * Returns the entry or NULL.
 */
struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
					 struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		/* No type bits means a regular file. */
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	ent = __proc_create(&parent, name, mode, nlink);
	if (!ent)
		return NULL;
	if (proc_register(parent, ent) < 0) {
		kfree(ent);
		return NULL;
	}
	return ent;
}
EXPORT_SYMBOL(create_proc_entry);
731
/*
 * Preferred entry-creation interface: like create_proc_entry() but the
 * caller supplies the file_operations and a private @data pointer up
 * front.  Returns the entry or NULL.
 */
struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
					struct proc_dir_entry *parent,
					const struct file_operations *proc_fops,
					void *data)
{
	struct proc_dir_entry *pde;
	nlink_t nlink = 1;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		/* No type bits means a regular file. */
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
	}

	pde = __proc_create(&parent, name, mode, nlink);
	if (!pde)
		return NULL;
	pde->proc_fops = proc_fops;
	pde->data = data;
	if (proc_register(parent, pde) < 0) {
		kfree(pde);
		return NULL;
	}
	return pde;
}
EXPORT_SYMBOL(proc_create_data);
766
/*
 * Final teardown once the last reference is gone: give back the inode
 * number and free the entry.  ->data is owned by generic.c only for
 * symlinks (allocated in proc_symlink()); other entries own their own.
 */
static void free_proc_entry(struct proc_dir_entry *de)
{
	proc_free_inum(de->low_ino);

	if (S_ISLNK(de->mode))
		kfree(de->data);
	kfree(de);
}
775
/* Drop a reference; frees the entry when the last one goes away. */
void pde_put(struct proc_dir_entry *pde)
{
	if (atomic_dec_and_test(&pde->count))
		free_proc_entry(pde);
}
781
782 /*
783 * Remove a /proc entry and free it if it's not currently in use.
784 */
/*
 * Remove the entry named @name from @parent and drop the tree's
 * reference to it.  Blocks until all in-flight legacy read/write
 * callers have finished and forcibly releases any still-open files
 * before the final pde_put().
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de = NULL;
	const char *fn = name;
	unsigned int len;

	/* Resolve path components and unlink the entry from its
	 * parent's subdir list under proc_subdir_lock. */
	spin_lock(&proc_subdir_lock);
	if (__xlate_proc_name(name, &parent, &fn) != 0) {
		spin_unlock(&proc_subdir_lock);
		return;
	}
	len = strlen(fn);

	for (p = &parent->subdir; *p; p=&(*p)->next ) {
		if (proc_match(len, fn, *p)) {
			de = *p;
			*p = de->next;
			de->next = NULL;
			break;
		}
	}
	spin_unlock(&proc_subdir_lock);
	if (!de) {
		WARN(1, "name '%s'\n", name);
		return;
	}

	spin_lock(&de->pde_unload_lock);
	/*
	 * Stop accepting new callers into module. If you're
	 * dynamically allocating ->proc_fops, save a pointer somewhere.
	 */
	de->proc_fops = NULL;
	/* Wait until all existing callers into module are done. */
	if (de->pde_users > 0) {
		DECLARE_COMPLETION_ONSTACK(c);

		if (!de->pde_unload_completion)
			de->pde_unload_completion = &c;

		spin_unlock(&de->pde_unload_lock);

		/* Signalled when the last user drops out — presumably
		 * in pde_users_dec(), defined elsewhere; confirm there. */
		wait_for_completion(de->pde_unload_completion);

		spin_lock(&de->pde_unload_lock);
	}

	/* Force-release files still open against this entry.  The lock
	 * is dropped around each ->release() since it may sleep. */
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;

		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		list_del(&pdeo->lh);
		spin_unlock(&de->pde_unload_lock);
		pdeo->release(pdeo->inode, pdeo->file);
		kfree(pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);

	if (S_ISDIR(de->mode))
		parent->nlink--;	/* undo the ".." link added at registration */
	de->nlink = 0;
	WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
			"'%s/%s', leaking at least '%s'\n", __func__,
			de->parent->name, de->name, de->subdir->name);
	pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);
854