/*
 * /proc/uid support
 */

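/*
 * Each uid registered through proc_register_uid() appears as a numeric
 * directory under /proc/uid/.  Inside it live the per-uid files declared
 * in uid_base_stuff below (currently time_in_state, when
 * CONFIG_CPU_FREQ_TIMES is enabled).
 */
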
#include <linux/cpufreq_times.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

static struct proc_dir_entry *proc_uid;

#define UID_HASH_BITS 10

static DECLARE_HASHTABLE(proc_uid_hash_table, UID_HASH_BITS);

/*
 * use rt_mutex here to avoid priority inversion between high-priority readers
 * of these files and tasks calling proc_register_uid().
 */
static DEFINE_RT_MUTEX(proc_uid_lock); /* proc_uid_hash_table */

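/* One entry per registered uid; lives in proc_uid_hash_table, keyed by uid. */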
struct uid_hash_entry {
        uid_t uid;
        struct hlist_node hash;
};

/* Caller must hold proc_uid_lock */
static bool uid_hash_entry_exists_locked(uid_t uid)
{
        struct uid_hash_entry *entry;

        hash_for_each_possible(proc_uid_hash_table, entry, hash, uid) {
                if (entry->uid == uid)
                        return true;
        }
        return false;
}

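/*
 * Register @kuid so that /proc/uid/<uid> exists.  Duplicate registrations are
 * ignored: the entry is allocated outside proc_uid_lock and re-checked under
 * the lock before insertion.
 *
 * Hypothetical caller sketch (actual call sites live outside this file):
 *
 *      proc_register_uid(task_uid(task));
 */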
void proc_register_uid(kuid_t kuid)
{
        struct uid_hash_entry *entry;
        bool exists;
        uid_t uid = from_kuid_munged(current_user_ns(), kuid);

        rt_mutex_lock(&proc_uid_lock);
        exists = uid_hash_entry_exists_locked(uid);
        rt_mutex_unlock(&proc_uid_lock);
        if (exists)
                return;

        entry = kzalloc(sizeof(struct uid_hash_entry), GFP_KERNEL);
        if (!entry)
                return;
        entry->uid = uid;

        rt_mutex_lock(&proc_uid_lock);
        if (uid_hash_entry_exists_locked(uid))
                kfree(entry);
        else
                hash_add(proc_uid_hash_table, &entry->hash, uid);
        rt_mutex_unlock(&proc_uid_lock);
}

struct uid_entry {
        const char *name;
        int len;
        umode_t mode;
        const struct inode_operations *iop;
        const struct file_operations *fop;
};

#define NOD(NAME, MODE, IOP, FOP) { \
        .name = (NAME), \
        .len = sizeof(NAME) - 1, /* strlen(NAME) for a string-literal NAME */ \
        .mode = MODE, \
        .iop = IOP, \
        .fop = FOP, \
}

#ifdef CONFIG_CPU_FREQ_TIMES
static const struct file_operations proc_uid_time_in_state_operations = {
        .open = single_uid_time_in_state_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
#endif

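/* Entries that appear inside every /proc/uid/<uid>/ directory. */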
static const struct uid_entry uid_base_stuff[] = {
#ifdef CONFIG_CPU_FREQ_TIMES
        NOD("time_in_state", 0444, NULL, &proc_uid_time_in_state_operations),
#endif
};

static const struct inode_operations proc_uid_def_inode_operations = {
        .setattr = proc_setattr,
};

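/*
 * Allocate an inode for an object under /proc/uid, owned by @kuid.  The
 * caller fills in the file mode and any non-default inode/file operations.
 */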
static struct inode *proc_uid_make_inode(struct super_block *sb, kuid_t kuid)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (!inode)
                return NULL;

        inode->i_ino = get_next_ino();
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
        inode->i_op = &proc_uid_def_inode_operations;
        inode->i_uid = kuid;

        return inode;
}

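/*
 * Build the inode for one uid_base_stuff entry (@ptr) inside a
 * /proc/uid/<uid> directory and splice it into the dcache.
 */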
static int proc_uident_instantiate(struct inode *dir, struct dentry *dentry,
                                   struct task_struct *unused, const void *ptr)
{
        const struct uid_entry *u = ptr;
        struct inode *inode;

        inode = proc_uid_make_inode(dir->i_sb, dir->i_uid);
        if (!inode)
                return -ENOENT;

        inode->i_mode = u->mode;
        if (S_ISDIR(inode->i_mode))
                set_nlink(inode, 2);
        if (u->iop)
                inode->i_op = u->iop;
        if (u->fop)
                inode->i_fop = u->fop;
        d_add(dentry, inode);
        return 0;
}

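/* Resolve a name inside /proc/uid/<uid>/ against the uid_base_stuff table. */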
static struct dentry *proc_uid_base_lookup(struct inode *dir,
                                           struct dentry *dentry,
                                           unsigned int flags)
{
        const struct uid_entry *u, *last;
        unsigned int nents = ARRAY_SIZE(uid_base_stuff);

        if (nents == 0)
                return ERR_PTR(-ENOENT);

        last = &uid_base_stuff[nents - 1];
        for (u = uid_base_stuff; u <= last; u++) {
                if (u->len != dentry->d_name.len)
                        continue;
                if (!memcmp(dentry->d_name.name, u->name, u->len))
                        break;
        }
        if (u > last)
                return ERR_PTR(-ENOENT);

        return ERR_PTR(proc_uident_instantiate(dir, dentry, NULL, u));
}

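/* Emit "." and "..", then the uid_base_stuff entries starting at ctx->pos - 2. */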
static int proc_uid_base_readdir(struct file *file, struct dir_context *ctx)
{
        unsigned int nents = ARRAY_SIZE(uid_base_stuff);
        const struct uid_entry *u;

        if (!dir_emit_dots(file, ctx))
                return 0;

        if (ctx->pos >= nents + 2)
                return 0;

        for (u = uid_base_stuff + (ctx->pos - 2);
             u <= uid_base_stuff + nents - 1; u++) {
                if (!proc_fill_cache(file, ctx, u->name, u->len,
                                     proc_uident_instantiate, NULL, u))
                        break;
                ctx->pos++;
        }

        return 0;
}

static const struct inode_operations proc_uid_base_inode_operations = {
        .lookup = proc_uid_base_lookup,
        .setattr = proc_setattr,
};

static const struct file_operations proc_uid_base_operations = {
        .read = generic_read_dir,
        .iterate = proc_uid_base_readdir,
        .llseek = default_llseek,
};

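/*
 * Build the /proc/uid/<uid> directory inode itself.  @ptr points to the
 * kuid_t that owns the directory; its link count is 2 plus one per
 * subdirectory declared in uid_base_stuff.
 */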
static int proc_uid_instantiate(struct inode *dir, struct dentry *dentry,
                                struct task_struct *unused, const void *ptr)
{
        unsigned int i, len;
        nlink_t nlinks;
        kuid_t *kuid = (kuid_t *)ptr;
        struct inode *inode = proc_uid_make_inode(dir->i_sb, *kuid);

        if (!inode)
                return -ENOENT;

        inode->i_mode = S_IFDIR | 0555;
        inode->i_op = &proc_uid_base_inode_operations;
        inode->i_fop = &proc_uid_base_operations;
        inode->i_flags |= S_IMMUTABLE;

        nlinks = 2;
        len = ARRAY_SIZE(uid_base_stuff);
        for (i = 0; i < len; ++i) {
                if (S_ISDIR(uid_base_stuff[i].mode))
                        ++nlinks;
        }
        set_nlink(inode, nlinks);

        d_add(dentry, inode);

        return 0;
}

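/* List one numeric directory per registered uid under /proc/uid/. */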
static int proc_uid_readdir(struct file *file, struct dir_context *ctx)
{
        int last_shown, i;
        unsigned long bkt;
        struct uid_hash_entry *entry;

        if (!dir_emit_dots(file, ctx))
                return 0;

        i = 0;
        last_shown = ctx->pos - 2;
        rt_mutex_lock(&proc_uid_lock);
        hash_for_each(proc_uid_hash_table, bkt, entry, hash) {
                int len;
                char buf[PROC_NUMBUF];

                /* Skip entries already emitted on a previous call. */
                if (i++ < last_shown)
                        continue;
                len = snprintf(buf, sizeof(buf), "%u", entry->uid);
                if (!proc_fill_cache(file, ctx, buf, len,
                                     proc_uid_instantiate, NULL, &entry->uid))
                        break;
                ctx->pos++;
        }
        rt_mutex_unlock(&proc_uid_lock);
        return 0;
}

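/* Resolve /proc/uid/<name>: the name must be a decimal uid already registered. */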
static struct dentry *proc_uid_lookup(struct inode *dir, struct dentry *dentry,
                                      unsigned int flags)
{
        int result = -ENOENT;

        uid_t uid = name_to_int(&dentry->d_name);
        bool uid_exists;

        rt_mutex_lock(&proc_uid_lock);
        uid_exists = uid_hash_entry_exists_locked(uid);
        rt_mutex_unlock(&proc_uid_lock);
        if (uid_exists) {
                kuid_t kuid = make_kuid(current_user_ns(), uid);

                result = proc_uid_instantiate(dir, dentry, NULL, &kuid);
        }
        return ERR_PTR(result);
}

static const struct file_operations proc_uid_operations = {
        .read = generic_read_dir,
        .iterate = proc_uid_readdir,
        .llseek = default_llseek,
};

static const struct inode_operations proc_uid_inode_operations = {
        .lookup = proc_uid_lookup,
        .setattr = proc_setattr,
};

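/* Create /proc/uid at boot and wire up the uid-aware directory operations. */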
int __init proc_uid_init(void)
{
        proc_uid = proc_mkdir("uid", NULL);
        if (!proc_uid)
                return -ENOMEM;
        proc_uid->proc_iops = &proc_uid_inode_operations;
        proc_uid->proc_fops = &proc_uid_operations;

        return 0;
}