/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

generic_fillattr(struct inode * inode,struct kstat * stat)21 void generic_fillattr(struct inode *inode, struct kstat *stat)
22 {
23 	stat->dev = inode->i_sb->s_dev;
24 	stat->ino = inode->i_ino;
25 	stat->mode = inode->i_mode;
26 	stat->nlink = inode->i_nlink;
27 	stat->uid = inode->i_uid;
28 	stat->gid = inode->i_gid;
29 	stat->rdev = inode->i_rdev;
30 	stat->size = i_size_read(inode);
31 	stat->atime = inode->i_atime;
32 	stat->mtime = inode->i_mtime;
33 	stat->ctime = inode->i_ctime;
34 	stat->blksize = i_blocksize(inode);
35 	stat->blocks = inode->i_blocks;
36 }
37 
38 EXPORT_SYMBOL(generic_fillattr);
39 
40 /**
41  * vfs_getattr_nosec - getattr without security checks
42  * @path: file to get attributes from
43  * @stat: structure to return attributes in
44  *
45  * Get attributes without calling security_inode_getattr.
46  *
47  * Currently the only caller other than vfs_getattr is internal to the
48  * filehandle lookup code, which uses only the inode number and returns
49  * no attributes to any user.  Any other code probably wants
50  * vfs_getattr.
51  */
vfs_getattr_nosec(struct path * path,struct kstat * stat)52 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
53 {
54 	struct inode *inode = d_backing_inode(path->dentry);
55 
56 	if (inode->i_op->getattr)
57 		return inode->i_op->getattr(path->mnt, path->dentry, stat);
58 
59 	generic_fillattr(inode, stat);
60 	return 0;
61 }
62 
63 EXPORT_SYMBOL(vfs_getattr_nosec);
64 
/*
 * vfs_getattr - fetch attributes for @path after the LSM permission check.
 *
 * Propagates the security layer's error unchanged; otherwise defers to
 * vfs_getattr_nosec().
 */
int vfs_getattr(struct path *path, struct kstat *stat)
{
	int retval = security_inode_getattr(path);

	return retval ? retval : vfs_getattr_nosec(path, stat);
}

EXPORT_SYMBOL(vfs_getattr);

vfs_fstat(unsigned int fd,struct kstat * stat)77 int vfs_fstat(unsigned int fd, struct kstat *stat)
78 {
79 	struct fd f = fdget_raw(fd);
80 	int error = -EBADF;
81 
82 	if (f.file) {
83 		error = vfs_getattr(&f.file->f_path, stat);
84 		fdput(f);
85 	}
86 	return error;
87 }
88 EXPORT_SYMBOL(vfs_fstat);
89 
/*
 * vfs_fstatat - fstatat(2) worker: resolve @filename relative to @dfd
 * (honouring the AT_* bits in @flag) and fetch its attributes.
 *
 * On -ESTALE the lookup is retried once with LOOKUP_REVAL to force
 * revalidation of cached dentries.
 */
int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
		int flag)
{
	struct path path;
	unsigned int lookup_flags = 0;
	int error;

	/* Reject any flag outside the supported set. */
	if (flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH))
		return -EINVAL;

	if (!(flag & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (flag & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;
retry:
	error = user_path_at(dfd, filename, lookup_flags, &path);
	if (error)
		return error;

	error = vfs_getattr(&path, stat);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}
EXPORT_SYMBOL(vfs_fstatat);

/* stat(2) semantics: resolve @name relative to CWD, following symlinks. */
int vfs_stat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
EXPORT_SYMBOL(vfs_stat);

/* lstat(2) semantics: like vfs_stat() but does not follow a final symlink. */
int vfs_lstat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
EXPORT_SYMBOL(vfs_lstat);


#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
/*
 * cp_old_stat - convert a kstat into the legacy __old_kernel_stat layout
 * @stat: kernel attributes to convert
 * @statbuf: user buffer receiving the legacy structure
 *
 * Returns 0 on success, -EOVERFLOW when a value does not fit the old
 * narrow fields, or -EFAULT if the copy to userspace fails.
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	/* Nag the first few users of the obsolete syscall, then go quiet. */
	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Inode numbers may be wider than st_ino; detect truncation. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* Same truncation check for the (narrow) link count. */
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	/* The old interface cannot represent large files on 32-bit. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

/* Legacy stat(2): resolve and convert via cp_old_stat(). */
SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_old_stat(&stat, statbuf);
}

/* Legacy lstat(2): like stat(2) but does not follow a final symlink. */
SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (error)
		return error;
	return cp_old_stat(&stat, statbuf);
}

/* Legacy fstat(2): fetch by descriptor and convert via cp_old_stat(). */
SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (error)
		return error;
	return cp_old_stat(&stat, statbuf);
}

#endif /* __ARCH_WANT_OLD_STAT */

#if BITS_PER_LONG == 32
#  define choose_32_64(a,b) a
#else
#  define choose_32_64(a,b) b
#endif

#define valid_dev(x)  choose_32_64(old_valid_dev,new_valid_dev)(x)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

/*
 * cp_new_stat - convert a kstat into the "new" struct stat user layout
 * @stat: kernel attributes to convert
 * @statbuf: user buffer receiving the structure
 *
 * Returns 0 on success, -EOVERFLOW when a value cannot be represented
 * in the user-visible struct, or -EFAULT if the copy to userspace fails.
 */
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	/* Device numbers must fit the arch's encoded dev_t form. */
	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	/* Non-LFS interface cannot describe large files on 32-bit. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Detect truncation when kstat's ino is wider than st_ino. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* Same truncation check for the link count. */
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

/* newstat(2): resolve following symlinks, convert via cp_new_stat(). */
SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}

/* newlstat(2): like newstat(2) but does not follow a final symlink. */
SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}

290 #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat,int,dfd,const char __user *,filename,struct stat __user *,statbuf,int,flag)291 SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
292 		struct stat __user *, statbuf, int, flag)
293 {
294 	struct kstat stat;
295 	int error;
296 
297 	error = vfs_fstatat(dfd, filename, &stat, flag);
298 	if (error)
299 		return error;
300 	return cp_new_stat(&stat, statbuf);
301 }
302 #endif
303 
/* newfstat(2): fetch by descriptor, convert via cp_new_stat(). */
SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

/*
 * readlinkat(2): copy a symlink's target into a user buffer.
 *
 * Returns the number of bytes written to @buf or a negative errno.
 * On -ESTALE the lookup is retried with LOOKUP_REVAL to force
 * revalidation of cached dentries.
 */
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		/* Prime the error: empty path -> ENOENT, non-symlink -> EINVAL. */
		error = empty ? -ENOENT : -EINVAL;
		if (inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				/* Reading a link counts as an access. */
				touch_atime(&path);
				error = inode->i_op->readlink(path.dentry,
							      buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

/* readlink(2) is readlinkat(2) relative to the current directory. */
SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}


/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

/*
 * cp_new_stat64 - convert a kstat into the LFS-64 struct stat64 layout
 * @stat: kernel attributes to convert
 * @statbuf: user buffer receiving the structure
 *
 * Returns 0 on success, -EOVERFLOW if the inode number cannot be
 * represented, or -EFAULT if the copy to userspace fails.
 */
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	/* Detect truncation when kstat's ino is wider than st_ino. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

/* stat64(2): resolve following symlinks, convert via cp_new_stat64(). */
SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}

/* lstat64(2): like stat64(2) but does not follow a final symlink. */
SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}

/* fstat64(2): fetch by descriptor, convert via cp_new_stat64(). */
SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}

/* fstatat64(2): resolve relative to @dfd with AT_* flags. */
SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error = vfs_fstatat(dfd, filename, &stat, flag);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
/*
 * Add @bytes to the inode's 512-byte-block accounting, carrying any
 * sub-block remainder overflow from i_bytes into i_blocks.
 * Caller must hold inode->i_lock.
 */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	loff_t rem = bytes & 511;

	inode->i_blocks += bytes >> 9;
	inode->i_bytes += rem;
	/* Carry a full block's worth of remainder into i_blocks. */
	if (inode->i_bytes >= 512) {
		inode->i_bytes -= 512;
		inode->i_blocks++;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

/* Locked variant of __inode_add_bytes(): takes inode->i_lock itself. */
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

/*
 * Subtract @bytes from the inode's 512-byte-block accounting, borrowing
 * a block into i_bytes when the remainder would underflow.
 * Caller must hold inode->i_lock.
 */
void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	loff_t rem = bytes & 511;

	inode->i_blocks -= bytes >> 9;
	/* Borrow one block so the remainder subtraction can't underflow. */
	if (inode->i_bytes < rem) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= rem;
}

EXPORT_SYMBOL(__inode_sub_bytes);

/* Locked variant of __inode_sub_bytes(): takes inode->i_lock itself. */
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

/*
 * Return the inode's space usage in bytes (blocks * 512 + remainder),
 * sampled atomically under inode->i_lock.
 */
loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

/*
 * Set the inode's space usage from a byte count, splitting it into
 * 512-byte blocks plus a sub-block remainder.
 * Caller is responsible for sufficient locking (ie. inode->i_lock).
 */
void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_bytes = bytes & 511;
	inode->i_blocks = bytes >> 9;
}

EXPORT_SYMBOL(inode_set_bytes);