1 /*
2  * linux/ipc/shm.c
3  * Copyright (C) 1992, 1993 Krishna Balasubramanian
4  *	 Many improvements/fixes by Bruno Haible.
5  * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
6  * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
7  *
8  * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
9  * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
10  * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
11  * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
12  * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
13  * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
14  * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
15  *
16  * support for audit of ipc object properties and permission changes
17  * Dustin Kirkland <dustin.kirkland@us.ibm.com>
18  *
19  * namespaces support
20  * OpenVZ, SWsoft Inc.
21  * Pavel Emelianov <xemul@openvz.org>
22  */
23 
24 #include <linux/slab.h>
25 #include <linux/mm.h>
26 #include <linux/hugetlb.h>
27 #include <linux/shm.h>
28 #include <linux/init.h>
29 #include <linux/file.h>
30 #include <linux/mman.h>
31 #include <linux/shmem_fs.h>
32 #include <linux/security.h>
33 #include <linux/syscalls.h>
34 #include <linux/audit.h>
35 #include <linux/capability.h>
36 #include <linux/ptrace.h>
37 #include <linux/seq_file.h>
38 #include <linux/rwsem.h>
39 #include <linux/nsproxy.h>
40 #include <linux/mount.h>
41 #include <linux/ipc_namespace.h>
42 
43 #include <asm/uaccess.h>
44 
45 #include "util.h"
46 
47 struct shm_file_data {
48 	int id;
49 	struct ipc_namespace *ns;
50 	struct file *file;
51 	const struct vm_operations_struct *vm_ops;
52 };
53 
54 #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
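/*
 * Editorial note: the cast in the macro above lets shm_file_data(file) be
 * used both as an rvalue and as an lvalue (shm_release() below assigns
 * through it), while struct file keeps private_data typed as void *.
 */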
55 
56 static const struct file_operations shm_file_operations;
57 static struct vm_operations_struct shm_vm_ops;
58 
59 #define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
60 
61 #define shm_unlock(shp)			\
62 	ipc_unlock(&(shp)->shm_perm)
63 
64 static int newseg(struct ipc_namespace *, struct ipc_params *);
65 static void shm_open(struct vm_area_struct *vma);
66 static void shm_close(struct vm_area_struct *vma);
67 static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
68 #ifdef CONFIG_PROC_FS
69 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
70 #endif
71 
72 void shm_init_ns(struct ipc_namespace *ns)
73 {
74 	ns->shm_ctlmax = SHMMAX;
75 	ns->shm_ctlall = SHMALL;
76 	ns->shm_ctlmni = SHMMNI;
77 	ns->shm_tot = 0;
78 	ipc_init_ids(&shm_ids(ns));
79 }
80 
81 /*
82  * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
83  * Only shm_ids.rw_mutex remains locked on exit.
84  */
85 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
86 {
87 	struct shmid_kernel *shp;
88 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
89 
90 	if (shp->shm_nattch){
91 		shp->shm_perm.mode |= SHM_DEST;
92 		/* Make sure it can no longer be found by key */
93 		shp->shm_perm.key = IPC_PRIVATE;
94 		shm_unlock(shp);
95 	} else
96 		shm_destroy(ns, shp);
97 }
98 
99 #ifdef CONFIG_IPC_NS
100 void shm_exit_ns(struct ipc_namespace *ns)
101 {
102 	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
103 }
104 #endif
105 
106 void __init shm_init (void)
107 {
108 	shm_init_ns(&init_ipc_ns);
109 	ipc_init_proc_interface("sysvipc/shm",
110 				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
111 				IPC_SHM_IDS, sysvipc_shm_proc_show);
112 }
113 
114 /*
115  * shm_lock_(check_) routines are called in the paths where the rw_mutex
116  * is not necessarily held.
117  */
118 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
119 {
120 	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
121 
122 	if (IS_ERR(ipcp))
123 		return (struct shmid_kernel *)ipcp;
124 
125 	return container_of(ipcp, struct shmid_kernel, shm_perm);
126 }
127 
128 static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
129 						int id)
130 {
131 	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
132 
133 	if (IS_ERR(ipcp))
134 		return (struct shmid_kernel *)ipcp;
135 
136 	return container_of(ipcp, struct shmid_kernel, shm_perm);
137 }
138 
139 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
140 {
141 	ipc_rmid(&shm_ids(ns), &s->shm_perm);
142 }
143 
144 
145 /* This is called by fork, once for every shm attach. */
146 static void shm_open(struct vm_area_struct *vma)
147 {
148 	struct file *file = vma->vm_file;
149 	struct shm_file_data *sfd = shm_file_data(file);
150 	struct shmid_kernel *shp;
151 
152 	shp = shm_lock(sfd->ns, sfd->id);
153 	BUG_ON(IS_ERR(shp));
154 	shp->shm_atim = get_seconds();
155 	shp->shm_lprid = task_tgid_vnr(current);
156 	shp->shm_nattch++;
157 	shm_unlock(shp);
158 }
159 
160 /*
161  * shm_destroy - free the struct shmid_kernel
162  *
163  * @ns: namespace
164  * @shp: struct to free
165  *
166  * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
167  * but returns with shp unlocked and freed.
168  */
169 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
170 {
171 	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
172 	shm_rmid(ns, shp);
173 	shm_unlock(shp);
174 	if (!is_file_hugepages(shp->shm_file))
175 		shmem_lock(shp->shm_file, 0, shp->mlock_user);
176 	else
177 		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
178 						shp->mlock_user);
179 	fput (shp->shm_file);
180 	security_shm_free(shp);
181 	ipc_rcu_putref(shp);
182 }
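/*
 * Editorial note: the branch above releases any mlock-style state still
 * attached to the segment before the backing file is dropped: shmem_lock()
 * with lock == 0 clears a SHM_LOCK left in place via shmctl(), while
 * user_shm_unlock() returns the locked-memory charge recorded in mlock_user
 * for hugetlb-backed segments.
 */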
183 
184 /*
185  * remove the attach descriptor vma.
186  * free memory for segment if it is marked destroyed.
187  * The descriptor has already been removed from the current->mm->mmap list
188  * and will later be kfree()d.
189  */
190 static void shm_close(struct vm_area_struct *vma)
191 {
192 	struct file * file = vma->vm_file;
193 	struct shm_file_data *sfd = shm_file_data(file);
194 	struct shmid_kernel *shp;
195 	struct ipc_namespace *ns = sfd->ns;
196 
197 	down_write(&shm_ids(ns).rw_mutex);
198 	/* remove from the list of attaches of the shm segment */
199 	shp = shm_lock(ns, sfd->id);
200 	BUG_ON(IS_ERR(shp));
201 	shp->shm_lprid = task_tgid_vnr(current);
202 	shp->shm_dtim = get_seconds();
203 	shp->shm_nattch--;
204 	if(shp->shm_nattch == 0 &&
205 	   shp->shm_perm.mode & SHM_DEST)
206 		shm_destroy(ns, shp);
207 	else
208 		shm_unlock(shp);
209 	up_write(&shm_ids(ns).rw_mutex);
210 }
211 
212 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
213 {
214 	struct file *file = vma->vm_file;
215 	struct shm_file_data *sfd = shm_file_data(file);
216 
217 	return sfd->vm_ops->fault(vma, vmf);
218 }
219 
220 #ifdef CONFIG_NUMA
221 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
222 {
223 	struct file *file = vma->vm_file;
224 	struct shm_file_data *sfd = shm_file_data(file);
225 	int err = 0;
226 	if (sfd->vm_ops->set_policy)
227 		err = sfd->vm_ops->set_policy(vma, new);
228 	return err;
229 }
230 
231 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
232 					unsigned long addr)
233 {
234 	struct file *file = vma->vm_file;
235 	struct shm_file_data *sfd = shm_file_data(file);
236 	struct mempolicy *pol = NULL;
237 
238 	if (sfd->vm_ops->get_policy)
239 		pol = sfd->vm_ops->get_policy(vma, addr);
240 	else if (vma->vm_policy)
241 		pol = vma->vm_policy;
242 
243 	return pol;
244 }
245 #endif
246 
247 static int shm_mmap(struct file * file, struct vm_area_struct * vma)
248 {
249 	struct shm_file_data *sfd = shm_file_data(file);
250 	int ret;
251 
252 	ret = sfd->file->f_op->mmap(sfd->file, vma);
253 	if (ret != 0)
254 		return ret;
255 	sfd->vm_ops = vma->vm_ops;
256 #ifdef CONFIG_MMU
257 	BUG_ON(!sfd->vm_ops->fault);
258 #endif
259 	vma->vm_ops = &shm_vm_ops;
260 	shm_open(vma);
261 
262 	return ret;
263 }
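/*
 * Editorial note: shm_mmap() stacks on top of the backing file (shmem or
 * hugetlbfs): that file's ->mmap installs its own vm_ops, which are saved in
 * sfd->vm_ops and then replaced by shm_vm_ops, so attach/detach bookkeeping
 * (shm_nattch, shm_atim/shm_dtim, shm_lprid) is updated on every VMA
 * open/close while faults and NUMA policy calls are forwarded to the saved
 * operations.
 */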
264 
265 static int shm_release(struct inode *ino, struct file *file)
266 {
267 	struct shm_file_data *sfd = shm_file_data(file);
268 
269 	put_ipc_ns(sfd->ns);
270 	shm_file_data(file) = NULL;
271 	kfree(sfd);
272 	return 0;
273 }
274 
275 static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
276 {
277 	int (*fsync) (struct file *, struct dentry *, int datasync);
278 	struct shm_file_data *sfd = shm_file_data(file);
279 	int ret = -EINVAL;
280 
281 	fsync = sfd->file->f_op->fsync;
282 	if (fsync)
283 		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
284 	return ret;
285 }
286 
287 static unsigned long shm_get_unmapped_area(struct file *file,
288 	unsigned long addr, unsigned long len, unsigned long pgoff,
289 	unsigned long flags)
290 {
291 	struct shm_file_data *sfd = shm_file_data(file);
292 	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
293 }
294 
295 int is_file_shm_hugepages(struct file *file)
296 {
297 	int ret = 0;
298 
299 	if (file->f_op == &shm_file_operations) {
300 		struct shm_file_data *sfd;
301 		sfd = shm_file_data(file);
302 		ret = is_file_hugepages(sfd->file);
303 	}
304 	return ret;
305 }
306 
307 static const struct file_operations shm_file_operations = {
308 	.mmap		= shm_mmap,
309 	.fsync		= shm_fsync,
310 	.release	= shm_release,
311 	.get_unmapped_area	= shm_get_unmapped_area,
312 };
313 
314 static struct vm_operations_struct shm_vm_ops = {
315 	.open	= shm_open,	/* callback for a new vm-area open */
316 	.close	= shm_close,	/* callback for when the vm-area is released */
317 	.fault	= shm_fault,
318 #if defined(CONFIG_NUMA)
319 	.set_policy = shm_set_policy,
320 	.get_policy = shm_get_policy,
321 #endif
322 };
323 
324 /**
325  * newseg - Create a new shared memory segment
326  * @ns: namespace
327  * @params: ptr to the structure that contains key, size and shmflg
328  *
329  * Called with shm_ids.rw_mutex held as a writer.
330  */
331 
332 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
333 {
334 	key_t key = params->key;
335 	int shmflg = params->flg;
336 	size_t size = params->u.size;
337 	int error;
338 	struct shmid_kernel *shp;
339 	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
340 	struct file * file;
341 	char name[13];
342 	int id;
343 	int acctflag = 0;
344 
345 	if (size < SHMMIN || size > ns->shm_ctlmax)
346 		return -EINVAL;
347 
348 	if (ns->shm_tot + numpages > ns->shm_ctlall)
349 		return -ENOSPC;
350 
351 	shp = ipc_rcu_alloc(sizeof(*shp));
352 	if (!shp)
353 		return -ENOMEM;
354 
355 	shp->shm_perm.key = key;
356 	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
357 	shp->mlock_user = NULL;
358 
359 	shp->shm_perm.security = NULL;
360 	error = security_shm_alloc(shp);
361 	if (error) {
362 		ipc_rcu_putref(shp);
363 		return error;
364 	}
365 
366 	sprintf (name, "SYSV%08x", key);
367 	if (shmflg & SHM_HUGETLB) {
368 		/* hugetlb_file_setup applies strict accounting */
369 		if (shmflg & SHM_NORESERVE)
370 			acctflag = VM_NORESERVE;
371 		file = hugetlb_file_setup(name, size, acctflag);
372 		shp->mlock_user = current_user();
373 	} else {
374 		/*
375 	 * Do not allow unaccounted (SHM_NORESERVE) allocation when the
376 	 * overcommit policy is OVERCOMMIT_NEVER, even if it's asked for.
377 		 */
378 		if  ((shmflg & SHM_NORESERVE) &&
379 				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
380 			acctflag = VM_NORESERVE;
381 		file = shmem_file_setup(name, size, acctflag);
382 	}
383 	error = PTR_ERR(file);
384 	if (IS_ERR(file))
385 		goto no_file;
386 
387 	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
388 	if (id < 0) {
389 		error = id;
390 		goto no_id;
391 	}
392 
393 	shp->shm_cprid = task_tgid_vnr(current);
394 	shp->shm_lprid = 0;
395 	shp->shm_atim = shp->shm_dtim = 0;
396 	shp->shm_ctim = get_seconds();
397 	shp->shm_segsz = size;
398 	shp->shm_nattch = 0;
399 	shp->shm_file = file;
400 	/*
401 	 * shmid gets reported as "inode#" in /proc/pid/maps.
402 	 * proc-ps tools use this. Changing this will break them.
403 	 */
404 	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;
405 
406 	ns->shm_tot += numpages;
407 	error = shp->shm_perm.id;
408 	shm_unlock(shp);
409 	return error;
410 
411 no_id:
412 	fput(file);
413 no_file:
414 	security_shm_free(shp);
415 	ipc_rcu_putref(shp);
416 	return error;
417 }
418 
419 /*
420  * Called with shm_ids.rw_mutex and ipcp locked.
421  */
422 static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
423 {
424 	struct shmid_kernel *shp;
425 
426 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
427 	return security_shm_associate(shp, shmflg);
428 }
429 
430 /*
431  * Called with shm_ids.rw_mutex and ipcp locked.
432  */
433 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
434 				struct ipc_params *params)
435 {
436 	struct shmid_kernel *shp;
437 
438 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
439 	if (shp->shm_segsz < params->u.size)
440 		return -EINVAL;
441 
442 	return 0;
443 }
444 
445 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
446 {
447 	struct ipc_namespace *ns;
448 	struct ipc_ops shm_ops;
449 	struct ipc_params shm_params;
450 
451 	ns = current->nsproxy->ipc_ns;
452 
453 	shm_ops.getnew = newseg;
454 	shm_ops.associate = shm_security;
455 	shm_ops.more_checks = shm_more_checks;
456 
457 	shm_params.key = key;
458 	shm_params.flg = shmflg;
459 	shm_params.u.size = size;
460 
461 	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
462 }
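/*
 * Editorial note: a minimal userspace sketch of the call sequence these
 * syscalls implement (illustrative only, not part of this file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);	/* attaches via do_shmat() below */
 *	memcpy(p, "hello", 6);
 *	shmdt(p);			/* detach; see SYSCALL shmdt below */
 *	shmctl(id, IPC_RMID, NULL);	/* mark the segment for destruction */
 */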
463 
464 static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
465 {
466 	switch(version) {
467 	case IPC_64:
468 		return copy_to_user(buf, in, sizeof(*in));
469 	case IPC_OLD:
470 	    {
471 		struct shmid_ds out;
472 
473 		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
474 		out.shm_segsz	= in->shm_segsz;
475 		out.shm_atime	= in->shm_atime;
476 		out.shm_dtime	= in->shm_dtime;
477 		out.shm_ctime	= in->shm_ctime;
478 		out.shm_cpid	= in->shm_cpid;
479 		out.shm_lpid	= in->shm_lpid;
480 		out.shm_nattch	= in->shm_nattch;
481 
482 		return copy_to_user(buf, &out, sizeof(out));
483 	    }
484 	default:
485 		return -EINVAL;
486 	}
487 }
488 
489 static inline unsigned long
490 copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
491 {
492 	switch(version) {
493 	case IPC_64:
494 		if (copy_from_user(out, buf, sizeof(*out)))
495 			return -EFAULT;
496 		return 0;
497 	case IPC_OLD:
498 	    {
499 		struct shmid_ds tbuf_old;
500 
501 		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
502 			return -EFAULT;
503 
504 		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
505 		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
506 		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
507 
508 		return 0;
509 	    }
510 	default:
511 		return -EINVAL;
512 	}
513 }
514 
515 static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
516 {
517 	switch(version) {
518 	case IPC_64:
519 		return copy_to_user(buf, in, sizeof(*in));
520 	case IPC_OLD:
521 	    {
522 		struct shminfo out;
523 
524 		if(in->shmmax > INT_MAX)
525 			out.shmmax = INT_MAX;
526 		else
527 			out.shmmax = (int)in->shmmax;
528 
529 		out.shmmin	= in->shmmin;
530 		out.shmmni	= in->shmmni;
531 		out.shmseg	= in->shmseg;
532 		out.shmall	= in->shmall;
533 
534 		return copy_to_user(buf, &out, sizeof(out));
535 	    }
536 	default:
537 		return -EINVAL;
538 	}
539 }
540 
541 /*
542  * Called with shm_ids.rw_mutex held as a reader
543  */
544 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
545 		unsigned long *swp)
546 {
547 	int next_id;
548 	int total, in_use;
549 
550 	*rss = 0;
551 	*swp = 0;
552 
553 	in_use = shm_ids(ns).in_use;
554 
555 	for (total = 0, next_id = 0; total < in_use; next_id++) {
556 		struct shmid_kernel *shp;
557 		struct inode *inode;
558 
559 		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
560 		if (shp == NULL)
561 			continue;
562 
563 		inode = shp->shm_file->f_path.dentry->d_inode;
564 
565 		if (is_file_hugepages(shp->shm_file)) {
566 			struct address_space *mapping = inode->i_mapping;
567 			struct hstate *h = hstate_file(shp->shm_file);
568 			*rss += pages_per_huge_page(h) * mapping->nrpages;
569 		} else {
570 #ifdef CONFIG_SHMEM
571 			struct shmem_inode_info *info = SHMEM_I(inode);
572 			spin_lock(&info->lock);
573 			*rss += inode->i_mapping->nrpages;
574 			*swp += info->swapped;
575 			spin_unlock(&info->lock);
576 #else
577 			*rss += inode->i_mapping->nrpages;
578 #endif
579 		}
580 
581 		total++;
582 	}
583 }
584 
585 /*
586  * This function handles some shmctl commands which require the rw_mutex
587  * to be held in write mode.
588  * NOTE: no locks must be held, the rw_mutex is taken inside this function.
589  */
590 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
591 		       struct shmid_ds __user *buf, int version)
592 {
593 	struct kern_ipc_perm *ipcp;
594 	struct shmid64_ds shmid64;
595 	struct shmid_kernel *shp;
596 	int err;
597 
598 	if (cmd == IPC_SET) {
599 		if (copy_shmid_from_user(&shmid64, buf, version))
600 			return -EFAULT;
601 	}
602 
603 	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
604 	if (IS_ERR(ipcp))
605 		return PTR_ERR(ipcp);
606 
607 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
608 
609 	err = security_shm_shmctl(shp, cmd);
610 	if (err)
611 		goto out_unlock;
612 	switch (cmd) {
613 	case IPC_RMID:
614 		do_shm_rmid(ns, ipcp);
615 		goto out_up;
616 	case IPC_SET:
617 		ipc_update_perm(&shmid64.shm_perm, ipcp);
618 		shp->shm_ctim = get_seconds();
619 		break;
620 	default:
621 		err = -EINVAL;
622 	}
623 out_unlock:
624 	shm_unlock(shp);
625 out_up:
626 	up_write(&shm_ids(ns).rw_mutex);
627 	return err;
628 }
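/*
 * Editorial note: the IPC_RMID case above exits through out_up rather than
 * out_unlock because do_shm_rmid() either unlocks shp itself (after marking
 * it SHM_DEST) or destroys it outright, so only shm_ids.rw_mutex is left to
 * release here.
 */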
629 
630 SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
631 {
632 	struct shmid_kernel *shp;
633 	int err, version;
634 	struct ipc_namespace *ns;
635 
636 	if (cmd < 0 || shmid < 0) {
637 		err = -EINVAL;
638 		goto out;
639 	}
640 
641 	version = ipc_parse_version(&cmd);
642 	ns = current->nsproxy->ipc_ns;
643 
644 	switch (cmd) { /* replace with proc interface ? */
645 	case IPC_INFO:
646 	{
647 		struct shminfo64 shminfo;
648 
649 		err = security_shm_shmctl(NULL, cmd);
650 		if (err)
651 			return err;
652 
653 		memset(&shminfo, 0, sizeof(shminfo));
654 		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
655 		shminfo.shmmax = ns->shm_ctlmax;
656 		shminfo.shmall = ns->shm_ctlall;
657 
658 		shminfo.shmmin = SHMMIN;
659 		if(copy_shminfo_to_user (buf, &shminfo, version))
660 			return -EFAULT;
661 
662 		down_read(&shm_ids(ns).rw_mutex);
663 		err = ipc_get_maxid(&shm_ids(ns));
664 		up_read(&shm_ids(ns).rw_mutex);
665 
666 		if(err<0)
667 			err = 0;
668 		goto out;
669 	}
670 	case SHM_INFO:
671 	{
672 		struct shm_info shm_info;
673 
674 		err = security_shm_shmctl(NULL, cmd);
675 		if (err)
676 			return err;
677 
678 		memset(&shm_info, 0, sizeof(shm_info));
679 		down_read(&shm_ids(ns).rw_mutex);
680 		shm_info.used_ids = shm_ids(ns).in_use;
681 		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
682 		shm_info.shm_tot = ns->shm_tot;
683 		shm_info.swap_attempts = 0;
684 		shm_info.swap_successes = 0;
685 		err = ipc_get_maxid(&shm_ids(ns));
686 		up_read(&shm_ids(ns).rw_mutex);
687 		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
688 			err = -EFAULT;
689 			goto out;
690 		}
691 
692 		err = err < 0 ? 0 : err;
693 		goto out;
694 	}
695 	case SHM_STAT:
696 	case IPC_STAT:
697 	{
698 		struct shmid64_ds tbuf;
699 		int result;
700 
701 		if (cmd == SHM_STAT) {
702 			shp = shm_lock(ns, shmid);
703 			if (IS_ERR(shp)) {
704 				err = PTR_ERR(shp);
705 				goto out;
706 			}
707 			result = shp->shm_perm.id;
708 		} else {
709 			shp = shm_lock_check(ns, shmid);
710 			if (IS_ERR(shp)) {
711 				err = PTR_ERR(shp);
712 				goto out;
713 			}
714 			result = 0;
715 		}
716 		err = -EACCES;
717 		if (ipcperms (&shp->shm_perm, S_IRUGO))
718 			goto out_unlock;
719 		err = security_shm_shmctl(shp, cmd);
720 		if (err)
721 			goto out_unlock;
722 		memset(&tbuf, 0, sizeof(tbuf));
723 		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
724 		tbuf.shm_segsz	= shp->shm_segsz;
725 		tbuf.shm_atime	= shp->shm_atim;
726 		tbuf.shm_dtime	= shp->shm_dtim;
727 		tbuf.shm_ctime	= shp->shm_ctim;
728 		tbuf.shm_cpid	= shp->shm_cprid;
729 		tbuf.shm_lpid	= shp->shm_lprid;
730 		tbuf.shm_nattch	= shp->shm_nattch;
731 		shm_unlock(shp);
732 		if(copy_shmid_to_user (buf, &tbuf, version))
733 			err = -EFAULT;
734 		else
735 			err = result;
736 		goto out;
737 	}
738 	case SHM_LOCK:
739 	case SHM_UNLOCK:
740 	{
741 		struct file *uninitialized_var(shm_file);
742 
743 		lru_add_drain_all();  /* drain pagevecs to lru lists */
744 
745 		shp = shm_lock_check(ns, shmid);
746 		if (IS_ERR(shp)) {
747 			err = PTR_ERR(shp);
748 			goto out;
749 		}
750 
751 		audit_ipc_obj(&(shp->shm_perm));
752 
753 		if (!capable(CAP_IPC_LOCK)) {
754 			uid_t euid = current_euid();
755 			err = -EPERM;
756 			if (euid != shp->shm_perm.uid &&
757 			    euid != shp->shm_perm.cuid)
758 				goto out_unlock;
759 			if (cmd == SHM_LOCK &&
760 			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
761 				goto out_unlock;
762 		}
763 
764 		err = security_shm_shmctl(shp, cmd);
765 		if (err)
766 			goto out_unlock;
767 
768 		if(cmd==SHM_LOCK) {
769 			struct user_struct *user = current_user();
770 			if (!is_file_hugepages(shp->shm_file)) {
771 				err = shmem_lock(shp->shm_file, 1, user);
772 				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
773 					shp->shm_perm.mode |= SHM_LOCKED;
774 					shp->mlock_user = user;
775 				}
776 			}
777 		} else if (!is_file_hugepages(shp->shm_file)) {
778 			shmem_lock(shp->shm_file, 0, shp->mlock_user);
779 			shp->shm_perm.mode &= ~SHM_LOCKED;
780 			shp->mlock_user = NULL;
781 		}
782 		shm_unlock(shp);
783 		goto out;
784 	}
785 	case IPC_RMID:
786 	case IPC_SET:
787 		err = shmctl_down(ns, shmid, cmd, buf, version);
788 		return err;
789 	default:
790 		return -EINVAL;
791 	}
792 
793 out_unlock:
794 	shm_unlock(shp);
795 out:
796 	return err;
797 }
798 
799 /*
800  * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
801  *
802  * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
803  * "raddr" thing points to kernel space, and there has to be a wrapper around
804  * this.
805  */
806 long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
807 {
808 	struct shmid_kernel *shp;
809 	unsigned long addr;
810 	unsigned long size;
811 	struct file * file;
812 	int    err;
813 	unsigned long flags;
814 	unsigned long prot;
815 	int acc_mode;
816 	unsigned long user_addr;
817 	struct ipc_namespace *ns;
818 	struct shm_file_data *sfd;
819 	struct path path;
820 	fmode_t f_mode;
821 
822 	err = -EINVAL;
823 	if (shmid < 0)
824 		goto out;
825 	else if ((addr = (ulong)shmaddr)) {
826 		if (addr & (SHMLBA-1)) {
827 			if (shmflg & SHM_RND)
828 				addr &= ~(SHMLBA-1);	   /* round down */
829 			else
830 #ifndef __ARCH_FORCE_SHMLBA
831 				if (addr & ~PAGE_MASK)
832 #endif
833 					goto out;
834 		}
835 		flags = MAP_SHARED | MAP_FIXED;
836 	} else {
837 		if ((shmflg & SHM_REMAP))
838 			goto out;
839 
840 		flags = MAP_SHARED;
841 	}
842 
843 	if (shmflg & SHM_RDONLY) {
844 		prot = PROT_READ;
845 		acc_mode = S_IRUGO;
846 		f_mode = FMODE_READ;
847 	} else {
848 		prot = PROT_READ | PROT_WRITE;
849 		acc_mode = S_IRUGO | S_IWUGO;
850 		f_mode = FMODE_READ | FMODE_WRITE;
851 	}
852 	if (shmflg & SHM_EXEC) {
853 		prot |= PROT_EXEC;
854 		acc_mode |= S_IXUGO;
855 	}
856 
857 	/*
858 	 * We cannot rely on the fs check since SYSV IPC does have an
859 	 * additional creator id...
860 	 */
861 	ns = current->nsproxy->ipc_ns;
862 	shp = shm_lock_check(ns, shmid);
863 	if (IS_ERR(shp)) {
864 		err = PTR_ERR(shp);
865 		goto out;
866 	}
867 
868 	err = -EACCES;
869 	if (ipcperms(&shp->shm_perm, acc_mode))
870 		goto out_unlock;
871 
872 	err = security_shm_shmat(shp, shmaddr, shmflg);
873 	if (err)
874 		goto out_unlock;
875 
876 	path.dentry = dget(shp->shm_file->f_path.dentry);
877 	path.mnt    = shp->shm_file->f_path.mnt;
878 	shp->shm_nattch++;
879 	size = i_size_read(path.dentry->d_inode);
880 	shm_unlock(shp);
881 
882 	err = -ENOMEM;
883 	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
884 	if (!sfd)
885 		goto out_put_dentry;
886 
887 	file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
888 	if (!file)
889 		goto out_free;
890 
891 	file->private_data = sfd;
892 	file->f_mapping = shp->shm_file->f_mapping;
893 	sfd->id = shp->shm_perm.id;
894 	sfd->ns = get_ipc_ns(ns);
895 	sfd->file = shp->shm_file;
896 	sfd->vm_ops = NULL;
897 
898 	down_write(&current->mm->mmap_sem);
899 	if (addr && !(shmflg & SHM_REMAP)) {
900 		err = -EINVAL;
901 		if (find_vma_intersection(current->mm, addr, addr + size))
902 			goto invalid;
903 		/*
904 		 * If shm segment goes below stack, make sure there is some
905 		 * space left for the stack to grow (at least 4 pages).
906 		 */
907 		if (addr < current->mm->start_stack &&
908 		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
909 			goto invalid;
910 	}
911 
912 	user_addr = do_mmap (file, addr, size, prot, flags, 0);
913 	*raddr = user_addr;
914 	err = 0;
915 	if (IS_ERR_VALUE(user_addr))
916 		err = (long)user_addr;
917 invalid:
918 	up_write(&current->mm->mmap_sem);
919 
920 	fput(file);
921 
922 out_nattch:
923 	down_write(&shm_ids(ns).rw_mutex);
924 	shp = shm_lock(ns, shmid);
925 	BUG_ON(IS_ERR(shp));
926 	shp->shm_nattch--;
927 	if(shp->shm_nattch == 0 &&
928 	   shp->shm_perm.mode & SHM_DEST)
929 		shm_destroy(ns, shp);
930 	else
931 		shm_unlock(shp);
932 	up_write(&shm_ids(ns).rw_mutex);
933 
934 out:
935 	return err;
936 
937 out_unlock:
938 	shm_unlock(shp);
939 	goto out;
940 
941 out_free:
942 	kfree(sfd);
943 out_put_dentry:
944 	dput(path.dentry);
945 	goto out_nattch;
946 }
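/*
 * Editorial note: whether or not do_mmap() succeeds, do_shmat() reaches
 * out_nattch, which re-takes the locks, drops the shm_nattch reference taken
 * before the mapping attempt and, if the segment was marked SHM_DEST and this
 * was the last attach, destroys it via shm_destroy().
 */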
947 
948 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
949 {
950 	unsigned long ret;
951 	long err;
952 
953 	err = do_shmat(shmid, shmaddr, shmflg, &ret);
954 	if (err)
955 		return err;
956 	force_successful_syscall_return();
957 	return (long)ret;
958 }
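/*
 * Editorial note: force_successful_syscall_return() is used above because a
 * valid attach address, viewed as a signed return value, can fall in the
 * range the syscall exit path would otherwise treat as an errno on
 * architectures that track error returns (e.g. ia64).
 */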
959 
960 /*
961  * detach and kill segment if marked destroyed.
962  * The work is done in shm_close.
963  */
964 SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
965 {
966 	struct mm_struct *mm = current->mm;
967 	struct vm_area_struct *vma, *next;
968 	unsigned long addr = (unsigned long)shmaddr;
969 	loff_t size = 0;
970 	int retval = -EINVAL;
971 
972 	if (addr & ~PAGE_MASK)
973 		return retval;
974 
975 	down_write(&mm->mmap_sem);
976 
977 	/*
978 	 * This function tries to be smart and unmap shm segments that
979 	 * were modified by partial mlock or munmap calls:
980 	 * - It first determines the size of the shm segment that should be
981 	 *   unmapped: It searches for a vma that is backed by shm and that
982 	 *   started at address shmaddr. It records its size and then unmaps
983 	 *   it.
984 	 * - Then it unmaps all shm vmas that started at shmaddr and that
985 	 *   are within the initially determined size.
986 	 * Errors from do_munmap are ignored: the function only fails if
987 	 * it's called with invalid parameters or if it's called to unmap
988 	 * a part of a vma. Both calls in this function are for full vmas,
989 	 * the parameters are directly copied from the vma itself and always
990 	 * valid - therefore do_munmap cannot fail. (famous last words?)
991 	 */
992 	/*
993 	 * If it had been mremap()'d, the starting address would not
994 	 * match the usual checks anyway. So assume all vma's are
995 	 * above the starting address given.
996 	 */
997 	vma = find_vma(mm, addr);
998 
999 #ifdef CONFIG_MMU
1000 	while (vma) {
1001 		next = vma->vm_next;
1002 
1003 		/*
1004 		 * Check if the starting address would match, i.e. it's
1005 		 * a fragment created by mprotect() and/or munmap(), or
1006 		 * otherwise it starts at this address with no hassles.
1007 		 */
1008 		if ((vma->vm_ops == &shm_vm_ops) &&
1009 			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1010 
1011 
1012 			size = vma->vm_file->f_path.dentry->d_inode->i_size;
1013 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1014 			/*
1015 			 * We discovered the size of the shm segment, so
1016 			 * break out of here and fall through to the next
1017 			 * loop that uses the size information to stop
1018 			 * searching for matching vma's.
1019 			 */
1020 			retval = 0;
1021 			vma = next;
1022 			break;
1023 		}
1024 		vma = next;
1025 	}
1026 
1027 	/*
1028 	 * We need look no further than the maximum address a fragment
1029 	 * could possibly have landed at. Also cast things to loff_t to
1030 	 * prevent overflows and make comparisons vs. equal-width types.
1031 	 */
1032 	size = PAGE_ALIGN(size);
1033 	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1034 		next = vma->vm_next;
1035 
1036 		/* finding a matching vma now does not alter retval */
1037 		if ((vma->vm_ops == &shm_vm_ops) &&
1038 			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
1039 
1040 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1041 		vma = next;
1042 	}
1043 
1044 #else /* CONFIG_MMU */
1045 	/* under NOMMU conditions, the exact address to be destroyed must be
1046 	 * given */
1047 	retval = -EINVAL;
1048 	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1049 		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1050 		retval = 0;
1051 	}
1052 
1053 #endif
1054 
1055 	up_write(&mm->mmap_sem);
1056 	return retval;
1057 }
1058 
1059 #ifdef CONFIG_PROC_FS
1060 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1061 {
1062 	struct shmid_kernel *shp = it;
1063 
1064 #if BITS_PER_LONG <= 32
1065 #define SIZE_SPEC "%10lu"
1066 #else
1067 #define SIZE_SPEC "%21lu"
1068 #endif
1069 
1070 	return seq_printf(s,
1071 			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1072 			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
1073 			  shp->shm_perm.key,
1074 			  shp->shm_perm.id,
1075 			  shp->shm_perm.mode,
1076 			  shp->shm_segsz,
1077 			  shp->shm_cprid,
1078 			  shp->shm_lprid,
1079 			  shp->shm_nattch,
1080 			  shp->shm_perm.uid,
1081 			  shp->shm_perm.gid,
1082 			  shp->shm_perm.cuid,
1083 			  shp->shm_perm.cgid,
1084 			  shp->shm_atim,
1085 			  shp->shm_dtim,
1086 			  shp->shm_ctim);
1087 }
1088 #endif
1089