/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
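
/*
 * Illustrative note (not in the original source): shm_file_data() casts the
 * generic file->private_data slot to a struct shm_file_data pointer while
 * staying an lvalue, so callers can both read and assign through it:
 *
 *	struct shm_file_data *sfd = shm_file_data(file);
 *	shm_file_data(file) = NULL;	/- as done in shm_release() below -/
 */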

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
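
/*
 * Illustrative note (not in the original source): the header registered
 * above is the first line `cat /proc/sysvipc/shm` prints; each following
 * row is produced per segment by sysvipc_shm_proc_show() at the end of
 * this file.
 */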

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	if (IS_ERR(ipcp))
		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct shmid_kernel *shp = ipc_rcu_to_struct(p);

	security_shm_free(shp);
	ipc_rcu_free(head);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	if (shp->shm_file != sfd->file) {
		/* ID was reused */
		shm_unlock(shp);
		return -EINVAL;
	}

	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}
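
/*
 * Illustrative example (not in the original source): after userspace calls
 *
 *	shmctl(id, IPC_RMID, NULL);
 *
 * a still-attached segment is only marked SHM_DEST by do_shm_rmid() above;
 * the final shmdt() then drops shm_nattch to 0, shm_may_destroy() returns
 * true, and shm_close() frees the segment for real.
 */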

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments, that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

static int shm_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops && sfd->vm_ops->split)
		return sfd->vm_ops->split(vma, addr);

	return 0;
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent an
	 * IPC ID that was removed, and possibly even reused by another shm
	 * segment already.  Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
	.split	= shm_split,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp, ipc_rcu_free);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow SHM_NORESERVE (i.e. no accounting) when the
		 * overcommit policy is OVERCOMMIT_NEVER, even if the caller
		 * asks for it.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	ipc_rcu_putref(shp, shm_rcu_free);
	return error;
}
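
/*
 * Illustrative example (hedged, not from the original source): the
 * SHM_HUGETLB branch above decodes an optional page-size hint from the
 * flags, so a userspace request for an 8 MiB segment backed by 2 MiB huge
 * pages might look like:
 *
 *	shmget(IPC_PRIVATE, 8 << 20,
 *	       IPC_CREAT | SHM_HUGETLB | (21 << SHM_HUGE_SHIFT) | 0600);
 *
 * where 21 is log2(2 MiB), the page-size log hstate_sizelog() expects;
 * a hint of 0 selects the default huge page size.
 */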

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = shm_security,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
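
/*
 * Illustrative userspace round trip (hedged, not part of this file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);	/- attaches via do_shmat() below -/
 *	...
 *	shmdt(p);			/- detach; see SYSCALL shmdt below -/
 *	shmctl(id, IPC_RMID, NULL);	/- mark for destruction -/
 */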

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);

		/* check if shm_destroy() is tearing down shp */
		if (!ipc_valid_object(&shp->shm_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid)) {
				err = -EPERM;
				goto out_unlock0;
			}
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
				err = -EPERM;
				goto out_unlock0;
			}
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
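
/*
 * Illustrative note (hedged, not from the original source): the SHM_LOCK
 * path above pins the segment's pages via shmem_lock(), charging them
 * against RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK, e.g.:
 *
 *	shmctl(id, SHM_LOCK, NULL);	/- lock pages in memory -/
 *	shmctl(id, SHM_UNLOCK, NULL);	/- and unlock them again -/
 */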

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	      ulong *raddr, unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			/*
			 * Round down to the nearest multiple of shmlba.
			 * For sane do_mmap_pgoff() parameters, avoid
			 * round downs that trigger nil-page and MAP_FIXED.
			 */
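			/*
			 * Worked example (illustrative, not from the original
			 * source): with shmlba == 0x10000, a call like
			 * shmat(id, (void *)0x12345, SHM_RND) reaches this
			 * point with addr == 0x12345 and rounds it down to
			 * 0x10000; without SHM_RND the unaligned hint falls
			 * through to "goto out" and fails with -EINVAL.
			 */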
			if ((shmflg & SHM_RND) && addr >= shmlba)
				addr &= ~(shmlba - 1);
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(d_inode(path.dentry));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	/*
	 * We need to take a reference to the real shm file to prevent the
	 * pointer from becoming stale in cases where the lifetime of the outer
	 * file extends beyond that of the shm segment.  It's not usually
	 * possible, but it can happen during remap_file_pages() emulation as
	 * that unmaps the memory, then does ->mmap() via file reference only.
	 * We'll deny the ->mmap() if the shm segment was since removed, but to
	 * detect shm ID reuse we need to compare the file pointers.
	 */
	sfd->file = get_file(shp->shm_file);
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	if (down_write_killable(&current->mm->mmap_sem)) {
		err = -EINTR;
		goto out_fput;
	}

	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
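	/*
	 * Illustrative note (hedged): a valid attach address can look like a
	 * negative errno to architectures that flag syscall errors by value,
	 * so tell the return path this call succeeded before handing the
	 * address back.
	 */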
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped.  With mremap(), someone could place
			 * page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/*
	 * Under NOMMU conditions, the exact address to be destroyed must be
	 * given.
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   shp->shm_cprid,
		   shp->shm_lprid,
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif
1444