1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/pipe.c
4  *
5  *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
6  */
7 
8 #include <linux/mm.h>
9 #include <linux/file.h>
10 #include <linux/poll.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/fs.h>
15 #include <linux/log2.h>
16 #include <linux/mount.h>
17 #include <linux/magic.h>
18 #include <linux/pipe_fs_i.h>
19 #include <linux/uio.h>
20 #include <linux/highmem.h>
21 #include <linux/pagemap.h>
22 #include <linux/audit.h>
23 #include <linux/syscalls.h>
24 #include <linux/fcntl.h>
25 #include <linux/memcontrol.h>
26 
27 #include <linux/uaccess.h>
28 #include <asm/ioctls.h>
29 
30 #include "internal.h"
31 
32 /*
33  * The max size that a non-root user is allowed to grow the pipe. Can
34  * be set by root in /proc/sys/fs/pipe-max-size
35  */
36 unsigned int pipe_max_size = 1048576;
37 
38 /*
39  * Minimum pipe size, as required by POSIX
40  */
41 unsigned int pipe_min_size = PAGE_SIZE;
42 
43 /* Maximum allocatable pages per user. The hard limit is unset by default;
44  * the soft limit matches the defaults (PIPE_DEF_BUFFERS * INR_OPEN_CUR).
45  */
46 unsigned long pipe_user_pages_hard;
47 unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
48 
49 /*
50  * We use a start+len construction, which provides full use of the
51  * allocated memory.
52  * -- Florian Coosmann (FGC)
53  *
54  * Reads with count = 0 should always return 0.
55  * -- Julian Bradfield 1999-06-07.
56  *
57  * FIFOs and Pipes now generate SIGIO for both readers and writers.
58  * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
59  *
60  * pipe_read & write cleanup
61  * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
62  */
63 
64 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65 {
66 	if (pipe->files)
67 		mutex_lock_nested(&pipe->mutex, subclass);
68 }
69 
70 void pipe_lock(struct pipe_inode_info *pipe)
71 {
72 	/*
73 	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
74 	 */
75 	pipe_lock_nested(pipe, I_MUTEX_PARENT);
76 }
77 EXPORT_SYMBOL(pipe_lock);
78 
79 void pipe_unlock(struct pipe_inode_info *pipe)
80 {
81 	if (pipe->files)
82 		mutex_unlock(&pipe->mutex);
83 }
84 EXPORT_SYMBOL(pipe_unlock);
85 
86 static inline void __pipe_lock(struct pipe_inode_info *pipe)
87 {
88 	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
89 }
90 
91 static inline void __pipe_unlock(struct pipe_inode_info *pipe)
92 {
93 	mutex_unlock(&pipe->mutex);
94 }
95 
96 void pipe_double_lock(struct pipe_inode_info *pipe1,
97 		      struct pipe_inode_info *pipe2)
98 {
99 	BUG_ON(pipe1 == pipe2);
100 
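	/*
	 * Take the lock with the lower address first so that two tasks
	 * double-locking the same pair of pipes always agree on the order
	 * and cannot deadlock against each other (ABBA).
	 */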
101 	if (pipe1 < pipe2) {
102 		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
103 		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
104 	} else {
105 		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
106 		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
107 	}
108 }
109 
110 /* Drop the inode semaphore and wait for a pipe event, atomically */
111 void pipe_wait(struct pipe_inode_info *pipe)
112 {
113 	DEFINE_WAIT(wait);
114 
115 	/*
116 	 * Pipes are system-local resources, so sleeping on them
117 	 * is considered a noninteractive wait:
118 	 */
119 	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
120 	pipe_unlock(pipe);
121 	schedule();
122 	finish_wait(&pipe->wait, &wait);
123 	pipe_lock(pipe);
124 }
125 
126 static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
127 				  struct pipe_buffer *buf)
128 {
129 	struct page *page = buf->page;
130 
131 	/*
132 	 * If nobody else uses this page, and we don't already have a
133 	 * temporary page, let's keep track of it as a one-deep
134 	 * allocation cache. (Otherwise just release our reference to it)
135 	 */
136 	if (page_count(page) == 1 && !pipe->tmp_page)
137 		pipe->tmp_page = page;
138 	else
139 		put_page(page);
140 }
141 
142 static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
143 			       struct pipe_buffer *buf)
144 {
145 	struct page *page = buf->page;
146 
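	/*
	 * Stealing succeeds only when the pipe holds the sole reference to
	 * the page; the memcg charge taken when the page was allocated with
	 * __GFP_ACCOUNT is dropped because ownership leaves the pipe.
	 */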
147 	if (page_count(page) == 1) {
148 		if (memcg_kmem_enabled())
149 			memcg_kmem_uncharge(page, 0);
150 		__SetPageLocked(page);
151 		return 0;
152 	}
153 	return 1;
154 }
155 
156 /**
157  * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
158  * @pipe:	the pipe that the buffer belongs to
159  * @buf:	the buffer to attempt to steal
160  *
161  * Description:
162  *	This function attempts to steal the &struct page attached to
163  *	@buf. If successful, this function returns 0 with the page
164  *	locked. The caller may then reuse the page for whatever purpose
165  *	it wishes; the typical use is insertion into a different file
166  *	page cache.
167  */
168 int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
169 			   struct pipe_buffer *buf)
170 {
171 	struct page *page = buf->page;
172 
173 	/*
174 	 * A reference count of one is golden: it means that the owner of
175 	 * this page is the only one holding a reference to it. Lock the
176 	 * page and return OK.
177 	 */
178 	if (page_count(page) == 1) {
179 		lock_page(page);
180 		return 0;
181 	}
182 
183 	return 1;
184 }
185 EXPORT_SYMBOL(generic_pipe_buf_steal);
186 
187 /**
188  * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
189  * @pipe:	the pipe that the buffer belongs to
190  * @buf:	the buffer to get a reference to
191  *
192  * Description:
193  *	This function grabs an extra reference to @buf. It's used in
194  *	the tee() system call, when we duplicate the buffers in one
195  *	pipe into another.
196  */
197 bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
198 {
199 	return try_get_page(buf->page);
200 }
201 EXPORT_SYMBOL(generic_pipe_buf_get);
202 
203 /**
204  * generic_pipe_buf_confirm - verify contents of the pipe buffer
205  * @info:	the pipe that the buffer belongs to
206  * @buf:	the buffer to confirm
207  *
208  * Description:
209  *	This function does nothing, because the generic pipe code uses
210  *	pages that are always good when inserted into the pipe.
211  */
212 int generic_pipe_buf_confirm(struct pipe_inode_info *info,
213 			     struct pipe_buffer *buf)
214 {
215 	return 0;
216 }
217 EXPORT_SYMBOL(generic_pipe_buf_confirm);
218 
219 /**
220  * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
221  * @pipe:	the pipe that the buffer belongs to
222  * @buf:	the buffer to put a reference to
223  *
224  * Description:
225  *	This function releases a reference to @buf.
226  */
227 void generic_pipe_buf_release(struct pipe_inode_info *pipe,
228 			      struct pipe_buffer *buf)
229 {
230 	put_page(buf->page);
231 }
232 EXPORT_SYMBOL(generic_pipe_buf_release);
233 
234 static const struct pipe_buf_operations anon_pipe_buf_ops = {
235 	.can_merge = 1,
236 	.confirm = generic_pipe_buf_confirm,
237 	.release = anon_pipe_buf_release,
238 	.steal = anon_pipe_buf_steal,
239 	.get = generic_pipe_buf_get,
240 };
241 
242 static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
243 	.can_merge = 0,
244 	.confirm = generic_pipe_buf_confirm,
245 	.release = anon_pipe_buf_release,
246 	.steal = anon_pipe_buf_steal,
247 	.get = generic_pipe_buf_get,
248 };
249 
250 static const struct pipe_buf_operations packet_pipe_buf_ops = {
251 	.can_merge = 0,
252 	.confirm = generic_pipe_buf_confirm,
253 	.release = anon_pipe_buf_release,
254 	.steal = anon_pipe_buf_steal,
255 	.get = generic_pipe_buf_get,
256 };
257 
258 void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
259 {
260 	if (buf->ops == &anon_pipe_buf_ops)
261 		buf->ops = &anon_pipe_buf_nomerge_ops;
262 }
263 
264 static ssize_t
265 pipe_read(struct kiocb *iocb, struct iov_iter *to)
266 {
267 	size_t total_len = iov_iter_count(to);
268 	struct file *filp = iocb->ki_filp;
269 	struct pipe_inode_info *pipe = filp->private_data;
270 	int do_wakeup;
271 	ssize_t ret;
272 
273 	/* Null read succeeds. */
274 	if (unlikely(total_len == 0))
275 		return 0;
276 
277 	do_wakeup = 0;
278 	ret = 0;
279 	__pipe_lock(pipe);
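	/*
	 * The pipe is a ring of pipe->buffers (always a power of two)
	 * pipe_buffer slots: curbuf indexes the first non-empty slot,
	 * nrbufs counts how many slots are in use, and wrap-around is
	 * done with "& (pipe->buffers - 1)".
	 */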
280 	for (;;) {
281 		int bufs = pipe->nrbufs;
282 		if (bufs) {
283 			int curbuf = pipe->curbuf;
284 			struct pipe_buffer *buf = pipe->bufs + curbuf;
285 			size_t chars = buf->len;
286 			size_t written;
287 			int error;
288 
289 			if (chars > total_len)
290 				chars = total_len;
291 
292 			error = pipe_buf_confirm(pipe, buf);
293 			if (error) {
294 				if (!ret)
295 					ret = error;
296 				break;
297 			}
298 
299 			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
300 			if (unlikely(written < chars)) {
301 				if (!ret)
302 					ret = -EFAULT;
303 				break;
304 			}
305 			ret += chars;
306 			buf->offset += chars;
307 			buf->len -= chars;
308 
309 			/* Was it a packet buffer? Clean up and exit */
310 			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
311 				total_len = chars;
312 				buf->len = 0;
313 			}
314 
315 			if (!buf->len) {
316 				pipe_buf_release(pipe, buf);
317 				curbuf = (curbuf + 1) & (pipe->buffers - 1);
318 				pipe->curbuf = curbuf;
319 				pipe->nrbufs = --bufs;
320 				do_wakeup = 1;
321 			}
322 			total_len -= chars;
323 			if (!total_len)
324 				break;	/* common path: read succeeded */
325 		}
326 		if (bufs)	/* More to do? */
327 			continue;
328 		if (!pipe->writers)
329 			break;
330 		if (!pipe->waiting_writers) {
331 			/* syscall merging: Usually we must not sleep
332 			 * if O_NONBLOCK is set, or if we got some data.
333 			 * But if a writer sleeps in kernel space, then
334 			 * we can wait for that data without violating POSIX.
335 			 */
336 			if (ret)
337 				break;
338 			if (filp->f_flags & O_NONBLOCK) {
339 				ret = -EAGAIN;
340 				break;
341 			}
342 		}
343 		if (signal_pending(current)) {
344 			if (!ret)
345 				ret = -ERESTARTSYS;
346 			break;
347 		}
348 		if (do_wakeup) {
349 			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
350 			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
351 		}
352 		pipe_wait(pipe);
353 	}
354 	__pipe_unlock(pipe);
355 
356 	/* Signal writers asynchronously that there is more room. */
357 	if (do_wakeup) {
358 		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
359 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
360 	}
361 	if (ret > 0)
362 		file_accessed(filp);
363 	return ret;
364 }
365 
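/*
 * A pipe opened with O_DIRECT operates in "packet mode": every write lands
 * in its own buffer flagged PIPE_BUF_FLAG_PACKET and a read consumes at
 * most one such packet (see pipe_read() above).
 */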
366 static inline int is_packetized(struct file *file)
367 {
368 	return (file->f_flags & O_DIRECT) != 0;
369 }
370 
371 static ssize_t
372 pipe_write(struct kiocb *iocb, struct iov_iter *from)
373 {
374 	struct file *filp = iocb->ki_filp;
375 	struct pipe_inode_info *pipe = filp->private_data;
376 	ssize_t ret = 0;
377 	int do_wakeup = 0;
378 	size_t total_len = iov_iter_count(from);
379 	ssize_t chars;
380 
381 	/* Null write succeeds. */
382 	if (unlikely(total_len == 0))
383 		return 0;
384 
385 	__pipe_lock(pipe);
386 
387 	if (!pipe->readers) {
388 		send_sig(SIGPIPE, current, 0);
389 		ret = -EPIPE;
390 		goto out;
391 	}
392 
393 	/* We try to merge small writes */
394 	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
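	/*
	 * A write whose length is not a multiple of PAGE_SIZE may have its
	 * leading "chars" bytes appended to the most recently filled buffer,
	 * provided that buffer's ops allow merging (can_merge) and the bytes
	 * still fit within its backing page.
	 */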
395 	if (pipe->nrbufs && chars != 0) {
396 		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
397 							(pipe->buffers - 1);
398 		struct pipe_buffer *buf = pipe->bufs + lastbuf;
399 		int offset = buf->offset + buf->len;
400 
401 		if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
402 			ret = pipe_buf_confirm(pipe, buf);
403 			if (ret)
404 				goto out;
405 
406 			ret = copy_page_from_iter(buf->page, offset, chars, from);
407 			if (unlikely(ret < chars)) {
408 				ret = -EFAULT;
409 				goto out;
410 			}
411 			do_wakeup = 1;
412 			buf->len += ret;
413 			if (!iov_iter_count(from))
414 				goto out;
415 		}
416 	}
417 
418 	for (;;) {
419 		int bufs;
420 
421 		if (!pipe->readers) {
422 			send_sig(SIGPIPE, current, 0);
423 			if (!ret)
424 				ret = -EPIPE;
425 			break;
426 		}
427 		bufs = pipe->nrbufs;
428 		if (bufs < pipe->buffers) {
429 			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
430 			struct pipe_buffer *buf = pipe->bufs + newbuf;
431 			struct page *page = pipe->tmp_page;
432 			int copied;
433 
434 			if (!page) {
435 				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
436 				if (unlikely(!page)) {
437 					ret = ret ? : -ENOMEM;
438 					break;
439 				}
440 				pipe->tmp_page = page;
441 			}
442 			/* Always wake up, even if the copy fails. Otherwise
443 			 * we lock up (O_NONBLOCK-)readers that sleep due to
444 			 * syscall merging.
445 			 * FIXME! Is this really true?
446 			 */
447 			do_wakeup = 1;
448 			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
449 			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
450 				if (!ret)
451 					ret = -EFAULT;
452 				break;
453 			}
454 			ret += copied;
455 
456 			/* Insert it into the buffer array */
457 			buf->page = page;
458 			buf->ops = &anon_pipe_buf_ops;
459 			buf->offset = 0;
460 			buf->len = copied;
461 			buf->flags = 0;
462 			if (is_packetized(filp)) {
463 				buf->ops = &packet_pipe_buf_ops;
464 				buf->flags = PIPE_BUF_FLAG_PACKET;
465 			}
466 			pipe->nrbufs = ++bufs;
467 			pipe->tmp_page = NULL;
468 
469 			if (!iov_iter_count(from))
470 				break;
471 		}
472 		if (bufs < pipe->buffers)
473 			continue;
474 		if (filp->f_flags & O_NONBLOCK) {
475 			if (!ret)
476 				ret = -EAGAIN;
477 			break;
478 		}
479 		if (signal_pending(current)) {
480 			if (!ret)
481 				ret = -ERESTARTSYS;
482 			break;
483 		}
484 		if (do_wakeup) {
485 			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
486 			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
487 			do_wakeup = 0;
488 		}
489 		pipe->waiting_writers++;
490 		pipe_wait(pipe);
491 		pipe->waiting_writers--;
492 	}
493 out:
494 	__pipe_unlock(pipe);
495 	if (do_wakeup) {
496 		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
497 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
498 	}
499 	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
500 		int err = file_update_time(filp);
501 		if (err)
502 			ret = err;
503 		sb_end_write(file_inode(filp)->i_sb);
504 	}
505 	return ret;
506 }
507 
508 static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
509 {
510 	struct pipe_inode_info *pipe = filp->private_data;
511 	int count, buf, nrbufs;
512 
513 	switch (cmd) {
514 		case FIONREAD:
515 			__pipe_lock(pipe);
516 			count = 0;
517 			buf = pipe->curbuf;
518 			nrbufs = pipe->nrbufs;
519 			while (--nrbufs >= 0) {
520 				count += pipe->bufs[buf].len;
521 				buf = (buf+1) & (pipe->buffers - 1);
522 			}
523 			__pipe_unlock(pipe);
524 
525 			return put_user(count, (int __user *)arg);
526 		default:
527 			return -ENOIOCTLCMD;
528 	}
529 }
530 
531 /* No kernel lock held - fine */
532 static unsigned int
533 pipe_poll(struct file *filp, poll_table *wait)
534 {
535 	unsigned int mask;
536 	struct pipe_inode_info *pipe = filp->private_data;
537 	int nrbufs;
538 
539 	poll_wait(filp, &pipe->wait, wait);
540 
541 	/* Reading only -- no need for acquiring the semaphore.  */
542 	nrbufs = pipe->nrbufs;
543 	mask = 0;
544 	if (filp->f_mode & FMODE_READ) {
545 		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
546 		if (!pipe->writers && filp->f_version != pipe->w_counter)
547 			mask |= POLLHUP;
548 	}
549 
550 	if (filp->f_mode & FMODE_WRITE) {
551 		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
552 		/*
553 		 * Most Unices do not set POLLERR for FIFOs but on Linux they
554 		 * behave exactly like pipes for poll().
555 		 */
556 		if (!pipe->readers)
557 			mask |= POLLERR;
558 	}
559 
560 	return mask;
561 }
562 
563 static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
564 {
565 	int kill = 0;
566 
567 	spin_lock(&inode->i_lock);
568 	if (!--pipe->files) {
569 		inode->i_pipe = NULL;
570 		kill = 1;
571 	}
572 	spin_unlock(&inode->i_lock);
573 
574 	if (kill)
575 		free_pipe_info(pipe);
576 }
577 
578 static int
579 pipe_release(struct inode *inode, struct file *file)
580 {
581 	struct pipe_inode_info *pipe = file->private_data;
582 
583 	__pipe_lock(pipe);
584 	if (file->f_mode & FMODE_READ)
585 		pipe->readers--;
586 	if (file->f_mode & FMODE_WRITE)
587 		pipe->writers--;
588 
589 	if (pipe->readers || pipe->writers) {
590 		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
591 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
592 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
593 	}
594 	__pipe_unlock(pipe);
595 
596 	put_pipe_info(inode, pipe);
597 	return 0;
598 }
599 
600 static int
601 pipe_fasync(int fd, struct file *filp, int on)
602 {
603 	struct pipe_inode_info *pipe = filp->private_data;
604 	int retval = 0;
605 
606 	__pipe_lock(pipe);
607 	if (filp->f_mode & FMODE_READ)
608 		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
609 	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
610 		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
611 		if (retval < 0 && (filp->f_mode & FMODE_READ))
612 			/* this can happen only if on == T */
613 			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
614 	}
615 	__pipe_unlock(pipe);
616 	return retval;
617 }
618 
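/*
 * Adjust the per-user count of pipe buffer pages by (new - old) and return
 * the resulting total, which callers compare against the soft/hard limits.
 */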
619 static unsigned long account_pipe_buffers(struct user_struct *user,
620                                  unsigned long old, unsigned long new)
621 {
622 	return atomic_long_add_return(new - old, &user->pipe_bufs);
623 }
624 
625 static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
626 {
627 	return pipe_user_pages_soft && user_bufs > pipe_user_pages_soft;
628 }
629 
630 static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
631 {
632 	return pipe_user_pages_hard && user_bufs > pipe_user_pages_hard;
633 }
634 
635 static bool is_unprivileged_user(void)
636 {
637 	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
638 }
639 
640 struct pipe_inode_info *alloc_pipe_info(void)
641 {
642 	struct pipe_inode_info *pipe;
643 	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
644 	struct user_struct *user = get_current_user();
645 	unsigned long user_bufs;
646 
647 	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
648 	if (pipe == NULL)
649 		goto out_free_uid;
650 
651 	if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
652 		pipe_bufs = pipe_max_size >> PAGE_SHIFT;
653 
654 	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
655 
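	/*
	 * Over the soft limit an unprivileged user still gets a pipe, but
	 * shrunk to a single buffer; over the hard limit the allocation
	 * fails outright.
	 */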
656 	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
657 		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
658 		pipe_bufs = 1;
659 	}
660 
661 	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
662 		goto out_revert_acct;
663 
664 	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
665 			     GFP_KERNEL_ACCOUNT);
666 
667 	if (pipe->bufs) {
668 		init_waitqueue_head(&pipe->wait);
669 		pipe->r_counter = pipe->w_counter = 1;
670 		pipe->buffers = pipe_bufs;
671 		pipe->user = user;
672 		mutex_init(&pipe->mutex);
673 		return pipe;
674 	}
675 
676 out_revert_acct:
677 	(void) account_pipe_buffers(user, pipe_bufs, 0);
678 	kfree(pipe);
679 out_free_uid:
680 	free_uid(user);
681 	return NULL;
682 }
683 
684 void free_pipe_info(struct pipe_inode_info *pipe)
685 {
686 	int i;
687 
688 	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
689 	free_uid(pipe->user);
690 	for (i = 0; i < pipe->buffers; i++) {
691 		struct pipe_buffer *buf = pipe->bufs + i;
692 		if (buf->ops)
693 			pipe_buf_release(pipe, buf);
694 	}
695 	if (pipe->tmp_page)
696 		__free_page(pipe->tmp_page);
697 	kfree(pipe->bufs);
698 	kfree(pipe);
699 }
700 
701 static struct vfsmount *pipe_mnt __read_mostly;
702 
703 /*
704  * pipefs_dname() is called from d_path().
705  */
706 static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
707 {
708 	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
709 				d_inode(dentry)->i_ino);
710 }
711 
712 static const struct dentry_operations pipefs_dentry_operations = {
713 	.d_dname	= pipefs_dname,
714 };
715 
716 static struct inode * get_pipe_inode(void)
717 {
718 	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
719 	struct pipe_inode_info *pipe;
720 
721 	if (!inode)
722 		goto fail_inode;
723 
724 	inode->i_ino = get_next_ino();
725 
726 	pipe = alloc_pipe_info();
727 	if (!pipe)
728 		goto fail_iput;
729 
730 	inode->i_pipe = pipe;
731 	pipe->files = 2;
732 	pipe->readers = pipe->writers = 1;
733 	inode->i_fop = &pipefifo_fops;
734 
735 	/*
736 	 * Mark the inode dirty from the very beginning,
737 	 * that way it will never be moved to the dirty
738 	 * list because "mark_inode_dirty()" will think
739 	 * that it already _is_ on the dirty list.
740 	 */
741 	inode->i_state = I_DIRTY;
742 	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
743 	inode->i_uid = current_fsuid();
744 	inode->i_gid = current_fsgid();
745 	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
746 
747 	return inode;
748 
749 fail_iput:
750 	iput(inode);
751 
752 fail_inode:
753 	return NULL;
754 }
755 
756 int create_pipe_files(struct file **res, int flags)
757 {
758 	int err;
759 	struct inode *inode = get_pipe_inode();
760 	struct file *f;
761 	struct path path;
762 
763 	if (!inode)
764 		return -ENFILE;
765 
766 	err = -ENOMEM;
767 	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &empty_name);
768 	if (!path.dentry)
769 		goto err_inode;
770 	path.mnt = mntget(pipe_mnt);
771 
772 	d_instantiate(path.dentry, inode);
773 
774 	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
775 	if (IS_ERR(f)) {
776 		err = PTR_ERR(f);
777 		goto err_dentry;
778 	}
779 
780 	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
781 	f->private_data = inode->i_pipe;
782 
783 	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
784 	if (IS_ERR(res[0])) {
785 		err = PTR_ERR(res[0]);
786 		goto err_file;
787 	}
788 
789 	path_get(&path);
790 	res[0]->private_data = inode->i_pipe;
791 	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
792 	res[1] = f;
793 	return 0;
794 
795 err_file:
796 	put_filp(f);
797 err_dentry:
798 	free_pipe_info(inode->i_pipe);
799 	path_put(&path);
800 	return err;
801 
802 err_inode:
803 	free_pipe_info(inode->i_pipe);
804 	iput(inode);
805 	return err;
806 }
807 
808 static int __do_pipe_flags(int *fd, struct file **files, int flags)
809 {
810 	int error;
811 	int fdw, fdr;
812 
813 	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
814 		return -EINVAL;
815 
816 	error = create_pipe_files(files, flags);
817 	if (error)
818 		return error;
819 
820 	error = get_unused_fd_flags(flags);
821 	if (error < 0)
822 		goto err_read_pipe;
823 	fdr = error;
824 
825 	error = get_unused_fd_flags(flags);
826 	if (error < 0)
827 		goto err_fdr;
828 	fdw = error;
829 
830 	audit_fd_pair(fdr, fdw);
831 	fd[0] = fdr;
832 	fd[1] = fdw;
833 	return 0;
834 
835  err_fdr:
836 	put_unused_fd(fdr);
837  err_read_pipe:
838 	fput(files[0]);
839 	fput(files[1]);
840 	return error;
841 }
842 
843 int do_pipe_flags(int *fd, int flags)
844 {
845 	struct file *files[2];
846 	int error = __do_pipe_flags(fd, files, flags);
847 	if (!error) {
848 		fd_install(fd[0], files[0]);
849 		fd_install(fd[1], files[1]);
850 	}
851 	return error;
852 }
853 
854 /*
855  * sys_pipe() is the normal C calling standard for creating
856  * a pipe. It's not the way Unix traditionally does this, though.
857  */
858 SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
859 {
860 	struct file *files[2];
861 	int fd[2];
862 	int error;
863 
864 	error = __do_pipe_flags(fd, files, flags);
865 	if (!error) {
866 		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
867 			fput(files[0]);
868 			fput(files[1]);
869 			put_unused_fd(fd[0]);
870 			put_unused_fd(fd[1]);
871 			error = -EFAULT;
872 		} else {
873 			fd_install(fd[0], files[0]);
874 			fd_install(fd[1], files[1]);
875 		}
876 	}
877 	return error;
878 }
879 
880 SYSCALL_DEFINE1(pipe, int __user *, fildes)
881 {
882 	return sys_pipe2(fildes, 0);
883 }
884 
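/*
 * Sleep until the other side's open counter changes (i.e. a new reader or
 * writer has opened the FIFO) or a signal interrupts the wait.
 */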
885 static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
886 {
887 	int cur = *cnt;
888 
889 	while (cur == *cnt) {
890 		pipe_wait(pipe);
891 		if (signal_pending(current))
892 			break;
893 	}
894 	return cur == *cnt ? -ERESTARTSYS : 0;
895 }
896 
897 static void wake_up_partner(struct pipe_inode_info *pipe)
898 {
899 	wake_up_interruptible(&pipe->wait);
900 }
901 
902 static int fifo_open(struct inode *inode, struct file *filp)
903 {
904 	struct pipe_inode_info *pipe;
905 	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
906 	int ret;
907 
908 	filp->f_version = 0;
909 
910 	spin_lock(&inode->i_lock);
911 	if (inode->i_pipe) {
912 		pipe = inode->i_pipe;
913 		pipe->files++;
914 		spin_unlock(&inode->i_lock);
915 	} else {
916 		spin_unlock(&inode->i_lock);
917 		pipe = alloc_pipe_info();
918 		if (!pipe)
919 			return -ENOMEM;
920 		pipe->files = 1;
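		/*
		 * i_lock was dropped while allocating, so another opener may
		 * have installed a pipe on this inode in the meantime; if so,
		 * use that one and free ours.
		 */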
921 		spin_lock(&inode->i_lock);
922 		if (unlikely(inode->i_pipe)) {
923 			inode->i_pipe->files++;
924 			spin_unlock(&inode->i_lock);
925 			free_pipe_info(pipe);
926 			pipe = inode->i_pipe;
927 		} else {
928 			inode->i_pipe = pipe;
929 			spin_unlock(&inode->i_lock);
930 		}
931 	}
932 	filp->private_data = pipe;
933 	/* OK, we have a pipe and it's pinned down */
934 
935 	__pipe_lock(pipe);
936 
937 	/* We can only do regular read/write on fifos */
938 	filp->f_mode &= (FMODE_READ | FMODE_WRITE);
939 
940 	switch (filp->f_mode) {
941 	case FMODE_READ:
942 	/*
943 	 *  O_RDONLY
944 	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
945 	 *  opened, even when there is no process writing the FIFO.
946 	 */
947 		pipe->r_counter++;
948 		if (pipe->readers++ == 0)
949 			wake_up_partner(pipe);
950 
951 		if (!is_pipe && !pipe->writers) {
952 			if ((filp->f_flags & O_NONBLOCK)) {
953 				/* suppress POLLHUP until we have
954 				 * seen a writer */
955 				filp->f_version = pipe->w_counter;
956 			} else {
957 				if (wait_for_partner(pipe, &pipe->w_counter))
958 					goto err_rd;
959 			}
960 		}
961 		break;
962 
963 	case FMODE_WRITE:
964 	/*
965 	 *  O_WRONLY
966 	 *  POSIX.1 says that O_NONBLOCK means return -1 with
967 	 *  errno=ENXIO when there is no process reading the FIFO.
968 	 */
969 		ret = -ENXIO;
970 		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
971 			goto err;
972 
973 		pipe->w_counter++;
974 		if (!pipe->writers++)
975 			wake_up_partner(pipe);
976 
977 		if (!is_pipe && !pipe->readers) {
978 			if (wait_for_partner(pipe, &pipe->r_counter))
979 				goto err_wr;
980 		}
981 		break;
982 
983 	case FMODE_READ | FMODE_WRITE:
984 	/*
985 	 *  O_RDWR
986 	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
987 	 *  This implementation will NEVER block on a O_RDWR open, since
988 	 *  the process can at least talk to itself.
989 	 */
990 
991 		pipe->readers++;
992 		pipe->writers++;
993 		pipe->r_counter++;
994 		pipe->w_counter++;
995 		if (pipe->readers == 1 || pipe->writers == 1)
996 			wake_up_partner(pipe);
997 		break;
998 
999 	default:
1000 		ret = -EINVAL;
1001 		goto err;
1002 	}
1003 
1004 	/* Ok! */
1005 	__pipe_unlock(pipe);
1006 	return 0;
1007 
1008 err_rd:
1009 	if (!--pipe->readers)
1010 		wake_up_interruptible(&pipe->wait);
1011 	ret = -ERESTARTSYS;
1012 	goto err;
1013 
1014 err_wr:
1015 	if (!--pipe->writers)
1016 		wake_up_interruptible(&pipe->wait);
1017 	ret = -ERESTARTSYS;
1018 	goto err;
1019 
1020 err:
1021 	__pipe_unlock(pipe);
1022 
1023 	put_pipe_info(inode, pipe);
1024 	return ret;
1025 }
1026 
1027 const struct file_operations pipefifo_fops = {
1028 	.open		= fifo_open,
1029 	.llseek		= no_llseek,
1030 	.read_iter	= pipe_read,
1031 	.write_iter	= pipe_write,
1032 	.poll		= pipe_poll,
1033 	.unlocked_ioctl	= pipe_ioctl,
1034 	.release	= pipe_release,
1035 	.fasync		= pipe_fasync,
1036 };
1037 
1038 /*
1039  * Currently we rely on the pipe array holding a power-of-2 number
1040  * of pages. Returns 0 on error.
1041  */
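/*
 * For example, with 4 KiB pages a request of 12000 bytes spans 3 pages and
 * is rounded up to 4 pages, i.e. 16384 bytes.
 */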
1042 static inline unsigned int round_pipe_size(unsigned int size)
1043 {
1044 	unsigned long nr_pages;
1045 
1046 	if (size < pipe_min_size)
1047 		size = pipe_min_size;
1048 
1049 	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1050 	if (nr_pages == 0)
1051 		return 0;
1052 
1053 	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
1054 }
1055 
1056 /*
1057  * Allocate a new array of pipe buffers and copy the info over. Returns the
1058  * pipe size if successful, or a negative error code on failure.
1059  */
1060 static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
1061 {
1062 	struct pipe_buffer *bufs;
1063 	unsigned int size, nr_pages;
1064 	unsigned long user_bufs;
1065 	long ret = 0;
1066 
1067 	size = round_pipe_size(arg);
1068 	if (size == 0)
1069 		return -EINVAL;
1070 	nr_pages = size >> PAGE_SHIFT;
1071 
1072 	if (!nr_pages)
1073 		return -EINVAL;
1074 
1075 	/*
1076 	 * If trying to increase the pipe capacity, check that an
1077 	 * unprivileged user is not trying to exceed various limits
1078 	 * (soft limit check here, hard limit check just below).
1079 	 * Decreasing the pipe capacity is always permitted, even
1080 	 * if the user is currently over a limit.
1081 	 */
1082 	if (nr_pages > pipe->buffers &&
1083 			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
1084 		return -EPERM;
1085 
1086 	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
1087 
1088 	if (nr_pages > pipe->buffers &&
1089 			(too_many_pipe_buffers_hard(user_bufs) ||
1090 			 too_many_pipe_buffers_soft(user_bufs)) &&
1091 			is_unprivileged_user()) {
1092 		ret = -EPERM;
1093 		goto out_revert_acct;
1094 	}
1095 
1096 	/*
1097 	 * We can shrink the pipe, if nr_pages >= pipe->nrbufs. Since we don't
1098 	 * expect a lot of shrink+grow operations, just free and allocate
1099 	 * again like we would do for growing. If the pipe currently
1100 	 * contains more buffers than nr_pages, then return busy.
1101 	 */
1102 	if (nr_pages < pipe->nrbufs) {
1103 		ret = -EBUSY;
1104 		goto out_revert_acct;
1105 	}
1106 
1107 	bufs = kcalloc(nr_pages, sizeof(*bufs),
1108 		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1109 	if (unlikely(!bufs)) {
1110 		ret = -ENOMEM;
1111 		goto out_revert_acct;
1112 	}
1113 
1114 	/*
1115 	 * The pipe array wraps around, so just start the new one at zero
1116 	 * and adjust the indexes.
1117 	 */
1118 	if (pipe->nrbufs) {
1119 		unsigned int tail;
1120 		unsigned int head;
1121 
1122 		tail = pipe->curbuf + pipe->nrbufs;
1123 		if (tail < pipe->buffers)
1124 			tail = 0;
1125 		else
1126 			tail &= (pipe->buffers - 1);
1127 
1128 		head = pipe->nrbufs - tail;
1129 		if (head)
1130 			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
1131 		if (tail)
1132 			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
1133 	}
1134 
1135 	pipe->curbuf = 0;
1136 	kfree(pipe->bufs);
1137 	pipe->bufs = bufs;
1138 	pipe->buffers = nr_pages;
1139 	return nr_pages * PAGE_SIZE;
1140 
1141 out_revert_acct:
1142 	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
1143 	return ret;
1144 }
1145 
1146 /*
1147  * This should work even if CONFIG_PROC_FS isn't set, as proc_douintvec_minmax
1148  * will return an error.
1149  */
1150 int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
1151 		 size_t *lenp, loff_t *ppos)
1152 {
1153 	unsigned int rounded_pipe_max_size;
1154 	int ret;
1155 
1156 	ret = proc_douintvec_minmax(table, write, buf, lenp, ppos);
1157 	if (ret < 0 || !write)
1158 		return ret;
1159 
1160 	rounded_pipe_max_size = round_pipe_size(pipe_max_size);
1161 	if (rounded_pipe_max_size == 0)
1162 		return -EINVAL;
1163 
1164 	pipe_max_size = rounded_pipe_max_size;
1165 	return ret;
1166 }
1167 
1168 /*
1169  * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1170  * location, so checking ->i_pipe is not enough to verify that this is a
1171  * pipe.
1172  */
1173 struct pipe_inode_info *get_pipe_info(struct file *file)
1174 {
1175 	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
1176 }
1177 
1178 long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1179 {
1180 	struct pipe_inode_info *pipe;
1181 	long ret;
1182 
1183 	pipe = get_pipe_info(file);
1184 	if (!pipe)
1185 		return -EBADF;
1186 
1187 	__pipe_lock(pipe);
1188 
1189 	switch (cmd) {
1190 	case F_SETPIPE_SZ:
1191 		ret = pipe_set_size(pipe, arg);
1192 		break;
1193 	case F_GETPIPE_SZ:
1194 		ret = pipe->buffers * PAGE_SIZE;
1195 		break;
1196 	default:
1197 		ret = -EINVAL;
1198 		break;
1199 	}
1200 
1201 	__pipe_unlock(pipe);
1202 	return ret;
1203 }
1204 
1205 static const struct super_operations pipefs_ops = {
1206 	.destroy_inode = free_inode_nonrcu,
1207 	.statfs = simple_statfs,
1208 };
1209 
1210 /*
1211  * pipefs should _never_ be mounted by userland - too much of security hassle,
1212  * no real gain from having the whole whorehouse mounted. So we don't need
1213  * any operations on the root directory. However, we need a non-trivial
1214  * d_name - pipe: will go nicely and kill the special-casing in procfs.
1215  */
1216 static struct dentry *pipefs_mount(struct file_system_type *fs_type,
1217 			 int flags, const char *dev_name, void *data)
1218 {
1219 	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
1220 			&pipefs_dentry_operations, PIPEFS_MAGIC);
1221 }
1222 
1223 static struct file_system_type pipe_fs_type = {
1224 	.name		= "pipefs",
1225 	.mount		= pipefs_mount,
1226 	.kill_sb	= kill_anon_super,
1227 };
1228 
1229 static int __init init_pipe_fs(void)
1230 {
1231 	int err = register_filesystem(&pipe_fs_type);
1232 
1233 	if (!err) {
1234 		pipe_mnt = kern_mount(&pipe_fs_type);
1235 		if (IS_ERR(pipe_mnt)) {
1236 			err = PTR_ERR(pipe_mnt);
1237 			unregister_filesystem(&pipe_fs_type);
1238 		}
1239 	}
1240 	return err;
1241 }
1242 
1243 fs_initcall(init_pipe_fs);
1244