// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
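
/*
 * Note: pipe_double_lock() takes the two mutexes in ascending address
 * order, so any two tasks that need the same pair of pipes always lock
 * them in the same order. That rules out the classic ABBA deadlock:
 * task A holding pipe1 and waiting for pipe2 while task B holds pipe2
 * and waits for pipe1.
 */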

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
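
/*
 * The prepare_to_wait()/schedule()/finish_wait() sequence above is the
 * standard kernel idiom for sleeping without losing wakeups: the task is
 * queued on pipe->wait *before* the pipe mutex is dropped, so a wakeup
 * that races with pipe_unlock() still finds the sleeper on the queue and
 * simply makes the subsequent schedule() return immediately.
 */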

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		if (memcg_kmem_enabled())
			memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
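
/*
 * The three operation tables above are identical except for .can_merge.
 * Using distinct tables (rather than a per-buffer flag) lets
 * pipe_buf_mark_unmergeable() below switch a buffer between merge and
 * no-merge behaviour with a simple pointer swap, while leaving
 * packet-mode buffers, which keep their own table, untouched.
 */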

void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
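
/*
 * Note the ring-buffer advance in pipe_read():
 *
 *	curbuf = (curbuf + 1) & (pipe->buffers - 1);
 *
 * The mask trick only works because pipe->buffers is kept a power of
 * two (see round_pipe_size() below); e.g. with 16 slots, slot 15 wraps
 * to (15 + 1) & 15 == 0 without a division or branch.
 */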

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
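
/*
 * Since Linux 3.4, O_DIRECT on a pipe selects "packet mode" (see pipe(2)):
 * each write becomes a discrete packet, and a read returns at most one
 * packet even if the supplied buffer could hold more.
 */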

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
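
/*
 * The "merge small writes" fast path in pipe_write() appends a write's
 * sub-page tail onto the last buffer when that buffer's ops allow merging
 * and the data still fits in its page. Two 100-byte writes thus share one
 * page instead of consuming a full page each; packet-mode buffers and
 * buffers marked via pipe_buf_mark_unmergeable() opt out with
 * can_merge == 0.
 */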

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}
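
/*
 * FIONREAD reports the number of unread bytes summed across all occupied
 * slots. A minimal userspace sketch (illustrative, not part of this file):
 *
 *	int avail;
 *	if (ioctl(pipefd[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes buffered\n", avail);
 */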

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore.  */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}
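
/*
 * pipe_poll() deliberately samples pipe->nrbufs without taking the pipe
 * mutex: poll results are advisory snapshots anyway, and a reader or
 * writer that races with the check will wake the waitqueue registered
 * above via poll_wait(), so a stale answer costs at most a spurious
 * wakeup and re-poll.
 */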

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
                                 unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}
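
/*
 * account_pipe_buffers() charges and uncharges through the same call:
 * the delta (new - old) is negative when a pipe shrinks or is freed, so
 * e.g. account_pipe_buffers(user, pipe->buffers, 0) in free_pipe_info()
 * returns the per-user page count after giving the pages back.
 */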

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
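
/*
 * For reference, the userspace view of the two syscalls above
 * (illustrative snippet, not part of this file):
 *
 *	int fds[2];
 *	char buf[2];
 *	if (pipe2(fds, O_CLOEXEC) == 0) {
 *		write(fds[1], "hi", 2);	(fds[1] is the write end)
 *		read(fds[0], buf, 2);	(fds[0] is the read end)
 *	}
 */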

static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}
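
/*
 * wait_for_partner() watches one of the open counters (r_counter or
 * w_counter) rather than the live readers/writers counts: a partner that
 * opens and immediately closes still bumps the counter, so a blocked
 * open notices that a partner appeared even if it wakes up only after
 * the partner has gone again.
 */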

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
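
/*
 * Example (with 4 KiB pages): round_pipe_size(1) == 4096,
 * round_pipe_size(4097) == 8192, and round_pipe_size(65536) == 65536.
 * The power-of-two result is what keeps the index masking in pipe_read()
 * and pipe_write() valid.
 */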

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or return -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}
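
/*
 * Unwrap example for the copy above: with 8 slots, curbuf == 6 and
 * nrbufs == 4, the occupied slots are 6, 7, 0, 1. The first memcpy moves
 * slots 6-7 ("head", the part up to the end of the old array) into new
 * slots 0-1, the second moves old slots 0-1 ("tail", the wrapped part)
 * into new slots 2-3, and curbuf is reset to 0.
 */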

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
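
/*
 * F_SETPIPE_SZ and F_GETPIPE_SZ reach this function via fcntl(2).
 * A minimal userspace sketch (illustrative, not part of this file):
 *
 *	long cap = fcntl(pipefd[1], F_GETPIPE_SZ);
 *	fcntl(pipefd[1], F_SETPIPE_SZ, 1 << 20);	(requests a 1 MiB pipe)
 */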

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);