// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2

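/*
 * Example (editorial sketch, not part of the original file): a minimal
 * userspace illustration of the pipe-as-semaphore pattern described above,
 * as used by the GNU make jobserver. Each byte in the pipe is one job
 * token; read() acquires a slot and write() releases it. Assumes only
 * POSIX pipe(2)/read(2)/write(2).
 *
 *	int fds[2];
 *	char token = '+';
 *
 *	pipe(fds);
 *	write(fds[1], &token, 1);	// make one job slot available
 *	read(fds[0], &token, 1);	// acquire the slot (may block)
 *	write(fds[1], &token, 1);	// release it when the job is done
 */
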
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. The hard limit is unset by default;
 * the soft limit matches the default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally.  This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

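/*
 * Example (editorial sketch): the unmasked head/tail scheme described
 * above. Because both indices are unsigned ints that wrap naturally
 * modulo 2^32, and the ring size is a power of two, occupancy and slot
 * lookup reduce to:
 *
 *	unsigned int mask = pipe->ring_size - 1;	// ring_size is 2^n
 *	unsigned int occupancy = pipe->head - pipe->tail;	// correct across wrap
 *	struct pipe_buffer *buf = &pipe->bufs[pipe->tail & mask];
 *
 * An empty ring is head == tail; a full one is occupancy >= max_usage.
 * No slot is wasted on distinguishing the two states.
 */
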
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	they wish; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
				if (buf->flags & PIPE_BUF_FLAG_LOSS)
					pipe->note_loss = true;
#endif
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

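/*
 * Example (editorial sketch): creating a "packetized" pipe from userspace.
 * Passing O_DIRECT to pipe2(2) makes every write a discrete packet
 * (PIPE_BUF_FLAG_PACKET above): a read consumes at most one packet, and
 * whatever the read buffer cannot hold is discarded.
 *
 *	int fds[2];
 *	char buf[64];
 *
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "hello", 5);	// one 5-byte packet
 *	write(fds[1], "world", 5);	// a second packet
 *	read(fds[0], buf, 64);		// returns 5: only "hello"
 */
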
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		ret = -EXDEV;
		goto out;
	}
#endif

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer.  If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

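/*
 * Example (editorial sketch): how the O_NONBLOCK paths above look from
 * userspace. A nonblocking write to a full pipe fails with EAGAIN instead
 * of sleeping in wr_wait, and a write with no readers left raises
 * SIGPIPE / EPIPE, as implemented in pipe_write().
 *
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
 *	ssize_t n = write(fd, buf, len);
 *	if (n < 0 && errno == EAGAIN)
 *		;	// pipe full: retry later, e.g. after poll(POLLOUT)
 */
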
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}

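/*
 * Example (editorial sketch): the FIONREAD path above from userspace.
 * It reports the number of unread bytes currently buffered in the pipe.
 *
 *	int avail;
 *
 *	if (ioctl(fd, FIONREAD, &avail) == 0)
 *		printf("%d bytes ready to read\n", avail);
 */
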
/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics, this enables them */
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

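/*
 * Example (editorial sketch): waiting on the mask computed by pipe_poll()
 * from userspace. POLLIN means data is buffered, POLLOUT means the ring
 * is not full, and POLLERR on the write side means the last reader is
 * gone.
 *
 *	char buf[PIPE_BUF];
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, buf, sizeof(buf));	// data is ready; won't block
 */
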
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

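/*
 * Example (editorial sketch): the flags accepted by the pipe2(2) entry
 * point above. __do_pipe_flags() rejects anything outside O_CLOEXEC,
 * O_NONBLOCK, O_DIRECT and O_NOTIFICATION_PIPE with -EINVAL.
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		perror("pipe2");
 *	// fds[0] is the read end, fds[1] the write end
 */
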
/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

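/*
 * Example (editorial sketch): the FIFO open semantics implemented in
 * fifo_open() above, seen from userspace. A blocking O_RDONLY open sleeps
 * in wait_for_partner() until a writer appears; O_WRONLY | O_NONBLOCK
 * with no reader fails with ENXIO, exactly as the POSIX.1 comments
 * describe.
 *
 *	mkfifo("/tmp/example.fifo", 0600);	// hypothetical path
 *	int fd = open("/tmp/example.fifo", O_WRONLY | O_NONBLOCK);
 *	if (fd < 0 && errno == ENXIO)
 *		;	// no reader has the FIFO open yet
 */
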
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

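/*
 * Example (editorial note): what round_pipe_size() produces for a few
 * requested sizes, assuming PAGE_SIZE == 4096:
 *
 *	round_pipe_size(0)	   -> 4096	(clamped up to one page)
 *	round_pipe_size(4097)	   -> 8192	(next power of two)
 *	round_pipe_size(65536)	   -> 65536	(already a power of two)
 *	round_pipe_size(1UL << 32) -> 0		(> 2^31, rejected)
 */
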
/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		return -EBUSY;
#endif

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	pipe->max_usage = nr_slots;
	pipe->nr_accounted = nr_slots;
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
#ifdef CONFIG_WATCH_QUEUE
	if (for_splice && pipe->watch_queue)
		return NULL;
#endif
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

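/*
 * Example (editorial sketch): resizing a pipe via the fcntl() commands
 * handled above. The kernel rounds the request up via round_pipe_size(),
 * so F_GETPIPE_SZ may report more than was asked for; unprivileged tasks
 * are capped at /proc/sys/fs/pipe-max-size.
 *
 *	if (fcntl(fd, F_SETPIPE_SZ, 1024 * 1024) < 0)
 *		perror("F_SETPIPE_SZ");	// EPERM if over the limit
 *	long size = fcntl(fd, F_GETPIPE_SZ);	// capacity in bytes
 */
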
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we don't
 * need any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);