// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

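	/* Take the two locks in address order to avoid an ABBA deadlock. */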
	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

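/*
 * These two are laid out identically to anon_pipe_buf_ops; only the ops
 * pointer identity matters.  pipe_buf_can_merge() treats a buffer as
 * mergeable only when buf->ops == &anon_pipe_buf_ops, so buffers carrying
 * the nomerge or packet ops are never appended to.
 */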
static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/**
 * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
 * @buf:	the buffer to mark
 *
 * Description:
 *	This function ensures that no future writes will be merged into the
 *	given &struct pipe_buffer. This is necessary when multiple pipe buffers
 *	share the same backing page.
 */
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}

static bool pipe_buf_can_merge(struct pipe_buffer *buf)
{
	return buf->ops == &anon_pipe_buf_ops;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

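			/*
			 * Buffer fully consumed: release it and advance the
			 * ring; pipe->buffers is a power of two, so the mask
			 * wraps curbuf around.
			 */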
			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
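			/*
			 * There is a free slot: the head of the ring is
			 * (curbuf + nrbufs), masked by the power-of-two
			 * ring size.
			 */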
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
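	/*
	 * Update the inode times only if we can get write access to the
	 * superblock without blocking (it may be frozen); skipping the
	 * update is preferable to stalling the writer here.
	 */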
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		buf = pipe->curbuf;
		nrbufs = pipe->nrbufs;
		while (--nrbufs >= 0) {
			count += pipe->bufs[buf].len;
			buf = (buf+1) & (pipe->buffers - 1);
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
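		/*
		 * Report hangup only once a writer has come and gone since
		 * this file was opened; fifo_open() stores w_counter in
		 * f_version to suppress a spurious EPOLLHUP on an O_NONBLOCK
		 * reader that has not yet seen a writer.
		 */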
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

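	/* pipe->files is protected by inode->i_lock; the last closer frees the pipe. */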
	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
					  unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

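	/*
	 * Over the per-user soft limit: charge only the two-buffer minimum;
	 * the hard limit check below can still fail the allocation outright.
	 */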
	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
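	/* One read end and one write end, each holding a file reference. */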
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

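	/* res[0] is the read end, cloned from the write-side file f (res[1]). */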
	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
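		/*
		 * No pipe yet: drop the lock to allocate (it may sleep), then
		 * recheck i_pipe under the lock in case someone else
		 * installed one in the meantime.
		 */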
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

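		/*
		 * "tail" is the number of occupied slots that wrapped past
		 * the end of the old array; "head" is the rest, starting at
		 * curbuf.  Copy them back-to-back so the new ring starts at
		 * index 0.
		 */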
		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);