// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2
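
/*
 * Illustrative sketch (not part of this file's logic): a GNU-make-style
 * jobserver uses the pipe as a counting semaphore.  A worker acquires a
 * token with read(fds[0], &tok, 1) and releases it with
 * write(fds[1], &tok, 1), so several workers may be blocked writing tokens
 * back while none is reading; with fewer than two buffers those release
 * writes could block spuriously even though the pipe is not full.
 */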

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Maximum allocatable pages per user.  The hard limit is unset by default;
 * the soft limit defaults to the default per-pipe buffer count times the
 * default open-file limit.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
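
/*
 * Worked example (illustrative): with ring_size = 8 (mask = 7), head = 9
 * and tail = 6 dereference slots 9 & 7 = 1 and 6 & 7 = 6, and the occupancy
 * is head - tail = 3.  Unsigned wrap keeps head - tail correct even after
 * the indices overflow, provided the ring size is a power of two and
 * <= 2^31.
 */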

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	it wishes; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it.  Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
				if (buf->flags & PIPE_BUF_FLAG_LOSS)
					pipe->note_loss = true;
#endif
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full)) {
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full) {
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
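
/*
 * Illustrative userspace sketch (not part of this file): packet mode is
 * requested at pipe creation time, e.g. pipe2(fds, O_DIRECT).  Each write()
 * then produces one PIPE_BUF_FLAG_PACKET buffer that a single read()
 * consumes in full, discarding any unread remainder of that packet.
 */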

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	if (pipe_has_watch_queue(pipe)) {
		ret = -EXDEV;
		goto out;
	}

	/*
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 *
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
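	/*
	 * Worked example (illustrative): on 4 KiB pages a 5000-byte write
	 * has chars = 5000 & 4095 = 904; those 904 bytes go into the tail
	 * buffer if it is mergeable and has room, leaving the remaining
	 * 4096 bytes to start on a fresh, page-aligned buffer below.
	 */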
	head = pipe->head;
	was_empty = true;
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !pipe_empty(head, pipe->tail)) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer.  If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space.  We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty) {
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 */
	if (was_empty) {
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can only happen when on is true */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}
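
/*
 * Illustrative example: growing a pipe from 16 to 64 slots is
 * account_pipe_buffers(user, 16, 64), which adds 48 to the user's page
 * total and returns the new total; shrinking or freeing passes a smaller
 * (or zero) 'new' so the delta is negative.
 */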

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode *get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
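
/*
 * Illustrative userspace sketch (not part of this file): the syscalls above
 * back the familiar sequence
 *
 *	int fds[2];
 *	char c;
 *	if (pipe2(fds, O_CLOEXEC) == 0) {
 *		write(fds[1], "x", 1);
 *		read(fds[0], &c, 1);
 *	}
 *
 * where fds[0] is the read end and fds[1] the write end.
 */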

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		/*
		 * O_RDONLY
		 * POSIX.1 says that O_NONBLOCK means return with the FIFO
		 * opened, even when there is no process writing the FIFO.
		 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
		/*
		 * O_WRONLY
		 * POSIX.1 says that O_NONBLOCK means return -1 with
		 * errno=ENXIO when there is no process reading the FIFO.
		 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
		/*
		 * O_RDWR
		 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
		 * This implementation will NEVER block on a O_RDWR open, since
		 * the process can at least talk to itself.
		 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
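
/*
 * Worked examples (illustrative, assuming 4 KiB pages): a request of 0
 * rounds up to 4096, 5000 rounds up to 8192, and 65536 is returned
 * unchanged since it is already a power of two; anything above 2^31
 * yields 0 and is rejected by the callers.
 */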

/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
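	/*
	 * Worked example (illustrative): old ring_size = 4, tail = 3,
	 * head = 5, so n = 2 and the occupied slots are 3 and 0 (wrapped).
	 * Below, h = 5 & 3 = 1 and t = 3 & 3 = 3; since h <= t the copy is
	 * split: tsize = 1 slot from index t to the end of the old array,
	 * plus h slots from its start, landing contiguously at bufs[0..1].
	 */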
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	if (!pipe_has_watch_queue(pipe)) {
		pipe->max_usage = nr_slots;
		pipe->nr_accounted = nr_slots;
	}

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}

/*
 * Allocate a new array of pipe buffers and copy the info over.  Returns the
 * pipe size if successful, or a negative error code on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

	if (pipe_has_watch_queue(pipe))
		return -EBUSY;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}
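
/*
 * Illustrative userspace sketch (not part of this file): pipe_set_size()
 * backs fcntl(fd, F_SETPIPE_SZ, size).  Because of round_pipe_size(), a
 * caller asking for 5000 bytes actually gets 8192 back on 4 KiB pages:
 *
 *	long sz = fcntl(fds[1], F_SETPIPE_SZ, 5000);
 *	// sz == 8192; fcntl(fds[1], F_GETPIPE_SZ) now also reports 8192
 */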

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
	if (for_splice && pipe_has_watch_queue(pipe))
		return NULL;
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);