// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"
/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2
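
/*
 * Illustrative sketch (userspace, not part of this file's build): the
 * jobserver pattern the comment above refers to uses a pipe as a counting
 * semaphore, one token byte per job slot.  Acquiring a slot is a 1-byte
 * read(), releasing it is a 1-byte write() of the token back into the
 * pipe.  With fewer than two buffers, that write-back into a non-empty
 * (but non-full) pipe could block.
 *
 *	char token;
 *	read(jobserver_fd[0], &token, 1);	// acquire a job slot
 *	// ... run the job ...
 *	write(jobserver_fd[1], &token, 1);	// release the slot
 */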

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
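
/*
 * Worked example of the unmasked-index scheme above (illustration only):
 * with ring_size = 8 the mask is 7.  head = 10 and tail = 6 describe four
 * occupied slots, at bufs[6], bufs[7], bufs[0] and bufs[1]:
 *
 *	occupancy  = head - tail	= 4
 *	first slot = bufs[tail & mask]	= bufs[6]
 *	next free  = bufs[head & mask]	= bufs[2]
 *
 * Unsigned wraparound keeps "head - tail" correct even after the counters
 * overflow, which is why the ring size must be a power of two.
 */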

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
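
/*
 * Why the address comparison above works: two tasks that need to lock the
 * same pair of pipes (e.g. when splicing between them) both take the
 * lower-addressed pipe's mutex first, so their lock orders agree and an
 * ABBA deadlock cannot occur.  The I_MUTEX_* subclasses only annotate the
 * nesting for lockdep; the ordering itself comes from the comparison.
 */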

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}
/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	they wish; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
				if (buf->flags & PIPE_BUF_FLAG_LOSS)
					pipe->note_loss = true;
#endif
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
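
/*
 * Packet mode sketch (userspace illustration, not part of this file's
 * build): opening a pipe with O_DIRECT makes each write() a discrete
 * packet.  A read() returns at most one packet, and any unread remainder
 * of an oversized packet is discarded -- see the PIPE_BUF_FLAG_PACKET
 * handling in pipe_read() above.
 *
 *	int fds[2];
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "abc", 3);	// one 3-byte packet
 *	write(fds[1], "de", 2);		// a second, separate packet
 *	char buf[16];
 *	read(fds[0], buf, sizeof(buf));	// returns 3 ("abc"), not 5
 */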

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	if (pipe_has_watch_queue(pipe)) {
		ret = -EXDEV;
		goto out;
	}

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}
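
/*
 * FIONREAD from userspace (illustration only): the ioctl above sums the
 * ->len of every occupied ring slot, i.e. the number of bytes a read()
 * could currently return without blocking.
 *
 *	int avail;
 *	ioctl(pipefd[0], FIONREAD, &avail);
 */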

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics, this enables them */
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}
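
/*
 * Accounting note (illustration, relying on the kernel's usual
 * two's-complement wraparound assumption): the delta "new - old" is
 * computed in unsigned arithmetic, so shrinking a pipe (new < old) wraps
 * to the correct negative adjustment of the per-user page count when
 * interpreted as a long.  E.g. going from 16 to 4 accounted slots:
 *
 *	account_pipe_buffers(user, 16, 4);	// pipe_bufs -= 12
 */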

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode *get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
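
/*
 * Typical userspace view of the two entry points above (illustration):
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		perror("pipe2");
 *	// fds[0] is the read end, fds[1] the write end
 *
 * Note the ordering in do_pipe2(): the fd numbers are copied to
 * userspace before fd_install(), because once a descriptor is installed
 * another thread could already be using (or closing) it, making the
 * EFAULT path impossible to unwind.
 */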

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		/*
		 * O_RDONLY
		 * POSIX.1 says that O_NONBLOCK means return with the FIFO
		 * opened, even when there is no process writing the FIFO.
		 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
		/*
		 * O_WRONLY
		 * POSIX.1 says that O_NONBLOCK means return -1 with
		 * errno=ENXIO when there is no process reading the FIFO.
		 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
		/*
		 * O_RDWR
		 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
		 * This implementation will NEVER block on a O_RDWR open, since
		 * the process can at least talk to itself.
		 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
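
/*
 * Worked examples for round_pipe_size() (assuming 4 KiB pages):
 *
 *	round_pipe_size(0)        -> 4096	(clamped to one page)
 *	round_pipe_size(100000)   -> 131072	(next power of two)
 *	round_pipe_size(1UL << 32) -> 0		(> 2^31, rejected)
 */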

/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	if (!pipe_has_watch_queue(pipe)) {
		pipe->max_usage = nr_slots;
		pipe->nr_accounted = nr_slots;
	}

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}
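
/*
 * Copy-and-unwrap example for the resize above (illustration only): with
 * ring_size = 8, tail = 6 and head = 9, the three live slots sit at old
 * indices 6, 7 and 0 (h = 1, t = 6, so the wrapped "else" branch runs):
 *
 *	memcpy(bufs,     old + 6, 2 * sizeof(*bufs));	// tsize = 2
 *	memcpy(bufs + 2, old,     1 * sizeof(*bufs));	// h = 1
 *
 * Afterwards tail = 0 and head = n = 3, i.e. the occupied region has been
 * "unwrapped" into the start of the new array.
 */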

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

	if (pipe_has_watch_queue(pipe))
		return -EBUSY;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
	    size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
	    (too_many_pipe_buffers_hard(user_bufs) ||
	     too_many_pipe_buffers_soft(user_bufs)) &&
	    pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}
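
/*
 * F_SETPIPE_SZ from userspace (illustration only): the request is rounded
 * up by round_pipe_size(), so the return value may exceed what was asked
 * for, and a later F_GETPIPE_SZ reports the same rounded figure.
 *
 *	long sz = fcntl(pipefd[1], F_SETPIPE_SZ, 100000);
 *	// sz == 131072 on a 4 KiB-page system
 */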

/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
	if (for_splice && pipe_has_watch_queue(pipe))
		return NULL;
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);