/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PIPE_FS_I_H
#define _LINUX_PIPE_FS_I_H

#define PIPE_DEF_BUFFERS	16

#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
#define PIPE_BUF_FLAG_PACKET	0x08	/* read() as a packet */
#define PIPE_BUF_FLAG_CAN_MERGE	0x10	/* can merge buffers */
#define PIPE_BUF_FLAG_WHOLE	0x20	/* read() must return entire buffer or error */
#ifdef CONFIG_WATCH_QUEUE
#define PIPE_BUF_FLAG_LOSS	0x40	/* Message loss happened after this buffer */
#endif
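
/*
 * A minimal sketch of how these flags are consulted (illustrative only; the
 * real logic lives in fs/pipe.c): the anonymous pipe write path only appends
 * a short write of @chars bytes to the most recent buffer when that buffer
 * was marked as mergeable.
 *
 *	struct pipe_buffer *buf = &pipe->bufs[(head - 1) & (pipe->ring_size - 1)];
 *
 *	if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
 *	    buf->offset + buf->len + chars <= PAGE_SIZE) {
 *		... copy the new bytes into buf->page and extend buf->len ...
 *	}
 */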

/**
 * struct pipe_buffer - a linux kernel pipe buffer
 * @page: the page containing the data for the pipe buffer
 * @offset: offset of data inside the @page
 * @len: length of data inside the @page
 * @ops: operations associated with this buffer. See @pipe_buf_operations.
 * @flags: pipe buffer flags. See above.
 * @private: private data owned by the ops.
 **/
struct pipe_buffer {
	struct page *page;
	unsigned int offset, len;
	const struct pipe_buf_operations *ops;
	unsigned int flags;
	unsigned long private;
};
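
/*
 * A pipe_buffer describes @len bytes starting at @offset within @page.  As a
 * rough sketch (assuming an iov_iter @to and a byte count @chars already
 * clamped to buf->len, much like the read path in fs/pipe.c), the data is
 * consumed with:
 *
 *	size_t written = copy_page_to_iter(buf->page, buf->offset, chars, to);
 *
 *	buf->offset += written;
 *	buf->len -= written;
 */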

/**
 * struct pipe_inode_info - a linux kernel pipe
 * @mutex: mutex protecting the whole thing
 * @rd_wait: reader wait point in case of empty pipe
 * @wr_wait: writer wait point in case of full pipe
 * @head: The point of buffer production
 * @tail: The point of buffer consumption
 * @note_loss: The next read() should insert a data-lost message
 * @max_usage: The maximum number of slots that may be used in the ring
 * @ring_size: total number of buffers (should be a power of 2)
 * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs
 * @tmp_page: cached released page
 * @readers: number of current readers of this pipe
 * @writers: number of current writers of this pipe
 * @files: number of struct file instances referring to this pipe (protected by ->i_lock)
 * @r_counter: reader counter
 * @w_counter: writer counter
 * @poll_usage: is this pipe used for epoll, which has crazy wakeups?
 * @fasync_readers: reader side fasync
 * @fasync_writers: writer side fasync
 * @bufs: the circular array of pipe buffers
 * @user: the user who created this pipe
 * @watch_queue: If this pipe is a watch_queue, this is the stuff for that
 **/
struct pipe_inode_info {
	struct mutex mutex;
	wait_queue_head_t rd_wait, wr_wait;
	unsigned int head;
	unsigned int tail;
	unsigned int max_usage;
	unsigned int ring_size;
#ifdef CONFIG_WATCH_QUEUE
	bool note_loss;
#endif
	unsigned int nr_accounted;
	unsigned int readers;
	unsigned int writers;
	unsigned int files;
	unsigned int r_counter;
	unsigned int w_counter;
	bool poll_usage;
	struct page *tmp_page;
	struct fasync_struct *fasync_readers;
	struct fasync_struct *fasync_writers;
	struct pipe_buffer *bufs;
	struct user_struct *user;
#ifdef CONFIG_WATCH_QUEUE
	struct watch_queue *watch_queue;
#endif
};
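
/*
 * @head and @tail are free-running counters; a slot index is obtained by
 * masking with the (power-of-two) ring size.  A minimal sketch of locating
 * the oldest buffer, in the style of the code in fs/pipe.c:
 *
 *	unsigned int mask = pipe->ring_size - 1;
 *	struct pipe_buffer *buf = &pipe->bufs[pipe->tail & mask];
 */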

/*
 * Note on the nesting of these functions:
 *
 * ->confirm()
 *	->try_steal()
 *
 * That is, ->try_steal() must be called on a confirmed buffer. See below for
 * the meaning of each operation. Also see the kerneldoc in fs/pipe.c for the
 * pipe and generic variants of these hooks.
 */
struct pipe_buf_operations {
	/*
	 * ->confirm() verifies that the data in the pipe buffer is there
	 * and that the contents are good. If the pages in the pipe belong
	 * to a file system, we may need to wait for IO completion in this
	 * hook. Returns 0 for good, or a negative error value in case of
	 * error. If not present, all pages are considered good.
	 */
	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * When the contents of this pipe buffer have been completely
	 * consumed by a reader, ->release() is called.
	 */
	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Attempt to take ownership of the pipe buffer and its contents.
	 * ->try_steal() returns %true for success, in which case the contents
	 * of the pipe (the buf->page) are locked and now completely owned by
	 * the caller. The page may then be transferred to a different mapping;
	 * the most common use case is insertion into a different file's
	 * address space cache.
	 */
	bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Get a reference to the pipe buffer.
	 */
	bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
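
/*
 * A minimal sketch of how a consumer honours the nesting rule above (roughly
 * what splice-style code does): confirm the buffer first, and only then
 * attempt to steal it.
 *
 *	if (pipe_buf_confirm(pipe, buf) == 0 &&
 *	    pipe_buf_try_steal(pipe, buf)) {
 *		... the caller now exclusively owns buf->page ...
 *	}
 */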

/**
 * pipe_empty - Return true if the pipe is empty
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 */
static inline bool pipe_empty(unsigned int head, unsigned int tail)
{
	return head == tail;
}
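
/*
 * A rough sketch of a reader-side wait (illustrative only; the real loop in
 * fs/pipe.c also handles signals, O_NONBLOCK and writer counts):
 *
 *	if (pipe_empty(pipe->head, pipe->tail))
 *		wait_event_interruptible(pipe->rd_wait,
 *					 !pipe_empty(pipe->head, pipe->tail));
 */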

/**
 * pipe_occupancy - Return number of slots used in the pipe
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 */
static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
{
	return head - tail;
}
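
/*
 * Because @head and @tail are free-running unsigned counters, the subtraction
 * stays correct across wrap-around.  For example:
 *
 *	pipe_occupancy(0x00000001, 0xffffffff) == 2
 */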

/**
 * pipe_full - Return true if the pipe is full
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 * @limit: The maximum amount of slots available.
 */
static inline bool pipe_full(unsigned int head, unsigned int tail,
			     unsigned int limit)
{
	return pipe_occupancy(head, tail) >= limit;
}
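
/*
 * A minimal writer-side sketch (assuming pipe->mutex is held, as the write
 * path in fs/pipe.c does): only claim a new slot when the ring is not already
 * at its limit.
 *
 *	if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
 *		... fill pipe->bufs[pipe->head & (pipe->ring_size - 1)] ...
 *		pipe->head++;
 *	}
 */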

/**
 * pipe_space_for_user - Return number of slots available to userspace
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 * @pipe: The pipe info structure
 */
static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
					       struct pipe_inode_info *pipe)
{
	unsigned int p_occupancy, p_space;

	p_occupancy = pipe_occupancy(head, tail);
	if (p_occupancy >= pipe->max_usage)
		return 0;
	p_space = pipe->ring_size - p_occupancy;
	if (p_space > pipe->max_usage)
		p_space = pipe->max_usage;
	return p_space;
}
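
/*
 * A rough usage sketch: a producer that queues pages into the pipe on behalf
 * of userspace (as the splice paths do) can size its batch from the free
 * slots, e.g.
 *
 *	unsigned int slots = pipe_space_for_user(pipe->head, pipe->tail, pipe);
 *
 * and then queue at most that many buffers before waking readers.
 */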

/**
 * pipe_buf_get - get a reference to a pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Return: %true if the reference was successfully obtained.
 */
static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
					     struct pipe_buffer *buf)
{
	return buf->ops->get(pipe, buf);
}
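
/*
 * Because pipe_buf_get() is __must_check, callers have to cope with failure,
 * roughly like this (illustrative only; how the failure is handled is up to
 * the caller):
 *
 *	if (!pipe_buf_get(pipe, buf))
 *		break;
 */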

/**
 * pipe_buf_release - put a reference to a pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 */
static inline void pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	const struct pipe_buf_operations *ops = buf->ops;

	buf->ops = NULL;
	ops->release(pipe, buf);
}
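
/*
 * A minimal sketch of retiring a fully consumed buffer (roughly what the read
 * path does, with pipe->mutex held): drop the buffer's reference and advance
 * the tail so the slot can be reused.
 *
 *	if (!buf->len) {
 *		pipe_buf_release(pipe, buf);
 *		pipe->tail++;
 *	}
 */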

/**
 * pipe_buf_confirm - verify contents of the pipe buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to confirm
 */
static inline int pipe_buf_confirm(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	if (!buf->ops->confirm)
		return 0;
	return buf->ops->confirm(pipe, buf);
}
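
/*
 * Typical use (sketch): check the buffer before touching its page, and
 * propagate any error to the caller.
 *
 *	int error = pipe_buf_confirm(pipe, buf);
 *	if (unlikely(error))
 *		return error;
 */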

/**
 * pipe_buf_try_steal - attempt to take ownership of a pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 */
static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe,
				      struct pipe_buffer *buf)
{
	if (!buf->ops->try_steal)
		return false;
	return buf->ops->try_steal(pipe, buf);
}
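
/*
 * A rough sketch of the steal-or-copy pattern used by splice-style consumers:
 * if the page can be stolen it is moved wholesale, otherwise the data is
 * copied out and the buffer left in place.
 *
 *	if (pipe_buf_try_steal(pipe, buf)) {
 *		... install buf->page into the destination mapping ...
 *	} else {
 *		... fall back to copying buf->len bytes from buf->page ...
 *	}
 */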

/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
   memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
#define PIPE_SIZE		PAGE_SIZE

/* Pipe lock and unlock operations */
void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
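
/*
 * pipe_double_lock() takes both pipe mutexes in a stable order, so two tasks
 * splicing between the same pair of pipes cannot deadlock.  A minimal sketch
 * of moving data between two pipes:
 *
 *	pipe_double_lock(ipipe, opipe);
 *	... transfer buffers from ipipe to opipe ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 */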

extern unsigned int pipe_max_size;
extern unsigned long pipe_user_pages_hard;
extern unsigned long pipe_user_pages_soft;

/* Wait for a pipe to be readable/writable while dropping the pipe lock */
void pipe_wait_readable(struct pipe_inode_info *);
void pipe_wait_writable(struct pipe_inode_info *);

struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);

/* Generic pipe buffer ops functions */
bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);

extern const struct pipe_buf_operations nosteal_pipe_buf_ops;

#ifdef CONFIG_WATCH_QUEUE
unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new);
bool too_many_pipe_buffers_soft(unsigned long user_bufs);
bool too_many_pipe_buffers_hard(unsigned long user_bufs);
bool pipe_is_unprivileged_user(void);
#endif

/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
#ifdef CONFIG_WATCH_QUEUE
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
#endif
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);

int create_pipe_files(struct file **, int);
unsigned int round_pipe_size(unsigned long size);

#endif