/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	ITER_IOVEC,	/* array of userspace struct iovec */
	ITER_KVEC,	/* array of kernel-space struct kvec */
	ITER_BVEC,	/* array of struct bio_vec (page + offset + length) */
	ITER_PIPE,	/* data goes to/from a pipe's buffers */
	ITER_XARRAY,	/* pages found in an xarray (e.g. the page cache) */
	ITER_DISCARD,	/* everything copied in is discarded */
	ITER_UBUF,	/* single contiguous userspace buffer */
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ
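
/*
 * Example (illustrative sketch, not part of this header): a caller that
 * feeds data *out of* a kernel buffer uses ITER_SOURCE; one that fills a
 * buffer uses ITER_DEST.  do_send() is a hypothetical consumer.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_SOURCE, &kv, 1, len);
 *	do_send(&iter);
 */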

/* Snapshot of an iterator's position, taken by iov_iter_save_state(). */
struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
		void __user *ubuf;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
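
/*
 * Example (illustrative sketch): pair iov_iter_save_state() with
 * iov_iter_restore(), declared below, to retry an operation from the same
 * position.  do_op() is a hypothetical callee that may consume part of the
 * iterator before failing.
 *
 *	struct iov_iter_state state;
 *	ssize_t ret;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_op(iter);
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);
 *		ret = do_op(iter);
 *	}
 */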

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}
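
/*
 * Example (illustrative sketch): direct-I/O style code commonly branches
 * on an iterator's direction and backing store; the DMA-direction choice
 * here is schematic.
 *
 *	if (user_backed_iter(i))
 *		... user pages must be pinned before any DMA ...
 *	dir = (iov_iter_rw(i) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 */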

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
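
/*
 * Example of the overflow hazard noted above: on a 32-bit kernel, two
 * segments of 0x80000000 bytes each wrap around to a total of 0, so an
 * unvalidated vector could make iov_length() report an empty transfer.
 */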

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
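
/*
 * Example (illustrative sketch): iov_iter_iovec() only makes sense for
 * ITER_IOVEC iterators; a segment-at-a-time consumer advances by however
 * much it actually processed.  do_segment() is hypothetical.
 *
 *	while (iov_iter_count(i)) {
 *		struct iovec v = iov_iter_iovec(i);
 *		size_t n = do_segment(v.iov_base, v.iov_len);
 *
 *		if (!n)
 *			break;
 *		iov_iter_advance(i, n);
 *	}
 */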

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
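
/*
 * Example (illustrative sketch): copy_from_iter_full() is the usual way to
 * pull a fixed-size header out of an iterator, since a short copy leaves
 * the iterator position unchanged.  struct foo_hdr is hypothetical.
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;
 */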

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() relative to _copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
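
/*
 * Example (illustrative sketch): per the note above, a pmem-style caller
 * must not assume the destination was flushed unless the architecture
 * opted in; flush_fallback() is a hypothetical stand-in for whatever
 * manual flush the caller would otherwise perform.
 *
 *	size_t n = _copy_from_iter_flushcache(addr, bytes, i);
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		flush_fallback(addr, n);
 */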

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
		   size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			    size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
				  size_t maxsize, size_t *start);
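
/*
 * Example (illustrative sketch): iov_iter_get_pages2() grabs references on
 * up to @maxpages pages, stores the offset of the first byte within the
 * first page in @start, advances the iterator, and returns the number of
 * bytes covered (or a negative errno).  The caller is assumed to drop the
 * page references when done with them.
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t bytes;
 *
 *	bytes = iov_iter_get_pages2(i, pages, iov_iter_count(i), 16, &off);
 *	if (bytes < 0)
 *		return bytes;
 */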
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter at the given limit; note that the second argument is
 * *not* the new size - it is an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no more than how
 * much we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}
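
/*
 * The helper above is one instance of the common truncate/operate/reexpand
 * idiom.  An illustrative write-capping sketch, with do_write() and
 * @limit hypothetical:
 *
 *	size_t shorted = 0;
 *
 *	if (iov_iter_count(from) > limit) {
 *		shorted = iov_iter_count(from) - limit;
 *		iov_iter_truncate(from, limit);
 *	}
 *	written = do_write(from);
 *	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
 */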

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);
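
/*
 * Example (illustrative sketch): a readv()-style path imports a user iovec
 * array, consumes the resulting iterator, and frees the (possibly
 * heap-allocated) copy; on success *iovp is either NULL or must be
 * kfree()d, so an unconditional kfree() is safe.  do_read() is
 * hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_read(&iter);
 *	kfree(iov);
 *	return ret;
 */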

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count
	};
}
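
/*
 * Example (illustrative sketch): wrap a single user buffer for a read()
 * style path, i.e. the kernel will be writing into @buf.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_ubuf(&iter, ITER_DEST, buf, len);
 */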

#endif