1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <crypto/hash.h>
3 #include <linux/export.h>
4 #include <linux/bvec.h>
5 #include <linux/fault-inject-usercopy.h>
6 #include <linux/uio.h>
7 #include <linux/pagemap.h>
8 #include <linux/slab.h>
9 #include <linux/vmalloc.h>
10 #include <linux/splice.h>
11 #include <linux/compat.h>
12 #include <net/checksum.h>
13 #include <linux/scatterlist.h>
14 #include <linux/instrumented.h>
15
16 #define PIPE_PARANOIA /* for now */
17
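/*
 * The iterate_* macros below walk the segments of an iov_iter of one backing
 * type. __v is the current segment and STEP is an expression evaluated once
 * per segment; for user-space iovecs STEP yields the number of bytes it could
 * not process, which ends the walk early. iterate_all_kinds() dispatches on
 * i->type and leaves the iterator itself untouched, while
 * iterate_and_advance() also clamps n to i->count and advances the segment
 * pointer, nr_segs, iov_offset and count as it goes.
 */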
18 #define iterate_iovec(i, n, __v, __p, skip, STEP) { \
19 size_t left; \
20 size_t wanted = n; \
21 __p = i->iov; \
22 __v.iov_len = min(n, __p->iov_len - skip); \
23 if (likely(__v.iov_len)) { \
24 __v.iov_base = __p->iov_base + skip; \
25 left = (STEP); \
26 __v.iov_len -= left; \
27 skip += __v.iov_len; \
28 n -= __v.iov_len; \
29 } else { \
30 left = 0; \
31 } \
32 while (unlikely(!left && n)) { \
33 __p++; \
34 __v.iov_len = min(n, __p->iov_len); \
35 if (unlikely(!__v.iov_len)) \
36 continue; \
37 __v.iov_base = __p->iov_base; \
38 left = (STEP); \
39 __v.iov_len -= left; \
40 skip = __v.iov_len; \
41 n -= __v.iov_len; \
42 } \
43 n = wanted - n; \
44 }
45
46 #define iterate_kvec(i, n, __v, __p, skip, STEP) { \
47 size_t wanted = n; \
48 __p = i->kvec; \
49 __v.iov_len = min(n, __p->iov_len - skip); \
50 if (likely(__v.iov_len)) { \
51 __v.iov_base = __p->iov_base + skip; \
52 (void)(STEP); \
53 skip += __v.iov_len; \
54 n -= __v.iov_len; \
55 } \
56 while (unlikely(n)) { \
57 __p++; \
58 __v.iov_len = min(n, __p->iov_len); \
59 if (unlikely(!__v.iov_len)) \
60 continue; \
61 __v.iov_base = __p->iov_base; \
62 (void)(STEP); \
63 skip = __v.iov_len; \
64 n -= __v.iov_len; \
65 } \
66 n = wanted; \
67 }
68
69 #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
70 struct bvec_iter __start; \
71 __start.bi_size = n; \
72 __start.bi_bvec_done = skip; \
73 __start.bi_idx = 0; \
74 for_each_bvec(__v, i->bvec, __bi, __start) { \
75 if (!__v.bv_len) \
76 continue; \
77 (void)(STEP); \
78 } \
79 }
80
81 #define iterate_all_kinds(i, n, v, I, B, K) { \
82 if (likely(n)) { \
83 size_t skip = i->iov_offset; \
84 if (unlikely(i->type & ITER_BVEC)) { \
85 struct bio_vec v; \
86 struct bvec_iter __bi; \
87 iterate_bvec(i, n, v, __bi, skip, (B)) \
88 } else if (unlikely(i->type & ITER_KVEC)) { \
89 const struct kvec *kvec; \
90 struct kvec v; \
91 iterate_kvec(i, n, v, kvec, skip, (K)) \
92 } else if (unlikely(i->type & ITER_DISCARD)) { \
93 } else { \
94 const struct iovec *iov; \
95 struct iovec v; \
96 iterate_iovec(i, n, v, iov, skip, (I)) \
97 } \
98 } \
99 }
100
101 #define iterate_and_advance(i, n, v, I, B, K) { \
102 if (unlikely(i->count < n)) \
103 n = i->count; \
104 if (i->count) { \
105 size_t skip = i->iov_offset; \
106 if (unlikely(i->type & ITER_BVEC)) { \
107 const struct bio_vec *bvec = i->bvec; \
108 struct bio_vec v; \
109 struct bvec_iter __bi; \
110 iterate_bvec(i, n, v, __bi, skip, (B)) \
111 i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
112 i->nr_segs -= i->bvec - bvec; \
113 skip = __bi.bi_bvec_done; \
114 } else if (unlikely(i->type & ITER_KVEC)) { \
115 const struct kvec *kvec; \
116 struct kvec v; \
117 iterate_kvec(i, n, v, kvec, skip, (K)) \
118 if (skip == kvec->iov_len) { \
119 kvec++; \
120 skip = 0; \
121 } \
122 i->nr_segs -= kvec - i->kvec; \
123 i->kvec = kvec; \
124 } else if (unlikely(i->type & ITER_DISCARD)) { \
125 skip += n; \
126 } else { \
127 const struct iovec *iov; \
128 struct iovec v; \
129 iterate_iovec(i, n, v, iov, skip, (I)) \
130 if (skip == iov->iov_len) { \
131 iov++; \
132 skip = 0; \
133 } \
134 i->nr_segs -= iov - i->iov; \
135 i->iov = iov; \
136 } \
137 i->count -= n; \
138 i->iov_offset = skip; \
139 } \
140 }
141
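/*
 * Like raw_copy_{to,from}_user(), copyout() and copyin() return the number
 * of bytes that could NOT be copied (0 on complete success), and honour
 * both the access_ok() checks and usercopy fault injection.
 */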
142 static int copyout(void __user *to, const void *from, size_t n)
143 {
144 if (should_fail_usercopy())
145 return n;
146 if (access_ok(to, n)) {
147 instrument_copy_to_user(to, from, n);
148 n = raw_copy_to_user(to, from, n);
149 }
150 return n;
151 }
152
153 static int copyin(void *to, const void __user *from, size_t n)
154 {
155 if (should_fail_usercopy())
156 return n;
157 if (access_ok(from, n)) {
158 instrument_copy_from_user(to, from, n);
159 n = raw_copy_from_user(to, from, n);
160 }
161 return n;
162 }
163
164 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
165 struct iov_iter *i)
166 {
167 size_t skip, copy, left, wanted;
168 const struct iovec *iov;
169 char __user *buf;
170 void *kaddr, *from;
171
172 if (unlikely(bytes > i->count))
173 bytes = i->count;
174
175 if (unlikely(!bytes))
176 return 0;
177
178 might_fault();
179 wanted = bytes;
180 iov = i->iov;
181 skip = i->iov_offset;
182 buf = iov->iov_base + skip;
183 copy = min(bytes, iov->iov_len - skip);
184
185 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
186 kaddr = kmap_atomic(page);
187 from = kaddr + offset;
188
189 /* first chunk, usually the only one */
190 left = copyout(buf, from, copy);
191 copy -= left;
192 skip += copy;
193 from += copy;
194 bytes -= copy;
195
196 while (unlikely(!left && bytes)) {
197 iov++;
198 buf = iov->iov_base;
199 copy = min(bytes, iov->iov_len);
200 left = copyout(buf, from, copy);
201 copy -= left;
202 skip = copy;
203 from += copy;
204 bytes -= copy;
205 }
206 if (likely(!bytes)) {
207 kunmap_atomic(kaddr);
208 goto done;
209 }
210 offset = from - kaddr;
211 buf += copy;
212 kunmap_atomic(kaddr);
213 copy = min(bytes, iov->iov_len - skip);
214 }
215 /* Too bad - revert to non-atomic kmap */
216
217 kaddr = kmap(page);
218 from = kaddr + offset;
219 left = copyout(buf, from, copy);
220 copy -= left;
221 skip += copy;
222 from += copy;
223 bytes -= copy;
224 while (unlikely(!left && bytes)) {
225 iov++;
226 buf = iov->iov_base;
227 copy = min(bytes, iov->iov_len);
228 left = copyout(buf, from, copy);
229 copy -= left;
230 skip = copy;
231 from += copy;
232 bytes -= copy;
233 }
234 kunmap(page);
235
236 done:
237 if (skip == iov->iov_len) {
238 iov++;
239 skip = 0;
240 }
241 i->count -= wanted - bytes;
242 i->nr_segs -= iov - i->iov;
243 i->iov = iov;
244 i->iov_offset = skip;
245 return wanted - bytes;
246 }
247
248 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
249 struct iov_iter *i)
250 {
251 size_t skip, copy, left, wanted;
252 const struct iovec *iov;
253 char __user *buf;
254 void *kaddr, *to;
255
256 if (unlikely(bytes > i->count))
257 bytes = i->count;
258
259 if (unlikely(!bytes))
260 return 0;
261
262 might_fault();
263 wanted = bytes;
264 iov = i->iov;
265 skip = i->iov_offset;
266 buf = iov->iov_base + skip;
267 copy = min(bytes, iov->iov_len - skip);
268
269 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
270 kaddr = kmap_atomic(page);
271 to = kaddr + offset;
272
273 /* first chunk, usually the only one */
274 left = copyin(to, buf, copy);
275 copy -= left;
276 skip += copy;
277 to += copy;
278 bytes -= copy;
279
280 while (unlikely(!left && bytes)) {
281 iov++;
282 buf = iov->iov_base;
283 copy = min(bytes, iov->iov_len);
284 left = copyin(to, buf, copy);
285 copy -= left;
286 skip = copy;
287 to += copy;
288 bytes -= copy;
289 }
290 if (likely(!bytes)) {
291 kunmap_atomic(kaddr);
292 goto done;
293 }
294 offset = to - kaddr;
295 buf += copy;
296 kunmap_atomic(kaddr);
297 copy = min(bytes, iov->iov_len - skip);
298 }
299 /* Too bad - revert to non-atomic kmap */
300
301 kaddr = kmap(page);
302 to = kaddr + offset;
303 left = copyin(to, buf, copy);
304 copy -= left;
305 skip += copy;
306 to += copy;
307 bytes -= copy;
308 while (unlikely(!left && bytes)) {
309 iov++;
310 buf = iov->iov_base;
311 copy = min(bytes, iov->iov_len);
312 left = copyin(to, buf, copy);
313 copy -= left;
314 skip = copy;
315 to += copy;
316 bytes -= copy;
317 }
318 kunmap(page);
319
320 done:
321 if (skip == iov->iov_len) {
322 iov++;
323 skip = 0;
324 }
325 i->count -= wanted - bytes;
326 i->nr_segs -= iov - i->iov;
327 i->iov = iov;
328 i->iov_offset = skip;
329 return wanted - bytes;
330 }
331
332 #ifdef PIPE_PARANOIA
333 static bool sanity(const struct iov_iter *i)
334 {
335 struct pipe_inode_info *pipe = i->pipe;
336 unsigned int p_head = pipe->head;
337 unsigned int p_tail = pipe->tail;
338 unsigned int p_mask = pipe->ring_size - 1;
339 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
340 unsigned int i_head = i->head;
341 unsigned int idx;
342
343 if (i->iov_offset) {
344 struct pipe_buffer *p;
345 if (unlikely(p_occupancy == 0))
346 goto Bad; // pipe must be non-empty
347 if (unlikely(i_head != p_head - 1))
348 goto Bad; // must be at the last buffer...
349
350 p = &pipe->bufs[i_head & p_mask];
351 if (unlikely(p->offset + p->len != i->iov_offset))
352 goto Bad; // ... at the end of segment
353 } else {
354 if (i_head != p_head)
355 goto Bad; // must be right after the last buffer
356 }
357 return true;
358 Bad:
359 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
360 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
361 p_head, p_tail, pipe->ring_size);
362 for (idx = 0; idx < pipe->ring_size; idx++)
363 printk(KERN_ERR "[%p %p %d %d]\n",
364 pipe->bufs[idx].ops,
365 pipe->bufs[idx].page,
366 pipe->bufs[idx].offset,
367 pipe->bufs[idx].len);
368 WARN_ON(1);
369 return false;
370 }
371 #else
372 #define sanity(i) true
373 #endif
374
375 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
376 struct iov_iter *i)
377 {
378 struct pipe_inode_info *pipe = i->pipe;
379 struct pipe_buffer *buf;
380 unsigned int p_tail = pipe->tail;
381 unsigned int p_mask = pipe->ring_size - 1;
382 unsigned int i_head = i->head;
383 size_t off;
384
385 if (unlikely(bytes > i->count))
386 bytes = i->count;
387
388 if (unlikely(!bytes))
389 return 0;
390
391 if (!sanity(i))
392 return 0;
393
394 off = i->iov_offset;
395 buf = &pipe->bufs[i_head & p_mask];
396 if (off) {
397 if (offset == off && buf->page == page) {
398 /* merge with the last one */
399 buf->len += bytes;
400 i->iov_offset += bytes;
401 goto out;
402 }
403 i_head++;
404 buf = &pipe->bufs[i_head & p_mask];
405 }
406 if (pipe_full(i_head, p_tail, pipe->max_usage))
407 return 0;
408
409 buf->ops = &page_cache_pipe_buf_ops;
410 buf->flags = 0;
411 get_page(page);
412 buf->page = page;
413 buf->offset = offset;
414 buf->len = bytes;
415
416 pipe->head = i_head + 1;
417 i->iov_offset = offset + bytes;
418 i->head = i_head;
419 out:
420 i->count -= bytes;
421 return bytes;
422 }
423
424 /*
425 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
426 * bytes. For each iovec, fault in each page that constitutes the iovec.
427 *
428 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
429 * because it is an invalid address).
430 */
431 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
432 {
433 size_t skip = i->iov_offset;
434 const struct iovec *iov;
435 int err;
436 struct iovec v;
437
438 if (iter_is_iovec(i)) {
439 iterate_iovec(i, bytes, v, iov, skip, ({
440 err = fault_in_pages_readable(v.iov_base, v.iov_len);
441 if (unlikely(err))
442 return err;
443 0;}))
444 }
445 return 0;
446 }
447 EXPORT_SYMBOL(iov_iter_fault_in_readable);
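/*
 * Example (illustrative sketch only): the usual buffered-write pattern, in
 * the spirit of generic_perform_write(). The helper name and the way the
 * destination page is obtained are hypothetical; only the iov_iter calls
 * are real.
 */
static size_t __maybe_unused example_copy_write_chunk(struct page *page,
		unsigned long offset, size_t bytes, struct iov_iter *from)
{
	size_t copied;

	/* Fault the user buffer in while sleeping is still allowed. */
	if (iov_iter_fault_in_readable(from, bytes))
		return 0;

	/* The copy itself runs under kmap_atomic() and must not fault. */
	copied = iov_iter_copy_from_user_atomic(page, from, offset, bytes);

	/* The atomic copy does not advance the iterator; do that here. */
	iov_iter_advance(from, copied);
	return copied;
}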
448
449 void iov_iter_init(struct iov_iter *i, unsigned int direction,
450 const struct iovec *iov, unsigned long nr_segs,
451 size_t count)
452 {
453 WARN_ON(direction & ~(READ | WRITE));
454 direction &= READ | WRITE;
455
456 /* It will get better. Eventually... */
457 if (uaccess_kernel()) {
458 i->type = ITER_KVEC | direction;
459 i->kvec = (struct kvec *)iov;
460 } else {
461 i->type = ITER_IOVEC | direction;
462 i->iov = iov;
463 }
464 i->nr_segs = nr_segs;
465 i->iov_offset = 0;
466 i->count = count;
467 }
468 EXPORT_SYMBOL(iov_iter_init);
469
470 static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
471 {
472 char *from = kmap_atomic(page);
473 memcpy(to, from + offset, len);
474 kunmap_atomic(from);
475 }
476
477 static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
478 {
479 char *to = kmap_atomic(page);
480 memcpy(to + offset, from, len);
481 kunmap_atomic(to);
482 }
483
484 static void memzero_page(struct page *page, size_t offset, size_t len)
485 {
486 char *addr = kmap_atomic(page);
487 memset(addr + offset, 0, len);
488 kunmap_atomic(addr);
489 }
490
491 static inline bool allocated(struct pipe_buffer *buf)
492 {
493 return buf->ops == &default_pipe_buf_ops;
494 }
495
496 static inline void data_start(const struct iov_iter *i,
497 unsigned int *iter_headp, size_t *offp)
498 {
499 unsigned int p_mask = i->pipe->ring_size - 1;
500 unsigned int iter_head = i->head;
501 size_t off = i->iov_offset;
502
503 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
504 off == PAGE_SIZE)) {
505 iter_head++;
506 off = 0;
507 }
508 *iter_headp = iter_head;
509 *offp = off;
510 }
511
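/*
 * push_pipe() makes sure the pipe has buffers for up to @size bytes starting
 * at the current write position: it reuses the tail of a partially filled
 * page and allocates fresh pages for the rest. It returns how many bytes of
 * room were actually made available and reports the starting buffer and
 * offset through @iter_headp/@offp.
 */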
512 static size_t push_pipe(struct iov_iter *i, size_t size,
513 int *iter_headp, size_t *offp)
514 {
515 struct pipe_inode_info *pipe = i->pipe;
516 unsigned int p_tail = pipe->tail;
517 unsigned int p_mask = pipe->ring_size - 1;
518 unsigned int iter_head;
519 size_t off;
520 ssize_t left;
521
522 if (unlikely(size > i->count))
523 size = i->count;
524 if (unlikely(!size))
525 return 0;
526
527 left = size;
528 data_start(i, &iter_head, &off);
529 *iter_headp = iter_head;
530 *offp = off;
531 if (off) {
532 left -= PAGE_SIZE - off;
533 if (left <= 0) {
534 pipe->bufs[iter_head & p_mask].len += size;
535 return size;
536 }
537 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
538 iter_head++;
539 }
540 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
541 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
542 struct page *page = alloc_page(GFP_USER);
543 if (!page)
544 break;
545
546 buf->ops = &default_pipe_buf_ops;
547 buf->flags = 0;
548 buf->page = page;
549 buf->offset = 0;
550 buf->len = min_t(ssize_t, left, PAGE_SIZE);
551 left -= buf->len;
552 iter_head++;
553 pipe->head = iter_head;
554
555 if (left == 0)
556 return size;
557 }
558 return size - left;
559 }
560
561 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
562 struct iov_iter *i)
563 {
564 struct pipe_inode_info *pipe = i->pipe;
565 unsigned int p_mask = pipe->ring_size - 1;
566 unsigned int i_head;
567 size_t n, off;
568
569 if (!sanity(i))
570 return 0;
571
572 bytes = n = push_pipe(i, bytes, &i_head, &off);
573 if (unlikely(!n))
574 return 0;
575 do {
576 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
577 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
578 i->head = i_head;
579 i->iov_offset = off + chunk;
580 n -= chunk;
581 addr += chunk;
582 off = 0;
583 i_head++;
584 } while (n);
585 i->count -= bytes;
586 return bytes;
587 }
588
589 static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
590 __wsum sum, size_t off)
591 {
592 __wsum next = csum_partial_copy_nocheck(from, to, len);
593 return csum_block_add(sum, next, off);
594 }
595
596 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
597 struct csum_state *csstate,
598 struct iov_iter *i)
599 {
600 struct pipe_inode_info *pipe = i->pipe;
601 unsigned int p_mask = pipe->ring_size - 1;
602 __wsum sum = csstate->csum;
603 size_t off = csstate->off;
604 unsigned int i_head;
605 size_t n, r;
606
607 if (!sanity(i))
608 return 0;
609
610 bytes = n = push_pipe(i, bytes, &i_head, &r);
611 if (unlikely(!n))
612 return 0;
613 do {
614 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
615 char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
616 sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
617 kunmap_atomic(p);
618 i->head = i_head;
619 i->iov_offset = r + chunk;
620 n -= chunk;
621 off += chunk;
622 addr += chunk;
623 r = 0;
624 i_head++;
625 } while (n);
626 i->count -= bytes;
627 csstate->csum = sum;
628 csstate->off = off;
629 return bytes;
630 }
631
632 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
633 {
634 const char *from = addr;
635 if (unlikely(iov_iter_is_pipe(i)))
636 return copy_pipe_to_iter(addr, bytes, i);
637 if (iter_is_iovec(i))
638 might_fault();
639 iterate_and_advance(i, bytes, v,
640 copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
641 memcpy_to_page(v.bv_page, v.bv_offset,
642 (from += v.bv_len) - v.bv_len, v.bv_len),
643 memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
644 )
645
646 return bytes;
647 }
648 EXPORT_SYMBOL(_copy_to_iter);
649
650 #ifdef CONFIG_ARCH_HAS_COPY_MC
651 static int copyout_mc(void __user *to, const void *from, size_t n)
652 {
653 if (access_ok(to, n)) {
654 instrument_copy_to_user(to, from, n);
655 n = copy_mc_to_user((__force void *) to, from, n);
656 }
657 return n;
658 }
659
660 static unsigned long copy_mc_to_page(struct page *page, size_t offset,
661 const char *from, size_t len)
662 {
663 unsigned long ret;
664 char *to;
665
666 to = kmap_atomic(page);
667 ret = copy_mc_to_kernel(to + offset, from, len);
668 kunmap_atomic(to);
669
670 return ret;
671 }
672
673 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
674 struct iov_iter *i)
675 {
676 struct pipe_inode_info *pipe = i->pipe;
677 unsigned int p_mask = pipe->ring_size - 1;
678 unsigned int i_head;
679 size_t n, off, xfer = 0;
680
681 if (!sanity(i))
682 return 0;
683
684 bytes = n = push_pipe(i, bytes, &i_head, &off);
685 if (unlikely(!n))
686 return 0;
687 do {
688 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
689 unsigned long rem;
690
691 rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
692 off, addr, chunk);
693 i->head = i_head;
694 i->iov_offset = off + chunk - rem;
695 xfer += chunk - rem;
696 if (rem)
697 break;
698 n -= chunk;
699 addr += chunk;
700 off = 0;
701 i_head++;
702 } while (n);
703 i->count -= xfer;
704 return xfer;
705 }
706
707 /**
708 * _copy_mc_to_iter - copy to iter with source memory error exception handling
709 * @addr: source kernel address
710 * @bytes: total transfer length
711  * @i: destination iterator
712 *
713 * The pmem driver deploys this for the dax operation
714 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
715  * block-layer). Upon a machine check (#MC), read(2) aborts and returns
716  * -EIO or the number of bytes successfully copied.
717 *
718  * The main differences between this and typical _copy_to_iter() are:
719 *
720 * * Typical tail/residue handling after a fault retries the copy
721 * byte-by-byte until the fault happens again. Re-triggering machine
722 * checks is potentially fatal so the implementation uses source
723 * alignment and poison alignment assumptions to avoid re-triggering
724 * hardware exceptions.
725 *
726 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
727 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
728 * a short copy.
729 */
730 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
731 {
732 const char *from = addr;
733 unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
734
735 if (unlikely(iov_iter_is_pipe(i)))
736 return copy_mc_pipe_to_iter(addr, bytes, i);
737 if (iter_is_iovec(i))
738 might_fault();
739 iterate_and_advance(i, bytes, v,
740 copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
741 v.iov_len),
742 ({
743 rem = copy_mc_to_page(v.bv_page, v.bv_offset,
744 (from += v.bv_len) - v.bv_len, v.bv_len);
745 if (rem) {
746 curr_addr = (unsigned long) from;
747 bytes = curr_addr - s_addr - rem;
748 return bytes;
749 }
750 }),
751 ({
752 rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
753 - v.iov_len, v.iov_len);
754 if (rem) {
755 curr_addr = (unsigned long) from;
756 bytes = curr_addr - s_addr - rem;
757 return bytes;
758 }
759 })
760 )
761
762 return bytes;
763 }
764 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
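/*
 * Example (illustrative sketch only): how a caller in the style of the pmem
 * dax read path can turn a short _copy_mc_to_iter() result into an error.
 * The helper name and the -EIO policy are hypothetical.
 */
static ssize_t __maybe_unused example_mc_read(const void *src, size_t len,
		struct iov_iter *to)
{
	size_t copied = _copy_mc_to_iter(src, len, to);

	/* No progress on a non-empty request: report the failure. */
	if (!copied && len)
		return -EIO;
	return copied;
}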
765 #endif /* CONFIG_ARCH_HAS_COPY_MC */
766
767 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
768 {
769 char *to = addr;
770 if (unlikely(iov_iter_is_pipe(i))) {
771 WARN_ON(1);
772 return 0;
773 }
774 if (iter_is_iovec(i))
775 might_fault();
776 iterate_and_advance(i, bytes, v,
777 copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
778 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
779 v.bv_offset, v.bv_len),
780 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
781 )
782
783 return bytes;
784 }
785 EXPORT_SYMBOL(_copy_from_iter);
786
787 bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
788 {
789 char *to = addr;
790 if (unlikely(iov_iter_is_pipe(i))) {
791 WARN_ON(1);
792 return false;
793 }
794 if (unlikely(i->count < bytes))
795 return false;
796
797 if (iter_is_iovec(i))
798 might_fault();
799 iterate_all_kinds(i, bytes, v, ({
800 if (copyin((to += v.iov_len) - v.iov_len,
801 v.iov_base, v.iov_len))
802 return false;
803 0;}),
804 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
805 v.bv_offset, v.bv_len),
806 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
807 )
808
809 iov_iter_advance(i, bytes);
810 return true;
811 }
812 EXPORT_SYMBOL(_copy_from_iter_full);
813
814 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
815 {
816 char *to = addr;
817 if (unlikely(iov_iter_is_pipe(i))) {
818 WARN_ON(1);
819 return 0;
820 }
821 iterate_and_advance(i, bytes, v,
822 __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
823 v.iov_base, v.iov_len),
824 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
825 v.bv_offset, v.bv_len),
826 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
827 )
828
829 return bytes;
830 }
831 EXPORT_SYMBOL(_copy_from_iter_nocache);
832
833 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
834 /**
835 * _copy_from_iter_flushcache - write destination through cpu cache
836 * @addr: destination kernel address
837 * @bytes: total transfer length
838  * @i: source iterator
839 *
840 * The pmem driver arranges for filesystem-dax to use this facility via
841 * dax_copy_from_iter() for ensuring that writes to persistent memory
842 * are flushed through the CPU cache. It is differentiated from
843  * _copy_from_iter_nocache() in that it guarantees all data is flushed for
844  * all iterator types. _copy_from_iter_nocache() only attempts to
845 * bypass the cache for the ITER_IOVEC case, and on some archs may use
846 * instructions that strand dirty-data in the cache.
847 */
848 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
849 {
850 char *to = addr;
851 if (unlikely(iov_iter_is_pipe(i))) {
852 WARN_ON(1);
853 return 0;
854 }
855 iterate_and_advance(i, bytes, v,
856 __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
857 v.iov_base, v.iov_len),
858 memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
859 v.bv_offset, v.bv_len),
860 memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
861 v.iov_len)
862 )
863
864 return bytes;
865 }
866 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
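/*
 * Example (illustrative sketch only): a dax-style write helper that pulls
 * user data into persistent memory through the CPU cache. The destination
 * pointer and helper name are hypothetical.
 */
static size_t __maybe_unused example_pmem_copy_in(void *pmem_addr, size_t len,
		struct iov_iter *from)
{
	/* Returns the number of bytes copied and advances @from accordingly. */
	size_t copied = _copy_from_iter_flushcache(pmem_addr, len, from);

	/* A short result means the source iterator ran dry or faulted. */
	return copied;
}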
867 #endif
868
869 bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
870 {
871 char *to = addr;
872 if (unlikely(iov_iter_is_pipe(i))) {
873 WARN_ON(1);
874 return false;
875 }
876 if (unlikely(i->count < bytes))
877 return false;
878 iterate_all_kinds(i, bytes, v, ({
879 if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
880 v.iov_base, v.iov_len))
881 return false;
882 0;}),
883 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
884 v.bv_offset, v.bv_len),
885 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
886 )
887
888 iov_iter_advance(i, bytes);
889 return true;
890 }
891 EXPORT_SYMBOL(_copy_from_iter_full_nocache);
892
893 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
894 {
895 struct page *head;
896 size_t v = n + offset;
897
898 /*
899 * The general case needs to access the page order in order
900 * to compute the page size.
901 * However, we mostly deal with order-0 pages and thus can
902  * avoid a possible cache line miss for requests that fit within a
903  * single page, whatever the page order.
904 */
905 if (n <= v && v <= PAGE_SIZE)
906 return true;
907
908 head = compound_head(page);
909 v += (page - head) << PAGE_SHIFT;
910
911 if (likely(n <= v && v <= (page_size(head))))
912 return true;
913 WARN_ON(1);
914 return false;
915 }
916
917 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
918 struct iov_iter *i)
919 {
920 if (unlikely(!page_copy_sane(page, offset, bytes)))
921 return 0;
922 if (i->type & (ITER_BVEC|ITER_KVEC)) {
923 void *kaddr = kmap_atomic(page);
924 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
925 kunmap_atomic(kaddr);
926 return wanted;
927 } else if (unlikely(iov_iter_is_discard(i))) {
928 if (unlikely(i->count < bytes))
929 bytes = i->count;
930 i->count -= bytes;
931 return bytes;
932 } else if (likely(!iov_iter_is_pipe(i)))
933 return copy_page_to_iter_iovec(page, offset, bytes, i);
934 else
935 return copy_page_to_iter_pipe(page, offset, bytes, i);
936 }
937 EXPORT_SYMBOL(copy_page_to_iter);
938
939 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
940 struct iov_iter *i)
941 {
942 if (unlikely(!page_copy_sane(page, offset, bytes)))
943 return 0;
944 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
945 WARN_ON(1);
946 return 0;
947 }
948 if (i->type & (ITER_BVEC|ITER_KVEC)) {
949 void *kaddr = kmap_atomic(page);
950 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
951 kunmap_atomic(kaddr);
952 return wanted;
953 } else
954 return copy_page_from_iter_iovec(page, offset, bytes, i);
955 }
956 EXPORT_SYMBOL(copy_page_from_iter);
957
958 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
959 {
960 struct pipe_inode_info *pipe = i->pipe;
961 unsigned int p_mask = pipe->ring_size - 1;
962 unsigned int i_head;
963 size_t n, off;
964
965 if (!sanity(i))
966 return 0;
967
968 bytes = n = push_pipe(i, bytes, &i_head, &off);
969 if (unlikely(!n))
970 return 0;
971
972 do {
973 size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
974 memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
975 i->head = i_head;
976 i->iov_offset = off + chunk;
977 n -= chunk;
978 off = 0;
979 i_head++;
980 } while (n);
981 i->count -= bytes;
982 return bytes;
983 }
984
985 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
986 {
987 if (unlikely(iov_iter_is_pipe(i)))
988 return pipe_zero(bytes, i);
989 iterate_and_advance(i, bytes, v,
990 clear_user(v.iov_base, v.iov_len),
991 memzero_page(v.bv_page, v.bv_offset, v.bv_len),
992 memset(v.iov_base, 0, v.iov_len)
993 )
994
995 return bytes;
996 }
997 EXPORT_SYMBOL(iov_iter_zero);
998
999 size_t iov_iter_copy_from_user_atomic(struct page *page,
1000 struct iov_iter *i, unsigned long offset, size_t bytes)
1001 {
1002 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
1003 if (unlikely(!page_copy_sane(page, offset, bytes))) {
1004 kunmap_atomic(kaddr);
1005 return 0;
1006 }
1007 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1008 kunmap_atomic(kaddr);
1009 WARN_ON(1);
1010 return 0;
1011 }
1012 iterate_all_kinds(i, bytes, v,
1013 copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
1014 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
1015 v.bv_offset, v.bv_len),
1016 memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
1017 )
1018 kunmap_atomic(kaddr);
1019 return bytes;
1020 }
1021 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
1022
1023 static inline void pipe_truncate(struct iov_iter *i)
1024 {
1025 struct pipe_inode_info *pipe = i->pipe;
1026 unsigned int p_tail = pipe->tail;
1027 unsigned int p_head = pipe->head;
1028 unsigned int p_mask = pipe->ring_size - 1;
1029
1030 if (!pipe_empty(p_head, p_tail)) {
1031 struct pipe_buffer *buf;
1032 unsigned int i_head = i->head;
1033 size_t off = i->iov_offset;
1034
1035 if (off) {
1036 buf = &pipe->bufs[i_head & p_mask];
1037 buf->len = off - buf->offset;
1038 i_head++;
1039 }
1040 while (p_head != i_head) {
1041 p_head--;
1042 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
1043 }
1044
1045 pipe->head = p_head;
1046 }
1047 }
1048
1049 static void pipe_advance(struct iov_iter *i, size_t size)
1050 {
1051 struct pipe_inode_info *pipe = i->pipe;
1052 if (unlikely(i->count < size))
1053 size = i->count;
1054 if (size) {
1055 struct pipe_buffer *buf;
1056 unsigned int p_mask = pipe->ring_size - 1;
1057 unsigned int i_head = i->head;
1058 size_t off = i->iov_offset, left = size;
1059
1060 if (off) /* make it relative to the beginning of buffer */
1061 left += off - pipe->bufs[i_head & p_mask].offset;
1062 while (1) {
1063 buf = &pipe->bufs[i_head & p_mask];
1064 if (left <= buf->len)
1065 break;
1066 left -= buf->len;
1067 i_head++;
1068 }
1069 i->head = i_head;
1070 i->iov_offset = buf->offset + left;
1071 }
1072 i->count -= size;
1073 /* ... and discard everything past that point */
1074 pipe_truncate(i);
1075 }
1076
1077 void iov_iter_advance(struct iov_iter *i, size_t size)
1078 {
1079 if (unlikely(iov_iter_is_pipe(i))) {
1080 pipe_advance(i, size);
1081 return;
1082 }
1083 if (unlikely(iov_iter_is_discard(i))) {
1084 i->count -= size;
1085 return;
1086 }
1087 iterate_and_advance(i, size, v, 0, 0, 0)
1088 }
1089 EXPORT_SYMBOL(iov_iter_advance);
1090
1091 void iov_iter_revert(struct iov_iter *i, size_t unroll)
1092 {
1093 if (!unroll)
1094 return;
1095 if (WARN_ON(unroll > MAX_RW_COUNT))
1096 return;
1097 i->count += unroll;
1098 if (unlikely(iov_iter_is_pipe(i))) {
1099 struct pipe_inode_info *pipe = i->pipe;
1100 unsigned int p_mask = pipe->ring_size - 1;
1101 unsigned int i_head = i->head;
1102 size_t off = i->iov_offset;
1103 while (1) {
1104 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
1105 size_t n = off - b->offset;
1106 if (unroll < n) {
1107 off -= unroll;
1108 break;
1109 }
1110 unroll -= n;
1111 if (!unroll && i_head == i->start_head) {
1112 off = 0;
1113 break;
1114 }
1115 i_head--;
1116 b = &pipe->bufs[i_head & p_mask];
1117 off = b->offset + b->len;
1118 }
1119 i->iov_offset = off;
1120 i->head = i_head;
1121 pipe_truncate(i);
1122 return;
1123 }
1124 if (unlikely(iov_iter_is_discard(i)))
1125 return;
1126 if (unroll <= i->iov_offset) {
1127 i->iov_offset -= unroll;
1128 return;
1129 }
1130 unroll -= i->iov_offset;
1131 if (iov_iter_is_bvec(i)) {
1132 const struct bio_vec *bvec = i->bvec;
1133 while (1) {
1134 size_t n = (--bvec)->bv_len;
1135 i->nr_segs++;
1136 if (unroll <= n) {
1137 i->bvec = bvec;
1138 i->iov_offset = n - unroll;
1139 return;
1140 }
1141 unroll -= n;
1142 }
1143 } else { /* same logics for iovec and kvec */
1144 const struct iovec *iov = i->iov;
1145 while (1) {
1146 size_t n = (--iov)->iov_len;
1147 i->nr_segs++;
1148 if (unroll <= n) {
1149 i->iov = iov;
1150 i->iov_offset = n - unroll;
1151 return;
1152 }
1153 unroll -= n;
1154 }
1155 }
1156 }
1157 EXPORT_SYMBOL(iov_iter_revert);
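/*
 * Example (illustrative sketch only): pairing consumption with
 * iov_iter_revert() so the iterator can be handed back untouched when the
 * operation has to be abandoned. The @commit condition is hypothetical.
 */
static size_t __maybe_unused example_copy_or_rollback(const void *src,
		size_t len, struct iov_iter *to, bool commit)
{
	size_t copied = copy_to_iter(src, len, to);

	if (!commit) {
		/* Undo the consumption so the caller can retry from scratch. */
		iov_iter_revert(to, copied);
		return 0;
	}
	return copied;
}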
1158
1159 /*
1160 * Return the count of just the current iov_iter segment.
1161 */
1162 size_t iov_iter_single_seg_count(const struct iov_iter *i)
1163 {
1164 if (unlikely(iov_iter_is_pipe(i)))
1165 return i->count; // it is a silly place, anyway
1166 if (i->nr_segs == 1)
1167 return i->count;
1168 if (unlikely(iov_iter_is_discard(i)))
1169 return i->count;
1170 else if (iov_iter_is_bvec(i))
1171 return min(i->count, i->bvec->bv_len - i->iov_offset);
1172 else
1173 return min(i->count, i->iov->iov_len - i->iov_offset);
1174 }
1175 EXPORT_SYMBOL(iov_iter_single_seg_count);
1176
1177 void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1178 const struct kvec *kvec, unsigned long nr_segs,
1179 size_t count)
1180 {
1181 WARN_ON(direction & ~(READ | WRITE));
1182 i->type = ITER_KVEC | (direction & (READ | WRITE));
1183 i->kvec = kvec;
1184 i->nr_segs = nr_segs;
1185 i->iov_offset = 0;
1186 i->count = count;
1187 }
1188 EXPORT_SYMBOL(iov_iter_kvec);
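/*
 * Example (illustrative sketch only): copying between two kernel buffers via
 * an ITER_KVEC iterator. The helper name is hypothetical; READ means the
 * iterator describes the destination of the transfer, so copy_to_iter() is
 * the matching call.
 */
static size_t __maybe_unused example_kvec_copy(void *dst, const void *src,
		size_t len)
{
	struct kvec kv = { .iov_base = dst, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ, &kv, 1, len);
	return copy_to_iter(src, len, &iter);
}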
1189
1190 void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1191 const struct bio_vec *bvec, unsigned long nr_segs,
1192 size_t count)
1193 {
1194 WARN_ON(direction & ~(READ | WRITE));
1195 i->type = ITER_BVEC | (direction & (READ | WRITE));
1196 i->bvec = bvec;
1197 i->nr_segs = nr_segs;
1198 i->iov_offset = 0;
1199 i->count = count;
1200 }
1201 EXPORT_SYMBOL(iov_iter_bvec);
1202
1203 void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1204 struct pipe_inode_info *pipe,
1205 size_t count)
1206 {
1207 BUG_ON(direction != READ);
1208 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1209 i->type = ITER_PIPE | READ;
1210 i->pipe = pipe;
1211 i->head = pipe->head;
1212 i->iov_offset = 0;
1213 i->count = count;
1214 i->start_head = i->head;
1215 }
1216 EXPORT_SYMBOL(iov_iter_pipe);
1217
1218 /**
1219 * iov_iter_discard - Initialise an I/O iterator that discards data
1220 * @i: The iterator to initialise.
1221 * @direction: The direction of the transfer.
1222 * @count: The size of the I/O buffer in bytes.
1223 *
1224 * Set up an I/O iterator that just discards everything that's written to it.
1225 * It's only available as a READ iterator.
1226 */
1227 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1228 {
1229 BUG_ON(direction != READ);
1230 i->type = ITER_DISCARD | READ;
1231 i->count = count;
1232 i->iov_offset = 0;
1233 }
1234 EXPORT_SYMBOL(iov_iter_discard);
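/*
 * Example (illustrative sketch only): a discard iterator consumes data
 * without storing it anywhere, which makes it useful for skipping over part
 * of a stream. The helper name is hypothetical.
 */
static size_t __maybe_unused example_skip_bytes(const void *src, size_t len)
{
	struct iov_iter iter;

	iov_iter_discard(&iter, READ, len);
	/* Everything "copied" here is simply dropped; only the count moves. */
	return copy_to_iter(src, len, &iter);
}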
1235
1236 unsigned long iov_iter_alignment(const struct iov_iter *i)
1237 {
1238 unsigned long res = 0;
1239 size_t size = i->count;
1240
1241 if (unlikely(iov_iter_is_pipe(i))) {
1242 unsigned int p_mask = i->pipe->ring_size - 1;
1243
1244 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1245 return size | i->iov_offset;
1246 return size;
1247 }
1248 iterate_all_kinds(i, size, v,
1249 (res |= (unsigned long)v.iov_base | v.iov_len, 0),
1250 res |= v.bv_offset | v.bv_len,
1251 res |= (unsigned long)v.iov_base | v.iov_len
1252 )
1253 return res;
1254 }
1255 EXPORT_SYMBOL(iov_iter_alignment);
1256
1257 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1258 {
1259 unsigned long res = 0;
1260 size_t size = i->count;
1261
1262 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1263 WARN_ON(1);
1264 return ~0U;
1265 }
1266
1267 iterate_all_kinds(i, size, v,
1268 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1269 (size != v.iov_len ? size : 0), 0),
1270 (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1271 (size != v.bv_len ? size : 0)),
1272 (res |= (!res ? 0 : (unsigned long)v.iov_base) |
1273 (size != v.iov_len ? size : 0))
1274 );
1275 return res;
1276 }
1277 EXPORT_SYMBOL(iov_iter_gap_alignment);
1278
1279 static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1280 size_t maxsize,
1281 struct page **pages,
1282 int iter_head,
1283 size_t *start)
1284 {
1285 struct pipe_inode_info *pipe = i->pipe;
1286 unsigned int p_mask = pipe->ring_size - 1;
1287 ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1288 if (!n)
1289 return -EFAULT;
1290
1291 maxsize = n;
1292 n += *start;
1293 while (n > 0) {
1294 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1295 iter_head++;
1296 n -= PAGE_SIZE;
1297 }
1298
1299 return maxsize;
1300 }
1301
1302 static ssize_t pipe_get_pages(struct iov_iter *i,
1303 struct page **pages, size_t maxsize, unsigned maxpages,
1304 size_t *start)
1305 {
1306 unsigned int iter_head, npages;
1307 size_t capacity;
1308
1309 if (!maxsize)
1310 return 0;
1311
1312 if (!sanity(i))
1313 return -EFAULT;
1314
1315 data_start(i, &iter_head, start);
1316 /* Amount of free space: some of this one + all after this one */
1317 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1318 capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1319
1320 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1321 }
1322
1323 ssize_t iov_iter_get_pages(struct iov_iter *i,
1324 struct page **pages, size_t maxsize, unsigned maxpages,
1325 size_t *start)
1326 {
1327 if (maxsize > i->count)
1328 maxsize = i->count;
1329
1330 if (unlikely(iov_iter_is_pipe(i)))
1331 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1332 if (unlikely(iov_iter_is_discard(i)))
1333 return -EFAULT;
1334
1335 iterate_all_kinds(i, maxsize, v, ({
1336 unsigned long addr = (unsigned long)v.iov_base;
1337 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1338 int n;
1339 int res;
1340
1341 if (len > maxpages * PAGE_SIZE)
1342 len = maxpages * PAGE_SIZE;
1343 addr &= ~(PAGE_SIZE - 1);
1344 n = DIV_ROUND_UP(len, PAGE_SIZE);
1345 res = get_user_pages_fast(addr, n,
1346 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
1347 pages);
1348 if (unlikely(res <= 0))
1349 return res;
1350 return (res == n ? len : res * PAGE_SIZE) - *start;
1351 0;}),({
1352 /* can't be more than PAGE_SIZE */
1353 *start = v.bv_offset;
1354 get_page(*pages = v.bv_page);
1355 return v.bv_len;
1356 }),({
1357 return -EFAULT;
1358 })
1359 )
1360 return 0;
1361 }
1362 EXPORT_SYMBOL(iov_iter_get_pages);
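/*
 * Example (illustrative sketch only): pinning the pages behind the next
 * chunk of an iterator, roughly what direct-I/O style users do. The helper
 * name is hypothetical; note that iov_iter_get_pages() does not advance @i,
 * so a real caller would iov_iter_advance() by the returned length.
 */
static ssize_t __maybe_unused example_pin_pages(struct iov_iter *i,
		struct page **pages, unsigned int maxpages, size_t *offset)
{
	ssize_t bytes;
	int n, k;

	bytes = iov_iter_get_pages(i, pages, maxpages * PAGE_SIZE,
				   maxpages, offset);
	if (bytes <= 0)
		return bytes;

	/* Released again immediately here, purely to show the put_page() pairing. */
	n = DIV_ROUND_UP(bytes + *offset, PAGE_SIZE);
	for (k = 0; k < n; k++)
		put_page(pages[k]);
	return bytes;
}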
1363
1364 static struct page **get_pages_array(size_t n)
1365 {
1366 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1367 }
1368
1369 static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1370 struct page ***pages, size_t maxsize,
1371 size_t *start)
1372 {
1373 struct page **p;
1374 unsigned int iter_head, npages;
1375 ssize_t n;
1376
1377 if (!maxsize)
1378 return 0;
1379
1380 if (!sanity(i))
1381 return -EFAULT;
1382
1383 data_start(i, &iter_head, start);
1384 /* Amount of free space: some of this one + all after this one */
1385 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1386 n = npages * PAGE_SIZE - *start;
1387 if (maxsize > n)
1388 maxsize = n;
1389 else
1390 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1391 p = get_pages_array(npages);
1392 if (!p)
1393 return -ENOMEM;
1394 n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1395 if (n > 0)
1396 *pages = p;
1397 else
1398 kvfree(p);
1399 return n;
1400 }
1401
1402 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1403 struct page ***pages, size_t maxsize,
1404 size_t *start)
1405 {
1406 struct page **p;
1407
1408 if (maxsize > i->count)
1409 maxsize = i->count;
1410
1411 if (unlikely(iov_iter_is_pipe(i)))
1412 return pipe_get_pages_alloc(i, pages, maxsize, start);
1413 if (unlikely(iov_iter_is_discard(i)))
1414 return -EFAULT;
1415
1416 iterate_all_kinds(i, maxsize, v, ({
1417 unsigned long addr = (unsigned long)v.iov_base;
1418 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1419 int n;
1420 int res;
1421
1422 addr &= ~(PAGE_SIZE - 1);
1423 n = DIV_ROUND_UP(len, PAGE_SIZE);
1424 p = get_pages_array(n);
1425 if (!p)
1426 return -ENOMEM;
1427 res = get_user_pages_fast(addr, n,
1428 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
1429 if (unlikely(res <= 0)) {
1430 kvfree(p);
1431 *pages = NULL;
1432 return res;
1433 }
1434 *pages = p;
1435 return (res == n ? len : res * PAGE_SIZE) - *start;
1436 0;}),({
1437 /* can't be more than PAGE_SIZE */
1438 *start = v.bv_offset;
1439 *pages = p = get_pages_array(1);
1440 if (!p)
1441 return -ENOMEM;
1442 get_page(*p = v.bv_page);
1443 return v.bv_len;
1444 }),({
1445 return -EFAULT;
1446 })
1447 )
1448 return 0;
1449 }
1450 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1451
1452 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1453 struct iov_iter *i)
1454 {
1455 char *to = addr;
1456 __wsum sum, next;
1457 size_t off = 0;
1458 sum = *csum;
1459 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1460 WARN_ON(1);
1461 return 0;
1462 }
1463 iterate_and_advance(i, bytes, v, ({
1464 next = csum_and_copy_from_user(v.iov_base,
1465 (to += v.iov_len) - v.iov_len,
1466 v.iov_len);
1467 if (next) {
1468 sum = csum_block_add(sum, next, off);
1469 off += v.iov_len;
1470 }
1471 next ? 0 : v.iov_len;
1472 }), ({
1473 char *p = kmap_atomic(v.bv_page);
1474 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1475 p + v.bv_offset, v.bv_len,
1476 sum, off);
1477 kunmap_atomic(p);
1478 off += v.bv_len;
1479 }),({
1480 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1481 v.iov_base, v.iov_len,
1482 sum, off);
1483 off += v.iov_len;
1484 })
1485 )
1486 *csum = sum;
1487 return bytes;
1488 }
1489 EXPORT_SYMBOL(csum_and_copy_from_iter);
1490
1491 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1492 struct iov_iter *i)
1493 {
1494 char *to = addr;
1495 __wsum sum, next;
1496 size_t off = 0;
1497 sum = *csum;
1498 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1499 WARN_ON(1);
1500 return false;
1501 }
1502 if (unlikely(i->count < bytes))
1503 return false;
1504 iterate_all_kinds(i, bytes, v, ({
1505 next = csum_and_copy_from_user(v.iov_base,
1506 (to += v.iov_len) - v.iov_len,
1507 v.iov_len);
1508 if (!next)
1509 return false;
1510 sum = csum_block_add(sum, next, off);
1511 off += v.iov_len;
1512 0;
1513 }), ({
1514 char *p = kmap_atomic(v.bv_page);
1515 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1516 p + v.bv_offset, v.bv_len,
1517 sum, off);
1518 kunmap_atomic(p);
1519 off += v.bv_len;
1520 }),({
1521 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1522 v.iov_base, v.iov_len,
1523 sum, off);
1524 off += v.iov_len;
1525 })
1526 )
1527 *csum = sum;
1528 iov_iter_advance(i, bytes);
1529 return true;
1530 }
1531 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1532
1533 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1534 struct iov_iter *i)
1535 {
1536 struct csum_state *csstate = _csstate;
1537 const char *from = addr;
1538 __wsum sum, next;
1539 size_t off;
1540
1541 if (unlikely(iov_iter_is_pipe(i)))
1542 return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
1543
1544 sum = csstate->csum;
1545 off = csstate->off;
1546 if (unlikely(iov_iter_is_discard(i))) {
1547 WARN_ON(1); /* for now */
1548 return 0;
1549 }
1550 iterate_and_advance(i, bytes, v, ({
1551 next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1552 v.iov_base,
1553 v.iov_len);
1554 if (next) {
1555 sum = csum_block_add(sum, next, off);
1556 off += v.iov_len;
1557 }
1558 next ? 0 : v.iov_len;
1559 }), ({
1560 char *p = kmap_atomic(v.bv_page);
1561 sum = csum_and_memcpy(p + v.bv_offset,
1562 (from += v.bv_len) - v.bv_len,
1563 v.bv_len, sum, off);
1564 kunmap_atomic(p);
1565 off += v.bv_len;
1566 }),({
1567 sum = csum_and_memcpy(v.iov_base,
1568 (from += v.iov_len) - v.iov_len,
1569 v.iov_len, sum, off);
1570 off += v.iov_len;
1571 })
1572 )
1573 csstate->csum = sum;
1574 csstate->off = off;
1575 return bytes;
1576 }
1577 EXPORT_SYMBOL(csum_and_copy_to_iter);
1578
1579 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1580 struct iov_iter *i)
1581 {
1582 #ifdef CONFIG_CRYPTO_HASH
1583 struct ahash_request *hash = hashp;
1584 struct scatterlist sg;
1585 size_t copied;
1586
1587 copied = copy_to_iter(addr, bytes, i);
1588 sg_init_one(&sg, addr, copied);
1589 ahash_request_set_crypt(hash, &sg, NULL, copied);
1590 crypto_ahash_update(hash);
1591 return copied;
1592 #else
1593 return 0;
1594 #endif
1595 }
1596 EXPORT_SYMBOL(hash_and_copy_to_iter);
1597
1598 int iov_iter_npages(const struct iov_iter *i, int maxpages)
1599 {
1600 size_t size = i->count;
1601 int npages = 0;
1602
1603 if (!size)
1604 return 0;
1605 if (unlikely(iov_iter_is_discard(i)))
1606 return 0;
1607
1608 if (unlikely(iov_iter_is_pipe(i))) {
1609 struct pipe_inode_info *pipe = i->pipe;
1610 unsigned int iter_head;
1611 size_t off;
1612
1613 if (!sanity(i))
1614 return 0;
1615
1616 data_start(i, &iter_head, &off);
1617 /* some of this one + all after this one */
1618 npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
1619 if (npages >= maxpages)
1620 return maxpages;
1621 } else iterate_all_kinds(i, size, v, ({
1622 unsigned long p = (unsigned long)v.iov_base;
1623 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1624 - p / PAGE_SIZE;
1625 if (npages >= maxpages)
1626 return maxpages;
1627 0;}),({
1628 npages++;
1629 if (npages >= maxpages)
1630 return maxpages;
1631 }),({
1632 unsigned long p = (unsigned long)v.iov_base;
1633 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1634 - p / PAGE_SIZE;
1635 if (npages >= maxpages)
1636 return maxpages;
1637 })
1638 )
1639 return npages;
1640 }
1641 EXPORT_SYMBOL(iov_iter_npages);
1642
1643 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1644 {
1645 *new = *old;
1646 if (unlikely(iov_iter_is_pipe(new))) {
1647 WARN_ON(1);
1648 return NULL;
1649 }
1650 if (unlikely(iov_iter_is_discard(new)))
1651 return NULL;
1652 if (iov_iter_is_bvec(new))
1653 return new->bvec = kmemdup(new->bvec,
1654 new->nr_segs * sizeof(struct bio_vec),
1655 flags);
1656 else
1657 /* iovec and kvec have identical layout */
1658 return new->iov = kmemdup(new->iov,
1659 new->nr_segs * sizeof(struct iovec),
1660 flags);
1661 }
1662 EXPORT_SYMBOL(dup_iter);
1663
1664 static int copy_compat_iovec_from_user(struct iovec *iov,
1665 const struct iovec __user *uvec, unsigned long nr_segs)
1666 {
1667 const struct compat_iovec __user *uiov =
1668 (const struct compat_iovec __user *)uvec;
1669 int ret = -EFAULT, i;
1670
1671 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1672 return -EFAULT;
1673
1674 for (i = 0; i < nr_segs; i++) {
1675 compat_uptr_t buf;
1676 compat_ssize_t len;
1677
1678 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1679 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1680
1681 /* check for compat_size_t not fitting in compat_ssize_t .. */
1682 if (len < 0) {
1683 ret = -EINVAL;
1684 goto uaccess_end;
1685 }
1686 iov[i].iov_base = compat_ptr(buf);
1687 iov[i].iov_len = len;
1688 }
1689
1690 ret = 0;
1691 uaccess_end:
1692 user_access_end();
1693 return ret;
1694 }
1695
1696 static int copy_iovec_from_user(struct iovec *iov,
1697 const struct iovec __user *uvec, unsigned long nr_segs)
1698 {
1699 unsigned long seg;
1700
1701 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1702 return -EFAULT;
1703 for (seg = 0; seg < nr_segs; seg++) {
1704 if ((ssize_t)iov[seg].iov_len < 0)
1705 return -EINVAL;
1706 }
1707
1708 return 0;
1709 }
1710
1711 struct iovec *iovec_from_user(const struct iovec __user *uvec,
1712 unsigned long nr_segs, unsigned long fast_segs,
1713 struct iovec *fast_iov, bool compat)
1714 {
1715 struct iovec *iov = fast_iov;
1716 int ret;
1717
1718 /*
1719 * SuS says "The readv() function *may* fail if the iovcnt argument was
1720  * less than or equal to 0, or greater than {IOV_MAX}." Linux has
1721 * traditionally returned zero for zero segments, so...
1722 */
1723 if (nr_segs == 0)
1724 return iov;
1725 if (nr_segs > UIO_MAXIOV)
1726 return ERR_PTR(-EINVAL);
1727 if (nr_segs > fast_segs) {
1728 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1729 if (!iov)
1730 return ERR_PTR(-ENOMEM);
1731 }
1732
1733 if (compat)
1734 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1735 else
1736 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1737 if (ret) {
1738 if (iov != fast_iov)
1739 kfree(iov);
1740 return ERR_PTR(ret);
1741 }
1742
1743 return iov;
1744 }
1745
1746 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1747 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1748 struct iov_iter *i, bool compat)
1749 {
1750 ssize_t total_len = 0;
1751 unsigned long seg;
1752 struct iovec *iov;
1753
1754 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1755 if (IS_ERR(iov)) {
1756 *iovp = NULL;
1757 return PTR_ERR(iov);
1758 }
1759
1760 /*
1761 * According to the Single Unix Specification we should return EINVAL if
1762 * an element length is < 0 when cast to ssize_t or if the total length
1763 * would overflow the ssize_t return value of the system call.
1764 *
1765 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1766 * overflow case.
1767 */
1768 for (seg = 0; seg < nr_segs; seg++) {
1769 ssize_t len = (ssize_t)iov[seg].iov_len;
1770
1771 if (!access_ok(iov[seg].iov_base, len)) {
1772 if (iov != *iovp)
1773 kfree(iov);
1774 *iovp = NULL;
1775 return -EFAULT;
1776 }
1777
1778 if (len > MAX_RW_COUNT - total_len) {
1779 len = MAX_RW_COUNT - total_len;
1780 iov[seg].iov_len = len;
1781 }
1782 total_len += len;
1783 }
1784
1785 iov_iter_init(i, type, iov, nr_segs, total_len);
1786 if (iov == *iovp)
1787 *iovp = NULL;
1788 else
1789 *iovp = iov;
1790 return total_len;
1791 }
1792
1793 /**
1794 * import_iovec() - Copy an array of &struct iovec from userspace
1795 * into the kernel, check that it is valid, and initialize a new
1796 * &struct iov_iter iterator to access it.
1797 *
1798 * @type: One of %READ or %WRITE.
1799 * @uvec: Pointer to the userspace array.
1800 * @nr_segs: Number of elements in userspace array.
1801  * @fast_segs: Number of elements in the array pointed to by *@iovp.
1802 * @iovp: (input and output parameter) Pointer to pointer to (usually small
1803 * on-stack) kernel array.
1804 * @i: Pointer to iterator that will be initialized on success.
1805 *
1806  * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1807  * then this function places %NULL in *@iovp on return. Otherwise, a new
1808  * array will be allocated and the result placed in *@iovp. This means that
1809  * the caller may call kfree() on *@iovp regardless of whether the small
1810 * on-stack array was used or not (and regardless of whether this function
1811 * returns an error or not).
1812 *
1813 * Return: Negative error code on error, bytes imported on success
1814 */
1815 ssize_t import_iovec(int type, const struct iovec __user *uvec,
1816 unsigned nr_segs, unsigned fast_segs,
1817 struct iovec **iovp, struct iov_iter *i)
1818 {
1819 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1820 in_compat_syscall());
1821 }
1822 EXPORT_SYMBOL(import_iovec);
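/*
 * Example (illustrative sketch only): the calling convention import_iovec()
 * expects from readv()/writev()-style syscalls. The surrounding function is
 * hypothetical; a real implementation would hand &iter to ->write_iter() or
 * copy_from_iter() before freeing the iovec array.
 */
static ssize_t __maybe_unused example_do_writev(const struct iovec __user *uvec,
		unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... use &iter here; ret is the total number of importable bytes ... */

	/* Always safe: iov is left NULL when the on-stack array was used. */
	kfree(iov);
	return ret;
}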
1823
1824 int import_single_range(int rw, void __user *buf, size_t len,
1825 struct iovec *iov, struct iov_iter *i)
1826 {
1827 if (len > MAX_RW_COUNT)
1828 len = MAX_RW_COUNT;
1829 if (unlikely(!access_ok(buf, len)))
1830 return -EFAULT;
1831
1832 iov->iov_base = buf;
1833 iov->iov_len = len;
1834 iov_iter_init(i, rw, iov, 1, len);
1835 return 0;
1836 }
1837 EXPORT_SYMBOL(import_single_range);
1838
1839 int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
1840 int (*f)(struct kvec *vec, void *context),
1841 void *context)
1842 {
1843 struct kvec w;
1844 int err = -EINVAL;
1845 if (!bytes)
1846 return 0;
1847
1848 iterate_all_kinds(i, bytes, v, -EINVAL, ({
1849 w.iov_base = kmap(v.bv_page) + v.bv_offset;
1850 w.iov_len = v.bv_len;
1851 err = f(&w, context);
1852 kunmap(v.bv_page);
1853 err;}), ({
1854 w = v;
1855 err = f(&w, context);})
1856 )
1857 return err;
1858 }
1859 EXPORT_SYMBOL(iov_iter_for_each_range);
1860