Lines matching references to the identifier n

Each entry below gives the source line number, the code on that line, and the cross-referencer's annotation for how n is used there (as an argument, as a local, or inside the named function). The functions and macros listed are consistent with the Linux kernel's lib/iov_iter.c.

18 #define iterate_iovec(i, n, __v, __p, skip, STEP) {	\  argument
20 size_t wanted = n; \
22 __v.iov_len = min(n, __p->iov_len - skip); \
28 n -= __v.iov_len; \
32 while (unlikely(!left && n)) { \
34 __v.iov_len = min(n, __p->iov_len); \
41 n -= __v.iov_len; \
43 n = wanted - n; \
46 #define iterate_kvec(i, n, __v, __p, skip, STEP) { \ argument
47 size_t wanted = n; \
49 __v.iov_len = min(n, __p->iov_len - skip); \
54 n -= __v.iov_len; \
56 while (unlikely(n)) { \
58 __v.iov_len = min(n, __p->iov_len); \
64 n -= __v.iov_len; \
66 n = wanted; \
69 #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \ argument
71 __start.bi_size = n; \
81 #define iterate_all_kinds(i, n, v, I, B, K) { \ argument
82 if (likely(n)) { \
87 iterate_bvec(i, n, v, __bi, skip, (B)) \
91 iterate_kvec(i, n, v, kvec, skip, (K)) \
96 iterate_iovec(i, n, v, iov, skip, (I)) \
101 #define iterate_and_advance(i, n, v, I, B, K) { \ argument
102 if (unlikely(i->count < n)) \
103 n = i->count; \
110 iterate_bvec(i, n, v, __bi, skip, (B)) \
117 iterate_kvec(i, n, v, kvec, skip, (K)) \
125 skip += n; \
129 iterate_iovec(i, n, v, iov, skip, (I)) \
137 i->count -= n; \
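
The iterate_iovec()/iterate_kvec()/iterate_bvec() macros all walk one kind of segment array in chunks: each pass takes min(n, bytes left in the current segment), runs STEP on it, subtracts the chunk from n, and moves on; wanted records the original request so the macro can report how much was actually covered. iterate_all_kinds() merely selects the right walker for the iterator's flavour, and iterate_and_advance() additionally clamps n to i->count up front and subtracts it from i->count once the walk is done. Below is a minimal userspace sketch of that chunked walk; the struct and function names are invented for illustration and are not the kernel's API.

    /* Userspace model of the iterate_iovec() pattern: walk an array of
     * segments, carving off min(n, remaining-in-segment) chunks until n
     * bytes have been processed or the segments run out. Illustrative
     * names only; not the kernel's iov_iter machinery. */
    #include <stddef.h>
    #include <stdio.h>

    struct seg { const char *base; size_t len; };

    static size_t walk_segments(const struct seg *p, size_t nsegs,
                                size_t skip, size_t n,
                                void (*step)(const char *, size_t))
    {
        size_t wanted = n;                 /* remember the full request */

        for (size_t s = 0; s < nsegs && n; s++) {
            size_t avail = p[s].len > skip ? p[s].len - skip : 0;
            size_t chunk = avail < n ? avail : n;   /* min(n, remainder) */

            if (chunk)
                step(p[s].base + skip, chunk);
            n   -= chunk;
            skip = 0;                      /* only the first segment is offset */
        }
        return wanted - n;                 /* bytes actually processed */
    }

    static void print_step(const char *base, size_t len)
    {
        printf("step: %.*s\n", (int)len, base);
    }

    int main(void)
    {
        struct seg segs[] = { { "hello ", 6 }, { "world", 5 } };

        /* start 2 bytes into the first segment, ask for 7 bytes */
        size_t done = walk_segments(segs, 2, 2, 7, print_step);
        printf("processed %zu bytes\n", done);
        return 0;
    }
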
142 static int copyout(void __user *to, const void *from, size_t n) in copyout() argument
145 return n; in copyout()
146 if (access_ok(to, n)) { in copyout()
147 instrument_copy_to_user(to, from, n); in copyout()
148 n = raw_copy_to_user(to, from, n); in copyout()
150 return n; in copyout()
153 static int copyin(void *to, const void __user *from, size_t n) in copyin() argument
156 return n; in copyin()
157 if (access_ok(from, n)) { in copyin()
158 instrument_copy_from_user(to, from, n); in copyin()
159 n = raw_copy_from_user(to, from, n); in copyin()
161 return n; in copyin()
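
copyout() and copyin() follow the kernel's usual convention of returning the number of bytes NOT copied: verify the user range with access_ok(), let the instrumentation hook (instrument_copy_to_user()/instrument_copy_from_user()) observe the transfer, then do the raw copy, whose return value is again the uncopied remainder; if the check fails, the full n comes straight back. A minimal userspace analogue, with memcpy standing in for the raw copy and a trivial pointer check standing in for access_ok():

    /* Userspace analogue of copyout(): returns bytes NOT copied, so 0
     * means complete success. memcpy stands in for raw_copy_to_user();
     * range_ok() stands in for access_ok(). Illustrative only. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static bool range_ok(const void *p, size_t n)
    {
        return p != NULL || n == 0;    /* stand-in for access_ok() */
    }

    static size_t copy_out(void *to, const void *from, size_t n)
    {
        if (!range_ok(to, n))
            return n;                  /* nothing copied */
        memcpy(to, from, n);           /* raw_copy_to_user() in the kernel */
        return 0;                      /* everything copied */
    }

    int main(void)
    {
        char src[] = "payload", dst[sizeof(src)];

        printf("left uncopied: %zu\n", copy_out(dst, src, sizeof(src)));
        printf("left uncopied (bad dest): %zu\n",
               copy_out(NULL, src, sizeof(src)));
        return 0;
    }
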
553 size_t n, off; in copy_pipe_to_iter() local
558 bytes = n = push_pipe(i, bytes, &i_head, &off); in copy_pipe_to_iter()
559 if (unlikely(!n)) in copy_pipe_to_iter()
562 size_t chunk = min_t(size_t, n, PAGE_SIZE - off); in copy_pipe_to_iter()
566 n -= chunk; in copy_pipe_to_iter()
570 } while (n); in copy_pipe_to_iter()
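
copy_pipe_to_iter() asks push_pipe() to reserve up to bytes of pipe-buffer space, then copies in a loop where each chunk is capped at PAGE_SIZE - off so it never crosses a page boundary; off is nonzero only for the first chunk, and the loop runs until n is exhausted. The same page-bounded loop shape recurs in csum_and_copy_to_pipe_iter(), copy_mc_pipe_to_iter() and pipe_zero() further down. A small userspace model of just the chunking arithmetic (the pipe bookkeeping is omitted):

    /* Model of the page-bounded chunking loop in copy_pipe_to_iter():
     * each pass copies at most (PAGE_SZ - off) bytes so no chunk crosses
     * a page boundary; off only applies to the first pass. Illustrative. */
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SZ 4096u

    static size_t copy_in_page_chunks(char *dst, const char *src,
                                      size_t n, size_t off)
    {
        size_t done = 0;

        while (n) {
            size_t chunk = PAGE_SZ - off;      /* room left in this page */
            if (chunk > n)
                chunk = n;                     /* min(n, PAGE_SZ - off)  */
            memcpy(dst + done, src + done, chunk);
            done += chunk;
            n    -= chunk;
            off   = 0;                         /* later pages start at 0 */
        }
        return done;
    }

    int main(void)
    {
        static char src[10000], dst[10000];

        memset(src, 'x', sizeof(src));
        printf("copied %zu bytes\n",
               copy_in_page_chunks(dst, src, sizeof(src), 100));
        return 0;
    }
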
591 size_t n, r; in csum_and_copy_to_pipe_iter() local
596 bytes = n = push_pipe(i, bytes, &i_head, &r); in csum_and_copy_to_pipe_iter()
597 if (unlikely(!n)) in csum_and_copy_to_pipe_iter()
600 size_t chunk = min_t(size_t, n, PAGE_SIZE - r); in csum_and_copy_to_pipe_iter()
606 n -= chunk; in csum_and_copy_to_pipe_iter()
611 } while (n); in csum_and_copy_to_pipe_iter()
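
csum_and_copy_to_pipe_iter() does the same chunked copy but also folds every chunk into a running checksum as it goes (the kernel uses csum_partial() and friends on __wsum values). The sketch below is a plain userspace version of the same idea for the 16-bit Internet ones'-complement checksum: a small state struct carries the accumulator and a possible leftover odd byte between chunks, so chunks of any size can be fed in order and the sum finished once at the end. It illustrates chunked checksum accumulation, not the kernel's csum implementation.

    /* Chunked ones'-complement checksum accumulation: feed sequential
     * chunks of any length, finish once. Not the kernel's csum_partial(). */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct csum_state {
        uint32_t sum;        /* running 32-bit accumulator            */
        int      have_odd;   /* leftover high byte from the last chunk? */
        uint8_t  odd;
    };

    static void csum_feed(struct csum_state *st, const uint8_t *p, size_t len)
    {
        size_t i = 0;

        if (st->have_odd && len) {             /* pair with leftover byte */
            st->sum += (uint32_t)(st->odd << 8) | p[i++];
            st->have_odd = 0;
        }
        for (; i + 1 < len; i += 2)            /* whole 16-bit words */
            st->sum += (uint32_t)(p[i] << 8) | p[i + 1];
        if (i < len) {                         /* odd trailing byte */
            st->odd = p[i];
            st->have_odd = 1;
        }
    }

    static uint16_t csum_finish(struct csum_state *st)
    {
        uint32_t s = st->sum;

        if (st->have_odd)
            s += (uint32_t)(st->odd << 8);     /* pad final byte with zero */
        while (s >> 16)
            s = (s & 0xffff) + (s >> 16);      /* fold carries */
        return (uint16_t)~s;
    }

    int main(void)
    {
        struct csum_state st = { 0, 0, 0 };
        const uint8_t msg[] = "abcdefg";

        csum_feed(&st, msg, 3);                /* two uneven chunks */
        csum_feed(&st, msg + 3, 4);
        printf("checksum: 0x%04x\n", (unsigned)csum_finish(&st));
        return 0;
    }
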
637 static int copyout_mc(void __user *to, const void *from, size_t n) in copyout_mc() argument
639 if (access_ok(to, n)) { in copyout_mc()
640 instrument_copy_to_user(to, from, n); in copyout_mc()
641 n = copy_mc_to_user((__force void *) to, from, n); in copyout_mc()
643 return n; in copyout_mc()
665 size_t n, off, xfer = 0; in copy_mc_pipe_to_iter() local
670 bytes = n = push_pipe(i, bytes, &i_head, &off); in copy_mc_pipe_to_iter()
671 if (unlikely(!n)) in copy_mc_pipe_to_iter()
674 size_t chunk = min_t(size_t, n, PAGE_SIZE - off); in copy_mc_pipe_to_iter()
684 n -= chunk; in copy_mc_pipe_to_iter()
688 } while (n); in copy_mc_pipe_to_iter()
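
copyout_mc() swaps the raw copy for copy_mc_to_user(), which can stop early when it hits memory poisoned by a machine check and, like the other primitives, reports the bytes left uncopied. copy_mc_pipe_to_iter() therefore keeps a separate xfer counter: after every chunk it adds only what actually landed, and a short copy ends the loop. A hedged model of that accounting, with a stand-in primitive that pretends to always succeed:

    /* Model of accumulating actually-transferred bytes when the copy
     * primitive may return a remainder (bytes NOT copied), as
     * copy_mc_pipe_to_iter() does with its xfer counter. Illustrative. */
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for copy_mc_to_user(): copies up to n bytes, returns the
     * number it could NOT copy (0 on full success). */
    static size_t copy_maybe_short(void *to, const void *from, size_t n)
    {
        memcpy(to, from, n);       /* pretend it always succeeds here */
        return 0;
    }

    static size_t copy_chunks_accounted(char *dst, const char *src, size_t n)
    {
        size_t xfer = 0;

        while (n) {
            size_t chunk = n < 4096 ? n : 4096;
            size_t rem = copy_maybe_short(dst + xfer, src + xfer, chunk);

            xfer += chunk - rem;   /* count only what really landed */
            if (rem)
                break;             /* short copy: stop and report xfer */
            n -= chunk;
        }
        return xfer;               /* bytes successfully transferred */
    }

    int main(void)
    {
        static char src[10000], dst[10000];

        printf("transferred %zu bytes\n",
               copy_chunks_accounted(dst, src, sizeof(src)));
        return 0;
    }
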
879 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) in page_copy_sane() argument
882 size_t v = n + offset; in page_copy_sane()
891 if (n <= v && v <= PAGE_SIZE) in page_copy_sane()
897 if (likely(n <= v && v <= (page_size(head)))) in page_copy_sane()
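
page_copy_sane() checks that the requested range fits within the (possibly compound) page without being fooled by unsigned overflow: with v = n + offset, the test n <= v && v <= PAGE_SIZE (or page_size(head) for a compound head) rejects both a range that runs off the end and one where the addition wrapped around, since a wrapped v is necessarily smaller than n. A standalone version of that check:

    /* Overflow-safe "does [offset, offset+n) fit in a buffer of size
     * limit?" check in the style of page_copy_sane(): n <= v rejects the
     * case where n + offset wrapped around SIZE_MAX. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool range_fits(size_t offset, size_t n, size_t limit)
    {
        size_t v = n + offset;            /* may wrap */
        return n <= v && v <= limit;      /* n > v  =>  wrapped  =>  reject */
    }

    int main(void)
    {
        assert(range_fits(100, 200, 4096));          /* fits            */
        assert(!range_fits(100, 4096, 4096));        /* runs past end   */
        assert(!range_fits(16, SIZE_MAX - 8, 4096)); /* wraps: rejected */
        return 0;
    }
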
949 size_t n, off; in pipe_zero() local
954 bytes = n = push_pipe(i, bytes, &i_head, &off); in pipe_zero()
955 if (unlikely(!n)) in pipe_zero()
959 size_t chunk = min_t(size_t, n, PAGE_SIZE - off); in pipe_zero()
963 n -= chunk; in pipe_zero()
966 } while (n); in pipe_zero()
1091 size_t n = off - b->offset; in iov_iter_revert() local
1092 if (unroll < n) { in iov_iter_revert()
1096 unroll -= n; in iov_iter_revert()
1120 size_t n = (--bvec)->bv_len; in iov_iter_revert() local
1122 if (unroll <= n) { in iov_iter_revert()
1124 i->iov_offset = n - unroll; in iov_iter_revert()
1127 unroll -= n; in iov_iter_revert()
1132 size_t n = (--iov)->iov_len; in iov_iter_revert() local
1134 if (unroll <= n) { in iov_iter_revert()
1136 i->iov_offset = n - unroll; in iov_iter_revert()
1139 unroll -= n; in iov_iter_revert()
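
iov_iter_revert() (shown here for the pipe, bvec and iovec cases) undoes part of a previous advance by walking the segment array backwards: if the unroll amount fits within the already-consumed part of the current segment it just shrinks the offset, otherwise it steps to the previous segment, takes its full length n, and either lands inside it (iov_offset = n - unroll) or subtracts n and keeps going. A small userspace sketch of that unwind over a plain segment array; the cursor type is hypothetical, not the kernel's iov_iter.

    /* Model of the backward walk in iov_iter_revert(): move "unroll"
     * bytes back across segment boundaries. The caller must not unroll
     * more than was previously advanced. Illustrative types only. */
    #include <assert.h>
    #include <stddef.h>

    struct seg { size_t len; };

    struct cursor { size_t idx; size_t off; };   /* position in the array */

    static void revert(struct cursor *c, const struct seg *segs, size_t unroll)
    {
        if (unroll <= c->off) {        /* stays within the current segment */
            c->off -= unroll;
            return;
        }
        unroll -= c->off;              /* consume the partial segment first */
        while (unroll) {
            size_t n = segs[--c->idx].len;   /* step to the previous segment */
            if (unroll <= n) {
                c->off = n - unroll;         /* land inside this segment */
                return;
            }
            unroll -= n;                     /* skip the whole segment */
        }
        c->off = 0;
    }

    int main(void)
    {
        struct seg segs[] = { {100}, {50}, {200} };
        struct cursor c = { .idx = 2, .off = 30 };  /* 30 bytes into seg 2 */

        revert(&c, segs, 60);                       /* back across a boundary */
        assert(c.idx == 1 && c.off == 20);          /* 50 - (60 - 30) = 20 */
        return 0;
    }
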
1273 ssize_t n = push_pipe(i, maxsize, &iter_head, start); in __pipe_get_pages() local
1274 if (!n) in __pipe_get_pages()
1277 maxsize = n; in __pipe_get_pages()
1278 n += *start; in __pipe_get_pages()
1279 while (n > 0) { in __pipe_get_pages()
1282 n -= PAGE_SIZE; in __pipe_get_pages()
1324 int n; in iov_iter_get_pages()
1330 n = DIV_ROUND_UP(len, PAGE_SIZE); in iov_iter_get_pages()
1331 res = get_user_pages_fast(addr, n, in iov_iter_get_pages()
1336 return (res == n ? len : res * PAGE_SIZE) - *start; in iov_iter_get_pages()
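
__pipe_get_pages() and iov_iter_get_pages() both convert a byte request into whole pages. For a user-backed iterator the page count is DIV_ROUND_UP of the request (which at that point already includes the offset into the first page), the pages are pinned with get_user_pages_fast(), and the usable byte count returned is either the whole request (when every page was pinned) or res * PAGE_SIZE minus the starting offset. A sketch of just that arithmetic:

    /* Model of the return-length arithmetic in iov_iter_get_pages():
     * given a request of len bytes starting at offset `start` within the
     * first page, and `res` pages actually pinned out of the `n`
     * requested, how many usable bytes did we get? Illustrative only. */
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SZ 4096u
    #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

    static long usable_bytes(size_t len, size_t start, long res)
    {
        long n = (long)DIV_ROUND_UP(len + start, PAGE_SZ); /* pages spanned */

        if (res <= 0)
            return res;                       /* error or nothing pinned */
        return res == n ? (long)len : res * (long)PAGE_SZ - (long)start;
    }

    int main(void)
    {
        /* 10000 bytes starting 100 bytes into the first page spans 3 pages;
         * if only 2 get pinned we can use 2*4096 - 100 = 8092 bytes. */
        printf("%ld\n", usable_bytes(10000, 100, 3));  /* 10000 */
        printf("%ld\n", usable_bytes(10000, 100, 2));  /* 8092  */
        return 0;
    }
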
1350 static struct page **get_pages_array(size_t n) in get_pages_array() argument
1352 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); in get_pages_array()
1361 ssize_t n; in pipe_get_pages_alloc() local
1372 n = npages * PAGE_SIZE - *start; in pipe_get_pages_alloc()
1373 if (maxsize > n) in pipe_get_pages_alloc()
1374 maxsize = n; in pipe_get_pages_alloc()
1380 n = __pipe_get_pages(i, maxsize, p, iter_head, start); in pipe_get_pages_alloc()
1381 if (n > 0) in pipe_get_pages_alloc()
1385 return n; in pipe_get_pages_alloc()
1405 int n; in iov_iter_get_pages_alloc()
1409 n = DIV_ROUND_UP(len, PAGE_SIZE); in iov_iter_get_pages_alloc()
1410 p = get_pages_array(n); in iov_iter_get_pages_alloc()
1413 res = get_user_pages_fast(addr, n, in iov_iter_get_pages_alloc()
1421 return (res == n ? len : res * PAGE_SIZE) - *start; in iov_iter_get_pages_alloc()
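
iov_iter_get_pages_alloc() is the same walk except that it sizes and allocates the page array itself: get_pages_array() is a one-line wrapper around kvmalloc_array(), which multiplies n by sizeof(struct page *) with overflow protection before allocating, and pipe_get_pages_alloc() trims maxsize down to the space the pipe can actually provide before calling __pipe_get_pages(). A userspace stand-in for the array allocation, using calloc() for the same overflow-checked multiply:

    /* Stand-in for get_pages_array(): allocate an array of n page
     * pointers with an overflow-checked size calculation, roughly what
     * kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL) provides.
     * calloc() performs the same n * size overflow check in userspace. */
    #include <stdlib.h>

    struct page;   /* opaque here; the kernel's struct page */

    static struct page **get_pages_array(size_t n)
    {
        return calloc(n, sizeof(struct page *));
    }

    int main(void)
    {
        struct page **pages = get_pages_array(16);

        /* ... fill with pinned pages and use them ... */
        free(pages);   /* the kernel side frees this with kvfree() */
        return 0;
    }
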