/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

/* Iterator flavours; stored in iov_iter.type together with the data direction. */
enum {
	ITER_IOVEC = 0,		/* userspace struct iovec array */
	ITER_KVEC = 2,		/* kernel struct kvec array */
	ITER_BVEC = 4,		/* struct bio_vec array (pages) */
	ITER_PIPE = 8,		/* pipe buffers */
};

struct iov_iter {
	int type;		/* ITER_* flavour, OR'd with the direction (READ/WRITE) */
	size_t iov_offset;	/* offset into the first segment */
	size_t count;		/* bytes remaining in the iterator */
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;	/* remaining segments (iovec/kvec/bvec) */
		struct {		/* ITER_PIPE only */
			int idx;
			int start_idx;
		};
	};
};

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
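
/*
 * Illustrative sketch of the validation the note above asks for: bound each
 * segment before summing so the total cannot wrap.  'limit' stands for
 * whatever cap the caller enforces (the generic read/write path uses
 * MAX_RW_COUNT from <linux/fs.h>); real callers normally rely on
 * rw_copy_check_uvector() rather than open-coding this:
 *
 *	size_t total = 0;
 *	unsigned long seg;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > limit - total)
 *			return -EINVAL;
 *		total += iov[seg].iov_len;
 *	}
 */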

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

#define iov_for_each(iov, iter, start)				\
	if (!((start).type & (ITER_BVEC | ITER_PIPE)))		\
	for (iter = (start);					\
	     (iter).count &&					\
	     ((iov = iov_iter_iovec(&(iter))), 1);		\
	     iov_iter_advance(&(iter), (iov).iov_len))

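/*
 * Example use of iov_for_each() (illustrative; 'i' is a hypothetical
 * ITER_IOVEC or ITER_KVEC iterator).  'it' is a scratch copy that the macro
 * advances, so the original iterator is left untouched; 'v' holds the current
 * segment on each pass:
 *
 *	struct iovec v;
 *	struct iov_iter it;
 *
 *	iov_for_each(v, it, *i) {
 *		pr_debug("segment %p, %zu bytes\n", v.iov_base, v.iov_len);
 *	}
 */
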
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

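/*
 * Typical usage sketch (illustrative; 'kbuf', 'len' and 'iter' are
 * hypothetical).  copy_to_iter() moves kernel data out to wherever the
 * iterator points (the read(2) direction) and copy_from_iter() pulls data in
 * (the write(2) direction); both return the number of bytes actually copied,
 * which may be short:
 *
 *	size_t copied = copy_to_iter(kbuf, len, iter);
 *
 *	if (copied < len)
 *		return copied ? copied : -EFAULT;
 */
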
static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users such as pmem that depend on copy_from_iter_flushcache() having
 * stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

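/*
 * Sketch of the check the note above asks for (illustrative; 'dst', 'len' and
 * 'iter' are hypothetical).  When the architecture does not provide the
 * flushcache variant, the caller must write the destination back to
 * persistence itself:
 *
 *	size_t copied;
 *
 *	if (IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE)) {
 *		copied = copy_from_iter_flushcache(dst, len, iter);
 *	} else {
 *		copied = copy_from_iter_nocache(dst, len, iter);
 *		// explicitly write back [dst, dst + copied) here
 *	}
 */
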
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
			size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

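/*
 * Construction sketch for a kernel-memory iterator (illustrative; 'kbuf' and
 * 'len' are hypothetical).  With the constructors declared above, the kvec,
 * bvec and pipe variants expect their ITER_* flag OR'd into 'direction',
 * while iov_iter_init() takes a bare READ or WRITE for userspace iovecs.
 * READ marks the iterator as the destination of the transfer (filled with
 * copy_to_iter()); WRITE marks it as the source (drained with
 * copy_from_iter()):
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, len);
 */
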
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
}

/*
 * Get one of READ or WRITE out of iter->type without any other flags OR'd in
 * with it.
 *
 * The ?: is just for type safety.
 */
#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))

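/*
 * Example (illustrative): direct-I/O style code commonly branches on the
 * direction recovered by iov_iter_rw():
 *
 *	if (iov_iter_rw(iter) == WRITE)
 *		// data flows out of the iterator (a write-style operation)
 *	else
 *		// data flows into the iterator (a read-style operation)
 */
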
/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for such.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
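
/*
 * Pairing sketch (illustrative; 'limit' is hypothetical): bound how much of
 * the iterator one pass may consume, then restore the original count:
 *
 *	size_t old_count = iov_iter_count(iter);
 *
 *	iov_iter_truncate(iter, limit);
 *	// ... operate on at most 'limit' bytes without advancing 'iter' ...
 *	iov_iter_reexpand(iter, old_count);
 */
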
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

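/*
 * Typical import_iovec() call pattern (illustrative; 'uvec' and 'nr' are
 * hypothetical).  The fast on-stack array avoids an allocation for small
 * vectors; on return *iov is either NULL or a kmalloc()'d array, so an
 * unconditional kfree() at the end is safe:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr, ARRAY_SIZE(iovstack), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... consume 'iter' ...
 *	kfree(iov);
 */
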
#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);

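/*
 * import_single_range() sketch for wrapping one user buffer (illustrative;
 * 'buf' and 'len' are hypothetical, e.g. the arguments of a plain
 * write(2)-style handler):
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_single_range(WRITE, buf, len, &iov, &iter);
 *	if (ret)
 *		return ret;
 *	// 'iter' now describes the user buffer
 */
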
#endif