/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			copied = copy_page_from_iter(page, offset, copy, iter);
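			/*
			 * We wrote into the page on the target's behalf, so
			 * mark it dirty (set_page_dirty_lock() handles the
			 * locking) or the change could be lost on reclaim.
			 */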
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
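/*
 * On a typical 64-bit build with 4 KiB pages and 8-byte pointers that is
 * room for 1024 struct page pointers, i.e. up to 4 MiB of the remote
 * address space pinned per iteration of the copy loop below.
 */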

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);
	unsigned int flags = 0;
	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
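	/*
	 * nr_pages counts pages spanned, inclusive of both ends: with 4 KiB
	 * pages, for example, addr = 0x1ff8 and len = 0x10 touch pages 1
	 * and 2, so nr_pages = 2 even though len is only 16 bytes.
	 */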

	if (vm_write)
		flags |= FOLL_WRITE;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		size_t bytes;

		/* Get the pages we're interested in */
		pages = get_user_pages_unlocked(task, mm, pa, pages,
						process_pages, flags);
		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
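		/*
		 * Drop our references; the same process_pages array is
		 * reused for the next chunk of the range.
		 */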
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}

/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16
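/*
 * With 4 KiB pages the on-stack fast path below covers remote vectors
 * spanning up to 16 pages (64 KiB) without a kmalloc.
 */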

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
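	/*
	 * Note this is the maximum over the iovecs, not the sum: each remote
	 * vector is processed in turn and its pages are released before the
	 * next is pinned, so one array is enough.
	 */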
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *) * nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;
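	/*
	 * READ/WRITE is the direction of the transfer as seen from the
	 * local iovecs: process_vm_readv fills local buffers, so
	 * import_iovec() must check that they are writable (READ);
	 * process_vm_writev only reads from them (WRITE).
	 */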
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
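
/*
 * Userspace usage, for reference (not part of this file): a minimal,
 * hypothetical sketch of reading a remote process's memory through the
 * glibc wrapper for process_vm_readv(). The pid and remote address are
 * placeholders; the caller needs ptrace-attach permission on the target
 * (PTRACE_MODE_ATTACH_REALCREDS above), and flags must be 0.
 *
 *	#include <sys/uio.h>
 *
 *	static ssize_t read_remote(pid_t pid, void *remote_addr,
 *				   void *buf, size_t len)
 *	{
 *		struct iovec local  = { .iov_base = buf, .iov_len = len };
 *		struct iovec remote = { .iov_base = remote_addr,
 *					.iov_len = len };
 *
 *		// May return fewer bytes than requested if only part of
 *		// the remote range is mapped.
 *		return process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *	}
 */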

#ifdef CONFIG_COMPAT

static ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
316 	struct iovec iovstack_l[UIO_FASTIOV];
317 	struct iovec iovstack_r[UIO_FASTIOV];
318 	struct iovec *iov_l = iovstack_l;
319 	struct iovec *iov_r = iovstack_r;
320 	struct iov_iter iter;
321 	ssize_t rc = -EFAULT;
322 	int dir = vm_write ? WRITE : READ;
323 
324 	if (flags != 0)
325 		return -EINVAL;
326 
327 	rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
328 	if (rc < 0)
329 		return rc;
330 	if (!iov_iter_count(&iter))
331 		goto free_iovecs;
332 	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
333 					  UIO_FASTIOV, iovstack_r,
334 					  &iov_r);
335 	if (rc <= 0)
336 		goto free_iovecs;
337 
338 	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
339 
340 free_iovecs:
341 	if (iov_r != iovstack_r)
342 		kfree(iov_r);
343 	kfree(iov_l);
344 	return rc;
345 }

COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif