// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <asm/shmparam.h>

#include "memmap.h"
#include "kbuf.h"

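/*
 * Attempt to allocate the whole range as one physically contiguous
 * allocation, marked compound for multi-page orders. On success, each
 * entry of @pages points at the matching sub-page and the kernel
 * virtual address of the head page is returned.
 */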
static void *io_mem_alloc_compound(struct page **pages, int nr_pages,
				   size_t size, gfp_t gfp)
{
	struct page *page;
	int i, order;

	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-ENOMEM);
	else if (order)
		gfp |= __GFP_COMP;

	page = alloc_pages(gfp, order);
	if (!page)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;

	return page_address(page);
}

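/*
 * Fallback when a contiguous allocation fails: allocate every page
 * individually and stitch them into one virtual range with vmap().
 * Already-allocated pages are dropped on failure.
 */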
static void *io_mem_alloc_single(struct page **pages, int nr_pages, size_t size,
				 gfp_t gfp)
{
	void *ret;
	int i;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(gfp);
		if (!pages[i])
			goto err;
	}

	ret = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (ret)
		return ret;
err:
	while (i--)
		put_page(pages[i]);
	return ERR_PTR(-ENOMEM);
}

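/*
 * Allocate and map kernel memory backing @size bytes, preferring a
 * single contiguous allocation and falling back to per-page vmap().
 * On success the page array and count are handed back to the caller
 * for later mmap and teardown.
 */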
void *io_pages_map(struct page ***out_pages, unsigned short *npages,
		   size_t size)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
	struct page **pages;
	int nr_pages;
	void *ret;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pages = kvmalloc_array(nr_pages, sizeof(struct page *), gfp);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = io_mem_alloc_compound(pages, nr_pages, size, gfp);
	if (!IS_ERR(ret))
		goto done;
	if (nr_pages == 1)
		goto fail;

	ret = io_mem_alloc_single(pages, nr_pages, size, gfp);
	if (!IS_ERR(ret)) {
done:
		*out_pages = pages;
		*npages = nr_pages;
		return ret;
	}
fail:
	kvfree(pages);
	*out_pages = NULL;
	*npages = 0;
	return ret;
}

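/*
 * Tear down a mapping created by io_pages_map(). Only the head page
 * needs putting for the compound case, and only the non-compound
 * multi-page case was vmap()ed and hence needs vunmap().
 */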
void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
		    bool put_pages)
{
	bool do_vunmap = false;

	if (!ptr)
		return;

	if (put_pages && *npages) {
		struct page **to_free = *pages;
		int i;

		/*
		 * Only did vmap for the non-compound multiple page case.
		 * For the compound page, we just need to put the head.
		 */
		if (PageCompound(to_free[0]))
			*npages = 1;
		else if (*npages > 1)
			do_vunmap = true;
		for (i = 0; i < *npages; i++)
			put_page(to_free[i]);
	}
	if (do_vunmap)
		vunmap(ptr);
	kvfree(*pages);
	*pages = NULL;
	*npages = 0;
}

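/*
 * Drop pages that were pinned via io_pin_pages() and free the page
 * array itself.
 */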
void io_pages_free(struct page ***pages, int npages)
{
	struct page **page_array = *pages;

	if (!page_array)
		return;

	unpin_user_pages(page_array, npages);
	kvfree(page_array);
	*pages = NULL;
}

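/*
 * Pin the user pages backing [uaddr, uaddr + len) for long-term use,
 * returning the populated page array. A partial pin is unwound and
 * reported as -EFAULT.
 */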
struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages;
	int ret;

	if (check_add_overflow(uaddr, len, &end))
		return ERR_PTR(-EOVERFLOW);
	if (check_add_overflow(end, PAGE_SIZE - 1, &end))
		return ERR_PTR(-EOVERFLOW);

	end = end >> PAGE_SHIFT;
	start = uaddr >> PAGE_SHIFT;
	nr_pages = end - start;
	if (WARN_ON_ONCE(!nr_pages))
		return ERR_PTR(-EINVAL);

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
					pages);
	/* success, mapped all pages */
	if (ret == nr_pages) {
		*npages = nr_pages;
		return pages;
	}

	/* partial map, or didn't map anything */
	if (ret >= 0) {
		/* if we did partial map, release any pages we did get */
		if (ret)
			unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}
	kvfree(pages);
	return ERR_PTR(ret);
}

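/*
 * Map a page-aligned, user-provided memory region into the kernel by
 * pinning its pages and vmap()ing them into one contiguous virtual
 * range.
 */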
void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
		     unsigned long uaddr, size_t size)
{
	struct page **page_array;
	unsigned int nr_pages;
	void *page_addr;

	*npages = 0;

	if (uaddr & (PAGE_SIZE - 1) || !size)
		return ERR_PTR(-EINVAL);

	nr_pages = 0;
	page_array = io_pin_pages(uaddr, size, &nr_pages);
	if (IS_ERR(page_array))
		return page_array;

	page_addr = vmap(page_array, nr_pages, VM_MAP, PAGE_KERNEL);
	if (page_addr) {
		*pages = page_array;
		*npages = nr_pages;
		return page_addr;
	}

	io_pages_free(&page_array, nr_pages);
	return ERR_PTR(-ENOMEM);
}

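/*
 * Translate an mmap offset into the kernel address of the region it
 * refers to: the SQ/CQ rings, the SQE array, or a provided buffer
 * ring. Ring regions cannot be mmap'ed if the ring was set up with
 * IORING_SETUP_NO_MMAP.
 */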
static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
					    size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;

	switch (offset & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		/* Don't allow mmap if the ring was set up without it */
		if (ctx->flags & IORING_SETUP_NO_MMAP)
			return ERR_PTR(-EINVAL);
		return ctx->rings;
	case IORING_OFF_SQES:
		/* Don't allow mmap if the ring was set up without it */
		if (ctx->flags & IORING_SETUP_NO_MMAP)
			return ERR_PTR(-EINVAL);
		return ctx->sq_sqes;
	case IORING_OFF_PBUF_RING: {
		struct io_buffer_list *bl;
		unsigned int bgid;
		void *ptr;

		bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
		bl = io_pbuf_get_bl(ctx, bgid);
		if (IS_ERR(bl))
			return bl;
		ptr = bl->buf_ring;
		io_put_bl(ctx, bl);
		return ptr;
		}
	}

	return ERR_PTR(-EINVAL);
}

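/*
 * Insert the already-allocated kernel pages into a userspace VMA.
 * VM_DONTEXPAND prevents the mapping from being grown via mremap().
 */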
int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma,
			struct page **pages, int npages)
{
	unsigned long nr_pages = npages;

	vm_flags_set(vma, VM_DONTEXPAND);
	return vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
}

#ifdef CONFIG_MMU

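/*
 * ->mmap() handler: validate the requested offset, then map the
 * matching ring, SQE array, or provided buffer ring pages into the
 * VMA.
 */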
__cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct io_ring_ctx *ctx = file->private_data;
	size_t sz = vma->vm_end - vma->vm_start;
	long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned int npages;
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	switch (offset & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
		return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
	case IORING_OFF_SQES:
		return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
						ctx->n_sqe_pages);
	case IORING_OFF_PBUF_RING:
		return io_pbuf_mmap(file, vma);
	}

	return -EINVAL;
}

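/*
 * Pick the userspace address the region gets mapped at. On
 * architectures with cache aliasing constraints (SHM_COLOUR), the
 * chosen address must alias cleanly with the kernel mapping of the
 * same memory, as the comments below spell out.
 */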
unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	void *ptr;

	/*
	 * Do not allow mapping to a user-provided address, to avoid
	 * breaking the aliasing rules. Userspace is not able to guess the
	 * offset address of kernel kmalloc()ed memory area anyway.
	 */
	if (addr)
		return -EINVAL;

	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
	if (IS_ERR(ptr))
		return -ENOMEM;

	/*
	 * Some architectures have strong cache aliasing requirements.
	 * For such architectures we need a coherent mapping which aliases
	 * kernel memory *and* userspace memory. To achieve that:
	 * - use a NULL file pointer to reference physical memory, and
	 * - use the kernel virtual address of the shared io_uring context
	 *   (instead of the userspace-provided address, which has to be 0UL
	 *   anyway), and
	 * - use the same pgoff which get_unmapped_area() uses to
	 *   calculate the page colouring.
	 * For architectures without such aliasing requirements, the
	 * architecture will return any suitable mapping because addr is 0.
	 */
	filp = NULL;
	flags |= MAP_SHARED;
	pgoff = 0;	/* has been translated to ptr above */
#ifdef SHM_COLOUR
	addr = (uintptr_t) ptr;
	pgoff = addr >> PAGE_SHIFT;
#else
	addr = 0UL;
#endif
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
}

#else /* !CONFIG_MMU */

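/*
 * Without an MMU the ring memory cannot be remapped; just check that a
 * direct shared mapping is possible.
 */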
int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL;
}

unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

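/*
 * With no MMU, "mapping" simply hands back the kernel address of the
 * validated region.
 */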
unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */