/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>

static DEFINE_MUTEX(mem_list_mutex);

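/*
 * One preregistered region of userspace memory: @entries pages starting
 * at userspace address @ua, pinned with get_user_pages_fast() and
 * translated into the host physical addresses stored in @hpas.
 * @used counts mm_iommu_get()/mm_iommu_put() references (protected by
 * mem_list_mutex), @mapped counts active mappings taken via
 * mm_iommu_mapped_inc(), and @pageshift is the largest IOMMU page size
 * the region can back.
 */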
struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	unsigned int pageshift;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};

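/*
 * Account the pages we are about to pin (or have just unpinned) against
 * the owning mm's locked_vm, enforcing RLIMIT_MEMLOCK unless the caller
 * has CAP_IPC_LOCK.
 */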
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

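/* Tell whether this mm has any memory preregistered for IOMMU use. */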
bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
					int **resultp)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want this allocation to trigger the OOM killer if we
	 * can possibly avoid it.
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}

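/*
 * Move a single page out of the CMA area before long-term pinning so
 * that the pin does not block future contiguous allocations. Compound
 * (huge) pages are skipped for now.
 */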
static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CMA);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	return 0;
}

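/*
 * Preregister a region of userspace memory for IOMMU use: account it
 * against RLIMIT_MEMLOCK, pin every page with get_user_pages_fast()
 * (migrating pages out of CMA first), record the host physical address
 * of each page in hpas[] and work out the largest IOMMU page size the
 * region can back. Repeated calls for the same (ua, entries) pair just
 * bump the usage count; overlapping but non-identical requests fail
 * with -EINVAL.
 */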
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	unsigned int pageshift;
	unsigned long flags;
	unsigned long cur_ua;
	struct page *page = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}

	}

	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	/*
	 * As a starting point for the maximum page size calculation, use
	 * the natural alignment of @ua and @entries, which allows IOMMU
	 * pages smaller than huge pages but still bigger than PAGE_SIZE.
	 */
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		cur_ua = ua + (i << PAGE_SHIFT);
		if (1 != get_user_pages_fast(cur_ua,
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}
		/*
		 * If the page came from the CMA zone, move it out if we
		 * can, since we are about to pin it for a long time.
		 * NOTE: faulting in + migration can be expensive;
		 * batching could be considered later.
		 */
		if (is_migrate_cma_page(page)) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(cur_ua,
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		pageshift = PAGE_SHIFT;
		if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
			pte_t *pte;
			struct page *head = compound_head(page);
			unsigned int compshift = compound_order(head);
			unsigned int pteshift;

			local_irq_save(flags); /* also disables interrupts */
			pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);

			/* Double check it is still the same pinned page */
			if (pte && pte_page(*pte) == head &&
			    pteshift == compshift + PAGE_SHIFT)
				pageshift = max_t(unsigned int, pteshift,
						PAGE_SHIFT);
			local_irq_restore(flags);
		}
		mem->pageshift = min(mem->pageshift, pageshift);
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);
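
/*
 * Typical life cycle of a preregistered region, as an illustrative
 * sketch only (not taken from an actual caller; this API is used by,
 * for example, the VFIO SPAPR TCE driver):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	mm_iommu_get(mm, ua, entries, &mem);	    pin and account pages
 *	...
 *	mem = mm_iommu_lookup(mm, ua, size);	    find the region by range
 *	if (mem && !mm_iommu_mapped_inc(mem)) {     take a mapping reference
 *		mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa);
 *		... program hpa into the IOMMU table ...
 *		mm_iommu_mapped_dec(mem);	    drop it when unmapping
 *	}
 *	...
 *	mm_iommu_put(mm, mem);			    unpin when done
 */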

static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{

	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

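/*
 * Unlink the region from the per-mm list and free it after an RCU grace
 * period, so that lockless walkers (mm_iommu_lookup_rm()) never see a
 * freed entry.
 */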
static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

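/*
 * Drop a usage reference taken by mm_iommu_get(). When the last
 * reference goes away the region is released and its pages are
 * un-accounted from locked_vm, unless it still has active mappings,
 * in which case -EBUSY is returned and the reference is kept.
 */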
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

	mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

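/*
 * Find the preregistered region that fully covers the range
 * [ua, ua + size), or return NULL if there is none.
 */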
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

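/*
 * Real-mode variant of mm_iommu_lookup(): walks the list with the
 * lockless iterator since real-mode callers cannot use the usual
 * locking primitives.
 */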
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);

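/* Find a region by its exact (ua, entries) pair, or return NULL. */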
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

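/*
 * Translate a userspace address within a preregistered region into the
 * host physical address of the backing page. Fails with -EFAULT if the
 * address is outside the region or if the requested page size is larger
 * than the region can back.
 */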
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

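/*
 * Real-mode variant of mm_iommu_ua_to_hpa(): hpas[] lives in vmalloc
 * space, which cannot be dereferenced in real mode, so the entry's
 * address is first converted to a physical address with
 * vmalloc_to_phys() and read through that.
 */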
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = *pa | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);

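/*
 * @mapped starts at 1 when a region is created. mm_iommu_mapped_inc()
 * takes an extra reference for every active mapping and fails once the
 * last mm_iommu_put() has dropped the count to zero;
 * mm_iommu_mapped_dec() releases a mapping reference but never drops
 * the count below 1 while the region is still alive.
 */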
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

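/* Initialise the per-mm list of preregistered regions. */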
void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}