// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IOMMU helpers in MMU context.
 *
 * Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
#include <linux/mm_inline.h>

static DEFINE_MUTEX(mem_list_mutex);

#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY	0x1
#define MM_IOMMU_TABLE_GROUP_PAGE_MASK	~(SZ_4K - 1)

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	unsigned int pageshift;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas/hpages[] */
	/*
	 * In mm_iommu_do_alloc() we temporarily use this to store
	 * struct page pointers.
	 *
	 * We need to convert ua to hpa in real mode. Make it
	 * simpler by storing the physical address.
	 */
	union {
		struct page **hpages;	/* vmalloc'ed */
		phys_addr_t *hpas;
	};
#define MM_IOMMU_TABLE_INVALID_HPA	((uint64_t)-1)
	u64 dev_hpa;		/* Device memory base address */
};

bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

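/*
 * Pin @entries pages of normal memory starting at userspace address @ua (or,
 * if @dev_hpa is a valid device memory base, skip pinning entirely), account
 * them against the locked_vm limit and add the region to the per-mm list.
 * The largest IOMMU page size usable for the region is derived from the
 * natural alignment of @ua and @entries and from the backing page size, and
 * is stored in mem->pageshift.
 */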
static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
			      unsigned long entries, unsigned long dev_hpa,
			      struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem, *mem2;
	long i, ret, locked_entries = 0, pinned = 0;
	unsigned int pageshift;
	unsigned long entry, chunk;

	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
		ret = account_locked_vm(mm, entries, true);
		if (ret)
			return ret;

		locked_entries = entries;
	}

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) {
		mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
		mem->dev_hpa = dev_hpa;
		goto good_exit;
	}
	mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;

	/*
	 * As a starting point for the maximum page size calculation, use
	 * the natural alignment of @ua and @entries; this allows IOMMU
	 * pages smaller than huge pages but still bigger than PAGE_SIZE.
	 */
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	down_read(&mm->mmap_sem);
	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
			sizeof(struct vm_area_struct *);
	chunk = min(chunk, entries);
	for (entry = 0; entry < entries; entry += chunk) {
		unsigned long n = min(entries - entry, chunk);

		ret = get_user_pages(ua + (entry << PAGE_SHIFT), n,
				FOLL_WRITE | FOLL_LONGTERM,
				mem->hpages + entry, NULL);
		if (ret == n) {
			pinned += n;
			continue;
		}
		if (ret > 0)
			pinned += ret;
		break;
	}
	up_read(&mm->mmap_sem);
	if (pinned != entries) {
		if (!ret)
			ret = -EFAULT;
		goto free_exit;
	}

	pageshift = PAGE_SHIFT;
	for (i = 0; i < entries; ++i) {
		struct page *page = mem->hpages[i];

		/*
		 * Allow IOMMU pages larger than 64k, but only when the
		 * region is backed by hugetlb pages.
		 */
		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
			pageshift = page_shift(compound_head(page));
		mem->pageshift = min(mem->pageshift, pageshift);
		/*
		 * The struct page pointer is not needed any more, switch
		 * to the physical address.
		 */
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

good_exit:
	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
		/* Overlap? */
		if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
		    (ua < (mem2->ua + (mem2->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			mutex_unlock(&mem_list_mutex);
			goto free_exit;
		}
	}

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

	mutex_unlock(&mem_list_mutex);

	*pmem = mem;

	return 0;

free_exit:
	/* Drop the page references taken by get_user_pages() */
	for (i = 0; i < pinned; i++)
		put_page(mem->hpages[i]);

	vfree(mem->hpas);
	kfree(mem);

unlock_exit:
	account_locked_vm(mm, locked_entries, false);

	return ret;
}

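/*
 * Preregister a region of normal memory: the pages are pinned and their
 * physical addresses stored in hpas[].
 */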
long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		  struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
			pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_new);

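/*
 * Preregister device memory (such as GPU memory) starting at @dev_hpa:
 * nothing is pinned, the ua->hpa translation is a constant offset.
 */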
long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		     unsigned long entries, unsigned long dev_hpa,
		     struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_newdev);

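/*
 * Drop the page references taken at preregistration time; a page is marked
 * dirty first if the DIRTY flag was set in its hpas[] entry.
 */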
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	if (!mem->hpas)
		return;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
			SetPageDirty(page);

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

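/*
 * Drop one reference to a preregistered region. When the last reference goes
 * away and no mappings remain, the pages are unpinned, the locked_vm
 * accounting is undone and the descriptor is freed after an RCU grace period.
 */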
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;
	unsigned long unlock_entries = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
		unlock_entries = mem->entries;

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	account_locked_vm(mm, unlock_entries, false);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

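/* Find a preregistered region fully covering [ua, ua + size) */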
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
		    (ua + size <= mem->ua +
		     (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

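/*
 * Real mode variant of mm_iommu_lookup(); uses the lockless list iterator
 * since this can be called with the MMU off, where the usual RCU list
 * debugging checks cannot run.
 */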
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
		    (ua + size <= mem->ua +
		     (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}

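/*
 * Find an already preregistered region exactly matching @ua and @entries and
 * take an extra reference to it.
 */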
struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			++mem->used;
			break;
		}
	}

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

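/*
 * Translate @ua within a preregistered region into a host physical address,
 * failing if the requested @pageshift is larger than the region can back.
 */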
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	if (!mem->hpas) {
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	va = &mem->hpas[entry];
	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

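/*
 * Real mode variant of mm_iommu_ua_to_hpa(): the vmalloc'ed hpas[] array
 * cannot be dereferenced through its virtual address with the MMU off, so
 * translate the entry's address with vmalloc_to_phys() first.
 */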
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	if (!mem->hpas) {
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
	if (!pa)
		return -EFAULT;

	*hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}

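/*
 * Record the dirty state of the page backing @ua in its hpas[] entry so that
 * SetPageDirty() is called on it when the region is finally unpinned.
 */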
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
{
	struct mm_iommu_table_group_mem_t *mem;
	long entry;
	void *va;
	unsigned long *pa;

	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
	if (!mem)
		return;

	if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
		return;

	entry = (ua - mem->ua) >> PAGE_SHIFT;
	va = &mem->hpas[entry];

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return;

	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
}

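/*
 * Check whether @hpa falls within preregistered device memory and, if so,
 * report how much of it (at most 1 << pageshift) is covered starting at @hpa.
 */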
bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long end;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
			continue;

		end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
		if ((mem->dev_hpa <= hpa) && (hpa < end)) {
			/*
			 * Since the IOMMU page size might be bigger than
			 * PAGE_SIZE, the amount of preregistered memory
			 * starting from @hpa might be smaller than 1<<pageshift
			 * and the caller needs to distinguish this situation.
			 */
			*size = min(1UL << pageshift, end - hpa);
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(mm_iommu_is_devmem);

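/*
 * Take a mapping reference to keep the region alive while hardware tables
 * refer to it; fails once the last mm_iommu_put() has dropped @mapped to zero.
 */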
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

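/*
 * Drop a mapping reference; @mapped never drops below 1 here, the final
 * decrement is done by mm_iommu_put().
 */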
void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

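/* Initialise the per-mm list of preregistered regions */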
void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}