// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/init.h>
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

#define IOMMU_DEFAULT_IOVA_MAX_ALIGN_SHIFT	9
static unsigned long iommu_max_align_shift __read_mostly = IOMMU_DEFAULT_IOVA_MAX_ALIGN_SHIFT;

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);

static unsigned long limit_align_shift(struct iova_domain *iovad, unsigned long shift)
{
	unsigned long max_align_shift;

	max_align_shift = iommu_max_align_shift + PAGE_SHIFT - iova_shift(iovad);
	return min_t(unsigned long, max_align_shift, shift);
}

#ifndef MODULE
static int __init iommu_set_def_max_align_shift(char *str)
{
	unsigned long max_align_shift;

	int ret = kstrtoul(str, 10, &max_align_shift);

	if (!ret)
		iommu_max_align_shift = max_align_shift;

	return 0;
}
early_param("iommu.max_align_shift", iommu_set_def_max_align_shift);
#endif
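
/*
 * Illustrative note: with the default IOMMU_DEFAULT_IOVA_MAX_ALIGN_SHIFT
 * of 9 and a 4K granule, limit_align_shift() caps size-aligned allocations
 * at 2^9 granules (2MB) of alignment. Booting with, for example,
 * "iommu.max_align_shift=5" would lower that cap to 2^5 granules (128K),
 * assuming the early_param() parser above is built in (!MODULE).
 */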

static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct iova_domain *iovad;

	iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);

	free_cpu_cached_iovas(cpu, iovad);
	return 0;
}

static void free_global_cached_iovas(struct iova_domain *iovad);

static struct iova *to_iova(struct rb_node *node)
{
	return rb_entry(node, struct iova, node);
}

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->flush_cb = NULL;
	iovad->fq = NULL;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
	iovad->best_fit = false;
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
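
/*
 * Illustrative note on the parameters above: "granule" is the IOVA
 * allocation granularity, normally the smallest IOMMU page size, and
 * "start_pfn" is the first allocatable page frame in granule units.
 * For example, a hypothetical domain set up with
 * init_iova_domain(&iovad, SZ_4K, SZ_1M >> 12) would never hand out
 * IOVAs below 1MB. The domain owner may set iovad->best_fit afterwards
 * to switch alloc_iova() to the best-fit allocator below.
 */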

static bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return !!iovad->fq;
}

static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!has_iova_flush_queue(iovad))
		return;

	del_timer_sync(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq = NULL;
	iovad->flush_cb = NULL;
	iovad->entry_dtor = NULL;
}

int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	struct iova_fq __percpu *queue;
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt, 0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue)
		return -ENOMEM;

	iovad->flush_cb = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(queue, cpu);
		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);
	}

	iovad->fq = queue;

	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}
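
/*
 * Illustrative note: once a flush queue is set up, a caller would
 * typically hand freed IOVA ranges to queue_iova() below rather than
 * free_iova_fast(); the ranges are only recycled after flush_cb() has
 * invalidated the IOTLB, either from fq_flush_timeout() or when a
 * per-CPU ring fills up.
 */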

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = to_iova(iovad->cached32_node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo))
		iovad->cached32_node = rb_next(&free->node);

	if (free->pfn_lo < iovad->dma_32bit_pfn)
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;

	cached_iova = to_iova(iovad->cached_node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
{
	struct rb_node *node, *next;
	/*
	 * Ideally what we'd like to judge here is whether limit_pfn is close
	 * enough to the highest-allocated IOVA that starting the allocation
	 * walk from the anchor node will be quicker than this initial work to
	 * find an exact starting point (especially if that ends up being the
	 * anchor node anyway). This is an incredibly crude approximation which
	 * only really helps the most likely case, but is at least trivially easy.
	 */
	if (limit_pfn > iovad->dma_32bit_pfn)
		return &iovad->anchor.node;

	node = iovad->rbroot.rb_node;
	while (to_iova(node)->pfn_hi < limit_pfn)
		node = node->rb_right;

search_left:
	while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
		node = node->rb_left;

	if (!node->rb_left)
		return node;

	next = node->rb_left;
	while (next->rb_right) {
		next = next->rb_right;
		if (to_iova(next)->pfn_lo >= limit_pfn) {
			node = next;
			goto search_left;
		}
	}

	return node;
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = to_iova(*new);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn, retry_pfn;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
	    size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = to_iova(curr);
	retry_pfn = curr_iova->pfn_hi;

retry:
	do {
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = to_iova(curr);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = retry_pfn + 1;
			curr = iova_find_limit(iovad, limit_pfn);
			curr_iova = to_iova(curr);
			goto retry;
		}
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}
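
/*
 * Worked example (illustrative): with size_aligned set and size = 5
 * granules, fls_long(4) = 3, so align_mask becomes ~0UL << 3 (subject to
 * the limit_align_shift() cap), and new_pfn = (high_pfn - 5) & ~7UL rounds
 * each candidate start down to an 8-granule boundary -- the
 * roundup_power_of_two(size) alignment documented at alloc_iova() below.
 */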

static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova, *prev_iova;
	unsigned long flags;
	unsigned long align_mask = ~0UL;
	struct rb_node *candidate_rb_parent;
	unsigned long new_pfn, candidate_pfn = ~0UL;
	unsigned long gap, candidate_gap = ~0UL;

	if (size_aligned)
		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	curr = &iovad->anchor.node;
	prev = rb_prev(curr);
	for (; prev; curr = prev, prev = rb_prev(curr)) {
		curr_iova = rb_entry(curr, struct iova, node);
		prev_iova = rb_entry(prev, struct iova, node);

		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
		new_pfn = (limit_pfn - size) & align_mask;
		gap = curr_iova->pfn_lo - prev_iova->pfn_hi - 1;
		if ((limit_pfn >= size) && (new_pfn > prev_iova->pfn_hi)
				&& (gap < candidate_gap)) {
			candidate_gap = gap;
			candidate_pfn = new_pfn;
			candidate_rb_parent = curr;
			if (gap == size)
				goto insert;
		}
	}

	curr_iova = rb_entry(curr, struct iova, node);
	limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
	new_pfn = (limit_pfn - size) & align_mask;
	gap = curr_iova->pfn_lo - iovad->start_pfn;
	if (limit_pfn >= size && new_pfn >= iovad->start_pfn &&
	    gap < candidate_gap) {
		candidate_gap = gap;
		candidate_pfn = new_pfn;
		candidate_rb_parent = curr;
	}

insert:
	if (candidate_pfn == ~0UL) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return -ENOMEM;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = candidate_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, candidate_rb_parent);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;
}
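
/*
 * Note (illustrative): the best-fit walk above scans every gap between
 * allocated ranges, remembers the smallest gap that can still hold the
 * (aligned) request, and stops early on an exact fit. It trades the
 * cached starting point used by __alloc_and_insert_iova_range() for less
 * fragmentation, and is only used when iovad->best_fit is set.
 */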

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

static struct iova *alloc_iova_mem(void)
{
	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}

static void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
					      iova_cpuhp_dead);
		if (ret) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't register cpuhp handler\n");
			return ret;
		}

		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users) {
		cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
		kmem_cache_destroy(iova_cache);
	}
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
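
/*
 * Illustrative pairing sketch (hypothetical caller): users of this
 * allocator balance iova_cache_get() and iova_cache_put() around the
 * lifetime of their domains, e.g.
 *
 *	ret = iova_cache_get();
 *	if (ret)
 *		return ret;
 *	init_iova_domain(&iovad, granule, start_pfn);
 *	...
 *	put_iova_domain(&iovad);
 *	iova_cache_put();
 *
 * where "iovad", "granule" and "start_pfn" are the caller's own domain
 * and layout parameters. The last iova_cache_put() tears down the
 * kmem_cache and the CPU hotplug callback registered above.
 */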

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	if (iovad->best_fit) {
		ret = __alloc_and_insert_iova_best_fit(iovad, size,
				limit_pfn + 1, new_iova, size_aligned);
	} else {
		ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
				new_iova, size_aligned);
	}

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = to_iova(node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void remove_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	remove_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	if (!iova) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return;
	}
	remove_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);
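
/*
 * Illustrative usage sketch (hypothetical caller): a DMA mapping layer
 * would typically convert a byte size and device DMA limit into granule
 * units before calling the fast path, e.g.
 *
 *	shift = iova_shift(iovad);
 *	pfn = alloc_iova_fast(iovad, iova_align(iovad, size) >> shift,
 *			      dma_limit >> shift, true);
 *	if (!pfn)
 *		return DMA_MAPPING_ERROR;
 *	dma_addr = (dma_addr_t)pfn << shift;
 *
 * where "size", "dma_limit" and "dma_addr" are the caller's variables.
 * The matching release goes through free_iova_fast() or, when a flush
 * queue is in use, queue_iova().
 */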

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

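/*
 * Note (illustrative): the flush queue is a simple per-CPU circular
 * buffer. "head" is the oldest unfreed entry and "tail" is the next free
 * slot; the ring is empty when head == tail, and fq_full() keeps one slot
 * unused, so a full ring is (tail + 1) % IOVA_FQ_SIZE == head.
 */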
#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
	unsigned idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		if (iovad->entry_dtor)
			iovad->entry_dtor(fq->entries[idx].data);

		free_iova_fast(iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void iova_domain_flush(struct iova_domain *iovad)
{
	atomic64_inc(&iovad->fq_flush_start_cnt);
	iovad->flush_cb(iovad);
	atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
	int cpu;

	/*
	 * This code runs when the iova_domain is being destroyed, so don't
	 * bother to free iovas, just call the entry_dtor on all remaining
	 * entries.
	 */
	if (!iovad->entry_dtor)
		return;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		int idx;

		fq_ring_for_each(idx, fq)
			iovad->entry_dtor(fq->entries[idx].data);
	}
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
	int cpu;

	atomic_set(&iovad->fq_timer_on, 0);
	iova_domain_flush(iovad);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data)
{
	struct iova_fq *fq;
	unsigned long flags;
	unsigned idx;

	/*
	 * Order against the IOMMU driver's pagetable update from unmapping
	 * @pte, to guarantee that iova_domain_flush() observes that if called
	 * from a different CPU before we release the lock below. Full barrier
	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
	 * written fq state here.
	 */
	smp_mb();

	fq = raw_cpu_ptr(iovad->fq);
	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(iovad, fq);

	if (fq_full(fq)) {
		iova_domain_flush(iovad);
		fq_ring_free(iovad, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages = pages;
	fq->entries[idx].data = data;
	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&iovad->fq_timer_on) &&
	    !atomic_xchg(&iovad->fq_timer_on, 1))
		mod_timer(&iovad->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
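
/*
 * Illustrative usage sketch (hypothetical caller): after unmapping the
 * IOMMU page tables for a buffer, a caller that set up a flush queue
 * would defer the IOVA release, e.g.
 *
 *	queue_iova(iovad, iova_pfn(iovad, dma_addr),
 *		   size >> iova_shift(iovad), 0);
 *
 * where "dma_addr" and the granule-aligned "size" are the caller's
 * variables; the range only returns to the allocator once flush_cb() has
 * invalidated the IOTLB.
 */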

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					    &iovad->cpuhp_dead);

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = to_iova(node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher page frame address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = to_iova(node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
			    (pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/*
	 * We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

/*
 * Magazine caches for IOVA ranges. For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};
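
/*
 * Note (illustrative): each CPU keeps two magazines per size class.
 * "loaded" is the magazine currently being pushed to and popped from;
 * "prev" is a spare that gets swapped in when "loaded" is full on insert
 * or empty on lookup, before falling back to the global depot in
 * struct iova_rcache.
 */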

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		remove_iova(iovad, iova);
		free_iova_mem(iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	BUG_ON(iova_magazine_empty(mag));

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success. Can fail if rcache is full and we can't free
 * space, and free_iova_fast() (our only caller) will then return the
 * IOVA range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}
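
/*
 * Worked example (illustrative): the rcache is organized in power-of-two
 * size classes, so a 5-page range maps to order_base_2(5) == 3, i.e.
 * rcaches[3], the 8-page class. Ranges larger than
 * 2^(IOVA_RANGE_CACHE_MAX_SIZE - 1) pages bypass the cache entirely and
 * always go back to the rbtree.
 */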

/*
 * Caller wants to allocate a new IOVA range from 'rcache'. If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache. Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}
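
/*
 * Note (illustrative): iova_rcache_get() passes "limit_pfn - size" down to
 * the magazine pop because a cached entry records its pfn_lo while the
 * caller's limit bounds the end of the range; subtracting the size ensures
 * any returned pfn plus the requested size still fits below the limit
 * handed to alloc_iova_fast().
 */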

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

/*
 * free all the IOVA ranges of global cache
 */
static void free_global_cached_iovas(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_irqsave(&rcache->lock, flags);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		rcache->depot_size = 0;
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");