/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit;
}
EXPORT_SYMBOL_GPL(init_iova_domain);

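/*
 * Usage sketch (hypothetical caller, not part of this file): an IOMMU
 * driver typically passes the IOMMU page size as the granule; here we
 * assume 4KiB pages, pfn 1 as the first allocatable pfn, and a 32-bit
 * DMA boundary:
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, SZ_4K, 1UL,
 *			 DMA_BIT_MASK(32) >> PAGE_SHIFT);
 */
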
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/*
 * Computes the padding size required to make the start address
 * naturally aligned on the power-of-two order of its size.
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}

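/*
 * Worked example: with size = 5 and limit_pfn = 98, __roundup_pow_of_two(5)
 * is 8, so the mask is 7 and pad_size = (98 + 1 - 5) & 7 = 6. The caller
 * then places pfn_lo at 98 - (5 + 6) + 1 = 88, which is 8-aligned. With
 * limit_pfn = 100 the padding is 0, because 100 - 5 + 1 = 96 is already
 * aligned.
 */
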
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new_iova into domain rbtree by holding writer lock */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put the new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add the new node and rebalance the tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put the new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add the new node and rebalance the tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

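/*
 * Usage sketch (hypothetical driver init path): the cache is reference
 * counted, so each successful iova_cache_get() must be balanced by an
 * iova_cache_put() on the teardown path.
 *
 *	int ret = iova_cache_get();
 *
 *	if (ret)
 *		return ret;
 *	...
 *	iova_cache_put();
 */
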
/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of the allocation, in page frames
 * @limit_pfn: - max limit address
 * @size_aligned: - set if a size-aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

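/*
 * Usage sketch (hypothetical DMA-map path; iovad, nrpages and dma_mask
 * are the caller's): allocate a size-aligned range below the device's
 * DMA mask, and release it with __free_iova() on unmap.
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&iovad, nrpages, dma_mask >> PAGE_SHIFT, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	...
 *	__free_iova(&iovad, iova);
 */
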
/**
 * find_iova - finds the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns the iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain.
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that was allocated previously
 * This function finds the iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher page frame address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is never handed out by alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/* We are here either because this is the first reserved node
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

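/*
 * Usage sketch (hypothetical: res is a device's MMIO struct resource):
 * carving such a window out of the allocatable space ensures alloc_iova()
 * never hands out addresses that would shadow it.
 *
 *	reserve_iova(&iovad, res->start >> PAGE_SHIFT,
 *		     res->end >> PAGE_SHIFT);
 */
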
/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
				iova->pfn_hi, iova->pfn_lo);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");