
Lines Matching refs:tbl (arch/powerpc/kernel/iommu.c)

175 struct iommu_table *tbl, in iommu_range_alloc() argument
208 pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
211 pool = &(tbl->large_pool); in iommu_range_alloc()
213 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
233 if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
234 limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
241 pool = &(tbl->pools[0]); in iommu_range_alloc()
249 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
250 dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift), in iommu_range_alloc()
259 } else if (pass <= tbl->nr_pools) { in iommu_range_alloc()
262 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1); in iommu_range_alloc()
263 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
284 pool->hint = (end + tbl->it_blocksize - 1) & in iommu_range_alloc()
285 ~(tbl->it_blocksize - 1); in iommu_range_alloc()
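
The iommu_range_alloc() matches above cover two pieces of arithmetic: picking a pool from the per-CPU hash (pool_nr = hash & (nr_pools - 1), falling back to pool 0 on the last passes) and rounding the post-allocation hint up to an it_blocksize boundary. Below is a minimal userspace sketch of just those two steps; the names (model_table, pick_pool, round_hint) and the "large allocation" cutoff are illustrative, not the kernel's.

#include <stdio.h>

struct model_pool { unsigned long start, end, hint; };

struct model_table {
        unsigned int nr_pools;          /* power of two, as the mask requires */
        struct model_pool pools[4];
        struct model_pool large_pool;
};

/* Pool selection as on lines 208-213: mask the per-CPU hash with
 * nr_pools - 1; requests above a cutoff use the single large pool. */
static struct model_pool *pick_pool(struct model_table *tbl,
                                    unsigned int cpu_hash, unsigned long npages)
{
        if (npages > 15)                /* illustrative "large allocation" cutoff */
                return &tbl->large_pool;
        return &tbl->pools[cpu_hash & (tbl->nr_pools - 1)];
}

/* Hint update as on lines 284-285: round the end of the new allocation up
 * to the next it_blocksize boundary so the next search starts there. */
static unsigned long round_hint(unsigned long end, unsigned long blocksize)
{
        return (end + blocksize - 1) & ~(blocksize - 1);
}

int main(void)
{
        struct model_table tbl = { .nr_pools = 4 };
        unsigned int i;

        for (i = 0; i < tbl.nr_pools; i++) {
                tbl.pools[i].start = 256 * i;
                tbl.pools[i].end = 256 * (i + 1);
        }
        tbl.large_pool.start = 1024;
        tbl.large_pool.end = 2048;

        printf("hash 7, 1 page   -> pool start %lu\n",
               pick_pool(&tbl, 7, 1)->start);
        printf("hash 7, 64 pages -> pool start %lu\n",
               pick_pool(&tbl, 7, 64)->start);
        printf("hint after end=300, blocksize=16 -> %lu\n",
               round_hint(300, 16));
        return 0;
}
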
297 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, in iommu_alloc() argument
307 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
312 entry += tbl->it_offset; /* Offset into real TCE table */ in iommu_alloc()
313 ret = entry << tbl->it_page_shift; /* Set the return dma address */ in iommu_alloc()
316 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
318 IOMMU_PAGE_MASK(tbl), direction, attrs); in iommu_alloc()
326 __iommu_free(tbl, ret, npages); in iommu_alloc()
331 if (tbl->it_ops->flush) in iommu_alloc()
332 tbl->it_ops->flush(tbl); in iommu_alloc()
340 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free_check() argument
345 entry = dma_addr >> tbl->it_page_shift; in iommu_free_check()
346 free_entry = entry - tbl->it_offset; in iommu_free_check()
348 if (((free_entry + npages) > tbl->it_size) || in iommu_free_check()
349 (entry < tbl->it_offset)) { in iommu_free_check()
354 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl); in iommu_free_check()
355 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno); in iommu_free_check()
356 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size); in iommu_free_check()
357 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset); in iommu_free_check()
358 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index); in iommu_free_check()
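
Taken together, the iommu_alloc() and iommu_free_check() matches show the two directions of the same conversion: a bitmap index becomes a DMA address by adding it_offset and shifting by it_page_shift, and a DMA address is bounds-checked and turned back into a bitmap index on free. A standalone sketch of that arithmetic, with an illustrative struct name (geom) and made-up geometry:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>

/* Illustrative table geometry; the field names mirror struct iommu_table. */
struct geom {
        uint64_t it_offset;             /* first entry this table owns */
        uint64_t it_size;               /* number of entries */
        unsigned int it_page_shift;
};

/* Bitmap index -> DMA address, as in iommu_alloc() lines 312-313. */
static uint64_t entry_to_dma(const struct geom *g, uint64_t free_entry)
{
        return (free_entry + g->it_offset) << g->it_page_shift;
}

/* DMA address -> bitmap index, with the bounds check iommu_free_check()
 * performs on lines 345-349 (reordered so the subtraction cannot wrap). */
static bool dma_to_entry(const struct geom *g, uint64_t dma_addr,
                         uint64_t npages, uint64_t *free_entry)
{
        uint64_t entry = dma_addr >> g->it_page_shift;

        if (entry < g->it_offset ||
            (entry - g->it_offset) + npages > g->it_size)
                return false;
        *free_entry = entry - g->it_offset;
        return true;
}

int main(void)
{
        struct geom g = { .it_offset = 0x100, .it_size = 0x1000,
                          .it_page_shift = 12 };        /* 4K IOMMU pages */
        uint64_t dma = entry_to_dma(&g, 5), back;

        printf("entry 5 -> dma 0x%" PRIx64 "\n", dma);
        if (dma_to_entry(&g, dma, 1, &back))
                printf("dma 0x%" PRIx64 " -> entry %" PRIu64 "\n", dma, back);
        return 0;
}
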
368 static struct iommu_pool *get_pool(struct iommu_table *tbl, in get_pool() argument
372 unsigned long largepool_start = tbl->large_pool.start; in get_pool()
376 p = &tbl->large_pool; in get_pool()
378 unsigned int pool_nr = entry / tbl->poolsize; in get_pool()
380 BUG_ON(pool_nr > tbl->nr_pools); in get_pool()
381 p = &tbl->pools[pool_nr]; in get_pool()
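
get_pool() above maps a freed entry back to the pool that owns it: entries at or past large_pool.start belong to the large pool, otherwise the pool index is entry / poolsize, with a sanity check analogous to the BUG_ON on line 380. A small model; layout and pool_index_for are illustrative names only:

#include <stdio.h>
#include <assert.h>

struct layout {
        unsigned long poolsize;
        unsigned int nr_pools;
        unsigned long largepool_start;
};

static unsigned int pool_index_for(const struct layout *l,
                                   unsigned long entry, int *is_large)
{
        unsigned int nr;

        if (entry >= l->largepool_start) {
                *is_large = 1;
                return 0;               /* index unused for the large pool */
        }
        *is_large = 0;
        nr = entry / l->poolsize;
        assert(nr < l->nr_pools);       /* sanity check, as on line 380 */
        return nr;
}

int main(void)
{
        struct layout l = { .poolsize = 192, .nr_pools = 4,
                            .largepool_start = 768 };
        int large;
        unsigned int idx = pool_index_for(&l, 200, &large);

        printf("entry 200 -> pool %u (large=%d)\n", idx, large);
        pool_index_for(&l, 900, &large);
        printf("entry 900 -> large pool? %d\n", large);
        return 0;
}
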
387 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in __iommu_free() argument
394 entry = dma_addr >> tbl->it_page_shift; in __iommu_free()
395 free_entry = entry - tbl->it_offset; in __iommu_free()
397 pool = get_pool(tbl, free_entry); in __iommu_free()
399 if (!iommu_free_check(tbl, dma_addr, npages)) in __iommu_free()
402 tbl->it_ops->clear(tbl, entry, npages); in __iommu_free()
405 bitmap_clear(tbl->it_map, free_entry, npages); in __iommu_free()
409 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free() argument
412 __iommu_free(tbl, dma_addr, npages); in iommu_free()
418 if (tbl->it_ops->flush) in iommu_free()
419 tbl->it_ops->flush(tbl); in iommu_free()
422 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, in ppc_iommu_map_sg() argument
436 if ((nelems == 0) || !tbl) in ppc_iommu_map_sg()
461 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
463 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE && in ppc_iommu_map_sg()
465 align = PAGE_SHIFT - tbl->it_page_shift; in ppc_iommu_map_sg()
466 entry = iommu_range_alloc(dev, tbl, npages, &handle, in ppc_iommu_map_sg()
467 mask >> tbl->it_page_shift, align); in ppc_iommu_map_sg()
476 "vaddr %lx npages %lu\n", tbl, vaddr, in ppc_iommu_map_sg()
482 entry += tbl->it_offset; in ppc_iommu_map_sg()
483 dma_addr = entry << tbl->it_page_shift; in ppc_iommu_map_sg()
484 dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl)); in ppc_iommu_map_sg()
490 build_fail = tbl->it_ops->set(tbl, entry, npages, in ppc_iommu_map_sg()
491 vaddr & IOMMU_PAGE_MASK(tbl), in ppc_iommu_map_sg()
529 if (tbl->it_ops->flush) in ppc_iommu_map_sg()
530 tbl->it_ops->flush(tbl); in ppc_iommu_map_sg()
553 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl); in ppc_iommu_map_sg()
555 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
556 __iommu_free(tbl, vaddr, npages); in ppc_iommu_map_sg()
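
The ppc_iommu_map_sg() matches show the per-segment arithmetic: counting the IOMMU pages a segment spans, bumping the allocation alignment when the IOMMU page is smaller than the host page (the listing truncates the condition on lines 463-465; in the full source it also checks that vaddr is host-page aligned), and composing the DMA address from the allocated entry plus the offset inside the first IOMMU page. A userspace sketch; the helpers below are local stand-ins for IOMMU_PAGE_SIZE()/IOMMU_PAGE_MASK() and iommu_num_pages(), not the kernel macros:

#include <stdio.h>
#include <stdint.h>

#define MODEL_PAGE_SHIFT 16u            /* host PAGE_SHIFT, e.g. 64K pages */

static uint64_t iommu_page_size(unsigned int it_page_shift)
{
        return 1ULL << it_page_shift;
}

static uint64_t iommu_page_mask(unsigned int it_page_shift)
{
        return ~(iommu_page_size(it_page_shift) - 1);
}

/* How many IOMMU pages does [vaddr, vaddr + slen) span? */
static uint64_t num_iommu_pages(uint64_t vaddr, uint64_t slen,
                                unsigned int it_page_shift)
{
        uint64_t size = iommu_page_size(it_page_shift);
        uint64_t start = vaddr & iommu_page_mask(it_page_shift);

        return (vaddr + slen - start + size - 1) / size;
}

int main(void)
{
        unsigned int it_page_shift = 12;        /* 4K IOMMU pages */
        uint64_t vaddr = 0x10000f00, slen = 0x11000, entry = 42;
        unsigned int align = 0;
        uint64_t dma;

        /* Lines 463-465: ask the allocator for host-page alignment when the
         * IOMMU page is smaller than the host page and the segment is large. */
        if (it_page_shift < MODEL_PAGE_SHIFT &&
            slen >= (1ULL << MODEL_PAGE_SHIFT))
                align = MODEL_PAGE_SHIFT - it_page_shift;

        /* Lines 482-484: DMA address = (entry + it_offset) << it_page_shift,
         * OR'ed with the offset inside the first IOMMU page (it_offset is
         * folded into entry here for brevity). */
        dma = (entry << it_page_shift) |
              (vaddr & ~iommu_page_mask(it_page_shift));

        printf("npages=%llu align=%u dma=0x%llx\n",
               (unsigned long long)num_iommu_pages(vaddr, slen, it_page_shift),
               align, (unsigned long long)dma);
        return 0;
}
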
567 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, in ppc_iommu_unmap_sg() argument
575 if (!tbl) in ppc_iommu_unmap_sg()
586 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_unmap_sg()
587 __iommu_free(tbl, dma_handle, npages); in ppc_iommu_unmap_sg()
595 if (tbl->it_ops->flush) in ppc_iommu_unmap_sg()
596 tbl->it_ops->flush(tbl); in ppc_iommu_unmap_sg()
599 static void iommu_table_clear(struct iommu_table *tbl) in iommu_table_clear() argument
608 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size); in iommu_table_clear()
613 if (tbl->it_ops->get) { in iommu_table_clear()
617 for (index = 0; index < tbl->it_size; index++) { in iommu_table_clear()
618 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset); in iommu_table_clear()
623 __set_bit(index, tbl->it_map); in iommu_table_clear()
628 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { in iommu_table_clear()
632 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; in iommu_table_clear()
633 index < tbl->it_size; index++) in iommu_table_clear()
634 __clear_bit(index, tbl->it_map); in iommu_table_clear()
640 static void iommu_table_reserve_pages(struct iommu_table *tbl, in iommu_table_reserve_pages() argument
651 if (tbl->it_offset == 0) in iommu_table_reserve_pages()
652 set_bit(0, tbl->it_map); in iommu_table_reserve_pages()
654 tbl->it_reserved_start = res_start; in iommu_table_reserve_pages()
655 tbl->it_reserved_end = res_end; in iommu_table_reserve_pages()
659 (tbl->it_offset + tbl->it_size < res_start || in iommu_table_reserve_pages()
660 res_end < tbl->it_offset)) in iommu_table_reserve_pages()
663 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) in iommu_table_reserve_pages()
664 set_bit(i - tbl->it_offset, tbl->it_map); in iommu_table_reserve_pages()
667 static void iommu_table_release_pages(struct iommu_table *tbl) in iommu_table_release_pages() argument
675 if (tbl->it_offset == 0) in iommu_table_release_pages()
676 clear_bit(0, tbl->it_map); in iommu_table_release_pages()
678 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) in iommu_table_release_pages()
679 clear_bit(i - tbl->it_offset, tbl->it_map); in iommu_table_release_pages()
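
iommu_table_reserve_pages() and iommu_table_release_pages() just set or clear bitmap bits for a reserved window, plus entry 0 whenever it_offset is 0 so that DMA address 0 is never handed out (the real reserve path additionally skips ranges that fall outside the table, lines 659-660). A sketch using a plain byte array in place of the kernel bitmap helpers; all names here are illustrative:

#include <stdio.h>
#include <string.h>

#define MODEL_ENTRIES 64

static void set_entry(unsigned char *map, unsigned long bit, int on)
{
        if (on)
                map[bit / 8] |= 1u << (bit % 8);
        else
                map[bit / 8] &= ~(1u << (bit % 8));
}

/* Reserve (on=1) or release (on=0) a window, as on lines 651-664/675-679:
 * entry 0 is pinned whenever the table starts at offset 0. */
static void mark_reserved(unsigned char *map, unsigned long it_offset,
                          unsigned long res_start, unsigned long res_end,
                          int on)
{
        unsigned long i;

        if (it_offset == 0)
                set_entry(map, 0, on);

        for (i = res_start; i < res_end; ++i)
                set_entry(map, i - it_offset, on);
}

int main(void)
{
        unsigned char map[MODEL_ENTRIES / 8];

        memset(map, 0, sizeof(map));
        mark_reserved(map, 0, 8, 12, 1);        /* reserve entries 8..11 */
        printf("byte 1 after reserve: 0x%02x\n", map[1]);
        mark_reserved(map, 0, 8, 12, 0);        /* release them again */
        printf("byte 1 after release: 0x%02x\n", map[1]);
        return 0;
}
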
686 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, in iommu_init_table() argument
695 BUG_ON(!tbl->it_ops); in iommu_init_table()
698 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_init_table()
703 tbl->it_map = page_address(page); in iommu_init_table()
704 memset(tbl->it_map, 0, sz); in iommu_init_table()
706 iommu_table_reserve_pages(tbl, res_start, res_end); in iommu_init_table()
709 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024)) in iommu_init_table()
710 tbl->nr_pools = IOMMU_NR_POOLS; in iommu_init_table()
712 tbl->nr_pools = 1; in iommu_init_table()
715 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools; in iommu_init_table()
717 for (i = 0; i < tbl->nr_pools; i++) { in iommu_init_table()
718 p = &tbl->pools[i]; in iommu_init_table()
720 p->start = tbl->poolsize * i; in iommu_init_table()
722 p->end = p->start + tbl->poolsize; in iommu_init_table()
725 p = &tbl->large_pool; in iommu_init_table()
727 p->start = tbl->poolsize * i; in iommu_init_table()
729 p->end = tbl->it_size; in iommu_init_table()
731 iommu_table_clear(tbl); in iommu_init_table()
739 return tbl; in iommu_init_table()
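
iommu_init_table() above sizes the pools: a DMA window of 1GB or more gets IOMMU_NR_POOLS pools, anything smaller gets one; three quarters of the entries are split evenly across the small pools and the tail of the table becomes the large pool. The arithmetic in isolation, with an illustrative value standing in for IOMMU_NR_POOLS:

#include <stdio.h>

#define MODEL_NR_POOLS 4        /* stand-in for IOMMU_NR_POOLS */

int main(void)
{
        unsigned long it_size = 1UL << 17;      /* 128K entries */
        unsigned int it_page_shift = 16;        /* 64K IOMMU pages */
        unsigned int nr_pools, i;
        unsigned long poolsize;

        /* Lines 709-712: several pools only when the window is >= 1GB. */
        if (((unsigned long long)it_size << it_page_shift) >=
            1ULL * 1024 * 1024 * 1024)
                nr_pools = MODEL_NR_POOLS;
        else
                nr_pools = 1;

        /* Lines 715-729: the small pools share 3/4 of the entries evenly;
         * whatever is left at the top of the table is the large pool. */
        poolsize = (it_size * 3 / 4) / nr_pools;

        for (i = 0; i < nr_pools; i++)
                printf("pool %u: [%lu, %lu)\n",
                       i, poolsize * i, poolsize * i + poolsize);
        printf("large pool: [%lu, %lu)\n", poolsize * nr_pools, it_size);
        return 0;
}

Splitting the window like this lets concurrent allocations on different CPUs hash into different pools and take different locks, with the large pool presumably kept separate so that rarer, big requests do not churn the per-CPU pools.
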
746 struct iommu_table *tbl; in iommu_table_free() local
748 tbl = container_of(kref, struct iommu_table, it_kref); in iommu_table_free()
750 if (tbl->it_ops->free) in iommu_table_free()
751 tbl->it_ops->free(tbl); in iommu_table_free()
753 if (!tbl->it_map) { in iommu_table_free()
754 kfree(tbl); in iommu_table_free()
758 iommu_table_release_pages(tbl); in iommu_table_free()
761 if (!bitmap_empty(tbl->it_map, tbl->it_size)) in iommu_table_free()
765 bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_table_free()
769 free_pages((unsigned long) tbl->it_map, order); in iommu_table_free()
772 kfree(tbl); in iommu_table_free()
775 struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl) in iommu_tce_table_get() argument
777 if (kref_get_unless_zero(&tbl->it_kref)) in iommu_tce_table_get()
778 return tbl; in iommu_tce_table_get()
784 int iommu_tce_table_put(struct iommu_table *tbl) in iommu_tce_table_put() argument
786 if (WARN_ON(!tbl)) in iommu_tce_table_put()
789 return kref_put(&tbl->it_kref, iommu_table_free); in iommu_tce_table_put()
798 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, in iommu_map_page() argument
813 if (tbl) { in iommu_map_page()
814 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); in iommu_map_page()
816 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && in iommu_map_page()
818 align = PAGE_SHIFT - tbl->it_page_shift; in iommu_map_page()
820 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, in iommu_map_page()
821 mask >> tbl->it_page_shift, align, in iommu_map_page()
827 "vaddr %p npages %d\n", tbl, vaddr, in iommu_map_page()
831 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl)); in iommu_map_page()
837 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, in iommu_unmap_page() argument
845 if (tbl) { in iommu_unmap_page()
847 IOMMU_PAGE_SIZE(tbl)); in iommu_unmap_page()
848 iommu_free(tbl, dma_handle, npages); in iommu_unmap_page()
856 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, in iommu_alloc_coherent() argument
880 if (!tbl) in iommu_alloc_coherent()
891 nio_pages = size >> tbl->it_page_shift; in iommu_alloc_coherent()
892 io_order = get_iommu_order(size, tbl); in iommu_alloc_coherent()
893 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, in iommu_alloc_coherent()
894 mask >> tbl->it_page_shift, io_order, 0); in iommu_alloc_coherent()
903 void iommu_free_coherent(struct iommu_table *tbl, size_t size, in iommu_free_coherent() argument
906 if (tbl) { in iommu_free_coherent()
910 nio_pages = size >> tbl->it_page_shift; in iommu_free_coherent()
911 iommu_free(tbl, dma_handle, nio_pages); in iommu_free_coherent()
978 void iommu_flush_tce(struct iommu_table *tbl) in iommu_flush_tce() argument
981 if (tbl->it_ops->flush) in iommu_flush_tce()
982 tbl->it_ops->flush(tbl); in iommu_flush_tce()
1021 struct iommu_table *tbl, in iommu_tce_xchg_no_kill() argument
1028 ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false); in iommu_tce_xchg_no_kill()
1031 !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift, in iommu_tce_xchg_no_kill()
1039 void iommu_tce_kill(struct iommu_table *tbl, in iommu_tce_kill() argument
1042 if (tbl->it_ops->tce_kill) in iommu_tce_kill()
1043 tbl->it_ops->tce_kill(tbl, entry, pages, false); in iommu_tce_kill()
1047 int iommu_take_ownership(struct iommu_table *tbl) in iommu_take_ownership() argument
1049 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_take_ownership()
1059 if (!tbl->it_ops->xchg_no_kill) in iommu_take_ownership()
1062 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1063 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1064 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_take_ownership()
1066 iommu_table_release_pages(tbl); in iommu_take_ownership()
1068 if (!bitmap_empty(tbl->it_map, tbl->it_size)) { in iommu_take_ownership()
1072 iommu_table_reserve_pages(tbl, tbl->it_reserved_start, in iommu_take_ownership()
1073 tbl->it_reserved_end); in iommu_take_ownership()
1075 memset(tbl->it_map, 0xff, sz); in iommu_take_ownership()
1078 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1079 spin_unlock(&tbl->pools[i].lock); in iommu_take_ownership()
1080 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1086 void iommu_release_ownership(struct iommu_table *tbl) in iommu_release_ownership() argument
1088 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_release_ownership()
1090 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_release_ownership()
1091 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1092 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_release_ownership()
1094 memset(tbl->it_map, 0, sz); in iommu_release_ownership()
1096 iommu_table_reserve_pages(tbl, tbl->it_reserved_start, in iommu_release_ownership()
1097 tbl->it_reserved_end); in iommu_release_ownership()
1099 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1100 spin_unlock(&tbl->pools[i].lock); in iommu_release_ownership()
1101 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_release_ownership()
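
iommu_take_ownership() and iommu_release_ownership() above flip the whole bitmap between "all free, kernel-owned" and "all used, handed over", refusing to take ownership while any mapping is live; the real code does this with every pool lock nested under the large-pool lock and re-reserves the reserved window afterwards. A lock-free userspace model of just that bitmap state machine:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stddef.h>

#define MODEL_ENTRIES 64
#define MODEL_SZ ((MODEL_ENTRIES + 7) >> 3)     /* bytes, as on line 1049 */

static bool map_empty(const unsigned char *map)
{
        for (size_t i = 0; i < MODEL_SZ; i++)
                if (map[i])
                        return false;
        return true;
}

/* Take ownership only if no mapping is live, then mark every entry used
 * (line 1075) so the kernel allocator cannot touch the table any more. */
static int take_ownership(unsigned char *map)
{
        if (!map_empty(map))
                return -1;      /* the real code fails here with a busy error */
        memset(map, 0xff, MODEL_SZ);
        return 0;
}

/* Give the table back: clear the map (line 1094); the real code then
 * re-reserves it_reserved_start..it_reserved_end. */
static void release_ownership(unsigned char *map)
{
        memset(map, 0, MODEL_SZ);
}

int main(void)
{
        unsigned char map[MODEL_SZ];

        memset(map, 0, sizeof(map));
        map[2] = 0x10;                          /* pretend one entry is mapped */
        printf("take with live mapping: %d\n", take_ownership(map));
        map[2] = 0;
        printf("take when table empty:  %d\n", take_ownership(map));
        release_ownership(map);
        printf("empty after release:    %d\n", map_empty(map));
        return 0;
}
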