Lines matching refs: tbl (PowerPC IOMMU support code; each match lists the source line number, the matching text, and its enclosing function)
174 struct iommu_table *tbl, in iommu_range_alloc() argument
208 pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
211 pool = &(tbl->large_pool); in iommu_range_alloc()
213 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
233 if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
234 limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
241 pool = &(tbl->pools[0]); in iommu_range_alloc()
256 n = iommu_area_alloc(tbl->it_map, limit, start, npages, in iommu_range_alloc()
257 tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT, in iommu_range_alloc()
266 } else if (pass <= tbl->nr_pools) { in iommu_range_alloc()
269 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1); in iommu_range_alloc()
270 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
291 pool->hint = (end + tbl->it_blocksize - 1) & in iommu_range_alloc()
292 ~(tbl->it_blocksize - 1); in iommu_range_alloc()
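
Taken together, the iommu_range_alloc() matches above show the pool machinery: a per-CPU hash picks one of nr_pools sub-pools (nr_pools is a power of two, so the AND works as a modulus), pool 0 is the fallback when a device's DMA mask tightens the limit, and the next-search hint is rounded up to it_blocksize. A minimal sketch of that selection and rounding, with the iommu_table/iommu_pool structs trimmed to the fields this logic touches:

#include <stdio.h>

#define NR_POOLS 4 /* must be a power of two for the mask below */

struct pool { unsigned long start, end, hint; };

struct table {
    unsigned long nr_pools;
    unsigned long it_blocksize;
    struct pool pools[NR_POOLS];
    struct pool large_pool;
};

/* Pick a pool from a per-CPU hash, as iommu_range_alloc() does. */
static struct pool *pick_pool(struct table *tbl, unsigned int cpu_hash,
                              int largealloc)
{
    if (largealloc)
        return &tbl->large_pool;
    return &tbl->pools[cpu_hash & (tbl->nr_pools - 1)];
}

/* Round the next search hint up to a block boundary (power-of-two size). */
static unsigned long round_hint(unsigned long end, unsigned long blocksize)
{
    return (end + blocksize - 1) & ~(blocksize - 1);
}

int main(void)
{
    struct table tbl = { .nr_pools = NR_POOLS, .it_blocksize = 16 };

    struct pool *p = pick_pool(&tbl, 7, 0);
    printf("picked pool %ld\n", (long)(p - tbl.pools));   /* 7 & 3 == 3 */
    printf("hint after end=21: %lu\n",
           round_hint(21, tbl.it_blocksize));             /* 32 */
    return 0;
}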
304 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, in iommu_alloc() argument
314 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
319 entry += tbl->it_offset; /* Offset into real TCE table */ in iommu_alloc()
323 build_fail = ppc_md.tce_build(tbl, entry, npages, in iommu_alloc()
333 __iommu_free(tbl, ret, npages); in iommu_alloc()
339 ppc_md.tce_flush(tbl); in iommu_alloc()
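
The iommu_alloc() matches trace the allocate-build-flush sequence: reserve a bitmap range, add it_offset to address the real TCE table, program the entries through ppc_md.tce_build(), undo the reservation if that fails, and flush. A sketch of that shape, with the range allocator and the ppc_md hooks stubbed out as hypothetical stand-ins:

#include <stdio.h>

#define BAD_ADDR ((unsigned long)-1)

/* Hypothetical stand-ins for the range allocator and the ppc_md hooks. */
static unsigned long range_alloc(unsigned long npages) { return 100; }
static void range_free(unsigned long entry, unsigned long npages) { }
static int tce_build(unsigned long entry, unsigned long npages) { return 0; }
static void tce_flush(void) { }

static unsigned long my_iommu_alloc(unsigned long it_offset,
                                    unsigned long npages)
{
    unsigned long entry = range_alloc(npages);
    if (entry == BAD_ADDR)
        return BAD_ADDR;

    entry += it_offset; /* offset into the real TCE table */

    if (tce_build(entry, npages)) {
        /* Undo the bitmap reservation if the hardware build fails. */
        range_free(entry - it_offset, npages);
        return BAD_ADDR;
    }

    tce_flush(); /* make the new entries visible to the hardware */
    return entry;
}

int main(void)
{
    printf("entry = %lu\n", my_iommu_alloc(4096, 2));
    return 0;
}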
347 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free_check() argument
353 free_entry = entry - tbl->it_offset; in iommu_free_check()
355 if (((free_entry + npages) > tbl->it_size) || in iommu_free_check()
356 (entry < tbl->it_offset)) { in iommu_free_check()
361 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl); in iommu_free_check()
362 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno); in iommu_free_check()
363 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size); in iommu_free_check()
364 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset); in iommu_free_check()
365 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index); in iommu_free_check()
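
iommu_free_check() refuses a free whose range falls outside [it_offset, it_offset + it_size) and prints the table's identity for debugging. The bounds test, sketched (note the unsigned subtraction: an entry below it_offset wraps free_entry around, so the first comparison catches it too, but the explicit second test keeps the intent readable):

#include <stdbool.h>
#include <stdio.h>

/* Reject frees outside [it_offset, it_offset + it_size), as
 * iommu_free_check() does before touching the bitmap. */
static bool free_check(unsigned long entry, unsigned long npages,
                       unsigned long it_offset, unsigned long it_size)
{
    unsigned long free_entry = entry - it_offset;

    if (free_entry + npages > it_size || entry < it_offset) {
        fprintf(stderr, "iommu_free: invalid entry 0x%lx\n", entry);
        return false;
    }
    return true;
}

int main(void)
{
    printf("%d\n", free_check(10, 2, 8, 64));  /* in range: 1 */
    printf("%d\n", free_check(4, 2, 8, 64));   /* below offset: 0 */
    return 0;
}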
375 static struct iommu_pool *get_pool(struct iommu_table *tbl, in get_pool() argument
379 unsigned long largepool_start = tbl->large_pool.start; in get_pool()
383 p = &tbl->large_pool; in get_pool()
385 unsigned int pool_nr = entry / tbl->poolsize; in get_pool()
387 BUG_ON(pool_nr > tbl->nr_pools); in get_pool()
388 p = &tbl->pools[pool_nr]; in get_pool()
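
get_pool() inverts the layout set up at init time: entries at or beyond large_pool.start belong to the large pool, and everything below it indexes a small pool by entry / poolsize. A sketch with a trimmed table struct:

#include <stdio.h>
#include <assert.h>

struct pool { unsigned long start, end; };

struct table {
    unsigned long nr_pools, poolsize;
    struct pool pools[4];
    struct pool large_pool;
};

/* Entries past large_pool.start go to the large pool; the rest index
 * the small pools by entry / poolsize, as get_pool() does. */
static struct pool *get_pool(struct table *tbl, unsigned long entry)
{
    if (entry >= tbl->large_pool.start)
        return &tbl->large_pool;

    unsigned long pool_nr = entry / tbl->poolsize;
    assert(pool_nr < tbl->nr_pools);
    return &tbl->pools[pool_nr];
}

int main(void)
{
    struct table tbl = { .nr_pools = 4, .poolsize = 12,
                         .large_pool = { .start = 48, .end = 64 } };
    printf("entry 13 -> pool %ld\n",
           (long)(get_pool(&tbl, 13) - tbl.pools));            /* pool 1 */
    printf("entry 50 is large: %d\n",
           get_pool(&tbl, 50) == &tbl.large_pool);             /* 1 */
    return 0;
}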
394 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in __iommu_free() argument
402 free_entry = entry - tbl->it_offset; in __iommu_free()
404 pool = get_pool(tbl, free_entry); in __iommu_free()
406 if (!iommu_free_check(tbl, dma_addr, npages)) in __iommu_free()
409 ppc_md.tce_free(tbl, entry, npages); in __iommu_free()
412 bitmap_clear(tbl->it_map, free_entry, npages); in __iommu_free()
416 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free() argument
419 __iommu_free(tbl, dma_addr, npages); in iommu_free()
426 ppc_md.tce_flush(tbl); in iommu_free()
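
__iommu_free() validates the range, clears the hardware TCEs via ppc_md.tce_free(), and clears the bitmap; iommu_free() wraps it and adds a ppc_md.tce_flush() so the hardware drops the mappings before the pages are reused. The same shape, with the TCE hooks stubbed and a one-word toy bitmap standing in for it_map:

#include <stdio.h>

/* A toy one-word allocation bitmap standing in for tbl->it_map. */
static unsigned long it_map;

static void bitmap_clear_bits(unsigned long start, unsigned long npages)
{
    for (unsigned long i = 0; i < npages; i++)
        it_map &= ~(1UL << (start + i));
}

/* __iommu_free(): validate, clear the hardware TCEs, clear the bitmap.
 * iommu_free() proper adds a tce_flush() afterwards. Hooks stubbed. */
static void my_iommu_free(unsigned long entry, unsigned long npages,
                          unsigned long it_offset)
{
    unsigned long free_entry = entry - it_offset;
    /* tce_free(entry, npages) would go here */
    bitmap_clear_bits(free_entry, npages);
    /* tce_flush() follows in iommu_free() proper */
}

int main(void)
{
    it_map = 0xfUL;               /* entries 0..3 in use */
    my_iommu_free(2, 2, 0);       /* free entries 2..3 */
    printf("it_map = 0x%lx\n", it_map); /* 0x3 */
    return 0;
}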
429 int iommu_map_sg(struct device *dev, struct iommu_table *tbl, in iommu_map_sg() argument
443 if ((nelems == 0) || !tbl) in iommu_map_sg()
473 entry = iommu_range_alloc(dev, tbl, npages, &handle, in iommu_map_sg()
482 "vaddr %lx npages %lu\n", tbl, vaddr, in iommu_map_sg()
488 entry += tbl->it_offset; in iommu_map_sg()
496 build_fail = ppc_md.tce_build(tbl, entry, npages, in iommu_map_sg()
536 ppc_md.tce_flush(tbl); in iommu_map_sg()
562 __iommu_free(tbl, vaddr, npages); in iommu_map_sg()
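
The iommu_map_sg() matches show per-element allocation (with it_offset applied and ppc_md.tce_build() called per segment) and, on line 562, the failure path walking back through __iommu_free(). A sketch of that map-then-unwind pattern, with hypothetical per-segment helpers:

#include <stdio.h>

#define DMA_ERROR ((unsigned long)-1)

/* Hypothetical per-segment map/unmap stand-ins. */
static unsigned long map_one(unsigned long vaddr, unsigned long npages)
{
    return vaddr == 0 ? DMA_ERROR : vaddr | 0x1000;
}
static void unmap_one(unsigned long handle, unsigned long npages) { }

/* Map each scatterlist element in turn; on failure, walk back over
 * what was already mapped and free it, as iommu_map_sg() does. */
static int map_sg(const unsigned long *vaddrs, unsigned long *handles,
                  int nelems)
{
    int i;

    for (i = 0; i < nelems; i++) {
        handles[i] = map_one(vaddrs[i], 1);
        if (handles[i] == DMA_ERROR)
            goto unwind;
    }
    return nelems;

unwind:
    while (--i >= 0)
        unmap_one(handles[i], 1);
    return 0;
}

int main(void)
{
    unsigned long v[3] = { 0x10, 0x20, 0 }, h[3];
    printf("mapped %d of 3\n", map_sg(v, h, 3)); /* third fails, unwinds */
    return 0;
}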
573 void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, in iommu_unmap_sg() argument
581 if (!tbl) in iommu_unmap_sg()
593 __iommu_free(tbl, dma_handle, npages); in iommu_unmap_sg()
602 ppc_md.tce_flush(tbl); in iommu_unmap_sg()
605 static void iommu_table_clear(struct iommu_table *tbl) in iommu_table_clear() argument
614 ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size); in iommu_table_clear()
623 for (index = 0; index < tbl->it_size; index++) { in iommu_table_clear()
624 tceval = ppc_md.tce_get(tbl, index + tbl->it_offset); in iommu_table_clear()
629 __set_bit(index, tbl->it_map); in iommu_table_clear()
634 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { in iommu_table_clear()
638 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; in iommu_table_clear()
639 index < tbl->it_size; index++) in iommu_table_clear()
640 __clear_bit(index, tbl->it_map); in iommu_table_clear()
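
In the kdump case, iommu_table_clear() reads existing entries back with ppc_md.tce_get(), sets their bits in it_map so live mappings from the crashed kernel are not reused, and then, if fewer than KDUMP_MIN_TCE_ENTRIES entries remain free, force-clears the tail of the map (even over used entries). The reservation arithmetic, sketched with a stand-in constant and a one-word bitmap:

#include <stdio.h>

#define KDUMP_MIN_TCE_ENTRIES 16 /* stand-in value for the sketch */

/* Toy bitmap helpers over one word; the real code uses tbl->it_map. */
static unsigned long it_map;
static void set_bit_n(unsigned long n)   { it_map |=  (1UL << n); }
static void clear_bit_n(unsigned long n) { it_map &= ~(1UL << n); }

/* After preserving live kdump TCEs, guarantee at least
 * KDUMP_MIN_TCE_ENTRIES free slots by clearing the tail of the map,
 * clobbering used entries there if it must. */
static void reserve_min_free(unsigned long it_size, unsigned long used)
{
    if (it_size - used < KDUMP_MIN_TCE_ENTRIES) {
        printf("forcing %d free entries\n", KDUMP_MIN_TCE_ENTRIES);
        for (unsigned long i = it_size - KDUMP_MIN_TCE_ENTRIES;
             i < it_size; i++)
            clear_bit_n(i);
    }
}

int main(void)
{
    unsigned long it_size = 32, used = 20;
    for (unsigned long i = 0; i < used; i++)
        set_bit_n(i);
    reserve_min_free(it_size, used); /* only 12 free: clears 16..31 */
    printf("it_map = 0x%lx\n", it_map); /* 0xffff */
    return 0;
}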
650 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) in iommu_init_table() argument
659 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_init_table()
664 tbl->it_map = page_address(page); in iommu_init_table()
665 memset(tbl->it_map, 0, sz); in iommu_init_table()
672 if (tbl->it_offset == 0) in iommu_init_table()
673 set_bit(0, tbl->it_map); in iommu_init_table()
676 if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024)) in iommu_init_table()
677 tbl->nr_pools = IOMMU_NR_POOLS; in iommu_init_table()
679 tbl->nr_pools = 1; in iommu_init_table()
682 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools; in iommu_init_table()
684 for (i = 0; i < tbl->nr_pools; i++) { in iommu_init_table()
685 p = &tbl->pools[i]; in iommu_init_table()
687 p->start = tbl->poolsize * i; in iommu_init_table()
689 p->end = p->start + tbl->poolsize; in iommu_init_table()
692 p = &tbl->large_pool; in iommu_init_table()
694 p->start = tbl->poolsize * i; in iommu_init_table()
696 p->end = tbl->it_size; in iommu_init_table()
698 iommu_table_clear(tbl); in iommu_init_table()
706 return tbl; in iommu_init_table()
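
iommu_init_table() allocates the bitmap, reserves entry 0 when it_offset is 0 so DMA address zero is never handed out, and carves up the table: one pool for small tables, IOMMU_NR_POOLS pools once the table covers at least 1 GB of DMA space, with the small pools sharing 3/4 of the entries and the large pool taking the remainder up to it_size. The sizing arithmetic, sketched with stand-in constants:

#include <stdio.h>

#define IOMMU_NR_POOLS   4   /* stand-in; a power of two */
#define IOMMU_PAGE_SHIFT 12  /* stand-in for 4 KB IOMMU pages */

struct pool { unsigned long start, end; };

int main(void)
{
    unsigned long it_size = 1UL << 18; /* entries: 1 GB at 4 KB pages */
    struct pool pools[IOMMU_NR_POOLS], large;
    unsigned long nr_pools, poolsize;
    unsigned long i;

    /* Tables covering >= 1 GB of DMA space get multiple pools. */
    nr_pools = (it_size << IOMMU_PAGE_SHIFT) >= (1UL << 30)
                   ? IOMMU_NR_POOLS : 1;

    /* Small pools share 3/4 of the entries; the large pool gets the rest. */
    poolsize = (it_size * 3 / 4) / nr_pools;
    for (i = 0; i < nr_pools; i++) {
        pools[i].start = poolsize * i;
        pools[i].end   = pools[i].start + poolsize;
    }
    large.start = poolsize * i;
    large.end   = it_size;

    printf("%lu pools of %lu entries, large pool [%lu, %lu)\n",
           nr_pools, poolsize, large.start, large.end);
    return 0;
}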
709 void iommu_free_table(struct iommu_table *tbl, const char *node_name) in iommu_free_table() argument
714 if (!tbl || !tbl->it_map) { in iommu_free_table()
724 if (tbl->it_offset == 0) in iommu_free_table()
725 clear_bit(0, tbl->it_map); in iommu_free_table()
728 if (!bitmap_empty(tbl->it_map, tbl->it_size)) in iommu_free_table()
732 bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_free_table()
736 free_pages((unsigned long) tbl->it_map, order); in iommu_free_table()
739 kfree(tbl); in iommu_free_table()
747 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, in iommu_map_page() argument
763 if (tbl) { in iommu_map_page()
769 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, in iommu_map_page()
775 "vaddr %p npages %d\n", tbl, vaddr, in iommu_map_page()
785 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, in iommu_unmap_page() argument
793 if (tbl) { in iommu_unmap_page()
795 iommu_free(tbl, dma_handle, npages); in iommu_unmap_page()
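
iommu_map_page()/iommu_unmap_page() convert a CPU buffer to a page count before calling iommu_alloc()/iommu_free(); the count has to include the offset of vaddr inside its first IOMMU page. The matches above do not show that computation, so the following is only a sketch of the usual iommu_num_pages()-style arithmetic with a stand-in page size:

#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12 /* stand-in for 4 KB IOMMU pages */
#define IOMMU_PAGE_SIZE  (1UL << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK  (~(IOMMU_PAGE_SIZE - 1))

/* Number of IOMMU pages a buffer spans, accounting for the offset of
 * vaddr within its first page. */
static unsigned long num_pages(unsigned long vaddr, unsigned long size)
{
    unsigned long first = vaddr & IOMMU_PAGE_MASK;
    unsigned long last  = (vaddr + size - 1) & IOMMU_PAGE_MASK;
    return ((last - first) >> IOMMU_PAGE_SHIFT) + 1;
}

int main(void)
{
    /* 200 bytes straddling a page boundary need two entries. */
    printf("%lu\n", num_pages(0xff80, 200));  /* 2 */
    printf("%lu\n", num_pages(0x1000, 4096)); /* 1 */
    return 0;
}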
803 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, in iommu_alloc_coherent() argument
827 if (!tbl) in iommu_alloc_coherent()
840 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, in iommu_alloc_coherent()
850 void iommu_free_coherent(struct iommu_table *tbl, size_t size, in iommu_free_coherent() argument
853 if (tbl) { in iommu_free_coherent()
858 iommu_free(tbl, dma_handle, nio_pages); in iommu_free_coherent()
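
iommu_alloc_coherent() rounds the request up to whole pages twice: nio_pages counts IOMMU pages to map through iommu_alloc() (line 840, DMA_BIDIRECTIONAL), while the backing allocation is sized by a buddy order; iommu_free_coherent() recomputes nio_pages for the matching iommu_free(). That rounding is not visible in the matches above, so this sketch of the presumed arithmetic uses stand-in shifts:

#include <stdio.h>

#define PAGE_SHIFT       12 /* stand-in CPU page size */
#define IOMMU_PAGE_SHIFT 12 /* stand-in IOMMU page size */

/* Round a byte count to whole pages: nio_pages counts IOMMU pages to
 * map, order sizes the buddy allocation backing the buffer. */
int main(void)
{
    unsigned long size = 6000;

    unsigned long nio_pages = (size + (1UL << IOMMU_PAGE_SHIFT) - 1)
                                  >> IOMMU_PAGE_SHIFT;
    unsigned long npages = (size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
    unsigned long order = 0;
    while ((1UL << order) < npages)
        order++; /* get_order(): smallest order covering npages */

    printf("nio_pages=%lu order=%lu\n", nio_pages, order); /* 2, 1 */
    return 0;
}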