Lines matching refs: arena — cross-reference hits in arch/alpha/kernel/pci_iommu.c, one per line, each prefixed with its file line number and suffixed with the enclosing function and the kind of reference (local, argument).
62 struct pci_iommu_arena *arena; in iommu_arena_new_node() local
76 arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena)); in iommu_arena_new_node()
77 if (!NODE_DATA(nid) || !arena) { in iommu_arena_new_node()
81 arena = alloc_bootmem(sizeof(*arena)); in iommu_arena_new_node()
84 arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0); in iommu_arena_new_node()
85 if (!NODE_DATA(nid) || !arena->ptes) { in iommu_arena_new_node()
89 arena->ptes = __alloc_bootmem(mem_size, align, 0); in iommu_arena_new_node()
94 arena = alloc_bootmem(sizeof(*arena)); in iommu_arena_new_node()
95 arena->ptes = __alloc_bootmem(mem_size, align, 0); in iommu_arena_new_node()
99 spin_lock_init(&arena->lock); in iommu_arena_new_node()
100 arena->hose = hose; in iommu_arena_new_node()
101 arena->dma_base = base; in iommu_arena_new_node()
102 arena->size = window_size; in iommu_arena_new_node()
103 arena->next_entry = 0; in iommu_arena_new_node()
107 arena->align_entry = 1; in iommu_arena_new_node()
109 return arena; in iommu_arena_new_node()
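What the lines above build, modeled in user space: the kernel allocates the descriptor and its PTE table from bootmem, trying the device's NUMA node first and falling back to a generic allocation (lines 76 through 95); this sketch substitutes calloc and keeps only the fields the listing touches. All model_ names are this sketch's own.

    #include <stdlib.h>

    #define MODEL_PAGE_SHIFT 13                  /* Alpha uses 8 KB pages */
    #define MODEL_PAGE_SIZE  (1UL << MODEL_PAGE_SHIFT)

    struct model_arena {
        unsigned long *ptes;        /* one translation entry per window page */
        unsigned long dma_base;     /* bus address where the window starts */
        unsigned long size;         /* window size in bytes */
        long next_entry;            /* rotating allocation hint */
        long align_entry;           /* minimum alignment, in pages */
    };

    static struct model_arena *model_arena_new(unsigned long base,
                                               unsigned long window_size)
    {
        struct model_arena *arena = calloc(1, sizeof(*arena));

        if (!arena)
            return NULL;
        arena->ptes = calloc(window_size >> MODEL_PAGE_SHIFT,
                             sizeof(*arena->ptes));
        if (!arena->ptes) {
            free(arena);
            return NULL;
        }
        arena->dma_base = base;     /* matches line 101 */
        arena->size = window_size;  /* matches line 102 */
        arena->next_entry = 0;      /* matches line 103 */
        arena->align_entry = 1;     /* matches line 107 */
        return arena;
    }

The spinlock initialized at line 99 has no counterpart here; the model is single-threaded.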
121 iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena, in iommu_arena_find_pages() argument
130 base = arena->dma_base >> PAGE_SHIFT; in iommu_arena_find_pages()
139 ptes = arena->ptes; in iommu_arena_find_pages()
140 nent = arena->size >> PAGE_SHIFT; in iommu_arena_find_pages()
141 p = ALIGN(arena->next_entry, mask + 1); in iommu_arena_find_pages()
163 alpha_mv.mv_pci_tbi(arena->hose, 0, -1); in iommu_arena_find_pages()
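Lines 121 through 163 implement a first-fit search over the PTE array: start at next_entry rounded up to the requested alignment (line 141), skip past any in-use entry, and if the top of the window is reached without finding n free entries, wrap once to the bottom, flushing the IOMMU TLB (line 163) so stale translations for previously freed entries cannot resurface. A sketch under the model above; the real function also honors the device's DMA segment boundary via the base computed at line 130, which this model omits.

    #define MODEL_ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    static long model_find_pages(struct model_arena *arena, long n,
                                 unsigned long mask)
    {
        long nent = arena->size >> MODEL_PAGE_SHIFT;
        long p = MODEL_ALIGN(arena->next_entry, mask + 1);
        long i = 0;
        int pass = 0;

    again:
        while (i < n && p + i < nent) {
            if (arena->ptes[p + i]) {
                /* hit an in-use entry: restart just past it, re-aligned */
                p = MODEL_ALIGN(p + i + 1, mask + 1);
                i = 0;
            } else {
                i++;
            }
        }
        if (i < n) {
            if (pass++ == 0) {
                /* second pass from the bottom of the window; the kernel
                   flushes the IOMMU TLB here (mv_pci_tbi, line 163) */
                p = 0;
                i = 0;
                goto again;
            }
            return -1;              /* window exhausted */
        }
        return p;                   /* first entry of the free run */
    }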
179 iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n, in iommu_arena_alloc() argument
186 spin_lock_irqsave(&arena->lock, flags); in iommu_arena_alloc()
189 ptes = arena->ptes; in iommu_arena_alloc()
190 mask = max(align, arena->align_entry) - 1; in iommu_arena_alloc()
191 p = iommu_arena_find_pages(dev, arena, n, mask); in iommu_arena_alloc()
193 spin_unlock_irqrestore(&arena->lock, flags); in iommu_arena_alloc()
204 arena->next_entry = p + n; in iommu_arena_alloc()
205 spin_unlock_irqrestore(&arena->lock, flags); in iommu_arena_alloc()
211 iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n) in iommu_arena_free() argument
216 p = arena->ptes + ofs; in iommu_arena_free()
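The allocate/free pair around these lines, in model form: allocation takes the arena lock, runs the search with the larger of the caller's alignment and the arena's own align_entry (line 190), stamps every entry in the found run non-zero so concurrent scans and the IOMMU itself treat it as occupied until the caller installs real translations, and advances next_entry (line 204); freeing zeroes the run. The lock is elided in this single-threaded sketch, and 0x2 is a stand-in for the kernel's invalid-PTE marker.

    static long model_arena_alloc(struct model_arena *arena, long n, long align)
    {
        unsigned long mask;
        long p, i;

        /* honor both the caller's alignment and the arena's minimum */
        mask = (align > arena->align_entry ? align : arena->align_entry) - 1;
        p = model_find_pages(arena, n, mask);
        if (p < 0)
            return -1;
        for (i = 0; i < n; ++i)
            arena->ptes[p + i] = 0x2;   /* stand-in for the invalid marker */
        arena->next_entry = p + n;
        return p;
    }

    static void model_arena_free(struct model_arena *arena, long ofs, long n)
    {
        long i;

        for (i = 0; i < n; ++i)
            arena->ptes[ofs + i] = 0;   /* zero means free to the search */
    }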
256 struct pci_iommu_arena *arena; in pci_map_single_1() local
296 arena = hose->sg_pci; in pci_map_single_1()
297 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in pci_map_single_1()
298 arena = hose->sg_isa; in pci_map_single_1()
305 dma_ofs = iommu_arena_alloc(dev, arena, npages, align); in pci_map_single_1()
314 arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr); in pci_map_single_1()
316 ret = arena->dma_base + dma_ofs * PAGE_SIZE; in pci_map_single_1()
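pci_map_single_1 ties the pieces together: pick a window the device can reach (lines 296 through 298), size the allocation to cover the buffer including its offset within the first page, fill one PTE per page (line 314, via mk_iommu_pte), and return a bus address inside the window (line 316). A model of the same arithmetic; the direct-map and DAC fallback paths are omitted, and the PTE here is just a page frame number rather than the chipset format.

    static unsigned long model_map_single(struct model_arena *arena,
                                          unsigned long paddr, size_t size)
    {
        unsigned long offset = paddr & (MODEL_PAGE_SIZE - 1);
        long npages = (offset + size + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT;
        long dma_ofs = model_arena_alloc(arena, npages, 0);
        long i;

        if (dma_ofs < 0)
            return 0;               /* window full */
        paddr &= ~(MODEL_PAGE_SIZE - 1);
        for (i = 0; i < npages; ++i, paddr += MODEL_PAGE_SIZE)
            arena->ptes[dma_ofs + i] = paddr >> MODEL_PAGE_SHIFT;
        return arena->dma_base + dma_ofs * MODEL_PAGE_SIZE + offset;
    }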
377 struct pci_iommu_arena *arena; in alpha_pci_unmap_page() local
398 arena = hose->sg_pci; in alpha_pci_unmap_page()
399 if (!arena || dma_addr < arena->dma_base) in alpha_pci_unmap_page()
400 arena = hose->sg_isa; in alpha_pci_unmap_page()
402 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; in alpha_pci_unmap_page()
403 if (dma_ofs * PAGE_SIZE >= arena->size) { in alpha_pci_unmap_page()
406 dma_addr, arena->dma_base, arena->size); in alpha_pci_unmap_page()
413 spin_lock_irqsave(&arena->lock, flags); in alpha_pci_unmap_page()
415 iommu_arena_free(arena, dma_ofs, npages); in alpha_pci_unmap_page()
420 if (dma_ofs >= arena->next_entry) in alpha_pci_unmap_page()
423 spin_unlock_irqrestore(&arena->lock, flags); in alpha_pci_unmap_page()
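The unmap path inverts the mapping: recover the entry index from the bus address (line 402), sanity-check that it falls inside the window (line 403), free the run under the lock, and flush the IOMMU TLB when the freed entries sit at or above next_entry (line 420), since the next wraparound flush has not covered them yet. In model form, with the lock and the flush reduced to comments:

    static void model_unmap(struct model_arena *arena,
                            unsigned long dma_addr, size_t size)
    {
        long dma_ofs = (dma_addr - arena->dma_base) >> MODEL_PAGE_SHIFT;
        long npages = ((dma_addr & (MODEL_PAGE_SIZE - 1)) + size
                       + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT;

        if ((unsigned long)dma_ofs * MODEL_PAGE_SIZE >= arena->size)
            return;                 /* not from this window; the kernel
                                       reports a bogus address here */
        model_arena_free(arena, dma_ofs, npages);
        /* the kernel flushes the IOMMU TLB here if dma_ofs >= next_entry */
    }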
555 struct scatterlist *out, struct pci_iommu_arena *arena, in sg_fill() argument
596 dma_ofs = iommu_arena_alloc(dev, arena, npages, 0); in sg_fill()
605 return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed); in sg_fill()
608 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; in sg_fill()
616 ptes = &arena->ptes[dma_ofs]; in sg_fill()
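sg_fill maps one "leader" scatterlist entry plus everything coalesced behind it through a single arena allocation: the bus address is the window base plus the allocated offset plus the leader's offset within its first page (line 608), and each physically contiguous run then contributes one PTE per page it touches (line 616 onward). On allocation failure the whole window is flushed and the call retries itself once (line 605). A sketch of the fill step, reusing the model types above; struct model_run stands in for struct scatterlist, and the PTE is again a bare page frame number.

    struct model_run { unsigned long paddr; size_t len; };

    static void model_fill_ptes(struct model_arena *arena, long dma_ofs,
                                const struct model_run *run, int nruns)
    {
        unsigned long *pte = &arena->ptes[dma_ofs];
        int i;

        for (i = 0; i < nruns; ++i) {
            unsigned long p = run[i].paddr & ~(MODEL_PAGE_SIZE - 1);
            unsigned long end = run[i].paddr + run[i].len;

            for (; p < end; p += MODEL_PAGE_SIZE)
                *pte++ = p >> MODEL_PAGE_SHIFT;     /* model PTE */
        }
    }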
659 struct pci_iommu_arena *arena; in alpha_pci_map_sg() local
686 arena = hose->sg_pci; in alpha_pci_map_sg()
687 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in alpha_pci_map_sg()
688 arena = hose->sg_isa; in alpha_pci_map_sg()
691 arena = NULL; in alpha_pci_map_sg()
700 if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0) in alpha_pci_map_sg()
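The window-selection test shared by the map paths, in model form: prefer the scatter-gather PCI window, but only if it lies entirely below the device's DMA limit; otherwise fall back to the ISA window. Line 691 shows the DAC case, where no arena is needed because the device can address memory directly.

    static struct model_arena *model_pick_window(struct model_arena *sg_pci,
                                                 struct model_arena *sg_isa,
                                                 unsigned long max_dma)
    {
        struct model_arena *arena = sg_pci;

        /* mirrors lines 686-688 (and 296-298, 749-751) */
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
            arena = sg_isa;
        return arena;
    }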
737 struct pci_iommu_arena *arena; in alpha_pci_unmap_sg() local
749 arena = hose->sg_pci; in alpha_pci_unmap_sg()
750 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in alpha_pci_unmap_sg()
751 arena = hose->sg_isa; in alpha_pci_unmap_sg()
755 spin_lock_irqsave(&arena->lock, flags); in alpha_pci_unmap_sg()
787 ofs = (addr - arena->dma_base) >> PAGE_SHIFT; in alpha_pci_unmap_sg()
788 iommu_arena_free(arena, ofs, npages); in alpha_pci_unmap_sg()
798 if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry) in alpha_pci_unmap_sg()
801 spin_unlock_irqrestore(&arena->lock, flags); in alpha_pci_unmap_sg()
813 struct pci_iommu_arena *arena; in alpha_pci_supported() local
825 arena = hose->sg_isa; in alpha_pci_supported()
826 if (arena && arena->dma_base + arena->size - 1 <= mask) in alpha_pci_supported()
828 arena = hose->sg_pci; in alpha_pci_supported()
829 if (arena && arena->dma_base + arena->size - 1 <= mask) in alpha_pci_supported()
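The supported-mask test reads directly off the listing: a device mask is usable if either window fits entirely below it, so every bus address the window can hand out stays reachable. In model form (the kernel also accepts masks covering the direct-map range, which this sketch ignores):

    #include <stdbool.h>

    static bool model_mask_supported(const struct model_arena *sg_isa,
                                     const struct model_arena *sg_pci,
                                     unsigned long mask)
    {
        if (sg_isa && sg_isa->dma_base + sg_isa->size - 1 <= mask)
            return true;            /* line 826 */
        if (sg_pci && sg_pci->dma_base + sg_pci->size - 1 <= mask)
            return true;            /* line 829 */
        return false;
    }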
844 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) in iommu_reserve() argument
850 if (!arena) return -EINVAL; in iommu_reserve()
852 spin_lock_irqsave(&arena->lock, flags); in iommu_reserve()
855 ptes = arena->ptes; in iommu_reserve()
856 p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask); in iommu_reserve()
858 spin_unlock_irqrestore(&arena->lock, flags); in iommu_reserve()
868 arena->next_entry = p + pg_count; in iommu_reserve()
869 spin_unlock_irqrestore(&arena->lock, flags); in iommu_reserve()
875 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) in iommu_release() argument
880 if (!arena) return -EINVAL; in iommu_release()
882 ptes = arena->ptes; in iommu_release()
889 iommu_arena_free(arena, pg_start, pg_count); in iommu_release()
894 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, in iommu_bind() argument
901 if (!arena) return -EINVAL; in iommu_bind()
903 spin_lock_irqsave(&arena->lock, flags); in iommu_bind()
905 ptes = arena->ptes; in iommu_bind()
909 spin_unlock_irqrestore(&arena->lock, flags); in iommu_bind()
917 spin_unlock_irqrestore(&arena->lock, flags); in iommu_bind()
923 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) in iommu_unbind() argument
928 if (!arena) return -EINVAL; in iommu_unbind()
930 p = arena->ptes + pg_start; in iommu_unbind()
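iommu_reserve through iommu_unbind back the AGP GART: reserve claims a run and stamps it with a reserved marker so ordinary allocations skip it, bind overwrites marked entries with real translations, unbind restores the marker, and release frees the run outright. A model of the reserve/bind half, reusing the types above; the marker value is this sketch's own choice, the arena lock is again elided, and the kernel binds struct page pointers through mk_iommu_pte rather than raw physical addresses.

    #define MODEL_RESERVED_PTE 0xfaceUL     /* model's reserved marker */

    static long model_reserve(struct model_arena *arena, long pg_count,
                              unsigned long align_mask)
    {
        long p, i;

        if (!arena)
            return -1;
        p = model_find_pages(arena, pg_count, align_mask);
        if (p < 0)
            return -1;
        for (i = p; i < p + pg_count; ++i)
            arena->ptes[i] = MODEL_RESERVED_PTE;
        arena->next_entry = p + pg_count;
        return p;
    }

    static int model_bind(struct model_arena *arena, long pg_start,
                          long pg_count, const unsigned long *phys_pages)
    {
        long i, j;

        if (!arena)
            return -1;
        /* refuse unless the whole range was reserved first */
        for (j = pg_start; j < pg_start + pg_count; ++j)
            if (arena->ptes[j] != MODEL_RESERVED_PTE)
                return -1;
        for (i = 0, j = pg_start; i < pg_count; ++i, ++j)
            arena->ptes[j] = phys_pages[i] >> MODEL_PAGE_SHIFT;
        return 0;
    }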