Lines matching refs: npages
57 unsigned long npages; /* Number of pages in list. */ member
71 p->npages = 0; in iommu_batch_start()
83 unsigned long npages = p->npages; in iommu_batch_flush() local
92 while (npages != 0) { in iommu_batch_flush()
96 npages, in iommu_batch_flush()
104 npages, prot, __pa(pglist), in iommu_batch_flush()
109 index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), in iommu_batch_flush()
127 npages -= num; in iommu_batch_flush()
132 p->npages = 0; in iommu_batch_flush()
141 if (p->entry + p->npages == entry) in iommu_batch_new_entry()
153 BUG_ON(p->npages >= PGLIST_NENTS); in iommu_batch_add()
155 p->pglist[p->npages++] = phys_page; in iommu_batch_add()
156 if (p->npages == PGLIST_NENTS) in iommu_batch_add()
167 BUG_ON(p->npages >= PGLIST_NENTS); in iommu_batch_end()
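The iommu_batch_* references above (lines 57-167) show a flush-on-full batching pattern: iommu_batch_add() queues physical page addresses in p->pglist, and once npages reaches PGLIST_NENTS the batch is handed to the hypervisor and npages is reset to 0 (line 132). Below is a minimal user-space sketch of that pattern, assuming nothing beyond what the listing shows; batch_add() and flush_batch() are hypothetical stand-ins, and the PGLIST_NENTS value is assumed for the sketch rather than taken from the kernel headers.

    #include <stdio.h>

    #define PGLIST_NENTS 2048UL  /* assumed capacity; the kernel derives its own value */

    struct batch {
        unsigned long pglist[PGLIST_NENTS];  /* queued physical page addresses */
        unsigned long npages;                /* number of pages in list */
    };

    /* hypothetical stand-in for the hypervisor map call made on flush */
    static void flush_batch(struct batch *p)
    {
        printf("flushing %lu pages\n", p->npages);
        p->npages = 0;                       /* list is empty again */
    }

    static void batch_add(struct batch *p, unsigned long phys_page)
    {
        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)       /* flush as soon as the list fills */
            flush_batch(p);
    }

    int main(void)
    {
        struct batch b = { .npages = 0 };
        for (unsigned long pg = 0; pg < 5000; pg++)
            batch_add(&b, pg << 13);
        if (b.npages)                        /* drain the final partial batch */
            flush_batch(&b);
        return 0;
    }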
177 unsigned long flags, order, first_page, npages, n; in dma_4v_alloc_coherent() local
192 npages = size >> IO_PAGE_SHIFT; in dma_4v_alloc_coherent()
214 entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, in dma_4v_alloc_coherent()
231 for (n = 0; n < npages; n++) { in dma_4v_alloc_coherent()
245 iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); in dma_4v_alloc_coherent()
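In dma_4v_alloc_coherent() (line 192) the byte size is converted to an IOMMU page count with a plain shift, which works because the size has already been page-aligned earlier in the function (not shown in this listing); the free path (line 327) re-aligns explicitly with IO_PAGE_ALIGN(). A worked sketch of that arithmetic, assuming sparc64's 8 KiB IOMMU page (IO_PAGE_SHIFT = 13); the macros are re-derived here rather than pulled from kernel headers:

    #include <stdio.h>

    #define IO_PAGE_SHIFT 13                 /* assumed: 8 KiB IOMMU pages */
    #define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

    int main(void)
    {
        unsigned long size = 20000;          /* requested allocation in bytes */
        unsigned long npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        printf("%lu bytes -> %lu pages\n", size, npages);   /* prints 3 */
        return 0;
    }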
291 unsigned long entry, unsigned long npages) in dma_4v_iommu_demap() argument
301 npages); in dma_4v_iommu_demap()
304 entry, npages, &num); in dma_4v_iommu_demap()
311 npages -= num; in dma_4v_iommu_demap()
312 } while (npages != 0); in dma_4v_iommu_demap()
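dma_4v_iommu_demap() (lines 291-312) contains the chunked-unmap loop visible in the references: each hypervisor call may demap fewer entries than requested, reports the count it handled in num, and the loop repeats until npages reaches zero. A sketch of the same control flow; hv_demap() is a hypothetical stub whose 64-entry chunk limit is invented purely for illustration:

    #include <stdio.h>

    /* hypothetical stub: demaps at most 64 entries per call and
     * reports how many it actually handled */
    static unsigned long hv_demap(unsigned long entry, unsigned long npages)
    {
        unsigned long num = npages > 64 ? 64 : npages;
        printf("demapped %lu entries starting at %lu\n", num, entry);
        return num;
    }

    static void demap_range(unsigned long entry, unsigned long npages)
    {
        do {
            unsigned long num = hv_demap(entry, npages);
            entry  += num;       /* skip past what was already unmapped */
            npages -= num;       /* mirrors line 311 */
        } while (npages != 0);   /* mirrors line 312 */
    }

    int main(void)
    {
        demap_range(100, 150);   /* three calls: 64 + 64 + 22 entries */
        return 0;
    }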
323 unsigned long order, npages, entry; in dma_4v_free_coherent() local
327 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; in dma_4v_free_coherent()
341 dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages); in dma_4v_free_coherent()
342 iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4v_free_coherent()
357 unsigned long flags, npages, oaddr; in dma_4v_map_page() local
370 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); in dma_4v_map_page()
371 npages >>= IO_PAGE_SHIFT; in dma_4v_map_page()
379 entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, in dma_4v_map_page()
399 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { in dma_4v_map_page()
417 iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); in dma_4v_map_page()
429 unsigned long npages; in dma_4v_unmap_page() local
445 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); in dma_4v_unmap_page()
446 npages >>= IO_PAGE_SHIFT; in dma_4v_unmap_page()
457 dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages); in dma_4v_unmap_page()
458 iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); in dma_4v_unmap_page()
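The map/unmap page paths (lines 370-371 and 445-446) use the same two-step idiom to count pages for a possibly unaligned (address, length) pair: round the end up, round the start down, and shift the byte span down to pages. A worked example under the same 8 KiB page assumption as above:

    #include <stdio.h>

    #define IO_PAGE_SHIFT 13                 /* assumed: 8 KiB IOMMU pages */
    #define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

    int main(void)
    {
        unsigned long oaddr = 0x2345;        /* unaligned CPU address */
        unsigned long sz    = 0x6000;        /* mapping length in bytes */
        /* round the end up, the start down, then convert bytes to pages */
        unsigned long npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        printf("npages = %lu\n", npages);    /* prints 4 */
        return 0;
    }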
518 unsigned long paddr, npages, entry, out_entry = 0, slen; in dma_4v_map_sg() local
528 npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); in dma_4v_map_sg()
529 entry = iommu_tbl_range_alloc(dev, tbl, npages, in dma_4v_map_sg()
535 tbl, paddr, npages); in dma_4v_map_sg()
547 while (npages--) { in dma_4v_map_sg()
601 unsigned long vaddr, npages; in dma_4v_map_sg() local
604 npages = iommu_num_pages(s->dma_address, s->dma_length, in dma_4v_map_sg()
606 iommu_tbl_range_free(tbl, vaddr, npages, in dma_4v_map_sg()
645 unsigned long npages; in dma_4v_unmap_sg() local
651 npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); in dma_4v_unmap_sg()
662 entry, npages); in dma_4v_unmap_sg()
663 iommu_tbl_range_free(tbl, dma_handle, npages, in dma_4v_unmap_sg()
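The scatter-gather paths (lines 528, 604, 651) lean on the generic iommu_num_pages() helper instead of open-coding the alignment. Its arithmetic, reproduced here as a user-space sketch under the same 8 KiB page assumption, folds the intra-page offset into the length before rounding up:

    #include <stdio.h>

    #define IO_PAGE_SIZE (1UL << 13)         /* assumed: 8 KiB IOMMU pages */

    /* same arithmetic as the kernel's iommu_num_pages() helper:
     * count the IO pages touched by [addr, addr + len) */
    static unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
                                         unsigned long io_page_size)
    {
        unsigned long size = (addr & (io_page_size - 1)) + len;
        return (size + io_page_size - 1) / io_page_size;   /* DIV_ROUND_UP */
    }

    int main(void)
    {
        /* a span just under one page, but offset into its first page,
         * still touches two IO pages */
        printf("%lu\n", iommu_num_pages(0x100, 0x1FFF, IO_PAGE_SIZE));  /* prints 2 */
        return 0;
    }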