/arch/sparc/kernel/
  iommu.c
    158  unsigned long npages) in alloc_npages() argument
    162  entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages()
    204  int npages, nid; in dma_4u_alloc_coherent() local
    233  npages = size >> IO_PAGE_SHIFT; in dma_4u_alloc_coherent()
    235  while (npages--) { in dma_4u_alloc_coherent()
    251  unsigned long order, npages; in dma_4u_free_coherent() local
    253  npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; in dma_4u_free_coherent()
    256  iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4u_free_coherent()
    271  unsigned long flags, npages, oaddr; in dma_4u_map_page() local
    283  npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); in dma_4u_map_page()
    [all …]
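The dma_4u_map_page() hit at line 283 shows the usual idiom for counting the I/O pages a byte range touches: round the end up and the start down to page boundaries, then shift. Below is a minimal userspace sketch of that arithmetic, not kernel code; IO_PAGE_SHIFT = 13 (sparc64's 8 KiB IOMMU pages) and the sample address and length are assumptions.

    #include <stdio.h>

    #define IO_PAGE_SHIFT 13
    #define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(addr) (((addr) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

    int main(void)
    {
        unsigned long oaddr = 0x4000123UL;  /* assumed example address */
        unsigned long sz = 0x3000UL;        /* assumed example length  */

        /* Span from the start of the first page to the end of the last,
         * then convert the byte span to a page count. */
        unsigned long npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        printf("%lu bytes at %#lx -> %lu IOMMU pages\n", sz, oaddr, npages);
        return 0;   /* prints 2 for these values */
    }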
  pci_sun4v.c
    57   unsigned long npages; /* Number of pages in list. */ member
    71   p->npages = 0; in iommu_batch_start()
    83   unsigned long npages = p->npages; in iommu_batch_flush() local
    92   while (npages != 0) { in iommu_batch_flush()
    96   npages, in iommu_batch_flush()
    104  npages, prot, __pa(pglist), in iommu_batch_flush()
    109  index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), in iommu_batch_flush()
    127  npages -= num; in iommu_batch_flush()
    132  p->npages = 0; in iommu_batch_flush()
    141  if (p->entry + p->npages == entry) in iommu_batch_new_entry()
    [all …]
/arch/powerpc/kvm/
  book3s_64_vio.c
    97   unsigned long i, npages = kvmppc_tce_pages(stt->size); in release_spapr_tce_table() local
    99   for (i = 0; i < npages; i++) in release_spapr_tce_table()
    157  unsigned long npages, size; in kvm_vm_ioctl_create_spapr_tce() local
    165  npages = kvmppc_tce_pages(size); in kvm_vm_ioctl_create_spapr_tce()
    166  ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true); in kvm_vm_ioctl_create_spapr_tce()
    170  stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), in kvm_vm_ioctl_create_spapr_tce()
    181  for (i = 0; i < npages; i++) { in kvm_vm_ioctl_create_spapr_tce()
    213  for (i = 0; i < npages; i++) in kvm_vm_ioctl_create_spapr_tce()
    219  kvmppc_account_memlimit(kvmppc_stt_pages(npages), false); in kvm_vm_ioctl_create_spapr_tce()
    251  unsigned long tce_list, unsigned long npages) in kvmppc_h_put_tce_indirect() argument
    [all …]
  book3s_64_vio_hv.c
    73   unsigned long ioba, unsigned long npages) in kvmppc_ioba_validate() argument
    79   (idx - stt->offset + npages > stt->size) || in kvmppc_ioba_validate()
    80   (idx + npages < idx)) in kvmppc_ioba_validate()
    237  unsigned long tce_list, unsigned long npages) in kvmppc_rm_h_put_tce_indirect() argument
    253  if (npages > 512) in kvmppc_rm_h_put_tce_indirect()
    259  ret = kvmppc_ioba_validate(stt, ioba, npages); in kvmppc_rm_h_put_tce_indirect()
    282  for (i = 0; i < npages; ++i) { in kvmppc_rm_h_put_tce_indirect()
    300  unsigned long tce_value, unsigned long npages) in kvmppc_rm_h_stuff_tce() argument
    309  ret = kvmppc_ioba_validate(stt, ioba, npages); in kvmppc_rm_h_stuff_tce()
    317  for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) in kvmppc_rm_h_stuff_tce()
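kvmppc_ioba_validate() (lines 73-80) rejects a TCE request unless all npages entries fall inside the table's window, including an explicit check that idx + npages does not wrap around as an unsigned value. A hedged sketch of the same three-part check follows; the struct and function names are illustrative, not the kernel's.

    #include <stdbool.h>

    struct tce_window {             /* illustrative, not the kernel struct */
        unsigned long offset;       /* first valid page index  */
        unsigned long size;         /* window length, in pages */
    };

    static bool ioba_ok(const struct tce_window *w,
                        unsigned long idx, unsigned long npages)
    {
        if (idx < w->offset)                     /* starts below the window  */
            return false;
        if (idx - w->offset + npages > w->size)  /* runs past the window end */
            return false;
        if (idx + npages < idx)                  /* unsigned wrap-around     */
            return false;
        return true;
    }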
  book3s_64_mmu_hv.c
    179  unsigned long npages; in kvmppc_map_vrma() local
    189  npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
    192  if (npages > 1ul << (40 - porder)) in kvmppc_map_vrma()
    193  npages = 1ul << (40 - porder); in kvmppc_map_vrma()
    195  if (npages > kvm->arch.hpt_mask + 1) in kvmppc_map_vrma()
    196  npages = kvm->arch.hpt_mask + 1; in kvmppc_map_vrma()
    203  for (i = 0; i < npages; ++i) { in kvmppc_map_vrma()
    449  long index, ret, npages; in kvmppc_book3s_hv_page_fault() local
    514  npages = get_user_pages_fast(hva, 1, writing, pages); in kvmppc_book3s_hv_page_fault()
    515  if (npages < 1) { in kvmppc_book3s_hv_page_fault()
    [all …]
/arch/powerpc/kernel/
  iommu.c
    178  unsigned long npages, in iommu_range_alloc() argument
    185  int largealloc = npages > 15; in iommu_range_alloc()
    198  if (unlikely(npages == 0)) { in iommu_range_alloc()
    259  n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
    285  end = n + npages; in iommu_range_alloc()
    307  void *page, unsigned int npages, in iommu_alloc() argument
    316  entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
    325  build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
    335  __iommu_free(tbl, ret, npages); in iommu_alloc()
    350  unsigned int npages) in iommu_free_check() argument
    [all …]
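iommu_range_alloc() here delegates the search to iommu_area_alloc() (line 259), which finds a run of npages clear bits in the table's allocation bitmap (tbl->it_map). The sketch below is a deliberately naive first-fit version of that idea in plain C, under the assumption that a simple linear scan conveys the mechanism; the real allocator adds a search hint, boundary constraints, and locking.

    #include <limits.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static int map_test_bit(const unsigned long *map, unsigned long n)
    {
        return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
    }

    static void map_set_bit(unsigned long *map, unsigned long n)
    {
        map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
    }

    /* First-fit scan for npages clear bits in [0, limit); returns the
     * start index, or ULONG_MAX on failure. */
    static unsigned long range_alloc(unsigned long *map, unsigned long limit,
                                     unsigned long npages)
    {
        unsigned long start = 0, i;

        while (start + npages <= limit) {
            for (i = 0; i < npages; i++)
                if (map_test_bit(map, start + i))
                    break;
            if (i == npages) {               /* found a clear run: claim it */
                for (i = 0; i < npages; i++)
                    map_set_bit(map, start + i);
                return start;
            }
            start = start + i + 1;           /* skip past the set bit we hit */
        }
        return ULONG_MAX;
    }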
/arch/x86/kernel/
  pci-calgary_64.c
    204  unsigned long start_addr, unsigned int npages) in iommu_range_reserve() argument
    216  end = index + npages; in iommu_range_reserve()
    222  bitmap_set(tbl->it_map, index, npages); in iommu_range_reserve()
    229  unsigned int npages) in iommu_range_alloc() argument
    238  BUG_ON(npages == 0); in iommu_range_alloc()
    243  npages, 0, boundary_size, 0); in iommu_range_alloc()
    248  npages, 0, boundary_size, 0); in iommu_range_alloc()
    259  tbl->it_hint = offset + npages; in iommu_range_alloc()
    268  void *vaddr, unsigned int npages, int direction) in iommu_alloc() argument
    273  entry = iommu_range_alloc(dev, tbl, npages); in iommu_alloc()
    [all …]
  tce_64.c
    50   unsigned int npages, unsigned long uaddr, int direction) in tce_build() argument
    62   while (npages--) { in tce_build()
    75   void tce_free(struct iommu_table *tbl, long index, unsigned int npages) in tce_free() argument
    81   while (npages--) { in tce_free()
  amd_gart_64.c
    216  unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); in dma_map_area() local
    223  iommu_page = alloc_iommu(dev, npages, align_mask); in dma_map_area()
    233  for (i = 0; i < npages; i++) { in dma_map_area()
    269  int npages; in gart_unmap_page() local
    277  npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in gart_unmap_page()
    278  for (i = 0; i < npages; i++) { in gart_unmap_page()
    281  free_iommu(iommu_page, npages); in gart_unmap_page()
/arch/powerpc/sysdev/
  dart_iommu.c
    186  long npages, unsigned long uaddr, in dart_build() argument
    194  DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); in dart_build()
    201  l = npages; in dart_build()
    209  dart_cache_sync(orig_dp, npages); in dart_build()
    213  while (npages--) in dart_build()
    222  static void dart_free(struct iommu_table *tbl, long index, long npages) in dart_free() argument
    225  long orig_npages = npages; in dart_free()
    232  DBG("dart: free at: %lx, %lx\n", index, npages); in dart_free()
    236  while (npages--) in dart_free()
/arch/sparc/mm/
  iommu.c
    177  static u32 iommu_get_one(struct device *dev, struct page *page, int npages) in iommu_get_one() argument
    186  ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page)); in iommu_get_one()
    194  for (i = 0; i < npages; i++) { in iommu_get_one()
    202  iommu_flush_iotlb(iopte0, npages); in iommu_get_one()
    210  int npages; in iommu_get_scsi_one() local
    215  npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; in iommu_get_scsi_one()
    217  busa = iommu_get_one(dev, page, npages); in iommu_get_scsi_one()
    283  static void iommu_release_one(struct device *dev, u32 busa, int npages) in iommu_release_one() argument
    291  for (i = 0; i < npages; i++) { in iommu_release_one()
    296  bit_map_clear(&iommu->usemap, ioptex, npages); in iommu_release_one()
    [all …]
  io-unit.c
    96   int i, j, k, npages; in iounit_get_area() local
    100  npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; in iounit_get_area()
    103  switch (npages) { in iounit_get_area()
    109  IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages)); in iounit_get_area()
    116  if (scan + npages > limit) { in iounit_get_area()
    127  for (k = 1, scan++; k < npages; k++) in iounit_get_area()
    131  scan -= npages; in iounit_get_area()
    134  for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) { in iounit_get_area()
/arch/x86/kvm/
  iommu.c
    42   gfn_t base_gfn, unsigned long npages);
    45   unsigned long npages) in kvm_pin_pages() argument
    51   end_gfn = gfn + npages; in kvm_pin_pages()
    64   unsigned long npages) in kvm_unpin_pages() argument
    68   for (i = 0; i < npages; ++i) in kvm_unpin_pages()
    85   end_gfn = gfn + slot->npages; in kvm_iommu_map_pages()
    273  gfn_t base_gfn, unsigned long npages) in kvm_iommu_put_pages() argument
    281  end_gfn = base_gfn + npages; in kvm_iommu_put_pages()
    317  kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); in kvm_iommu_unmap_pages()
  page_track.c
    36   unsigned long npages) in kvm_page_track_create_memslot() argument
    41   slot->arch.gfn_track[i] = kvm_kvzalloc(npages * in kvm_page_track_create_memslot()
/arch/powerpc/platforms/pasemi/
  iommu.c
    89   long npages, unsigned long uaddr, in iobmap_build() argument
    97   pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr); in iobmap_build()
    103  while (npages--) { in iobmap_build()
    118  long npages) in iobmap_free() argument
    123  pr_debug("iobmap: free at: %lx, %lx\n", index, npages); in iobmap_free()
    129  while (npages--) { in iobmap_free()
/arch/x86/include/asm/
  tce.h
    42   unsigned int npages, unsigned long uaddr, int direction);
    43   extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
  kvm_page_track.h
    43   unsigned long npages);
/arch/alpha/kernel/
  pci_iommu.c
    257  long npages, dma_ofs, i; in pci_map_single_1() local
    300  npages = iommu_num_pages(paddr, size, PAGE_SIZE); in pci_map_single_1()
    305  dma_ofs = iommu_arena_alloc(dev, arena, npages, align); in pci_map_single_1()
    313  for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) in pci_map_single_1()
    320  cpu_addr, size, npages, ret, __builtin_return_address(0)); in pci_map_single_1()
    378  long dma_ofs, npages; in alpha_pci_unmap_page() local
    411  npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in alpha_pci_unmap_page()
    415  iommu_arena_free(arena, dma_ofs, npages); in alpha_pci_unmap_page()
    426  dma_addr, size, npages, __builtin_return_address(0)); in alpha_pci_unmap_page()
    562  long npages, dma_ofs, i; in sg_fill() local
    [all …]
/arch/tile/kernel/
  module.c
    43   int npages; in module_alloc() local
    45   npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in module_alloc()
    46   pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); in module_alloc()
    49   for (; i < npages; ++i) { in module_alloc()
    58   area->nr_pages = npages; in module_alloc()
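module_alloc() converts a byte count to a page count with the classic round-up division (line 45) and then sizes an array with one slot per page. A minimal userspace sketch of that pattern, assuming a 4 KiB page size and an arbitrary example request:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long size = 10000;                    /* bytes requested    */
        unsigned long npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        void **pages = calloc(npages, sizeof(*pages)); /* one slot per page  */

        if (!pages)
            return 1;
        printf("%lu bytes -> %lu pages\n", size, npages);  /* prints 3 */
        free(pages);
        return 0;
    }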
/arch/powerpc/mm/
  mmu_context_iommu.c
    36   unsigned long npages, bool incr) in mm_iommu_adjust_locked_vm() argument
    40   if (!npages) in mm_iommu_adjust_locked_vm()
    46   locked = mm->locked_vm + npages; in mm_iommu_adjust_locked_vm()
    51   mm->locked_vm += npages; in mm_iommu_adjust_locked_vm()
    53   if (WARN_ON_ONCE(npages > mm->locked_vm)) in mm_iommu_adjust_locked_vm()
    54   npages = mm->locked_vm; in mm_iommu_adjust_locked_vm()
    55   mm->locked_vm -= npages; in mm_iommu_adjust_locked_vm()
    61   npages << PAGE_SHIFT, in mm_iommu_adjust_locked_vm()
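mm_iommu_adjust_locked_vm() drives one counter in both directions: on increment it checks the new total against the memlock limit, and on decrement it WARNs and clamps rather than letting locked_vm wrap below zero (lines 53-55). A hedged sketch of that pattern, with illustrative names and a caller-supplied limit standing in for RLIMIT_MEMLOCK:

    #include <stdbool.h>
    #include <stdio.h>

    struct mm_stub { unsigned long locked_vm; };   /* illustrative stand-in */

    static int adjust_locked_vm(struct mm_stub *mm, unsigned long npages,
                                bool incr, unsigned long lock_limit)
    {
        if (!npages)
            return 0;

        if (incr) {
            if (mm->locked_vm + npages > lock_limit)  /* over the memlock cap */
                return -1;
            mm->locked_vm += npages;
        } else {
            if (npages > mm->locked_vm) {      /* kernel WARN_ON_ONCE()s here */
                fprintf(stderr, "unbalanced locked_vm decrement\n");
                npages = mm->locked_vm;        /* clamp instead of wrapping   */
            }
            mm->locked_vm -= npages;
        }
        return 0;
    }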
  subpage-prot.c
    61   int npages) in hpte_flush_range() argument
    80   for (; npages > 0; --npages) { in hpte_flush_range()
/arch/powerpc/platforms/pseries/
  iommu.c
    124  long npages, unsigned long uaddr, in tce_build_pSeries() argument
    139  while (npages--) { in tce_build_pSeries()
    151  static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) in tce_free_pSeries() argument
    157  while (npages--) in tce_free_pSeries()
    174  long npages, unsigned long uaddr, in tce_build_pSeriesLP() argument
    182  long tcenum_start = tcenum, npages_start = npages; in tce_build_pSeriesLP()
    189  while (npages--) { in tce_build_pSeriesLP()
    196  (npages_start - (npages + 1))); in tce_build_pSeriesLP()
    217  long npages, unsigned long uaddr, in tce_buildmulti_pSeriesLP() argument
    226  long tcenum_start = tcenum, npages_start = npages; in tce_buildmulti_pSeriesLP()
    [all …]
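tce_build_pSeriesLP() saves tcenum_start and npages_start up front so that, when an hcall fails partway through, it can tear down exactly the (npages_start - (npages + 1)) entries already built (line 196). A self-contained sketch of that rollback arithmetic; tce_set() and tce_clear_range() are hypothetical stand-ins for the hypervisor calls, and the failure at entry 7 is contrived for the demo.

    #include <stdio.h>

    /* Hypothetical stand-ins for the H_PUT_TCE hcall and its teardown. */
    static int tce_set(long tcenum) { return tcenum == 7 ? -1 : 0; }
    static void tce_clear_range(long start, long n)
    {
        printf("rolling back %ld entries from %ld\n", n, start);
    }

    static int build_range(long tcenum, long npages)
    {
        long tcenum_start = tcenum, npages_start = npages;

        while (npages--) {
            if (tce_set(tcenum) != 0) {
                /* npages was already decremented for the failing entry,
                 * so (npages_start - (npages + 1)) entries completed. */
                tce_clear_range(tcenum_start, npages_start - (npages + 1));
                return -1;
            }
            tcenum++;
        }
        return 0;
    }

    int main(void) { return build_range(5, 5) ? 1 : 0; }  /* rolls back 2 */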
/arch/arm/kernel/
  process.c
    447  unsigned int npages) in sigpage_addr() argument
    457  last = TASK_SIZE - (npages << PAGE_SHIFT); in sigpage_addr()
    488  unsigned long npages; in arch_setup_additional_pages() local
    498  npages = 1; /* for sigpage */ in arch_setup_additional_pages()
    499  npages += vdso_total_pages; in arch_setup_additional_pages()
    503  hint = sigpage_addr(mm, npages); in arch_setup_additional_pages()
    504  addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0); in arch_setup_additional_pages()
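sigpage_addr() computes the highest base at which npages pages still fit below TASK_SIZE (line 457) and then picks a page-aligned slot between the stack top and that bound. A rough userspace sketch of that bounds arithmetic, assuming a 4 KiB page and a 3G/1G TASK_SIZE split; rand() stands in for the kernel's RNG and the names are illustrative.

    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
    #define TASK_SIZE  0xc0000000UL             /* assumed 3G/1G split */

    static unsigned long pick_sigpage_addr(unsigned long stack_top,
                                           unsigned int npages)
    {
        unsigned long first = PAGE_ALIGN(stack_top);
        unsigned long last = TASK_SIZE - ((unsigned long)npages << PAGE_SHIFT);
        unsigned long slots;

        if (first > last)
            return 0;                           /* no room above the stack */
        slots = ((last - first) >> PAGE_SHIFT) + 1;
        return first + (((unsigned long)rand() % slots) << PAGE_SHIFT);
    }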
/arch/powerpc/include/asm/
  iommu.h
    53   long index, long npages,
    69   long index, long npages);
    293  unsigned long npages);
/arch/x86/platform/efi/
  efi.c
    552  u64 addr, npages; in efi_set_executable() local
    555  npages = md->num_pages; in efi_set_executable()
    557  memrange_efi_to_native(&addr, &npages); in efi_set_executable()
    560  set_memory_x(addr, npages); in efi_set_executable()
    562  set_memory_nx(addr, npages); in efi_set_executable()
    581  u64 npages; in efi_memory_uc() local
    583  npages = round_up(size, page_shift) / page_shift; in efi_memory_uc()
    584  memrange_efi_to_native(&addr, &npages); in efi_memory_uc()
    585  set_memory_uc(addr, npages); in efi_memory_uc()