/arch/sparc/kernel/
    iommu.c:
        159  unsigned long npages) in alloc_npages() argument
        163  entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages()
        205  int npages, nid; in dma_4u_alloc_coherent() local
        234  npages = size >> IO_PAGE_SHIFT; in dma_4u_alloc_coherent()
        236  while (npages--) { in dma_4u_alloc_coherent()
        252  unsigned long order, npages; in dma_4u_free_coherent() local
        254  npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; in dma_4u_free_coherent()
        257  iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4u_free_coherent()
        272  unsigned long flags, npages, oaddr; in dma_4u_map_page() local
        284  npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); in dma_4u_map_page()
        [all …]

    pci_sun4v.c:
        59   unsigned long npages; /* Number of pages in list. */ member
        73   p->npages = 0; in iommu_batch_start()
        85   unsigned long npages = p->npages; in iommu_batch_flush() local
        94   while (npages != 0) { in iommu_batch_flush()
        98   npages, in iommu_batch_flush()
        106  npages, prot, __pa(pglist), in iommu_batch_flush()
        111  index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), in iommu_batch_flush()
        129  npages -= num; in iommu_batch_flush()
        134  p->npages = 0; in iommu_batch_flush()
        143  if (p->entry + p->npages == entry) in iommu_batch_new_entry()
        [all …]

/arch/powerpc/kernel/
    iommu.c:
        177  unsigned long npages, in iommu_range_alloc() argument
        184  int largealloc = npages > 15; in iommu_range_alloc()
        197  if (unlikely(npages == 0)) { in iommu_range_alloc()
        258  n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
        284  end = n + npages; in iommu_range_alloc()
        306  void *page, unsigned int npages, in iommu_alloc() argument
        315  entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
        324  build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
        334  __iommu_free(tbl, ret, npages); in iommu_alloc()
        349  unsigned int npages) in iommu_free_check() argument
        [all …]

/arch/x86/mm/
    cpu_entry_area.c:
        44   int npages; in percpu_setup_debug_store() local
        51   npages = sizeof(struct debug_store) / PAGE_SIZE; in percpu_setup_debug_store()
        53   cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, in percpu_setup_debug_store()
        61   npages = sizeof(struct debug_store_buffers) / PAGE_SIZE; in percpu_setup_debug_store()
        62   for (; npages; npages--, cea += PAGE_SIZE) in percpu_setup_debug_store()

/arch/x86/kernel/
    pci-calgary_64.c:
        206  unsigned long start_addr, unsigned int npages) in iommu_range_reserve() argument
        218  end = index + npages; in iommu_range_reserve()
        224  bitmap_set(tbl->it_map, index, npages); in iommu_range_reserve()
        231  unsigned int npages) in iommu_range_alloc() argument
        240  BUG_ON(npages == 0); in iommu_range_alloc()
        245  npages, 0, boundary_size, 0); in iommu_range_alloc()
        250  npages, 0, boundary_size, 0); in iommu_range_alloc()
        261  tbl->it_hint = offset + npages; in iommu_range_alloc()
        270  void *vaddr, unsigned int npages, int direction) in iommu_alloc() argument
        275  entry = iommu_range_alloc(dev, tbl, npages); in iommu_alloc()
        [all …]

    tce_64.c:
        50   unsigned int npages, unsigned long uaddr, int direction) in tce_build() argument
        62   while (npages--) { in tce_build()
        75   void tce_free(struct iommu_table *tbl, long index, unsigned int npages) in tce_free() argument
        81   while (npages--) { in tce_free()

    ftrace.c:
        720  int npages = PAGE_ALIGN(size) >> PAGE_SHIFT; in tramp_free() local
        722  set_memory_nx((unsigned long)tramp, npages); in tramp_free()
        723  set_memory_rw((unsigned long)tramp, npages); in tramp_free()
        872  int ret, npages; in arch_ftrace_update_trampoline() local
        881  npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT; in arch_ftrace_update_trampoline()
        882  set_memory_rw(ops->trampoline, npages); in arch_ftrace_update_trampoline()
        888  npages = PAGE_ALIGN(size) >> PAGE_SHIFT; in arch_ftrace_update_trampoline()
        901  set_memory_ro(ops->trampoline, npages); in arch_ftrace_update_trampoline()

    amd_gart_64.c:
        217  unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); in dma_map_area() local
        224  iommu_page = alloc_iommu(dev, npages, align_mask); in dma_map_area()
        234  for (i = 0; i < npages; i++) { in dma_map_area()
        270  int npages; in gart_unmap_page() local
        278  npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in gart_unmap_page()
        279  for (i = 0; i < npages; i++) { in gart_unmap_page()
        282  free_iommu(iommu_page, npages); in gart_unmap_page()
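
Note: the amd_gart_64.c hits above (like the Alpha pci_iommu.c hits further
down) size npages with iommu_num_pages(addr, len, PAGE_SIZE). The sketch
below is a minimal user-space illustration of the rounding such a helper
performs, assuming it counts every page the byte range [addr, addr + len)
touches; the name num_io_pages and the demo values are made up for the
example.

    #include <stdio.h>

    /*
     * Illustrative sketch, not kernel source: count how many IO pages
     * the byte range [addr, addr + len) touches, assuming io_page_size
     * is a power of two.
     */
    static unsigned long num_io_pages(unsigned long addr, unsigned long len,
                                      unsigned long io_page_size)
    {
        /* Length measured from the start of the first touched page... */
        unsigned long span = (addr & (io_page_size - 1)) + len;

        /* ...rounded up to whole pages. */
        return (span + io_page_size - 1) / io_page_size;
    }

    int main(void)
    {
        /* 8 KiB starting 0xF00 bytes into a 4 KiB page touches 3 pages. */
        printf("%lu\n", num_io_pages(0xF00, 8192, 4096));
        return 0;
    }

This is the same arithmetic the sparc iommu.c and io-unit.c hits below spell
out inline with IO_PAGE_ALIGN()/PAGE_SHIFT expressions.
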
/arch/powerpc/sysdev/
    dart_iommu.c:
        186  long npages, unsigned long uaddr, in dart_build() argument
        194  DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); in dart_build()
        201  l = npages; in dart_build()
        209  dart_cache_sync(orig_dp, npages); in dart_build()
        213  while (npages--) in dart_build()
        222  static void dart_free(struct iommu_table *tbl, long index, long npages) in dart_free() argument
        225  long orig_npages = npages; in dart_free()
        232  DBG("dart: free at: %lx, %lx\n", index, npages); in dart_free()
        236  while (npages--) in dart_free()

/arch/sparc/mm/
    iommu.c:
        178  static u32 iommu_get_one(struct device *dev, struct page *page, int npages) in iommu_get_one() argument
        187  ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page)); in iommu_get_one()
        195  for (i = 0; i < npages; i++) { in iommu_get_one()
        203  iommu_flush_iotlb(iopte0, npages); in iommu_get_one()
        211  int npages; in iommu_get_scsi_one() local
        216  npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; in iommu_get_scsi_one()
        218  busa = iommu_get_one(dev, page, npages); in iommu_get_scsi_one()
        284  static void iommu_release_one(struct device *dev, u32 busa, int npages) in iommu_release_one() argument
        292  for (i = 0; i < npages; i++) { in iommu_release_one()
        297  bit_map_clear(&iommu->usemap, ioptex, npages); in iommu_release_one()
        [all …]

    io-unit.c:
        97   int i, j, k, npages; in iounit_get_area() local
        101  npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; in iounit_get_area()
        104  switch (npages) { in iounit_get_area()
        110  IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages)); in iounit_get_area()
        117  if (scan + npages > limit) { in iounit_get_area()
        128  for (k = 1, scan++; k < npages; k++) in iounit_get_area()
        132  scan -= npages; in iounit_get_area()
        135  for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) { in iounit_get_area()

/arch/powerpc/kvm/
    book3s_64_vio.c:
        231  unsigned long i, npages = kvmppc_tce_pages(stt->size); in release_spapr_tce_table() local
        233  for (i = 0; i < npages; i++) in release_spapr_tce_table()
        300  unsigned long npages, size; in kvm_vm_ioctl_create_spapr_tce() local
        308  npages = kvmppc_tce_pages(size); in kvm_vm_ioctl_create_spapr_tce()
        309  ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true); in kvm_vm_ioctl_create_spapr_tce()
        314  stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), in kvm_vm_ioctl_create_spapr_tce()
        326  for (i = 0; i < npages; i++) { in kvm_vm_ioctl_create_spapr_tce()
        358  for (i = 0; i < npages; i++) in kvm_vm_ioctl_create_spapr_tce()
        364  kvmppc_account_memlimit(kvmppc_stt_pages(npages), false); in kvm_vm_ioctl_create_spapr_tce()
        520  unsigned long tce_list, unsigned long npages) in kvmppc_h_put_tce_indirect() argument
        [all …]

    book3s_64_vio_hv.c:
        390  unsigned long tce_list, unsigned long npages) in kvmppc_rm_h_put_tce_indirect() argument
        412  if (npages > 512) in kvmppc_rm_h_put_tce_indirect()
        418  ret = kvmppc_ioba_validate(stt, ioba, npages); in kvmppc_rm_h_put_tce_indirect()
        468  for (i = 0; i < npages; ++i) { in kvmppc_rm_h_put_tce_indirect()
        510  unsigned long tce_value, unsigned long npages) in kvmppc_rm_h_stuff_tce() argument
        524  ret = kvmppc_ioba_validate(stt, ioba, npages); in kvmppc_rm_h_stuff_tce()
        535  for (i = 0; i < npages; ++i) { in kvmppc_rm_h_stuff_tce()
        550  for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) in kvmppc_rm_h_stuff_tce()

    book3s_64_mmu_radix.c:
        332  long ret, npages, ok; in kvmppc_book3s_radix_page_fault() local
        428  npages = get_user_pages_fast(hva, 1, writing, pages); in kvmppc_book3s_radix_page_fault()
        429  if (npages < 1) { in kvmppc_book3s_radix_page_fault()
        606  int npages; in kvmppc_hv_get_dirty_log_radix() local
        619  for (i = 0; i < memslot->npages; i = j) { in kvmppc_hv_get_dirty_log_radix()
        620  npages = kvm_radix_test_clear_dirty(kvm, memslot, i); in kvmppc_hv_get_dirty_log_radix()
        630  if (npages) in kvmppc_hv_get_dirty_log_radix()
        631  for (j = i; npages; ++j, --npages) in kvmppc_hv_get_dirty_log_radix()

    book3s_64_mmu_hv.c:
        219  unsigned long npages; in kvmppc_map_vrma() local
        229  npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
        232  if (npages > 1ul << (40 - porder)) in kvmppc_map_vrma()
        233  npages = 1ul << (40 - porder); in kvmppc_map_vrma()
        235  if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1) in kvmppc_map_vrma()
        236  npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1; in kvmppc_map_vrma()
        243  for (i = 0; i < npages; ++i) { in kvmppc_map_vrma()
        493  long index, ret, npages; in kvmppc_book3s_hv_page_fault() local
        579  npages = get_user_pages_fast(hva, 1, writing, pages); in kvmppc_book3s_hv_page_fault()
        580  if (npages < 1) { in kvmppc_book3s_hv_page_fault()
        [all …]

/arch/powerpc/platforms/pasemi/
    iommu.c:
        89   long npages, unsigned long uaddr, in iobmap_build() argument
        97   pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr); in iobmap_build()
        103  while (npages--) { in iobmap_build()
        118  long npages) in iobmap_free() argument
        123  pr_debug("iobmap: free at: %lx, %lx\n", index, npages); in iobmap_free()
        129  while (npages--) { in iobmap_free()

/arch/alpha/kernel/
    pci_iommu.c:
        258  long npages, dma_ofs, i; in pci_map_single_1() local
        301  npages = iommu_num_pages(paddr, size, PAGE_SIZE); in pci_map_single_1()
        306  dma_ofs = iommu_arena_alloc(dev, arena, npages, align); in pci_map_single_1()
        314  for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) in pci_map_single_1()
        321  cpu_addr, size, npages, ret, __builtin_return_address(0)); in pci_map_single_1()
        379  long dma_ofs, npages; in alpha_pci_unmap_page() local
        412  npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in alpha_pci_unmap_page()
        416  iommu_arena_free(arena, dma_ofs, npages); in alpha_pci_unmap_page()
        427  dma_addr, size, npages, __builtin_return_address(0)); in alpha_pci_unmap_page()
        563  long npages, dma_ofs, i; in sg_fill() local
        [all …]

/arch/x86/include/asm/
    tce.h:
        42   unsigned int npages, unsigned long uaddr, int direction);
        43   extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);

/arch/tile/kernel/
    module.c:
        43   int npages; in module_alloc() local
        45   npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in module_alloc()
        46   pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL); in module_alloc()
        49   for (; i < npages; ++i) { in module_alloc()
        58   area->nr_pages = npages; in module_alloc()

/arch/powerpc/platforms/pseries/
    iommu.c:
        125  long npages, unsigned long uaddr, in tce_build_pSeries() argument
        140  while (npages--) { in tce_build_pSeries()
        152  static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) in tce_free_pSeries() argument
        158  while (npages--) in tce_free_pSeries()
        175  long npages, unsigned long uaddr, in tce_build_pSeriesLP() argument
        183  long tcenum_start = tcenum, npages_start = npages; in tce_build_pSeriesLP()
        190  while (npages--) { in tce_build_pSeriesLP()
        197  (npages_start - (npages + 1))); in tce_build_pSeriesLP()
        218  long npages, unsigned long uaddr, in tce_buildmulti_pSeriesLP() argument
        227  long tcenum_start = tcenum, npages_start = npages; in tce_buildmulti_pSeriesLP()
        [all …]

/arch/powerpc/mm/
    mmu_context_iommu.c:
        38   unsigned long npages, bool incr) in mm_iommu_adjust_locked_vm() argument
        42   if (!npages) in mm_iommu_adjust_locked_vm()
        48   locked = mm->locked_vm + npages; in mm_iommu_adjust_locked_vm()
        53   mm->locked_vm += npages; in mm_iommu_adjust_locked_vm()
        55   if (WARN_ON_ONCE(npages > mm->locked_vm)) in mm_iommu_adjust_locked_vm()
        56   npages = mm->locked_vm; in mm_iommu_adjust_locked_vm()
        57   mm->locked_vm -= npages; in mm_iommu_adjust_locked_vm()
        63   npages << PAGE_SHIFT, in mm_iommu_adjust_locked_vm()

    subpage-prot.c:
        61   int npages) in hpte_flush_range() argument
        80   for (; npages > 0; --npages) { in hpte_flush_range()
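
Note: the mm_iommu_adjust_locked_vm() hits above show the usual locked-page
accounting shape: add npages to mm->locked_vm when pinning, and clamp the
subtraction on release (the WARN_ON_ONCE at line 55) so the counter cannot
underflow. Below is a self-contained sketch of that pattern; the struct,
function name, and limit handling are assumptions for illustration, not the
kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the mm fields the kernel code touches. */
    struct mm_demo {
        unsigned long locked_vm;    /* pages currently locked */
    };

    /* Raise or lower the locked-page count; clamp rather than underflow. */
    static long adjust_locked_vm(struct mm_demo *mm, unsigned long npages,
                                 bool incr, unsigned long limit)
    {
        if (!npages)
            return 0;

        if (incr) {
            /* Refuse an increment that would blow past the limit. */
            if (mm->locked_vm + npages > limit)
                return -1;          /* stands in for -ENOMEM */
            mm->locked_vm += npages;
        } else {
            /* A mismatched decrement clamps to zero instead of wrapping. */
            if (npages > mm->locked_vm)
                npages = mm->locked_vm;
            mm->locked_vm -= npages;
        }
        return 0;
    }

    int main(void)
    {
        struct mm_demo mm = { .locked_vm = 0 };

        adjust_locked_vm(&mm, 16, true, 1024);   /* pin 16 pages */
        adjust_locked_vm(&mm, 32, false, 1024);  /* over-free clamps to 0 */
        printf("locked_vm=%lu\n", mm.locked_vm);
        return 0;
    }
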
/arch/arm/kernel/
    process.c:
        375  unsigned int npages) in sigpage_addr() argument
        385  last = TASK_SIZE - (npages << PAGE_SHIFT); in sigpage_addr()
        424  unsigned long npages; in arch_setup_additional_pages() local
        434  npages = 1; /* for sigpage */ in arch_setup_additional_pages()
        435  npages += vdso_total_pages; in arch_setup_additional_pages()
        439  hint = sigpage_addr(mm, npages); in arch_setup_additional_pages()
        440  addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0); in arch_setup_additional_pages()

/arch/powerpc/include/asm/
    iommu.h:
        53   long index, long npages,
        74   long index, long npages);
        305  unsigned long ioba, unsigned long npages);
        309  #define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \ argument
        312  (ioba), (npages)) || (tce_value))

/arch/x86/platform/efi/
    efi.c:
        550  u64 addr, npages; in efi_set_executable() local
        553  npages = md->num_pages; in efi_set_executable()
        555  memrange_efi_to_native(&addr, &npages); in efi_set_executable()
        558  set_memory_x(addr, npages); in efi_set_executable()
        560  set_memory_nx(addr, npages); in efi_set_executable()
        579  u64 npages; in efi_memory_uc() local
        581  npages = round_up(size, page_shift) / page_shift; in efi_memory_uc()
        582  memrange_efi_to_native(&addr, &npages); in efi_memory_uc()
        583  set_memory_uc(addr, npages); in efi_memory_uc()
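
Note: the efi.c hits convert ranges counted in EFI's fixed 4 KiB pages into
native-page ranges with memrange_efi_to_native() before calling
set_memory_x()/set_memory_nx()/set_memory_uc(). The sketch below shows what
such a conversion plausibly does; the demo function, the assumed 4 KiB
native page size, and the exact rounding are illustrative assumptions, not
the kernel's implementation.

    #include <stdint.h>
    #include <stdio.h>

    #define EFI_PAGE_SHIFT    12   /* EFI pages are always 4 KiB */
    #define NATIVE_PAGE_SHIFT 12   /* assumed native page size for the demo */
    #define NATIVE_PAGE_SIZE  (1ULL << NATIVE_PAGE_SHIFT)

    /*
     * Illustrative sketch: rewrite (addr, npages-in-EFI-pages) as a
     * native-page-aligned base plus a count of native pages covering
     * the same byte range.
     */
    static void memrange_efi_to_native_demo(uint64_t *addr, uint64_t *npages)
    {
        uint64_t end = *addr + (*npages << EFI_PAGE_SHIFT);  /* byte end */

        *addr &= ~(NATIVE_PAGE_SIZE - 1);                    /* round base down */
        *npages = (end - *addr + NATIVE_PAGE_SIZE - 1) >> NATIVE_PAGE_SHIFT;
    }

    int main(void)
    {
        uint64_t addr = 0x100800, npages = 2;  /* two EFI pages, unaligned base */

        memrange_efi_to_native_demo(&addr, &npages);
        printf("base=%#llx npages=%llu\n",
               (unsigned long long)addr, (unsigned long long)npages);
        return 0;   /* prints base=0x100000 npages=3 */
    }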