/kernel/linux/linux-5.10/drivers/mtd/tests/

pagetest.c
     34  static int pgsize;   variable
     66  for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {   in verify_eraseblock()
     78  if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {   in verify_eraseblock()
     85  if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {   in verify_eraseblock()
     99  memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);   in verify_eraseblock()
    100  prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);   in verify_eraseblock()
    118  pp1 = kcalloc(pgsize, 4, GFP_KERNEL);   in crosstest()
    121  pp2 = pp1 + pgsize;   in crosstest()
    122  pp3 = pp2 + pgsize;   in crosstest()
    123  pp4 = pp3 + pgsize;   in crosstest()
    [all …]

torturetest.c
     70  static int pgsize;   variable
     97  addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;   in check_eraseblock()
     98  len = pgcnt * pgsize;   in check_eraseblock()
    151  addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;   in write_pattern()
    152  len = pgcnt * pgsize;   in write_pattern()
    203  pgsize = 512;   in tort_init()
    205  pgsize = mtd->writesize;   in tort_init()
    207  if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {   in tort_init()
    237  for (i = 0; i < mtd->erasesize / pgsize; i++) {   in tort_init()
    239  memset(patt_5A5 + i * pgsize, 0x55, pgsize);   in tort_init()
    [all …]

speedtest.c
     37  static int pgsize;   variable
     77  err = mtdtest_write(mtd, addr, pgsize, buf);   in write_eraseblock_by_page()
     80  addr += pgsize;   in write_eraseblock_by_page()
     81  buf += pgsize;   in write_eraseblock_by_page()
     89  size_t sz = pgsize * 2;   in write_eraseblock_by_2pages()
    102  err = mtdtest_write(mtd, addr, pgsize, buf);   in write_eraseblock_by_2pages()
    121  err = mtdtest_read(mtd, addr, pgsize, buf);   in read_eraseblock_by_page()
    124  addr += pgsize;   in read_eraseblock_by_page()
    125  buf += pgsize;   in read_eraseblock_by_page()
    133  size_t sz = pgsize * 2;   in read_eraseblock_by_2pages()
    [all …]

readtest.c
     31  static int pgsize;   variable
     43  memset(buf, 0 , pgsize);   in read_eraseblock_by_page()
     44  ret = mtdtest_read(mtd, addr, pgsize, buf);   in read_eraseblock_by_page()
     72  addr += pgsize;   in read_eraseblock_by_page()
     73  buf += pgsize;   in read_eraseblock_by_page()
    138  pgsize = 512;   in mtd_readtest_init()
    140  pgsize = mtd->writesize;   in mtd_readtest_init()
    145  pgcnt = mtd->erasesize / pgsize;   in mtd_readtest_init()
    151  pgsize, ebcnt, pgcnt, mtd->oobsize);   in mtd_readtest_init()

stresstest.c
     38  static int pgsize;   variable
    104  len = ((len + pgsize - 1) / pgsize) * pgsize;   in do_write()
    163  pgsize = 512;   in mtd_stresstest_init()
    165  pgsize = mtd->writesize;   in mtd_stresstest_init()
    170  pgcnt = mtd->erasesize / pgsize;   in mtd_stresstest_init()
    176  pgsize, ebcnt, pgcnt, mtd->oobsize);   in mtd_stresstest_init()
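The mtd tests above take pgsize from mtd->writesize (512 apparently serves as a fallback when the device reports no usable page size), compute pgcnt = erasesize / pgsize, and then step through each eraseblock one page at a time. A minimal user-space sketch of that walk, with made-up flash geometry and a stub standing in for mtdtest_write():

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int flash_write(uint64_t addr, size_t len, const uint8_t *buf)
{
	/* stand-in for mtdtest_write(); just report what would be programmed */
	printf("write %zu bytes at 0x%llx\n", len, (unsigned long long)addr);
	return 0;
}

int main(void)
{
	const size_t erasesize = 128 * 1024;   /* assumed NAND block size */
	const size_t pgsize    = 2048;         /* assumed mtd->writesize  */
	const size_t pgcnt     = erasesize / pgsize;
	static uint8_t writebuf[128 * 1024];
	uint64_t addr = 0;                     /* start of eraseblock 0   */

	memset(writebuf, 0x55, sizeof(writebuf));

	/* one program operation per page, as in write_eraseblock_by_page() */
	for (size_t i = 0; i < pgcnt; i++) {
		if (flash_write(addr, pgsize, writebuf + i * pgsize))
			return 1;
		addr += pgsize;
	}
	return 0;
}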
/kernel/linux/linux-5.10/arch/arm64/mm/

hugetlbpage.c
    104  pte_t *ptep, size_t *pgsize)   in find_num_contig()   argument
    111  *pgsize = PAGE_SIZE;   in find_num_contig()
    116  *pgsize = PMD_SIZE;   in find_num_contig()
    122  static inline int num_contig_ptes(unsigned long size, size_t *pgsize)   in num_contig_ptes()   argument
    126  *pgsize = size;   in num_contig_ptes()
    136  *pgsize = PMD_SIZE;   in num_contig_ptes()
    140  *pgsize = PAGE_SIZE;   in num_contig_ptes()
    159  unsigned long pgsize,   in get_clear_flush()   argument
    166  for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {   in get_clear_flush()
    200  unsigned long pgsize,   in clear_flush()   argument
    [all …]
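num_contig_ptes() maps a hugepage size onto a count of contiguous page-table entries plus the per-entry pgsize, which loops such as the one in get_clear_flush() then use as their stride. A sketch of that mapping; the constants assume a 4K translation granule and are illustrative, not taken from the kernel headers:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE     (4UL << 10)
#define CONT_PTE_SIZE (16 * PAGE_SIZE)     /* 64K: 16 contiguous PTEs */
#define PMD_SIZE      (2UL << 20)
#define CONT_PMD_SIZE (16 * PMD_SIZE)      /* 32M: 16 contiguous PMDs */
#define PUD_SIZE      (1UL << 30)

static int num_contig_entries(unsigned long size, size_t *pgsize)
{
	switch (size) {
	case PUD_SIZE:
	case PMD_SIZE:
		*pgsize = size;          /* one block entry covers it */
		return 1;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;      /* run of contiguous PMDs    */
		return CONT_PMD_SIZE / PMD_SIZE;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;     /* run of contiguous PTEs    */
		return CONT_PTE_SIZE / PAGE_SIZE;
	default:
		*pgsize = PAGE_SIZE;
		return 0;                /* not a supported hugepage  */
	}
}

int main(void)
{
	size_t pgsize;
	int n = num_contig_entries(CONT_PTE_SIZE, &pgsize);

	printf("64K hugepage -> %d entries of %zu bytes\n", n, pgsize);
	return 0;
}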
/kernel/linux/linux-5.10/tools/testing/selftests/powerpc/tm/

tm-vmxcopy.c
     39  unsigned long pgsize = getpagesize();   in test_vmxcopy()   local
     42  int size = pgsize*16;   in test_vmxcopy()
     44  char buf[pgsize];   in test_vmxcopy()
     54  memset(buf, 0, pgsize);   in test_vmxcopy()
     55  for (i = 0; i < size; i += pgsize)   in test_vmxcopy()
     56  assert(write(fd, buf, pgsize) == pgsize);   in test_vmxcopy()
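The tm-vmxcopy lines show the selftest filling a file with sixteen pages of zeroes, one getpagesize()-sized write at a time. A stand-alone rendition of that fill loop (the file name and the use of malloc instead of a variable-length array are choices made for this sketch):

#include <assert.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned long pgsize = getpagesize();
	long size = pgsize * 16;
	char *buf = malloc(pgsize);
	int fd = open("/tmp/pgsize-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);

	assert(buf && fd >= 0);
	memset(buf, 0, pgsize);
	for (long i = 0; i < size; i += pgsize)
		assert(write(fd, buf, pgsize) == (ssize_t)pgsize);

	close(fd);
	free(buf);
	return 0;
}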
/kernel/linux/linux-5.10/tools/testing/selftests/powerpc/mm/

pkey_exec_prot.c
     27  static unsigned long pgsize, numinsns;   variable
     62  if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE)) {   in segv_handler()
     84  if (mprotect(insns, pgsize, PROT_EXEC)) {   in segv_handler()
    129  pgsize = getpagesize();   in test()
    130  numinsns = pgsize / sizeof(unsigned int);   in test()
    131  insns = (unsigned int *) mmap(NULL, pgsize, PROT_READ | PROT_WRITE,   in test()
    179  FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);   in test()
    196  FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);   in test()
    216  FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);   in test()
    233  FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);   in test()
    [all …]

pkey_siginfo.c
     38  size_t pgsize;   in segv_handler()   local
     67  pgsize = getpagesize();   in segv_handler()
     68  pgstart = (void *) ((unsigned long) fault_addr & ~(pgsize - 1));   in segv_handler()
     83  mprotect(pgstart, pgsize, PROT_EXEC))   in segv_handler()
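The pkey_siginfo.c handler rounds the faulting address down to its page base before changing protections on exactly one page. The same arithmetic in a self-contained form, using a made-up example address rather than a real fault:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	size_t pgsize = getpagesize();
	unsigned long fault_addr = 0x7f12345678abUL;   /* assumed example address */
	unsigned long pgstart = fault_addr & ~(pgsize - 1);

	/* the real handler would now call mprotect((void *)pgstart, pgsize, PROT_EXEC) */
	printf("fault at 0x%lx lies in the page starting at 0x%lx (pgsize %zu)\n",
	       fault_addr, pgstart, pgsize);
	return 0;
}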
/kernel/linux/linux-5.10/drivers/gpu/drm/etnaviv/

etnaviv_mmu.c
     20  size_t pgsize = SZ_4K;   in etnaviv_context_unmap()   local
     22  if (!IS_ALIGNED(iova | size, pgsize)) {   in etnaviv_context_unmap()
     24  iova, size, pgsize);   in etnaviv_context_unmap()
     30  pgsize);   in etnaviv_context_unmap()
     44  size_t pgsize = SZ_4K;   in etnaviv_context_map()   local
     48  if (!IS_ALIGNED(iova | paddr | size, pgsize)) {   in etnaviv_context_map()
     50  iova, &paddr, size, pgsize);   in etnaviv_context_map()
     55  ret = context->global->ops->map(context, iova, paddr, pgsize,   in etnaviv_context_map()
     60  iova += pgsize;   in etnaviv_context_map()
     61  paddr += pgsize;   in etnaviv_context_map()
    [all …]
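etnaviv_context_map() rejects anything not aligned to its fixed 4K granule and then maps one page per iteration, advancing both the IO virtual and physical addresses. A sketch of that loop shape with a stub callback in place of the page-table ops; error unwinding is reduced to a comment:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000UL
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static int map_one(uint64_t iova, uint64_t paddr, size_t len)
{
	printf("map iova 0x%llx -> pa 0x%llx (%zu bytes)\n",
	       (unsigned long long)iova, (unsigned long long)paddr, len);
	return 0;
}

static int context_map(uint64_t iova, uint64_t paddr, size_t size)
{
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | paddr | size, pgsize))
		return -1;               /* everything must be page aligned */

	while (size) {
		if (map_one(iova, paddr, pgsize))
			return -1;       /* real code would unmap what was mapped */
		iova  += pgsize;
		paddr += pgsize;
		size  -= pgsize;
	}
	return 0;
}

int main(void)
{
	return context_map(0x100000, 0x80000000ULL, 4 * SZ_4K);
}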
/kernel/linux/linux-5.10/drivers/vfio/

vfio_iommu_type1.c
    208  static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)   in vfio_dma_bitmap_alloc()   argument
    210  uint64_t npages = dma->size / pgsize;   in vfio_dma_bitmap_alloc()
    234  static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)   in vfio_dma_populate_bitmap()   argument
    237  unsigned long pgshift = __ffs(pgsize);   in vfio_dma_populate_bitmap()
    258  static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)   in vfio_dma_bitmap_alloc_all()   argument
    266  ret = vfio_dma_bitmap_alloc(dma, pgsize);   in vfio_dma_bitmap_alloc_all()
    278  vfio_dma_populate_bitmap(dma, pgsize);   in vfio_dma_bitmap_alloc_all()
   1064  size_t pgsize)   in update_user_bitmap()   argument
   1066  unsigned long pgshift = __ffs(pgsize);   in update_user_bitmap()
   1100  dma_addr_t iova, size_t size, size_t pgsize)   in vfio_iova_dirty_bitmap()   argument
    [all …]
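The dirty-bitmap helpers above track one bit per pgsize chunk of a mapping (npages = dma->size / pgsize). A back-of-the-envelope sizing sketch; rounding the bit count up to whole 64-bit words is an assumption of this sketch, and the helper name is local to it:

#include <stdint.h>
#include <stdio.h>

static size_t dirty_bitmap_bytes(uint64_t mapping_size, size_t pgsize)
{
	uint64_t npages = mapping_size / pgsize;   /* bits needed            */
	uint64_t words  = (npages + 63) / 64;      /* round up to u64 words  */

	return words * sizeof(uint64_t);
}

int main(void)
{
	/* a 1 GiB mapping tracked at 4 KiB granularity needs 32 KiB of bitmap */
	printf("%zu bytes\n", dirty_bitmap_bytes(1ULL << 30, 4096));
	return 0;
}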
/kernel/linux/linux-5.10/drivers/gpu/drm/panfrost/

panfrost_mmu.c
    257  size_t pgsize = get_pgsize(iova | paddr, len);   in mmu_map_sg()   local
    259  ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);   in mmu_map_sg()
    260  iova += pgsize;   in mmu_map_sg()
    261  paddr += pgsize;   in mmu_map_sg()
    262  len -= pgsize;   in mmu_map_sg()
    314  size_t pgsize = get_pgsize(iova, len - unmapped_len);   in panfrost_mmu_unmap()   local
    317  unmapped_page = ops->unmap(ops, iova, pgsize, NULL);   in panfrost_mmu_unmap()
    318  WARN_ON(unmapped_page != pgsize);   in panfrost_mmu_unmap()
    320  iova += pgsize;   in panfrost_mmu_unmap()
    321  unmapped_len += pgsize;   in panfrost_mmu_unmap()
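get_pgsize(iova | paddr, len) picks the page size for each step of the map/unmap loops. The call sites suggest a simple policy: fall back to 4K unless the addresses and remaining length allow a larger entry. A guess at that policy, not a copy of the driver helper:

#include <stddef.h>
#include <stdio.h>

#define SZ_4K 0x1000UL
#define SZ_2M 0x200000UL

static size_t pick_pgsize(unsigned long addr, size_t len)
{
	if ((addr & (SZ_2M - 1)) || len < SZ_2M)
		return SZ_4K;        /* misaligned or short: small pages       */
	return SZ_2M;                /* aligned and long enough: large entry   */
}

int main(void)
{
	printf("%zu\n", pick_pgsize(0x10200000UL | 0x80200000UL, 8 * SZ_2M));
	printf("%zu\n", pick_pgsize(0x10201000UL, 8 * SZ_2M));
	return 0;
}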
/kernel/linux/linux-5.10/drivers/infiniband/hw/bnxt_re/

qplib_res.c
     97  rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {   in bnxt_qplib_fill_user_dma_pages()
    117  pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);   in __alloc_pbl()
    132  pbl->pg_size = sginfo->pgsize;   in __alloc_pbl()
    196  pg_size = hwq_attr->sginfo->pgsize;   in bnxt_qplib_alloc_init_hwq()
    219  hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);   in bnxt_qplib_alloc_init_hwq()
    251  sginfo.pgsize = npde * pg_size;   in bnxt_qplib_alloc_init_hwq()
    257  sginfo.pgsize = PAGE_SIZE;   in bnxt_qplib_alloc_init_hwq()
    316  sginfo.pgsize = PAGE_SIZE;   in bnxt_qplib_alloc_init_hwq()
    400  sginfo.pgsize = PAGE_SIZE;   in bnxt_qplib_alloc_tqm_rings()
    517  sginfo.pgsize = PAGE_SIZE;   in bnxt_qplib_alloc_ctx()

qplib_res.h
    133  u32 pgsize;   member
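Here sginfo.pgsize is the DMA block size used to cover a user memory region, and __alloc_pbl() asks how many such blocks are needed. A rough illustration of what that count means for a buffer that is not pgsize aligned; this is generic arithmetic, not the ib_umem_num_dma_blocks() implementation:

#include <stdint.h>
#include <stdio.h>

static uint64_t num_blocks(uint64_t start, uint64_t length, uint64_t pgsize)
{
	uint64_t first = start & ~(pgsize - 1);                         /* round start down */
	uint64_t last  = (start + length + pgsize - 1) & ~(pgsize - 1); /* round end up     */

	return (last - first) / pgsize;
}

int main(void)
{
	/* 10000 bytes starting 100 bytes into a 4K page span 3 blocks */
	printf("%llu\n", (unsigned long long)num_blocks(0x1000 + 100, 10000, 4096));
	return 0;
}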
/kernel/linux/linux-5.10/arch/powerpc/kvm/

book3s_64_mmu.c
    206  int pgsize;   in kvmppc_mmu_book3s_64_xlate()   local
    242  pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;   in kvmppc_mmu_book3s_64_xlate()
    271  pgsize = decode_pagesize(slbe, pte1);   in kvmppc_mmu_book3s_64_xlate()
    272  if (pgsize < 0)   in kvmppc_mmu_book3s_64_xlate()
    297  eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;   in kvmppc_mmu_book3s_64_xlate()
    299  gpte->page_size = pgsize;   in kvmppc_mmu_book3s_64_xlate()

e500.h
    163  unsigned int pgsize = get_tlb_size(tlbe);   in get_tlb_bytes()   local
    164  return 1ULL << 10 << pgsize;   in get_tlb_bytes()

book3s_64_vio_hv.c
    269  const unsigned long pgsize = 1ULL << tbl->it_page_shift;   in kvmppc_rm_tce_iommu_mapped_dec()   local
    276  mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);   in kvmppc_rm_tce_iommu_mapped_dec()

book3s_64_vio.c
    443  const unsigned long pgsize = 1ULL << tbl->it_page_shift;   in kvmppc_tce_iommu_mapped_dec()   local
    449  mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);   in kvmppc_tce_iommu_mapped_dec()
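Two of the size computations above reduce to plain shifts: e500's get_tlb_bytes() turns the TLB size field into bytes as 1 KiB shifted left by that field, and the TCE code turns a table's page shift into a byte count. A tiny demonstration with made-up field values:

#include <stdio.h>

int main(void)
{
	unsigned int tlb_size_field = 5;                          /* e.g. from get_tlb_size()   */
	unsigned long long bytes = 1ULL << 10 << tlb_size_field;  /* 1 KiB << 5 = 32 KiB        */

	unsigned int it_page_shift = 16;                          /* 64K IOMMU pages            */
	unsigned long pgsize = 1UL << it_page_shift;

	printf("TLB entry covers %llu bytes, IOMMU page is %lu bytes\n", bytes, pgsize);
	return 0;
}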
/kernel/linux/linux-5.10/drivers/s390/char/

sclp_diag.h
     51  u8 pgsize;   member

sclp_ftp.c
    108  sccb->evbuf.mdd.ftp.pgsize = 0;   in sclp_ftp_et7()
/kernel/linux/linux-5.10/drivers/iommu/

iommu.c
   2344  size_t pgsize;   in iommu_pgsize()   local
   2357  pgsize = (1UL << (pgsize_idx + 1)) - 1;   in iommu_pgsize()
   2360  pgsize &= domain->pgsize_bitmap;   in iommu_pgsize()
   2363  BUG_ON(!pgsize);   in iommu_pgsize()
   2366  pgsize_idx = __fls(pgsize);   in iommu_pgsize()
   2367  pgsize = 1UL << pgsize_idx;   in iommu_pgsize()
   2369  return pgsize;   in iommu_pgsize()
   2406  size_t pgsize = iommu_pgsize(domain, iova | paddr, size);   in __iommu_map()   local
   2409  iova, &paddr, pgsize);   in __iommu_map()
   2410  ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);   in __iommu_map()
    [all …]

io-pgtable-arm.c
   1216  static const unsigned long pgsize[] __initconst = {   in arm_lpae_do_selftests()   local
   1233  for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {   in arm_lpae_do_selftests()
   1235  cfg.pgsize_bitmap = pgsize[i];   in arm_lpae_do_selftests()
   1238  pgsize[i], ias[j]);   in arm_lpae_do_selftests()
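The iommu_pgsize() fragments outline the core selection logic: cap the candidate sizes by the request length and by the alignment of (iova | paddr), keep only sizes present in the domain's pgsize_bitmap, then take the largest survivor. A user-space restatement of that logic, filling the elided lines with assumptions and returning 0 instead of BUG_ON():

#include <stdio.h>
#include <stddef.h>

static size_t pick_iommu_pgsize(unsigned long pgsize_bitmap,
                                unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* largest power of two that still fits into 'size' */
	pgsize_idx = 8 * sizeof(size) - 1 - __builtin_clzl(size);

	/* alignment of the addresses may limit us further */
	if (addr_merge) {
		unsigned int align_idx = __builtin_ctzl(addr_merge);

		if (align_idx < pgsize_idx)
			pgsize_idx = align_idx;
	}

	/* mask of all acceptable sizes, then drop unsupported ones */
	pgsize = ((size_t)1 << (pgsize_idx + 1)) - 1;
	pgsize &= pgsize_bitmap;
	if (!pgsize)
		return 0;                /* caller must treat as an error */

	/* pick the biggest remaining size */
	pgsize_idx = 8 * sizeof(pgsize) - 1 - __builtin_clzl(pgsize);
	return (size_t)1 << pgsize_idx;
}

int main(void)
{
	/* supports 4K, 2M and 1G; a 6M request at 2M alignment should map 2M */
	unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	printf("%zu\n", pick_iommu_pgsize(bitmap, 0x80200000UL, 6 << 20));
	return 0;
}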
/kernel/linux/linux-5.10/drivers/gpu/drm/arm/

malidp_planes.c
    333  u32 pgsize)   in malidp_check_pages_threshold()   argument
    357  if (sgl->length < pgsize) {   in malidp_check_pages_threshold()
    469  u8 readahead, u8 n_planes, u32 pgsize)   in malidp_calc_mmu_control_value()   argument
    481  if (pgsize == SZ_64K || pgsize == SZ_2M) {   in malidp_calc_mmu_control_value()
/kernel/linux/linux-5.10/include/linux/

iommu.h
    182  size_t pgsize;   member
    546  if (gather->pgsize != size ||   in iommu_iotlb_gather_add_page()
    548  if (gather->pgsize)   in iommu_iotlb_gather_add_page()
    550  gather->pgsize = size;   in iommu_iotlb_gather_add_page()
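iommu_iotlb_gather_add_page() batches TLB invalidations: pages are accumulated while they stay contiguous and share the same pgsize, and anything else forces a sync first. A stand-alone sketch of that batching idea as the fragments suggest it, with a stub in place of the real sync and with the adjacency test filled in as an assumption:

#include <stdio.h>
#include <stddef.h>

struct gather {
	unsigned long start, end;   /* inclusive range gathered so far */
	size_t pgsize;              /* 0 means nothing gathered yet    */
};

static void sync_tlb(struct gather *g)
{
	printf("sync: flush 0x%lx-0x%lx (pgsize %zu)\n", g->start, g->end, g->pgsize);
	g->start = ~0UL;
	g->end = 0;
	g->pgsize = 0;
}

static void gather_add_page(struct gather *g, unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	/* different page size, or not adjacent/overlapping: flush what we have */
	if (g->pgsize != size || end + 1 < g->start || start > g->end + 1) {
		if (g->pgsize)
			sync_tlb(g);
		g->pgsize = size;
	}
	if (start < g->start)
		g->start = start;
	if (end > g->end)
		g->end = end;
}

int main(void)
{
	struct gather g = { .start = ~0UL, .end = 0, .pgsize = 0 };

	gather_add_page(&g, 0x1000, 0x1000);
	gather_add_page(&g, 0x2000, 0x1000);   /* merges with the previous page */
	gather_add_page(&g, 0x9000, 0x1000);   /* gap: forces a sync first      */
	sync_tlb(&g);                          /* final flush                   */
	return 0;
}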
/kernel/linux/linux-5.10/include/uapi/linux/

vfio.h
   1090  __u64 pgsize; /* page size for bitmap in bytes */   member