/drivers/mtd/tests/
D | pagetest.c |
      34  static int pgsize;    variable
      66  for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {    in verify_eraseblock()
      78  if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {    in verify_eraseblock()
      85  if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {    in verify_eraseblock()
      99  memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);    in verify_eraseblock()
     100  prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);    in verify_eraseblock()
     118  pp1 = kcalloc(pgsize, 4, GFP_KERNEL);    in crosstest()
     121  pp2 = pp1 + pgsize;    in crosstest()
     122  pp3 = pp2 + pgsize;    in crosstest()
     123  pp4 = pp3 + pgsize;    in crosstest()
     [all …]
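The crosstest() hits above carve four page-sized scratch buffers out of a
single kcalloc(pgsize, 4, GFP_KERNEL) allocation by plain pointer
arithmetic. A minimal userspace sketch of the same layout, with calloc()
standing in for kcalloc() and an illustrative pgsize:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t pgsize = 4096;	/* illustrative; the test uses mtd->writesize */
	unsigned char *pp1, *pp2, *pp3, *pp4;

	/* One zeroed allocation, carved into four consecutive page buffers. */
	pp1 = calloc(pgsize, 4);
	if (!pp1)
		return 1;
	pp2 = pp1 + pgsize;
	pp3 = pp2 + pgsize;
	pp4 = pp3 + pgsize;

	printf("pp1=%p pp2=%p pp3=%p pp4=%p\n",
	       (void *)pp1, (void *)pp2, (void *)pp3, (void *)pp4);
	free(pp1);
	return 0;
}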
D | torturetest.c |
      70  static int pgsize;    variable
      97  addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;    in check_eraseblock()
      98  len = pgcnt * pgsize;    in check_eraseblock()
     151  addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;    in write_pattern()
     152  len = pgcnt * pgsize;    in write_pattern()
     203  pgsize = 512;    in tort_init()
     205  pgsize = mtd->writesize;    in tort_init()
     207  if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {    in tort_init()
     237  for (i = 0; i < mtd->erasesize / pgsize; i++) {    in tort_init()
     239  memset(patt_5A5 + i * pgsize, 0x55, pgsize);    in tort_init()
     [all …]
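check_eraseblock() and write_pattern() both address the last pgcnt pages of
eraseblock ebnum by backing off from the end of the block. A standalone
sketch of that arithmetic with made-up flash geometry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t erasesize = 128 * 1024;	/* illustrative mtd->erasesize */
	uint64_t pgsize = 2048;			/* illustrative mtd->writesize */
	uint64_t pgcnt = 4;			/* pages tortured per block */
	uint64_t ebnum = 7;			/* eraseblock under test */

	/* Start of the last pgcnt pages: end of block minus pgcnt pages. */
	uint64_t addr = (ebnum + 1) * erasesize - pgcnt * pgsize;
	uint64_t len = pgcnt * pgsize;

	printf("addr=0x%llx len=0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)len);
	return 0;
}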
D | speedtest.c |
      37  static int pgsize;    variable
      77  err = mtdtest_write(mtd, addr, pgsize, buf);    in write_eraseblock_by_page()
      80  addr += pgsize;    in write_eraseblock_by_page()
      81  buf += pgsize;    in write_eraseblock_by_page()
      89  size_t sz = pgsize * 2;    in write_eraseblock_by_2pages()
     102  err = mtdtest_write(mtd, addr, pgsize, buf);    in write_eraseblock_by_2pages()
     121  err = mtdtest_read(mtd, addr, pgsize, buf);    in read_eraseblock_by_page()
     124  addr += pgsize;    in read_eraseblock_by_page()
     125  buf += pgsize;    in read_eraseblock_by_page()
     133  size_t sz = pgsize * 2;    in read_eraseblock_by_2pages()
     [all …]
D | readtest.c |
      31  static int pgsize;    variable
      43  memset(buf, 0 , pgsize);    in read_eraseblock_by_page()
      44  ret = mtdtest_read(mtd, addr, pgsize, buf);    in read_eraseblock_by_page()
      72  addr += pgsize;    in read_eraseblock_by_page()
      73  buf += pgsize;    in read_eraseblock_by_page()
     138  pgsize = 512;    in mtd_readtest_init()
     140  pgsize = mtd->writesize;    in mtd_readtest_init()
     145  pgcnt = mtd->erasesize / pgsize;    in mtd_readtest_init()
     151  pgsize, ebcnt, pgcnt, mtd->oobsize);    in mtd_readtest_init()
D | stresstest.c |
      38  static int pgsize;    variable
     104  len = ((len + pgsize - 1) / pgsize) * pgsize;    in do_write()
     163  pgsize = 512;    in mtd_stresstest_init()
     165  pgsize = mtd->writesize;    in mtd_stresstest_init()
     170  pgcnt = mtd->erasesize / pgsize;    in mtd_stresstest_init()
     176  pgsize, ebcnt, pgcnt, mtd->oobsize);    in mtd_stresstest_init()
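All five tests share the same init idiom, visible in the hits above: pgsize
is mtd->writesize on NAND and a nominal 512 bytes otherwise, and pgcnt is
the number of pages per eraseblock; stresstest's do_write() additionally
rounds a random length up to a whole number of pages. A self-contained
sketch of both calculations (the struct and its values are illustrative
stand-ins for struct mtd_info):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mtd_geom {			/* stand-in for struct mtd_info */
	uint32_t writesize;
	uint32_t erasesize;
	bool is_nand;			/* stand-in for mtd_type_is_nand() */
};

int main(void)
{
	struct mtd_geom mtd = {
		.writesize = 2048, .erasesize = 131072, .is_nand = true,
	};
	uint32_t pgsize, pgcnt, len;

	/* NOR reports writesize 1, so use a nominal 512-byte "page". */
	if (!mtd.is_nand)
		pgsize = 512;
	else
		pgsize = mtd.writesize;

	pgcnt = mtd.erasesize / pgsize;

	/* stresstest idiom: round an arbitrary length up to whole pages. */
	len = 3000;
	len = ((len + pgsize - 1) / pgsize) * pgsize;

	printf("pgsize=%u pgcnt=%u rounded len=%u\n", pgsize, pgcnt, len);
	return 0;
}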
/drivers/gpu/drm/etnaviv/
D | etnaviv_mmu.c |
      20  size_t pgsize = SZ_4K;    in etnaviv_context_unmap() local
      22  if (!IS_ALIGNED(iova | size, pgsize)) {    in etnaviv_context_unmap()
      24  iova, size, pgsize);    in etnaviv_context_unmap()
      30  pgsize);    in etnaviv_context_unmap()
      44  size_t pgsize = SZ_4K;    in etnaviv_context_map() local
      48  if (!IS_ALIGNED(iova | paddr | size, pgsize)) {    in etnaviv_context_map()
      50  iova, &paddr, size, pgsize);    in etnaviv_context_map()
      55  ret = context->global->ops->map(context, iova, paddr, pgsize,    in etnaviv_context_map()
      60  iova += pgsize;    in etnaviv_context_map()
      61  paddr += pgsize;    in etnaviv_context_map()
     [all …]
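etnaviv works at a fixed SZ_4K granule: it rejects requests whose iova,
paddr, or size is not 4 KiB aligned (OR-ing the operands lets one
IS_ALIGNED() test cover them all), then walks the range one page at a
time. A simplified userspace sketch of the map loop; map_one_page() is a
hypothetical stand-in for context->global->ops->map():

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000u

static int map_one_page(uint64_t iova, uint64_t paddr, size_t pgsize)
{
	printf("map iova=0x%llx -> paddr=0x%llx (%zu bytes)\n",
	       (unsigned long long)iova, (unsigned long long)paddr, pgsize);
	return 0;
}

static int context_map(uint64_t iova, uint64_t paddr, size_t size)
{
	size_t pgsize = SZ_4K;

	/* One alignment test covers iova, paddr, and size together. */
	if ((iova | paddr | size) & (pgsize - 1))
		return -1;	/* the driver returns -EINVAL here */

	while (size) {
		if (map_one_page(iova, paddr, pgsize))
			return -1;	/* real code also unwinds its progress */
		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}
	return 0;
}

int main(void)
{
	return context_map(0x100000, 0x80000000, 4 * SZ_4K);
}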
/drivers/gpu/drm/panfrost/
D | panfrost_mmu.c |
     257  size_t pgsize = get_pgsize(iova | paddr, len);    in mmu_map_sg() local
     259  ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);    in mmu_map_sg()
     260  iova += pgsize;    in mmu_map_sg()
     261  paddr += pgsize;    in mmu_map_sg()
     262  len -= pgsize;    in mmu_map_sg()
     314  size_t pgsize = get_pgsize(iova, len - unmapped_len);    in panfrost_mmu_unmap() local
     317  unmapped_page = ops->unmap(ops, iova, pgsize, NULL);    in panfrost_mmu_unmap()
     318  WARN_ON(unmapped_page != pgsize);    in panfrost_mmu_unmap()
     320  iova += pgsize;    in panfrost_mmu_unmap()
     321  unmapped_len += pgsize;    in panfrost_mmu_unmap()
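panfrost instead picks the granule per iteration with
get_pgsize(iova | paddr, len). The helper's body is not part of this
listing; the sketch below assumes it returns SZ_2M only when the combined
address is 2 MiB aligned and at least 2 MiB remains, falling back to
SZ_4K:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000u
#define SZ_2M 0x200000u

/* Assumed selection rule: biggest granule that alignment and length allow. */
static size_t get_pgsize(uint64_t addr, size_t len)
{
	if ((addr & (SZ_2M - 1)) || len < SZ_2M)
		return SZ_4K;
	return SZ_2M;
}

int main(void)
{
	uint64_t iova = 0x400000, paddr = 0x80000000;
	size_t len = SZ_2M + 3 * SZ_4K;

	/* Walk the range, re-evaluating the granule at every step. */
	while (len) {
		size_t pgsize = get_pgsize(iova | paddr, len);

		printf("map 0x%llx (%zu bytes)\n",
		       (unsigned long long)iova, pgsize);
		iova += pgsize;
		paddr += pgsize;
		len -= pgsize;
	}
	return 0;
}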
/drivers/vfio/
D | vfio_iommu_type1.c |
     209  static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)    in vfio_dma_bitmap_alloc() argument
     211  uint64_t npages = dma->size / pgsize;    in vfio_dma_bitmap_alloc()
     235  static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)    in vfio_dma_populate_bitmap() argument
     238  unsigned long pgshift = __ffs(pgsize);    in vfio_dma_populate_bitmap()
     259  static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)    in vfio_dma_bitmap_alloc_all() argument
     267  ret = vfio_dma_bitmap_alloc(dma, pgsize);    in vfio_dma_bitmap_alloc_all()
     279  vfio_dma_populate_bitmap(dma, pgsize);    in vfio_dma_bitmap_alloc_all()
    1066  size_t pgsize)    in update_user_bitmap() argument
    1068  unsigned long pgshift = __ffs(pgsize);    in update_user_bitmap()
    1102  dma_addr_t iova, size_t size, size_t pgsize)    in vfio_iova_dirty_bitmap() argument
     [all …]
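The type1 dirty-tracking code sizes its per-mapping bitmap at one bit per
pgsize unit (dma->size / pgsize bits) and converts pgsize to a shift with
__ffs(), which works because pgsize is a power of two. A standalone sketch
of the sizing; POSIX ffs() stands in for the kernel's __ffs():

#include <stdint.h>
#include <stdio.h>
#include <strings.h>

int main(void)
{
	uint64_t dma_size = 16ull * 1024 * 1024;	/* mapping length */
	uint64_t pgsize = 4096;		/* one bit tracks this much */

	uint64_t npages = dma_size / pgsize;	/* bits needed */
	int pgshift = ffs((int)pgsize) - 1;	/* ffs() is 1-based, __ffs() 0-based */

	/* Round up to whole 64-bit words, as a bitmap allocation would. */
	uint64_t words = (npages + 63) / 64;

	printf("npages=%llu pgshift=%d bitmap=%llu bytes\n",
	       (unsigned long long)npages, pgshift,
	       (unsigned long long)(words * 8));
	return 0;
}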
/drivers/iommu/
D | io-pgtable-arm.c |
     480  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in arm_lpae_map_pages() argument
     494  if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))    in arm_lpae_map_pages()
     503  ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,    in arm_lpae_map_pages()
     688  size_t pgsize, size_t pgcount,    in arm_lpae_unmap_pages() argument
     696  if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))    in arm_lpae_unmap_pages()
     704  return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,    in arm_lpae_unmap_pages()
    1293  static const unsigned long pgsize[] __initconst = {    in arm_lpae_do_selftests() local
    1310  for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {    in arm_lpae_do_selftests()
    1312  cfg.pgsize_bitmap = pgsize[i];    in arm_lpae_do_selftests()
    1315  pgsize[i], ias[j]);    in arm_lpae_do_selftests()
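Both LPAE entry points first validate the caller's pgsize against the
format's pgsize_bitmap: pgsize must be non-zero and every bit of it must
appear in the bitmap, which for a single power-of-two page size amounts to
a membership test. A sketch of the check with an illustrative 4K|2M|1G
bitmap:

#include <stdbool.h>
#include <stdio.h>

#define PGSIZE_BITMAP (0x1000ul | 0x200000ul | 0x40000000ul)

static bool pgsize_supported(unsigned long pgsize, unsigned long bitmap)
{
	/* Rejects zero and any pgsize with a bit outside the bitmap. */
	return pgsize && (pgsize & bitmap) == pgsize;
}

int main(void)
{
	printf("%d %d %d\n",
	       pgsize_supported(0x1000, PGSIZE_BITMAP),	/* 1: 4K supported */
	       pgsize_supported(0x2000, PGSIZE_BITMAP),	/* 0: 8K unsupported */
	       pgsize_supported(0, PGSIZE_BITMAP));	/* 0: rejected */
	return 0;
}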
D | io-pgtable-arm-v7s.c |
     527  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in arm_v7s_map_pages() argument
     542  ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,    in arm_v7s_map_pages()
     547  iova += pgsize;    in arm_v7s_map_pages()
     548  paddr += pgsize;    in arm_v7s_map_pages()
     550  *mapped += pgsize;    in arm_v7s_map_pages()
     735  size_t pgsize, size_t pgcount,    in arm_v7s_unmap_pages() argument
     745  ret = __arm_v7s_unmap(data, gather, iova, pgsize, 1, data->pgd);    in arm_v7s_unmap_pages()
     749  unmapped += pgsize;    in arm_v7s_unmap_pages()
     750  iova += pgsize;    in arm_v7s_unmap_pages()
D | iommu.c |
    2365  size_t offset, pgsize, pgsize_next;    in iommu_pgsize() local
    2380  pgsize = BIT(pgsize_idx);    in iommu_pgsize()
    2382  return pgsize;    in iommu_pgsize()
    2412  return pgsize;    in iommu_pgsize()
    2420  size_t pgsize, count;    in __iommu_map_pages() local
    2423  pgsize = iommu_pgsize(domain, iova, paddr, size, &count);    in __iommu_map_pages()
    2426  iova, &paddr, pgsize, count);    in __iommu_map_pages()
    2429  ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,    in __iommu_map_pages()
    2432  ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);    in __iommu_map_pages()
    2433  *mapped = ret ? 0 : pgsize;    in __iommu_map_pages()
     [all …]
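iommu_pgsize() picks the largest page size the domain advertises that fits
both the remaining size and the common alignment of iova and paddr. A
simplified sketch of that selection, leaving out the pgsize_next/count
folding the real helper also performs; top_bit() stands in for the
kernel's __fls():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit (stand-in for __fls()). */
static unsigned int top_bit(uint64_t x)
{
	unsigned int i = 0;

	while (x >>= 1)
		i++;
	return i;
}

static uint64_t pick_pgsize(uint64_t iova, uint64_t paddr, uint64_t size,
			    uint64_t pgsize_bitmap)
{
	uint64_t addr_merge = iova | paddr;
	uint64_t pgsizes;

	/* Candidates no larger than the remaining size... */
	pgsizes = pgsize_bitmap & ((2ull << top_bit(size)) - 1);

	/* ...and no larger than the common alignment of both addresses. */
	if (addr_merge)
		pgsizes &= 2 * (addr_merge & -addr_merge) - 1;

	assert(pgsizes);	/* the domain must offer some fit */
	return 1ull << top_bit(pgsizes);	/* largest survivor */
}

int main(void)
{
	uint64_t bitmap = 0x1000 | 0x200000;	/* 4K and 2M supported */

	/* 2 MiB alignment picks 2M; a 4 KiB-misaligned iova drops to 4K. */
	printf("0x%llx 0x%llx\n",
	       (unsigned long long)pick_pgsize(0x400000, 0x80000000,
					       0x400000, bitmap),
	       (unsigned long long)pick_pgsize(0x401000, 0x80000000,
					       0x400000, bitmap));
	return 0;
}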
D | mtk_iommu.c |
     554  mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,    in mtk_iommu_iotlb_sync()
/drivers/infiniband/hw/bnxt_re/
D | qplib_res.c |
      97  rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {    in bnxt_qplib_fill_user_dma_pages()
     117  pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);    in __alloc_pbl()
     132  pbl->pg_size = sginfo->pgsize;    in __alloc_pbl()
     196  pg_size = hwq_attr->sginfo->pgsize;    in bnxt_qplib_alloc_init_hwq()
     219  hwq_attr->sginfo->pgsize);    in bnxt_qplib_alloc_init_hwq()
     243  sginfo.pgsize = npde * pg_size;    in bnxt_qplib_alloc_init_hwq()
     249  sginfo.pgsize = PAGE_SIZE;    in bnxt_qplib_alloc_init_hwq()
     308  sginfo.pgsize = PAGE_SIZE;    in bnxt_qplib_alloc_init_hwq()
     392  sginfo.pgsize = PAGE_SIZE;    in bnxt_qplib_alloc_tqm_rings()
     509  sginfo.pgsize = PAGE_SIZE;    in bnxt_qplib_alloc_ctx()
D | qplib_res.h |
     133  u32 pgsize;    member
D | qplib_rcfw.c |
     585  sginfo.pgsize = PAGE_SIZE;    in bnxt_qplib_alloc_rcfw_channel()
     604  sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);    in bnxt_qplib_alloc_rcfw_channel()
D | qplib_sp.c |
     689  hwq_attr.sginfo->pgsize = buf_pg_size;    in bnxt_qplib_reg_mr()
     754  sginfo.pgsize = PAGE_SIZE;    in bnxt_qplib_alloc_fast_reg_page_list()
D | ib_verbs.c |
     942  qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;    in bnxt_re_init_user_qp()
     955  qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;    in bnxt_re_init_user_qp()
    1047  qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;    in bnxt_re_create_shadow_qp()
    1058  qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;    in bnxt_re_create_shadow_qp()
    1120  rq->sg_info.pgsize = PAGE_SIZE;    in bnxt_re_init_rq_attr()
    1185  qplqp->sq.sg_info.pgsize = PAGE_SIZE;    in bnxt_re_init_sq_attr()
    1609  qplib_srq->sg_info.pgsize = PAGE_SIZE;    in bnxt_re_init_user_srq()
    2842  cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;    in bnxt_re_create_cq()
D | qplib_fp.c |
     575  sginfo.pgsize = PAGE_SIZE;    in bnxt_qplib_alloc_nq()
    1075  sginfo.pgsize = req_size;    in bnxt_qplib_create_qp()
    1097  sginfo.pgsize = req_size;    in bnxt_qplib_create_qp()
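Throughout bnxt_re, sginfo.pgsize feeds ib_umem_num_dma_blocks() to size
page lists and is otherwise pinned to PAGE_SIZE. The sketch below assumes
the usual aligned-cover arithmetic for the block count: every
pgsize-aligned block touching [iova, iova + length) is counted, partial
first and last blocks included:

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of ib_umem_num_dma_blocks(). */
static uint64_t num_dma_blocks(uint64_t iova, uint64_t length,
			       uint64_t pgsize)
{
	uint64_t start = iova & ~(pgsize - 1);		/* round down */
	uint64_t end = (iova + length + pgsize - 1) & ~(pgsize - 1);

	return (end - start) / pgsize;
}

int main(void)
{
	/* 10 KiB of user memory starting 1 KiB into a 4 KiB page: 3 blocks. */
	printf("%llu\n",
	       (unsigned long long)num_dma_blocks(0x1400, 10240, 4096));
	return 0;
}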
/drivers/s390/char/
D | sclp_diag.h |
      51  u8 pgsize;    member
D | sclp_ftp.c |
     108  sccb->evbuf.mdd.ftp.pgsize = 0;    in sclp_ftp_et7()
/drivers/gpu/drm/arm/
D | malidp_planes.c |
     333  u32 pgsize)    in malidp_check_pages_threshold() argument
     357  if (sgl->length < pgsize) {    in malidp_check_pages_threshold()
     469  u8 readahead, u8 n_planes, u32 pgsize)    in malidp_calc_mmu_control_value() argument
     481  if (pgsize == SZ_64K || pgsize == SZ_2M) {    in malidp_calc_mmu_control_value()
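malidp_check_pages_threshold() walks the buffer's scatterlist and vetoes
the larger MMU page size as soon as one segment is shorter than pgsize;
malidp_calc_mmu_control_value() then sets the big-page bits only for
SZ_64K or SZ_2M. A simplified sketch of the segment check, with struct
sg_entry as a hypothetical stand-in for struct scatterlist:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sg_entry {
	size_t length;
	struct sg_entry *next;
};

/* Big pages are only safe if every segment is at least pgsize long. */
static bool pages_meet_threshold(const struct sg_entry *sgl, size_t pgsize)
{
	for (; sgl; sgl = sgl->next)
		if (sgl->length < pgsize)
			return false;
	return true;
}

int main(void)
{
	struct sg_entry c = { .length = 0x10000, .next = NULL };
	struct sg_entry b = { .length = 0x4000,  .next = &c };
	struct sg_entry a = { .length = 0x10000, .next = &b };

	/* Fails the 64 KiB threshold: segment b is only 16 KiB. */
	printf("%d\n", pages_meet_threshold(&a, 0x10000));
	return 0;
}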
/drivers/iommu/arm/arm-smmu/
D | arm-smmu.c |
    1203  phys_addr_t paddr, size_t pgsize, size_t pgcount,    in arm_smmu_map_pages() argument
    1214  ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);    in arm_smmu_map_pages()
    1221  size_t pgsize, size_t pgcount,    in arm_smmu_unmap_pages() argument
    1232  ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);    in arm_smmu_unmap_pages()
/drivers/iommu/arm/arm-smmu-v3/
D | arm-smmu-v3.c |
    2274  gather->pgsize, true, smmu_domain);    in arm_smmu_iotlb_sync()