/drivers/infiniband/hw/mlx5/

mem.c
      58  unsigned long page_shift = ilog2(umem->page_size);  in mlx5_ib_cont_pages() (local)
      60  addr = addr >> page_shift;  in mlx5_ib_cont_pages()
      67  len = sg_dma_len(sg) >> page_shift;  in mlx5_ib_cont_pages()
      68  pfn = sg_dma_address(sg) >> page_shift;  in mlx5_ib_cont_pages()
     107  *shift = page_shift + m;  in mlx5_ib_cont_pages()
     112  int page_shift, __be64 *pas, int umr)  in mlx5_ib_populate_pas() (argument)
     115  int shift = page_shift - umem_page_shift;  in mlx5_ib_populate_pas()
     145  int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)  in mlx5_ib_get_buf_offset() (argument)
     153  page_size = (u64)1 << page_shift;  in mlx5_ib_get_buf_offset()

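The hits above are the core idiom of this file: take page_shift once from a power-of-two page size with ilog2(), then reduce DMA addresses and lengths to page-frame numbers and page counts with plain shifts. A minimal userspace sketch of that arithmetic; the ilog2() stand-in and every value here are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's ilog2() on a power-of-two size. */
    static unsigned int ilog2_u64(uint64_t v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            uint64_t page_size = 4096;              /* e.g. umem->page_size */
            unsigned int page_shift = ilog2_u64(page_size);

            uint64_t dma_addr = 0x12345000;         /* e.g. sg_dma_address(sg) */
            uint64_t dma_len  = 64 * 4096;          /* e.g. sg_dma_len(sg) */

            uint64_t pfn    = dma_addr >> page_shift;   /* first page frame */
            uint64_t npages = dma_len  >> page_shift;   /* pages in segment */

            printf("shift=%u pfn=0x%llx npages=%llu\n", page_shift,
                   (unsigned long long)pfn, (unsigned long long)npages);
            return 0;
    }
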
srq.c
      85  int page_shift;  in create_srq_user() (local)
     114  &page_shift, &ncont, NULL);  in create_srq_user()
     115  err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,  in create_srq_user()
     129  mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);  in create_srq_user()
     138  (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;  in create_srq_user()
     159  int page_shift;  in create_srq_kernel() (local)
     175  page_shift = srq->buf.page_shift;  in create_srq_kernel()
     187  npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));  in create_srq_kernel()
     189  buf_size, page_shift, srq->buf.npages, npages);  in create_srq_kernel()
     207  (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;  in create_srq_kernel()

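Both create_srq_user() and create_srq_kernel() end the same way: the firmware context takes the page size as a log2 relative to the adapter's 4 KiB base page (log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT), and CPU-page counts become driver-page counts via DIV_ROUND_UP. A hedged sketch of both conversions, with the 4 KiB base and all sizes assumed for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define ADAPTER_PAGE_SHIFT 12   /* assumed 4 KiB base page, as in mlx5 */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int cpu_page_shift = 12;    /* PAGE_SHIFT */
            int page_shift = 16;        /* 64 KiB driver pages (assumed) */

            /* Firmware encoding: log2(page size / 4 KiB). */
            unsigned int log_pg_sz = page_shift - ADAPTER_PAGE_SHIFT;

            /* 37 CPU pages fit in 3 driver pages, rounding up. */
            unsigned int npages =
                    DIV_ROUND_UP(37u, 1u << (page_shift - cpu_page_shift));

            printf("log_pg_sz=%u npages=%u\n", log_pg_sz, npages);
            return 0;
    }
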
mr.c
     673  int page_shift, u64 virt_addr, u64 len,  in prep_umr_reg_wqe() (argument)
     693  wr->wr.fast_reg.page_shift = page_shift;  in prep_umr_reg_wqe()
     733  int page_shift, int order, int access_flags)  in reg_umr() (argument)
     767  mlx5_ib_populate_pas(dev, umem, page_shift,  in reg_umr()
     779  …prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_fl…  in reg_umr()
     817  int npages, int page_shift,  in reg_create() (argument)
     836  mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);  in reg_create()
     844  in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));  in reg_create()
     845  in->seg.log2_page_size = page_shift;  in reg_create()
     848  1 << page_shift));  in reg_create()
    [all …]

cq.c
     607  int page_shift;  in create_cq_user() (local)
     642  mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,  in create_cq_user()
     645  ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);  in create_cq_user()
     653  mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);  in create_cq_user()
     654  (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;  in create_cq_user()
     718  (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;  in create_cq_kernel()
     945  int *page_shift, int *cqe_size)  in resize_user() (argument)
     967  mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,  in resize_user()
    1072  int page_shift;  in mlx5_ib_resize_cq() (local)
    1094  err = resize_user(dev, cq, entries, udata, &npas, &page_shift,  in mlx5_ib_resize_cq()
    [all …]

mlx5_ib.h
     315  u8 page_shift;  (member)
     528  int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
     537  int page_shift, __be64 *pas, int umr);

/drivers/net/ethernet/mellanox/mlx4/

mr.c
     203  int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,  in mlx4_mtt_init() (argument)
     210  mtt->page_shift = MLX4_ICM_PAGE_SHIFT;  in mlx4_mtt_init()
     213  mtt->page_shift = page_shift;  in mlx4_mtt_init()
     427  int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc_reserved() (argument)
     436  return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_alloc_reserved()
     536  int npages, int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc() (argument)
     546  access, npages, page_shift, mr);  in mlx4_mr_alloc()
     599  int page_shift, struct mlx4_mpt_entry *mpt_entry)  in mlx4_mr_rereg_mem_write() (argument)
     603  err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_rereg_mem_write()
     609  mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);  in mlx4_mr_rereg_mem_write()
    [all …]

alloc.c
     181  buf->page_shift = get_order(size) + PAGE_SHIFT;  in mlx4_buf_alloc()
     189  while (t & ((1 << buf->page_shift) - 1)) {  in mlx4_buf_alloc()
     190  --buf->page_shift;  in mlx4_buf_alloc()
     201  buf->page_shift = PAGE_SHIFT;  in mlx4_buf_alloc()
     390  err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,  in mlx4_alloc_hwq_res()

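mlx4_buf_alloc() (and its mlx5 twin further down) picks the largest shift that covers the whole buffer, get_order(size) + PAGE_SHIFT, then walks the shift down until the DMA address is aligned to that page size. A standalone sketch of the same loop; get_order() is replaced by an assumed helper and the mapping address is made up:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Assumed stand-in for the kernel's get_order(): log2 of the page count. */
    static int get_order(size_t size)
    {
            size_t pages = (size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
            int order = 0;

            while ((1UL << order) < pages)
                    order++;
            return order;
    }

    int main(void)
    {
            size_t size = 6 * 4096;             /* buffer length */
            uint64_t dma_addr = 0xfedc4000;     /* assumed mapping address */
            int page_shift = get_order(size) + PAGE_SHIFT;   /* 15 here */

            /* Shrink until dma_addr is aligned to 1 << page_shift; a DMA
             * mapping is at least PAGE_SHIFT-aligned, so this terminates. */
            while (dma_addr & ((1ULL << page_shift) - 1))
                    --page_shift;

            printf("page_shift=%d (page size %llu)\n",
                   page_shift, 1ULL << page_shift);
            return 0;
    }
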
cq.c
     156  cq_context->log_page_size = mtt->page_shift - 12;  in mlx4_cq_resize()
     280  cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;  in mlx4_cq_alloc()

/drivers/net/ethernet/mellanox/mlx5/core/

alloc.c
      59  buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;  in mlx5_buf_alloc()
      67  while (t & ((1 << buf->page_shift) - 1)) {  in mlx5_buf_alloc()
      68  --buf->page_shift;  in mlx5_buf_alloc()
      77  buf->page_shift = PAGE_SHIFT;  in mlx5_buf_alloc()
     231  addr = buf->direct.map + (i << buf->page_shift);  in mlx5_fill_page_array()

/drivers/infiniband/hw/qib/

qib_keys.c
     202  if (mr->page_shift) {  in qib_lkey_ok()
     210  entries_spanned_by_off = off >> mr->page_shift;  in qib_lkey_ok()
     211  off -= (entries_spanned_by_off << mr->page_shift);  in qib_lkey_ok()
     301  if (mr->page_shift) {  in qib_rkey_ok()
     309  entries_spanned_by_off = off >> mr->page_shift;  in qib_rkey_ok()
     310  off -= (entries_spanned_by_off << mr->page_shift);  in qib_rkey_ok()
     366  ps = 1UL << wr->wr.fast_reg.page_shift;  in qib_fast_reg_mr()

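qib_lkey_ok() and qib_rkey_ok() use page_shift to split a byte offset into a whole number of map entries plus a remainder, avoiding a divide on the fast path. The same split with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int page_shift = 12;        /* mr->page_shift */
            uint64_t off = 5 * 4096 + 300;       /* byte offset into the MR */

            uint64_t entries_spanned_by_off = off >> page_shift;   /* 5 */
            off -= entries_spanned_by_off << page_shift;           /* 300 */

            printf("entry=%llu remainder=%llu\n",
                   (unsigned long long)entries_spanned_by_off,
                   (unsigned long long)off);
            return 0;
    }
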
qib_mr.c
     266  mr->mr.page_shift = ilog2(umem->page_size);  in qib_reg_user_mr()
     412  fmr->mr.page_shift = fmr_attr->page_shift;  in qib_alloc_fmr()
     458  ps = 1 << fmr->mr.page_shift;  in qib_map_phys_fmr()

/drivers/gpu/drm/nouveau/

nouveau_ttm.c
      86  size_nc = 1 << nvbo->page_shift;  in nouveau_vram_manager_new()
      96  node->page_shift = nvbo->page_shift;  in nouveau_vram_manager_new()
     172  node->page_shift = 12;  in nouveau_gart_manager_new()
     251  node->page_shift = 12;  in nv04_gart_manager_new()
     253  ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,  in nv04_gart_manager_new()

nouveau_bo.c
     174  *size = roundup(*size, (1 << nvbo->page_shift));  in nouveau_bo_fixup_align()
     175  *align = max((1 << nvbo->page_shift), *align);  in nouveau_bo_fixup_align()
     217  nvbo->page_shift = 12;  in nouveau_bo_new()
     220  nvbo->page_shift = drm->client.vm->vmm->lpg_shift;  in nouveau_bo_new()
     935  ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,  in nouveau_bo_move_prep()
     940  ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,  in nouveau_bo_move_prep()
    1134  nvbo->page_shift != vma->vm->vmm->lpg_shift)) {  in nouveau_bo_move_ntfy()
    1505  ret = nouveau_vm_get(vm, size, nvbo->page_shift,  in nouveau_bo_vma_add()
    1512  nvbo->page_shift != vma->vm->vmm->lpg_shift))  in nouveau_bo_vma_add()

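nouveau_bo_fixup_align() rounds a buffer object's size up to its page size and raises the alignment to at least one page: roundup(*size, 1 << page_shift) and max(1 << page_shift, *align). A sketch of that fixup; the 17-bit large-page shift is only an example value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned page_shift = 17;           /* e.g. a 128 KiB large page */
            uint64_t size  = 200000;            /* requested BO size */
            uint64_t align = 4096;              /* requested alignment */
            uint64_t page  = 1ULL << page_shift;

            size = (size + page - 1) & ~(page - 1);   /* roundup(size, page) */
            if (align < page)                         /* max(page, align) */
                    align = page;

            printf("size=%llu align=%llu\n",
                   (unsigned long long)size, (unsigned long long)align);
            return 0;
    }
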
nouveau_bo.h
      26  unsigned page_shift;  (member)

/drivers/mtd/nand/

nand_bbt.c
     188  from = ((loff_t)page) << this->page_shift;  in read_bbt()
     398  scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,  in read_abs_bbts()
     407  scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,  in read_abs_bbts()
     534  int blocktopage = this->bbt_erase_shift - this->page_shift;  in search_bbt()
     690  (this->bbt_erase_shift - this->page_shift);  in write_bbt()
     718  to = ((loff_t)page) << this->page_shift;  in write_bbt()
     734  ops.ooblen = (len >> this->page_shift) * mtd->oobsize;  in write_bbt()
     741  pageoffs = page - (int)(to >> this->page_shift);  in write_bbt()
     742  offs = pageoffs << this->page_shift;  in write_bbt()
     769  (len >> this->page_shift)* mtd->oobsize);  in write_bbt()
    [all …]

nand_base.c
     331  page = (int)(ofs >> chip->page_shift) & chip->pagemask;  in nand_block_bad()
     362  page = (int)(ofs >> chip->page_shift) & chip->pagemask;  in nand_block_bad()
     934  page = ofs >> chip->page_shift;  in __nand_unlock()
     938  page = (ofs + len) >> chip->page_shift;  in __nand_unlock()
    1063  page = ofs >> chip->page_shift;  in nand_lock()
    1547  realpage = (int)(from >> chip->page_shift);  in nand_do_read_ops()
    1901  ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -  in nand_do_read_oob()
    1902  (from >> chip->page_shift)) * len)) {  in nand_do_read_oob()
    1912  realpage = (int)(from >> chip->page_shift);  in nand_do_read_oob()
    2407  realpage = (int)(to >> chip->page_shift);  in nand_do_write_ops()
    [all …]

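The NAND core converts byte offsets to page numbers with ofs >> chip->page_shift, clips them with chip->pagemask, and derives pages-per-block from the gap between the erase shift and the page shift. A sketch with an assumed 2 KiB-page, 128 KiB-block geometry:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int page_shift      = 11;          /* 2 KiB pages (assumed) */
            int bbt_erase_shift = 17;          /* 128 KiB blocks (assumed) */
            uint64_t ofs = 0x243800;           /* byte offset on flash */

            int page  = (int)(ofs >> page_shift);       /* page holding ofs */
            int block = (int)(ofs >> bbt_erase_shift);  /* block holding ofs */
            int pages_per_block = 1 << (bbt_erase_shift - page_shift);

            /* And back: a page number becomes a byte offset by shifting up. */
            uint64_t back = (uint64_t)page << page_shift;

            printf("page=%d block=%d pages/block=%d aligned_ofs=0x%llx\n",
                   page, block, pages_per_block, (unsigned long long)back);
            return 0;
    }
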
/drivers/infiniband/hw/amso1100/

c2_provider.c
     339  int err, i, j, k, page_shift, pbl_depth;  in c2_reg_phys_mr() (local)
     344  page_shift = PAGE_SHIFT;  in c2_reg_phys_mr()
     350  page_shift += 3;  in c2_reg_phys_mr()
     367  (1 << page_shift)) >> page_shift;  in c2_reg_phys_mr()
     382  (1 << page_shift)) >> page_shift;  in c2_reg_phys_mr()
     385  (k << page_shift));  in c2_reg_phys_mr()
     398  __func__, page_shift, pbl_depth, total_len,  in c2_reg_phys_mr()
     403  (1 << page_shift), pbl_depth,  in c2_reg_phys_mr()

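c2_reg_phys_mr() sizes its page buffer list by counting how many hardware pages each physical buffer spans; the truncated hits at 367 and 382 are the tail of that round-up. A generic pages-spanned helper in the same spirit (not the driver's exact expression):

    #include <stdint.h>
    #include <stdio.h>

    /* Pages of size (1 << shift) touched by the byte range [addr, addr+len). */
    static uint64_t pages_spanned(uint64_t addr, uint64_t len, int shift)
    {
            uint64_t first = addr >> shift;
            uint64_t last  = (addr + len - 1) >> shift;

            return last - first + 1;
    }

    int main(void)
    {
            /* Three bytes straddling a 4 KiB boundary still cost two pages. */
            printf("%llu\n",
                   (unsigned long long)pages_spanned(0x1ffe, 3, 12));
            return 0;
    }
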
/drivers/infiniband/hw/ipath/

ipath_mr.c
      45  u8 page_shift;  (member)
     322  fmr->page_shift = fmr_attr->page_shift;  in ipath_alloc_fmr()
     365  ps = 1 << fmr->page_shift;  in ipath_map_phys_fmr()
     369  ps = 1 << fmr->page_shift;  in ipath_map_phys_fmr()

/drivers/misc/mic/host/

mic_x100.c
     525  dma_addr >> mdev->smpt->info.page_shift);  in mic_x100_smpt_set()
     542  info->page_shift = 34;  in mic_x100_smpt_hw_init()
     543  info->page_size = (1ULL << info->page_shift);  in mic_x100_smpt_hw_init()

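The MIC x100 system memory page table works with huge granules: page_shift = 34 makes every SMPT entry cover 1ULL << 34 bytes, i.e. 16 GiB, and an entry index is simply the bus address shifted down. Sketch with an invented address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned page_shift = 34;                  /* SMPT page shift */
            uint64_t page_size = 1ULL << page_shift;   /* 16 GiB per entry */
            uint64_t dma_addr = 0x4280000000ULL;       /* example bus address */

            printf("entry size=%llu GiB, index=%llu\n",
                   (unsigned long long)(page_size >> 30),
                   (unsigned long long)(dma_addr >> page_shift));
            return 0;
    }
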
mic_smpt.h
      52  u8 page_shift;  (member)

/drivers/infiniband/hw/mthca/

mthca_mr.c
     578  if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)  in mthca_fmr_alloc()
     636  mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);  in mthca_fmr_alloc()
     697  page_mask = (1 << fmr->attr.page_shift) - 1;  in mthca_check_fmr()
     745  mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));  in mthca_tavor_map_phys_fmr()
     795  fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));  in mthca_arbel_map_phys_fmr()

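mthca_check_fmr() builds page_mask = (1 << page_shift) - 1 and rejects any I/O virtual address with low bits set, and mthca_fmr_alloc() bounds the shift to [12, 32) as the hit at 578 shows. Both checks in one sketch:

    #include <stdint.h>
    #include <stdio.h>

    static int check_fmr(unsigned int page_shift, uint64_t iova)
    {
            uint64_t page_mask;

            if (page_shift < 12 || page_shift >= 32)
                    return -1;                  /* unsupported page size */

            page_mask = (1ULL << page_shift) - 1;
            if (iova & page_mask)
                    return -1;                  /* iova not page aligned */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_fmr(12, 0x7f001000));  /* 0: aligned */
            printf("%d\n", check_fmr(12, 0x7f001080));  /* -1: low bits set */
            return 0;
    }
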
/drivers/infiniband/hw/ehca/

ehca_mrmw.c
     323  int ret, page_shift;  in ehca_reg_user_mr() (local)
     378  page_shift = PAGE_SHIFT;  in ehca_reg_user_mr()
     381  page_shift = (fls64(length - 1) + 3) & ~3;  in ehca_reg_user_mr()
     382  page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),  in ehca_reg_user_mr()
     385  hwpage_size = 1UL << page_shift;  in ehca_reg_user_mr()
     803  fmr_attr->page_shift);  in ehca_alloc_fmr()
     808  hw_pgsize = 1 << fmr_attr->page_shift;  in ehca_alloc_fmr()
     811  fmr_attr->page_shift);  in ehca_alloc_fmr()
     831  fmr_attr->max_pages * (1 << fmr_attr->page_shift),  in ehca_alloc_fmr()
     841  e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;  in ehca_alloc_fmr()

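ehca_reg_user_mr() picks a hardware page size by rounding log2 of the region length up to the next multiple of 4, (fls64(length - 1) + 3) & ~3, which lands on the 4 KiB / 64 KiB / 1 MiB / 16 MiB ladder before being clamped to the supported range. A sketch with a userspace fls64() stand-in:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's fls64(): 1-based index of the top set bit. */
    static int fls64_u(uint64_t x)
    {
            int r = 0;

            while (x) {
                    x >>= 1;
                    r++;
            }
            return r;
    }

    int main(void)
    {
            uint64_t length = 5 * 1024 * 1024;   /* a 5 MiB region */

            /* Round log2(length) up to a multiple of 4: here 24, so 16 MiB. */
            int page_shift = (fls64_u(length - 1) + 3) & ~3;

            printf("page_shift=%d page=%llu\n",
                   page_shift, 1ULL << page_shift);
            return 0;
    }
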
/drivers/gpu/drm/nouveau/core/subdev/vm/

base.c
     289  nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,  in nouveau_vm_get() (argument)
     293  u32 align = (1 << page_shift) >> 12;  in nouveau_vm_get()
     299  ret = nouveau_mm_head(&vm->mm, 0, page_shift, msize, msize, align,  in nouveau_vm_get()

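nouveau_vm_get() re-expresses the page size as an alignment in 4 KiB GPU-VA units: align = (1 << page_shift) >> 12. For an example 17-bit large-page shift that is 32 units, i.e. 128 KiB:

    #include <stdio.h>

    int main(void)
    {
            unsigned page_shift = 17;                   /* example large page */
            unsigned align = (1u << page_shift) >> 12;  /* 32 x 4 KiB units */

            printf("align=%u (4 KiB units)\n", align);
            return 0;
    }
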
/drivers/net/ethernet/intel/e1000e/

phy.c
    2373  u32 page_shift, page_select;  in e1000e_write_phy_reg_bm() (local)
    2380  page_shift = IGP_PAGE_SHIFT;  in e1000e_write_phy_reg_bm()
    2383  page_shift = 0;  in e1000e_write_phy_reg_bm()
    2389  (page << page_shift));  in e1000e_write_phy_reg_bm()
    2431  u32 page_shift, page_select;  in e1000e_read_phy_reg_bm() (local)
    2438  page_shift = IGP_PAGE_SHIFT;  in e1000e_read_phy_reg_bm()
    2441  page_shift = 0;  in e1000e_read_phy_reg_bm()
    2447  (page << page_shift));  in e1000e_read_phy_reg_bm()

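Here page_shift has nothing to do with memory pages: it positions a PHY register page number inside the page-select register, using IGP_PAGE_SHIFT for PHYs addressed through the IGP select register and 0 otherwise. A sketch, with IGP_PAGE_SHIFT assumed to be 5 as in the e1000e headers:

    #include <stdint.h>
    #include <stdio.h>

    #define IGP_PAGE_SHIFT 5   /* assumed value from e1000e's PHY headers */

    int main(void)
    {
            uint32_t page = 3;          /* PHY register page to select */
            int use_igp_select = 1;     /* depends on the PHY's select reg */
            uint32_t page_shift = use_igp_select ? IGP_PAGE_SHIFT : 0;

            printf("page-select word: 0x%x\n", page << page_shift);
            return 0;
    }
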
/drivers/mtd/onenand/

onenand_base.c
     410  this->page_shift;  in onenand_command()
     412  page = (int) (addr >> this->page_shift);  in onenand_command()
     855  page = (int) (addr >> (this->page_shift + 1)) & this->page_mask;  in onenand_get_2x_blockpage()
     878  blockpage = (int) (addr >> this->page_shift);  in onenand_check_bufferram()
     921  blockpage = (int) (addr >> this->page_shift);  in onenand_update_bufferram()
     953  loff_t buf_addr = this->bufferram[i].blockpage << this->page_shift;  in onenand_invalidate_bufferram()
    1382  column + len > ((mtd->size >> this->page_shift) -  in onenand_read_oob_nolock()
    1383  (from >> this->page_shift)) * oobsize)) {  in onenand_read_oob_nolock()
    2093  column + len > ((mtd->size >> this->page_shift) -  in onenand_write_oob_nolock()
    2094  (to >> this->page_shift)) * oobsize)) {  in onenand_write_oob_nolock()
    [all …]
