/drivers/infiniband/hw/mthca/

mthca_allocator.c
    122  if (array->page_list[p].page)  in mthca_array_get()
    123  return array->page_list[p].page[index & MTHCA_ARRAY_MASK];  in mthca_array_get()
    133  if (!array->page_list[p].page)  in mthca_array_set()
    134  array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);  in mthca_array_set()
    136  if (!array->page_list[p].page)  in mthca_array_set()
    139  array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;  in mthca_array_set()
    140  ++array->page_list[p].used;  in mthca_array_set()
    149  if (--array->page_list[p].used == 0) {  in mthca_array_clear()
    150  free_page((unsigned long) array->page_list[p].page);  in mthca_array_clear()
    151  array->page_list[p].page = NULL;  in mthca_array_clear()
    [all …]

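The mthca_allocator.c hits above show a lazily populated two-level lookup table: each slot of the top-level page_list owns at most one page of pointers, allocated on first use and freed again when its use count drops to zero. Below is a minimal kernel-style sketch of that pattern; the sketch_* names are assumptions, not the driver's identifiers.

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    #define SKETCH_SLOTS_PER_PAGE (PAGE_SIZE / sizeof(void *))
    #define SKETCH_SLOT_MASK      (SKETCH_SLOTS_PER_PAGE - 1)

    struct sketch_array_page {
        void **page;   /* NULL until the first entry in this range is set */
        int    used;   /* live entries; the page is freed when this hits 0 */
    };

    struct sketch_array {
        struct sketch_array_page *page_list;   /* sized for the maximum index */
    };

    static void *sketch_array_get(struct sketch_array *array, int index)
    {
        int p = index / SKETCH_SLOTS_PER_PAGE;

        if (array->page_list[p].page)
            return array->page_list[p].page[index & SKETCH_SLOT_MASK];
        return NULL;
    }

    static int sketch_array_set(struct sketch_array *array, int index, void *value)
    {
        int p = index / SKETCH_SLOTS_PER_PAGE;

        /* allocate the second-level page lazily, zeroed */
        if (!array->page_list[p].page)
            array->page_list[p].page = (void **)get_zeroed_page(GFP_ATOMIC);
        if (!array->page_list[p].page)
            return -ENOMEM;

        array->page_list[p].page[index & SKETCH_SLOT_MASK] = value;
        ++array->page_list[p].used;
        return 0;
    }

    static void sketch_array_clear(struct sketch_array *array, int index)
    {
        int p = index / SKETCH_SLOTS_PER_PAGE;

        array->page_list[p].page[index & SKETCH_SLOT_MASK] = NULL;
        if (--array->page_list[p].used == 0) {
            free_page((unsigned long)array->page_list[p].page);
            array->page_list[p].page = NULL;
        }
    }
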
mthca_eq.c
    231  return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;  in get_eqe()
    482  eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),  in mthca_create_eq()
    484  if (!eq->page_list)  in mthca_create_eq()
    488  eq->page_list[i].buf = NULL;  in mthca_create_eq()
    500  eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,  in mthca_create_eq()
    502  if (!eq->page_list[i].buf)  in mthca_create_eq()
    506  dma_unmap_addr_set(&eq->page_list[i], mapping, t);  in mthca_create_eq()
    508  clear_page(eq->page_list[i].buf);  in mthca_create_eq()
    572  if (eq->page_list[i].buf)  in mthca_create_eq()
    574  eq->page_list[i].buf,  in mthca_create_eq()
    [all …]

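mthca_eq.c (like the mlx4 eq.c entry further down) backs the event queue with an array of individually DMA-coherent pages and locates an entry by splitting its byte offset into a page index and an in-page offset. A hedged sketch of that layout, with assumed names and a power-of-two entry count:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct sketch_eq_page {
        void       *buf;
        dma_addr_t  map;
    };

    struct sketch_eq {
        struct sketch_eq_page *page_list;
        int nent;        /* number of entries, assumed to be a power of two */
        int entry_size;
    };

    /* locate entry N: page index = byte offset / PAGE_SIZE, then offset in page */
    static void *sketch_get_eqe(struct sketch_eq *eq, u32 entry)
    {
        unsigned long off = (entry & (eq->nent - 1)) * eq->entry_size;

        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
    }

    static int sketch_eq_alloc(struct device *dev, struct sketch_eq *eq, int npages)
    {
        int i;

        eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list), GFP_KERNEL);
        if (!eq->page_list)
            return -ENOMEM;

        for (i = 0; i < npages; ++i) {
            eq->page_list[i].buf = dma_alloc_coherent(dev, PAGE_SIZE,
                                                      &eq->page_list[i].map,
                                                      GFP_KERNEL);
            if (!eq->page_list[i].buf)
                goto err_free;
        }
        return 0;

    err_free:
        while (--i >= 0)
            dma_free_coherent(dev, PAGE_SIZE, eq->page_list[i].buf,
                              eq->page_list[i].map);
        kfree(eq->page_list);
        return -ENOMEM;
    }
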
mthca_provider.h
    54   struct mthca_buf_list *page_list;  member
    96   struct mthca_buf_list *page_list;  member

/drivers/xen/

unpopulated-alloc.c
    15   static struct page *page_list;  variable
    89   pg->zone_device_data = page_list;  in fill_list()
    90   page_list = pg;  in fill_list()
    124  struct page *pg = page_list;  in xen_alloc_unpopulated_pages()
    127  page_list = pg->zone_device_data;  in xen_alloc_unpopulated_pages()
    138  pages[j]->zone_device_data = page_list;  in xen_alloc_unpopulated_pages()
    139  page_list = pages[j];  in xen_alloc_unpopulated_pages()
    165  pages[i]->zone_device_data = page_list;  in xen_free_unpopulated_pages()
    166  page_list = pages[i];  in xen_free_unpopulated_pages()
    195  pg->zone_device_data = page_list;  in init()
    [all …]

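unpopulated-alloc.c keeps its spare pages on a singly linked free list threaded through each page's zone_device_data field, so the whole list costs a single static pointer. A minimal sketch of the push/pop operations (locking omitted; the real code serializes these paths, and the names are assumptions):

    #include <linux/mm.h>

    /* spare pages chained through page->zone_device_data; the head is the list */
    static struct page *sketch_free_list;

    static void sketch_push_page(struct page *pg)
    {
        pg->zone_device_data = sketch_free_list;
        sketch_free_list = pg;
    }

    static struct page *sketch_pop_page(void)
    {
        struct page *pg = sketch_free_list;

        if (pg)
            sketch_free_list = pg->zone_device_data;
        return pg;
    }
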
/drivers/infiniband/hw/usnic/

usnic_uiom.c
    51   ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
    52   ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
    53   (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
    75   for_each_sg(chunk->page_list, sg, chunk->nents, i) {  in usnic_uiom_put_pages()
    89   struct page **page_list;  in usnic_uiom_get_pages() local
    119  page_list = (struct page **) __get_free_page(GFP_KERNEL);  in usnic_uiom_get_pages()
    120  if (!page_list)  in usnic_uiom_get_pages()
    148  page_list, NULL);  in usnic_uiom_get_pages()
    157  chunk = kmalloc(struct_size(chunk, page_list,  in usnic_uiom_get_pages()
    166  sg_init_table(chunk->page_list, chunk->nents);  in usnic_uiom_get_pages()
    [all …]

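usnic_uiom.c sizes each scatterlist chunk so that the chunk header plus its flexible page_list[] array fit in one page, then allocates chunks with struct_size(). A sketch of that sizing, with assumed names and sizeof() standing in for the driver's pointer-difference trick:

    #include <linux/list.h>
    #include <linux/mm.h>
    #include <linux/overflow.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <linux/stddef.h>

    struct sketch_chunk {
        struct list_head list;
        int nents;
        struct scatterlist page_list[];   /* flexible array, sized per chunk */
    };

    /* how many scatterlist entries still fit in one page after the header */
    #define SKETCH_PAGES_PER_CHUNK \
        ((PAGE_SIZE - offsetof(struct sketch_chunk, page_list)) / \
         sizeof(struct scatterlist))

    static struct sketch_chunk *sketch_chunk_alloc(unsigned long nents)
    {
        struct sketch_chunk *chunk;

        if (nents > SKETCH_PAGES_PER_CHUNK)
            nents = SKETCH_PAGES_PER_CHUNK;

        chunk = kmalloc(struct_size(chunk, page_list, nents), GFP_KERNEL);
        if (!chunk)
            return NULL;

        chunk->nents = nents;
        sg_init_table(chunk->page_list, chunk->nents);
        return chunk;
    }
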
usnic_uiom.h
    80   struct scatterlist page_list[];  member

/drivers/infiniband/core/

umem.c
    146  struct page **page_list;  in ib_umem_get() local
    185  page_list = (struct page **) __get_free_page(GFP_KERNEL);  in ib_umem_get()
    186  if (!page_list) {  in ib_umem_get()
    217  gup_flags | FOLL_LONGTERM, page_list);  in ib_umem_get()
    226  &umem->sgt_append, page_list, pinned, 0,  in ib_umem_get()
    230  unpin_user_pages_dirty_lock(page_list, pinned, 0);  in ib_umem_get()
    248  free_page((unsigned long) page_list);  in ib_umem_get()

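umem.c borrows a single free page as a scratch array of struct page pointers and pins the user range in batches with FOLL_LONGTERM, since the pin outlives the system call. A hedged sketch of that loop (names are assumptions; the real code appends each batch to an sg table instead of releasing it immediately):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/minmax.h>
    #include <linux/mm.h>
    #include <linux/types.h>

    static long sketch_pin_range(unsigned long addr, unsigned long npages, bool writable)
    {
        struct page **page_list;
        unsigned long done = 0;
        unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
        long ret = 0;

        /* one borrowed page holds PAGE_SIZE / sizeof(struct page *) pointers */
        page_list = (struct page **)__get_free_page(GFP_KERNEL);
        if (!page_list)
            return -ENOMEM;

        while (done < npages) {
            unsigned long batch = min_t(unsigned long, npages - done,
                                        PAGE_SIZE / sizeof(struct page *));
            int pinned = pin_user_pages_fast(addr + done * PAGE_SIZE, batch,
                                             gup_flags, page_list);

            if (pinned <= 0) {
                ret = pinned ? pinned : -EFAULT;
                break;
            }

            /* the real callers hand page_list[0..pinned) to an sg table here;
             * this sketch just releases the pins again */
            unpin_user_pages(page_list, pinned);
            done += pinned;
        }

        free_page((unsigned long)page_list);
        return ret;
    }
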
/drivers/comedi/

comedi_buf.c
    29   if (bm->page_list) {  in comedi_buf_map_kref_release()
    35   buf = &bm->page_list[0];  in comedi_buf_map_kref_release()
    41   buf = &bm->page_list[i];  in comedi_buf_map_kref_release()
    46   vfree(bm->page_list);  in comedi_buf_map_kref_release()
    93   bm->page_list = vzalloc(sizeof(*buf) * n_pages);  in comedi_buf_map_alloc()
    94   if (!bm->page_list)  in comedi_buf_map_alloc()
    112  buf = &bm->page_list[i];  in comedi_buf_map_alloc()
    120  buf = &bm->page_list[i];  in comedi_buf_map_alloc()
    170  buf = &bm->page_list[0];  in __comedi_buf_alloc()
    178  buf = &bm->page_list[i];  in __comedi_buf_alloc()
    [all …]

/drivers/net/ethernet/mellanox/mlx4/

mr.c
    693  int start_index, int npages, u64 *page_list)  in mlx4_write_mtt_chunk() argument
    710  mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);  in mlx4_write_mtt_chunk()
    719  int start_index, int npages, u64 *page_list)  in __mlx4_write_mtt() argument
    734  err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);  in __mlx4_write_mtt()
    739  page_list += chunk;  in __mlx4_write_mtt()
    747  int start_index, int npages, u64 *page_list)  in mlx4_write_mtt() argument
    770  inbox[i + 2] = cpu_to_be64(page_list[i] |  in mlx4_write_mtt()
    780  page_list += chunk;  in mlx4_write_mtt()
    786  return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);  in mlx4_write_mtt()
    793  u64 *page_list;  in mlx4_buf_write_mtt() local
    [all …]

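mr.c writes a long page_list of DMA addresses into the device's MTT in bounded chunks, advancing both the start index and the page_list pointer after each chunk. A sketch of the chunking loop; sketch_write_chunk() is a stand-in for the mailbox write, and the chunk size here is an assumption:

    #include <linux/errno.h>
    #include <linux/minmax.h>
    #include <linux/types.h>

    #define SKETCH_MTT_CHUNK 64   /* assumed per-command entry limit */

    /* stand-in for the hardware-specific write of npages translation entries */
    static int sketch_write_chunk(int start_index, int npages, const u64 *page_list)
    {
        return 0;
    }

    static int sketch_write_mtt(int start_index, int npages, u64 *page_list)
    {
        while (npages > 0) {
            int chunk = min_t(int, SKETCH_MTT_CHUNK, npages);
            int err = sketch_write_chunk(start_index, chunk, page_list);

            if (err)
                return err;

            npages      -= chunk;
            start_index += chunk;
            page_list   += chunk;   /* advance into the caller's address list */
        }
        return 0;
    }
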
alloc.c
    619  buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),  in mlx4_buf_alloc()
    621  if (!buf->page_list)  in mlx4_buf_alloc()
    625  buf->page_list[i].buf =  in mlx4_buf_alloc()
    628  if (!buf->page_list[i].buf)  in mlx4_buf_alloc()
    631  buf->page_list[i].map = t;  in mlx4_buf_alloc()
    653  if (buf->page_list[i].buf)  in mlx4_buf_free()
    656  buf->page_list[i].buf,  in mlx4_buf_free()
    657  buf->page_list[i].map);  in mlx4_buf_free()
    658  kfree(buf->page_list);  in mlx4_buf_free()

eq.c
    118   …return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % …  in get_eqe()
    989   eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),  in mlx4_create_eq()
    991   if (!eq->page_list)  in mlx4_create_eq()
    995   eq->page_list[i].buf = NULL;  in mlx4_create_eq()
    1007  eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->  in mlx4_create_eq()
    1011  if (!eq->page_list[i].buf)  in mlx4_create_eq()
    1015  eq->page_list[i].map = t;  in mlx4_create_eq()
    1072  if (eq->page_list[i].buf)  in mlx4_create_eq()
    1074  eq->page_list[i].buf,  in mlx4_create_eq()
    1075  eq->page_list[i].map);  in mlx4_create_eq()
    [all …]

/drivers/misc/genwqe/

card_utils.c
    243  struct page **page_list, int num_pages,  in genwqe_map_pages() argument
    254  daddr = pci_map_page(pci_dev, page_list[i],  in genwqe_map_pages()
    568  m->page_list = kcalloc(m->nr_pages,  in genwqe_user_vmap()
    571  if (!m->page_list) {  in genwqe_user_vmap()
    578  m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);  in genwqe_user_vmap()
    584  m->page_list); /* ptrs to pages */  in genwqe_user_vmap()
    590  unpin_user_pages_dirty_lock(m->page_list, rc, m->write);  in genwqe_user_vmap()
    595  rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);  in genwqe_user_vmap()
    602  unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write);  in genwqe_user_vmap()
    605  kfree(m->page_list);  in genwqe_user_vmap()
    [all …]

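card_utils.c makes one kcalloc() cover both arrays: nr_pages struct page pointers followed immediately by nr_pages DMA addresses, so dma_list is simply a pointer past the end of page_list. A short sketch (names assumed):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct sketch_mapping {
        unsigned int  nr_pages;
        struct page **page_list;   /* nr_pages pinned-page pointers ... */
        dma_addr_t   *dma_list;    /* ... followed by nr_pages bus addresses */
    };

    static int sketch_mapping_alloc(struct sketch_mapping *m, unsigned int nr_pages)
    {
        m->nr_pages = nr_pages;
        /* one allocation carries both arrays back to back */
        m->page_list = kcalloc(nr_pages,
                               sizeof(struct page *) + sizeof(dma_addr_t),
                               GFP_KERNEL);
        if (!m->page_list)
            return -ENOMEM;

        m->dma_list = (dma_addr_t *)(m->page_list + nr_pages);
        return 0;
    }
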
/drivers/md/

dm-kcopyd.c
    58   struct page_list *pages;
    93   static struct page_list zero_page_list;
    214  static struct page_list *alloc_pl(gfp_t gfp)  in alloc_pl()
    216  struct page_list *pl;  in alloc_pl()
    231  static void free_pl(struct page_list *pl)  in free_pl()
    241  static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)  in kcopyd_put_pages()
    243  struct page_list *next;  in kcopyd_put_pages()
    261  unsigned int nr, struct page_list **pages)  in kcopyd_get_pages()
    263  struct page_list *pl;  in kcopyd_get_pages()
    292  static void drop_pages(struct page_list *pl)  in drop_pages()
    [all …]

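The device-mapper files in this directory share struct page_list from include/linux/dm-io.h: a node that owns one page and points at the next, so a client's page reserve is a plain singly linked list. Below is a sketch of the get/put splicing that dm-kcopyd.c performs on such a list; the sketch_* types and helpers are assumptions:

    #include <linux/mm.h>

    struct sketch_page_list {
        struct sketch_page_list *next;
        struct page *page;
    };

    /* return a chain of nodes to the pool by splicing it onto the head */
    static void sketch_put_pages(struct sketch_page_list **pool,
                                 struct sketch_page_list *pl)
    {
        while (pl) {
            struct sketch_page_list *next = pl->next;

            pl->next = *pool;
            *pool = pl;
            pl = next;
        }
    }

    /* take nr nodes off the pool, or give everything back if it runs dry */
    static struct sketch_page_list *sketch_get_pages(struct sketch_page_list **pool,
                                                     unsigned int nr)
    {
        struct sketch_page_list *pl = NULL;

        while (nr--) {
            struct sketch_page_list *node = *pool;

            if (!node) {
                sketch_put_pages(pool, pl);
                return NULL;
            }
            *pool = node->next;
            node->next = pl;
            pl = node;
        }
        return pl;
    }
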
dm-io.c
    183  struct page_list *pl = (struct page_list *) dp->context_ptr;  in list_get_page()
    192  struct page_list *pl = (struct page_list *) dp->context_ptr;  in list_next_page()
    197  static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)  in list_dp_init()

dm-integrity.c
    173   struct page_list *journal;
    174   struct page_list *journal_io;
    175   struct page_list *journal_xor;
    176   struct page_list *recalc_bitmap;
    177   struct page_list *may_write_bitmap;
    595   static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,  in block_bitmap_op()
    700   static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *s…  in block_bitmap_copy()
    749   static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,  in access_page_list()
    900   struct page_list *source_pl, *target_pl;  in xor_journal()
    3533  static void dm_integrity_free_page_list(struct page_list *pl)  in dm_integrity_free_page_list()
    [all …]

/drivers/gpu/drm/vmwgfx/

vmwgfx_validation.h
    80   struct list_head page_list;  member
    112  .page_list = LIST_HEAD_INIT((_name).page_list), \

vmwgfx_validation.c
    132  list_add_tail(&page->lru, &ctx->page_list);  in vmw_validation_mem_alloc()
    155  list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {  in vmw_validation_mem_free()

/drivers/vhost/

vdpa.c
    706  struct page **page_list;  in vhost_vdpa_pa_map() local
    716  page_list = (struct page **) __get_free_page(GFP_KERNEL);  in vhost_vdpa_pa_map()
    717  if (!page_list)  in vhost_vdpa_pa_map()
    744  gup_flags, page_list, NULL);  in vhost_vdpa_pa_map()
    749  unpin_user_pages(page_list, pinned);  in vhost_vdpa_pa_map()
    757  map_pfn = page_to_pfn(page_list[0]);  in vhost_vdpa_pa_map()
    760  unsigned long this_pfn = page_to_pfn(page_list[i]);  in vhost_vdpa_pa_map()
    778  unpin_user_pages(&page_list[i],  in vhost_vdpa_pa_map()
    821  free_page((unsigned long)page_list);  in vhost_vdpa_pa_map()

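vdpa.c pins user pages and then merges physically contiguous runs, issuing one mapping update per run instead of one per page. A sketch of the coalescing walk (assumes at least one pinned page); sketch_map_run() stands in for the actual IOMMU/IOTLB update:

    #include <linux/mm.h>

    /* stand-in for the IOMMU/IOTLB update of one physically contiguous run */
    static int sketch_map_run(unsigned long pfn, unsigned long npages)
    {
        return 0;
    }

    static int sketch_map_pinned(struct page **page_list, unsigned long pinned)
    {
        unsigned long map_pfn = page_to_pfn(page_list[0]);
        unsigned long run = 1;
        unsigned long i;
        int ret;

        for (i = 1; i < pinned; i++) {
            unsigned long this_pfn = page_to_pfn(page_list[i]);

            if (this_pfn == map_pfn + run) {
                run++;              /* still contiguous, extend the run */
                continue;
            }
            ret = sketch_map_run(map_pfn, run);
            if (ret)
                return ret;
            map_pfn = this_pfn;     /* start a new run */
            run = 1;
        }
        return sketch_map_run(map_pfn, run);
    }
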
/drivers/rapidio/devices/

rio_mport_cdev.c
    541  struct page **page_list;  member
    579  if (req->page_list) {  in dma_req_free()
    580  unpin_user_pages(req->page_list, req->nr_pages);  in dma_req_free()
    581  kfree(req->page_list);  in dma_req_free()
    812  struct page **page_list = NULL;  in rio_dma_transfer() local
    856  page_list = kmalloc_array(nr_pages,  in rio_dma_transfer()
    857  sizeof(*page_list), GFP_KERNEL);  in rio_dma_transfer()
    858  if (page_list == NULL) {  in rio_dma_transfer()
    867  page_list);  in rio_dma_transfer()
    887  ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,  in rio_dma_transfer()
    [all …]

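rio_mport_cdev.c turns the pinned page_list into a scatter-gather table with a single sg_alloc_table_from_pages() call, after which the flat page array is only needed for unpinning on teardown. A hedged sketch with abbreviated error handling and assumed names:

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int sketch_build_sgt(struct sg_table *sgt, unsigned long addr, size_t len)
    {
        unsigned long offset = addr & ~PAGE_MASK;
        unsigned long nr_pages = PAGE_ALIGN(offset + len) >> PAGE_SHIFT;
        struct page **page_list;
        long pinned;
        int ret;

        page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL);
        if (!page_list)
            return -ENOMEM;

        pinned = pin_user_pages_fast(addr & PAGE_MASK, nr_pages,
                                     FOLL_WRITE | FOLL_LONGTERM, page_list);
        if (pinned != (long)nr_pages) {
            ret = pinned < 0 ? pinned : -EFAULT;
            goto out;
        }

        ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages, offset, len,
                                        GFP_KERNEL);
    out:
        if (ret && pinned > 0)
            unpin_user_pages(page_list, pinned);   /* drop pins on any failure */
        kfree(page_list);                          /* flat list no longer needed */
        return ret;
    }
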
/drivers/infiniband/hw/hns/

hns_roce_db.c
    21   list_for_each_entry(page, &context->page_list, list)  in hns_roce_db_map_user()
    41   list_add(&page->list, &context->page_list);  in hns_roce_db_map_user()

hns_roce_mr.c
    433  mr->page_list[mr->npages++] = addr;  in hns_roce_set_page()
    450  mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,  in hns_roce_map_mr_sg()
    452  if (!mr->page_list)  in hns_roce_map_mr_sg()
    466  ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);  in hns_roce_map_mr_sg()
    476  kvfree(mr->page_list);  in hns_roce_map_mr_sg()
    477  mr->page_list = NULL;  in hns_roce_map_mr_sg()

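hns_roce_mr.c follows the common RDMA map_mr_sg shape: a set_page callback is invoked once per device page while the core walks the sg list, and it simply appends the DMA address to a preallocated page_list that is later written to the hardware translation table. A minimal sketch (names and the bounds check are assumptions):

    #include <linux/errno.h>
    #include <linux/types.h>

    struct sketch_mr {
        u64 *page_list;   /* preallocated to the MR's maximum page count */
        u32  npages;
        u32  max_pages;
    };

    /* called once per device-sized page while the core walks the sg list */
    static int sketch_set_page(struct sketch_mr *mr, u64 addr)
    {
        if (mr->npages >= mr->max_pages)
            return -ENOBUFS;

        mr->page_list[mr->npages++] = addr;
        return 0;
    }
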
/drivers/net/ethernet/google/gve/

gve_adminq.c
    763  __be64 *page_list;  in gve_adminq_register_page_list() local
    768  page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);  in gve_adminq_register_page_list()
    769  if (!page_list)  in gve_adminq_register_page_list()
    773  page_list[i] = cpu_to_be64(qpl->page_buses[i]);  in gve_adminq_register_page_list()
    783  dma_free_coherent(hdev, size, page_list, page_list_bus);  in gve_adminq_register_page_list()

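gve_adminq.c registers a queue page list by copying the page bus addresses, as big-endian 64-bit values, into one DMA-coherent buffer and handing that buffer's bus address to the device. A sketch with the admin-queue command itself omitted and names assumed:

    #include <asm/byteorder.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int sketch_register_pages(struct device *dev,
                                     const dma_addr_t *page_buses, u32 num)
    {
        size_t size = num * sizeof(__be64);
        dma_addr_t page_list_bus;
        __be64 *page_list;
        u32 i;

        page_list = dma_alloc_coherent(dev, size, &page_list_bus, GFP_KERNEL);
        if (!page_list)
            return -ENOMEM;

        for (i = 0; i < num; i++)
            page_list[i] = cpu_to_be64(page_buses[i]);   /* device expects big endian */

        /* ... issue the admin command that points the device at page_list_bus ... */

        dma_free_coherent(dev, size, page_list, page_list_bus);
        return 0;
    }
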
/drivers/virt/vboxguest/

vboxguest_utils.c
    352  dst_parm->u.page_list.size = len;  in hgcm_call_init_linaddr()
    353  dst_parm->u.page_list.offset = *off_extra;  in hgcm_call_init_linaddr()
    576  dst_parm->u.page_list.size = src_parm->u.page_list.size;  in hgcm_call_copy_back_result()

/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_misc.c
    206  u64 *page_list,  in pvrdma_page_dir_insert_page_list() argument
    216  ret = pvrdma_page_dir_insert_dma(pdir, i, page_list[i]);  in pvrdma_page_dir_insert_page_list()

/drivers/dma/

ste_dma40.c
    3374  unsigned long *page_list;  in d40_lcla_allocate() local
    3383  page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,  in d40_lcla_allocate()
    3384  sizeof(*page_list),  in d40_lcla_allocate()
    3386  if (!page_list)  in d40_lcla_allocate()
    3393  page_list[i] = __get_free_pages(GFP_KERNEL,  in d40_lcla_allocate()
    3395  if (!page_list[i]) {  in d40_lcla_allocate()
    3402  free_pages(page_list[j], base->lcla_pool.pages);  in d40_lcla_allocate()
    3406  if ((virt_to_phys((void *)page_list[i]) &  in d40_lcla_allocate()
    3412  free_pages(page_list[j], base->lcla_pool.pages);  in d40_lcla_allocate()
    3415  base->lcla_pool.base = (void *)page_list[i];  in d40_lcla_allocate()
    [all …]

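ste_dma40.c allocates candidate buffers into a scratch page_list until one satisfies the hardware alignment requirement, then frees every rejected candidate. A sketch of that retry loop; the attempt limit, alignment mask, and names are assumptions:

    #include <linux/gfp.h>
    #include <linux/io.h>
    #include <linux/slab.h>

    #define SKETCH_MAX_ATTEMPTS 256   /* assumed retry limit */

    static unsigned long sketch_alloc_aligned(unsigned int order,
                                              unsigned long align_mask)
    {
        unsigned long *page_list;
        unsigned long found = 0;
        int i, j;

        page_list = kmalloc_array(SKETCH_MAX_ATTEMPTS, sizeof(*page_list),
                                  GFP_KERNEL);
        if (!page_list)
            return 0;

        for (i = 0; i < SKETCH_MAX_ATTEMPTS; i++) {
            page_list[i] = __get_free_pages(GFP_KERNEL, order);
            if (!page_list[i])
                break;
            if (!(virt_to_phys((void *)page_list[i]) & align_mask)) {
                found = page_list[i];   /* aligned candidate, keep it */
                break;
            }
        }

        /* free every rejected candidate; the kept one (if any) is page_list[i] */
        for (j = 0; j < i; j++)
            free_pages(page_list[j], order);

        kfree(page_list);
        return found;
    }
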