/drivers/media/pci/ivtv/

ivtv-udma.c
     33  dma_page->page_count = dma_page->last - dma_page->first + 1;  in ivtv_udma_get_page_info()
     34  if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset;  in ivtv_udma_get_page_info()
     48  for (i = 0; i < dma_page->page_count; i++) {  in ivtv_udma_fill_sg_list()
     49  unsigned int len = (i == dma_page->page_count - 1) ?  in ivtv_udma_fill_sg_list()
    112  if (dma->SG_length || dma->page_count) {  in ivtv_udma_setup()
    114  dma->SG_length, dma->page_count);  in ivtv_udma_setup()
    120  if (user_dma.page_count <= 0) {  in ivtv_udma_setup()
    122  user_dma.page_count, size_in_bytes, user_dma.offset);  in ivtv_udma_setup()
    127  err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,  in ivtv_udma_setup()
    130  if (user_dma.page_count != err) {  in ivtv_udma_setup()
    [all …]
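
These ivtv hits show the usual pin-then-verify pattern around get_user_pages_unlocked(): work out how many pages the user buffer spans (line 33), pin them, and treat a short pin as a failure (line 130). A minimal sketch of the pattern follows; user_dma_sketch and pin_user_buffer are illustrative names, not ivtv's, and the four-argument get_user_pages_unlocked() signature assumed here is the one from kernels of roughly this vintage (v4.9+).

#include <linux/mm.h>
#include <linux/slab.h>

struct user_dma_sketch {                /* illustrative, not ivtv's struct */
        unsigned long uaddr;            /* page-aligned start of user buffer */
        int page_count;                 /* pages spanned by offset + size */
        struct page **map;
};

static int pin_user_buffer(struct user_dma_sketch *dma,
                           unsigned long uaddr, size_t size)
{
        unsigned long first = uaddr >> PAGE_SHIFT;
        unsigned long last = (uaddr + size - 1) >> PAGE_SHIFT;
        int err;

        dma->page_count = last - first + 1;     /* same arithmetic as line 33 */
        dma->uaddr = uaddr & PAGE_MASK;
        dma->map = kcalloc(dma->page_count, sizeof(*dma->map), GFP_KERNEL);
        if (!dma->map)
                return -ENOMEM;

        err = get_user_pages_unlocked(dma->uaddr, dma->page_count,
                                      dma->map, FOLL_WRITE);
        if (err != dma->page_count) {           /* short pin: unwind and bail */
                while (err-- > 0)
                        put_page(dma->map[err]);
                kfree(dma->map);
                return -EFAULT;
        }
        return 0;
}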

ivtv-yuv.c
     67  if (dma->SG_length || dma->page_count) {  in ivtv_yuv_prep_user_dma()
     70  dma->SG_length, dma->page_count);  in ivtv_yuv_prep_user_dma()
     79  y_dma.page_count, &dma->map[0], FOLL_FORCE);  in ivtv_yuv_prep_user_dma()
     81  if (y_pages == y_dma.page_count) {  in ivtv_yuv_prep_user_dma()
     83  uv_dma.page_count, &dma->map[y_pages],  in ivtv_yuv_prep_user_dma()
     87  if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {  in ivtv_yuv_prep_user_dma()
     90  if (y_pages == y_dma.page_count) {  in ivtv_yuv_prep_user_dma()
     93  "expecting %d\n", uv_pages, uv_dma.page_count);  in ivtv_yuv_prep_user_dma()
    105  "expecting %d\n", y_pages, y_dma.page_count);  in ivtv_yuv_prep_user_dma()
    122  dma->page_count = y_pages + uv_pages;  in ivtv_yuv_prep_user_dma()
    [all …]

/drivers/firewire/

core-iso.c
     42  int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)  in fw_iso_buffer_alloc() argument
     46  buffer->page_count = 0;  in fw_iso_buffer_alloc()
     48  buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),  in fw_iso_buffer_alloc()
     53  for (i = 0; i < page_count; i++) {  in fw_iso_buffer_alloc()
     58  buffer->page_count = i;  in fw_iso_buffer_alloc()
     59  if (i < page_count) {  in fw_iso_buffer_alloc()
     75  for (i = 0; i < buffer->page_count; i++) {  in fw_iso_buffer_map_dma()
     84  if (i < buffer->page_count)  in fw_iso_buffer_map_dma()
     91  int page_count, enum dma_data_direction direction)  in fw_iso_buffer_init() argument
     95  ret = fw_iso_buffer_alloc(buffer, page_count);  in fw_iso_buffer_init()
    [all …]
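
fw_iso_buffer_alloc() records how far the allocation loop got (buffer->page_count = i at line 58) before checking for a partial failure, so exactly the pages that were allocated can be freed. A hedged reconstruction of that shape; iso_buffer_sketch stands in for struct fw_iso_buffer, and whether the unwind happens here or in a separate destroy helper is an assumption.

#include <linux/gfp.h>
#include <linux/slab.h>

struct iso_buffer_sketch {              /* stand-in for struct fw_iso_buffer */
        struct page **pages;
        int page_count;
};

static int iso_buffer_alloc_sketch(struct iso_buffer_sketch *buffer,
                                   int page_count)
{
        int i;

        buffer->page_count = 0;
        buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]),
                                      GFP_KERNEL); /* original uses kmalloc() */
        if (!buffer->pages)
                return -ENOMEM;

        for (i = 0; i < page_count; i++) {
                buffer->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!buffer->pages[i])
                        break;
        }
        buffer->page_count = i;         /* record how far we got (line 58) */
        if (i < page_count) {           /* partial failure: free what we took */
                while (i-- > 0)
                        __free_page(buffer->pages[i]);
                kfree(buffer->pages);
                buffer->pages = NULL;
                buffer->page_count = 0;
                return -ENOMEM;
        }
        return 0;
}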

/drivers/char/agp/

generic.c
    186  if (curr->page_count != 0) {  in agp_free_memory()
    191  for (i = 0; i < curr->page_count; i++) {  in agp_free_memory()
    196  for (i = 0; i < curr->page_count; i++) {  in agp_free_memory()
    223  size_t page_count, u32 type)  in agp_allocate_memory() argument
    234  if ((cur_memory + page_count > bridge->max_memory_agp) ||  in agp_allocate_memory()
    235  (cur_memory + page_count < page_count))  in agp_allocate_memory()
    239  new = agp_generic_alloc_user(page_count, type);  in agp_allocate_memory()
    246  new = bridge->driver->alloc_by_type(page_count, type);  in agp_allocate_memory()
    252  scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;  in agp_allocate_memory()
    260  if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {  in agp_allocate_memory()
    [all …]
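
Lines 234-235 are worth pausing on: `cur_memory + page_count < page_count` is the classic unsigned-overflow guard, since a wrapped unsigned sum comes out smaller than either operand. Line 252 is the usual round-up division. Both in isolation:

#include <linux/types.h>

/* True if cur + extra wraps or exceeds limit. A wrapped unsigned sum
 * is smaller than either operand, which the second clause catches;
 * this mirrors the test at generic.c lines 234-235. */
static bool would_exceed(size_t cur, size_t extra, size_t limit)
{
        return cur + extra > limit || cur + extra < extra;
}

/* Round-up division, as at line 252: scratch pages needed to hold
 * page_count entries at entries_per_page apiece. */
static size_t scratch_pages_for(size_t page_count, size_t entries_per_page)
{
        return (page_count + entries_per_page - 1) / entries_per_page;
}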

i460-agp.c
    311  if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {  in i460_insert_memory_small_io_page()
    317  while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {  in i460_insert_memory_small_io_page()
    327  for (i = 0, j = io_pg_start; i < mem->page_count; i++) {  in i460_insert_memory_small_io_page()
    346  for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)  in i460_remove_memory_small_io_page()
    415  end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];  in i460_insert_memory_large_io_page()
    417  end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;  in i460_insert_memory_large_io_page()
    473  end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];  in i460_remove_memory_large_io_page()
    475  end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;  in i460_remove_memory_large_io_page()
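
The i460 maps kernel pages onto I/O pages of a different size, so the last descriptor and the offset inside it fall out of plain div/mod arithmetic (lines 415/417 and 473/475). In isolation, with an illustrative ratio of 64 kernel pages per I/O page:

enum { KPAGES_PER_IOPAGE = 64 }; /* illustrative stand-in for I460_KPAGES_PER_IOPAGE */

static inline int last_io_page(int pg_start, int page_count)
{
        return (pg_start + page_count - 1) / KPAGES_PER_IOPAGE;
}

static inline int last_io_offset(int pg_start, int page_count)
{
        return (pg_start + page_count - 1) % KPAGES_PER_IOPAGE;
}

/* e.g. pg_start = 10, page_count = 120: the range ends in I/O page
 * (10 + 120 - 1) / 64 = 2, at offset (10 + 120 - 1) % 64 = 1. */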

sgi-agp.c
    173  if ((pg_start + mem->page_count) > num_entries)  in sgi_tioca_insert_memory()
    178  while (j < (pg_start + mem->page_count)) {  in sgi_tioca_insert_memory()
    189  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {  in sgi_tioca_insert_memory()
    217  for (i = pg_start; i < (mem->page_count + pg_start); i++) {  in sgi_tioca_remove_memory()

nvidia-agp.c
    210  if (mem->page_count == 0)  in nvidia_insert_memory()
    213  if ((pg_start + mem->page_count) >  in nvidia_insert_memory()
    217  for (j = pg_start; j < (pg_start + mem->page_count); j++) {  in nvidia_insert_memory()
    226  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {  in nvidia_insert_memory()
    250  if (mem->page_count == 0)  in nvidia_remove_memory()
    253  for (i = pg_start; i < (mem->page_count + pg_start); i++)  in nvidia_remove_memory()
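
nvidia-agp.c shows the skeleton that the sgi, ati, uninorth, efficeon and serverworks callbacks in this listing all share: bail early on an empty request, reject a range that runs off the aperture, refuse slots that are already busy, then write the translation entries. Condensed into one hedged sketch (the struct and the GATT layout are generic stand-ins, not any one driver's):

#include <linux/errno.h>
#include <linux/types.h>

struct agp_memory_sketch {              /* generic stand-in */
        size_t page_count;
        u32 *phys;                      /* pre-masked physical addresses */
};

static int insert_memory_sketch(u32 *gatt, size_t num_entries,
                                struct agp_memory_sketch *mem,
                                size_t pg_start)
{
        size_t i, j;

        if (mem->page_count == 0)
                return 0;                       /* nothing to map */
        if (pg_start + mem->page_count > num_entries)
                return -EINVAL;                 /* runs off the aperture */
        for (j = pg_start; j < pg_start + mem->page_count; j++)
                if (gatt[j])                    /* slot already in use */
                        return -EBUSY;
        for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
                gatt[j] = mem->phys[i];         /* write the GATT entry */
        return 0;
}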

intel-gtt.c
    126  DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);  in intel_gtt_unmap_memory()
    216  if ((pg_start + mem->page_count)  in i810_insert_dcache_entries()
    223  for (i = pg_start; i < (pg_start + mem->page_count); i++) {  in i810_insert_dcache_entries()
    268  new->page_count = pg_count;  in alloc_agpphysmem_i8xx()
    279  if (curr->page_count == 4)  in intel_i810_free_by_type()
    910  if (mem->page_count == 0)  in intel_fake_agp_insert_entries()
    913  if (pg_start + mem->page_count > intel_private.gtt_total_entries)  in intel_fake_agp_insert_entries()
    928  ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);  in intel_fake_agp_insert_entries()
    936  intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,  in intel_fake_agp_insert_entries()
    963  if (mem->page_count == 0)  in intel_fake_agp_remove_entries()
    [all …]

ati-agp.c
    280  if (mem->page_count == 0)  in ati_insert_memory()
    283  if ((pg_start + mem->page_count) > num_entries)  in ati_insert_memory()
    287  while (j < (pg_start + mem->page_count)) {  in ati_insert_memory()
    301  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {  in ati_insert_memory()
    326  if (mem->page_count == 0)  in ati_remove_memory()
    329  for (i = pg_start; i < (mem->page_count + pg_start); i++) {  in ati_remove_memory()

uninorth-agp.c
    164  if (mem->page_count == 0)  in uninorth_insert_memory()
    170  if ((pg_start + mem->page_count) > num_entries)  in uninorth_insert_memory()
    174  for (i = 0; i < mem->page_count; ++i) {  in uninorth_insert_memory()
    183  for (i = 0; i < mem->page_count; i++) {  in uninorth_insert_memory()
    213  if (mem->page_count == 0)  in uninorth_remove_memory()
    217  for (i = 0; i < mem->page_count; ++i) {  in uninorth_remove_memory()

agp.h
    199  struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
    203  struct agp_memory *memory, size_t page_count);
    220  struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);

efficeon-agp.c
    240  int i, count = mem->page_count, num_entries;  in efficeon_insert_memory()
    248  if ((pg_start + mem->page_count) > num_entries)  in efficeon_insert_memory()
    289  int i, count = mem->page_count, num_entries;  in efficeon_remove_memory()
    295  if ((pg_start + mem->page_count) > num_entries)  in efficeon_remove_memory()

ali-agp.c
    128  int i, page_count;  in m1541_cache_flush() local
    133  page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;  in m1541_cache_flush()
    134  for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {  in m1541_cache_flush()
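
Here page_count is derived from an allocation order (line 133: an order-n aperture spans 2^n pages) and the flush loop steps through it a page at a time. A reduced sketch, with flush_one() standing in for the chipset-specific flush write:

#include <linux/mm.h>                   /* PAGE_SIZE */

static inline void flush_one(unsigned long addr) { /* chipset write goes here */ }

static void flush_aperture_sketch(unsigned long base, int page_order)
{
        int page_count = 1 << page_order;       /* order 2 -> 4 pages, etc. */
        unsigned long i;

        for (i = 0; i < PAGE_SIZE * (unsigned long)page_count; i += PAGE_SIZE)
                flush_one(base + i);            /* one flush per page */
}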

sworks-agp.c
    331  if ((pg_start + mem->page_count) > num_entries) {  in serverworks_insert_memory()
    336  while (j < (pg_start + mem->page_count)) {  in serverworks_insert_memory()
    349  for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {  in serverworks_insert_memory()
    374  for (i = pg_start; i < (mem->page_count + pg_start); i++) {  in serverworks_remove_memory()

/drivers/target/

target_core_rd.c
     81  u32 i, j, page_count = 0, sg_per_table;  in rd_release_sgl_table() local
     91  page_count++;  in rd_release_sgl_table()
     98  return page_count;  in rd_release_sgl_table()
    103  u32 page_count;  in rd_release_device_space() local
    108  page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,  in rd_release_device_space()
    113  rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,  in rd_release_device_space()
    114  rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);  in rd_release_device_space()
    236  u32 page_count;  in rd_release_prot_space() local
    241  page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,  in rd_release_prot_space()
    246  rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,  in rd_release_prot_space()
    [all …]
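
rd_release_sgl_table() walks every scatterlist in every table, frees the backing pages, and returns how many it released, so the callers at lines 108 and 241 can log page_count and page_count * PAGE_SIZE. The shape of that walk, with an illustrative table layout (rd_sg_table_sketch is not the driver's struct):

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct rd_sg_table_sketch {             /* illustrative stand-in */
        struct scatterlist *sg_table;
        u32 rd_sg_count;
};

static u32 release_sgl_sketch(struct rd_sg_table_sketch *tables,
                              u32 table_count)
{
        u32 i, j, page_count = 0;

        for (i = 0; i < table_count; i++) {
                for (j = 0; j < tables[i].rd_sg_count; j++) {
                        struct page *pg = sg_page(&tables[i].sg_table[j]);

                        if (pg) {
                                __free_page(pg);
                                page_count++;   /* count what we freed */
                        }
                }
                kfree(tables[i].sg_table);
        }
        return page_count;      /* caller logs page_count * PAGE_SIZE bytes */
}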

/drivers/gpu/drm/nouveau/

nouveau_bo.c
    694  u32 page_count = new_mem->num_pages;  in nvc0_bo_move_copy() local
    697  page_count = new_mem->num_pages;  in nvc0_bo_move_copy()
    698  while (page_count) {  in nvc0_bo_move_copy()
    699  int line_count = (page_count > 8191) ? 8191 : page_count;  in nvc0_bo_move_copy()
    717  page_count -= line_count;  in nvc0_bo_move_copy()
    732  u32 page_count = new_mem->num_pages;  in nvc0_bo_move_m2mf() local
    735  page_count = new_mem->num_pages;  in nvc0_bo_move_m2mf()
    736  while (page_count) {  in nvc0_bo_move_m2mf()
    737  int line_count = (page_count > 2047) ? 2047 : page_count;  in nvc0_bo_move_m2mf()
    756  page_count -= line_count;  in nvc0_bo_move_m2mf()
    [all …]
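
Both nouveau move paths consume page_count in hardware-limited slices: the copy engine accepts at most 8191 lines per submission in one path and 2047 in the other, so the loop clamps, submits, and subtracts until nothing is left. The loop shape in isolation (emit_copy() stands in for the ring programming):

#include <linux/types.h>

static inline void emit_copy(u32 line_count) { /* ring programming goes here */ }

static void move_copy_sketch(u32 page_count, u32 max_lines)
{
        while (page_count) {
                u32 line_count = page_count > max_lines ? max_lines
                                                        : page_count;

                emit_copy(line_count);          /* one bounded submission */
                page_count -= line_count;
        }
}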

/drivers/staging/lustre/lustre/osc/

osc_cache.c
    180  size_t page_count;  in osc_extent_sanity_check0() local
    284  page_count = 0;  in osc_extent_sanity_check0()
    287  ++page_count;  in osc_extent_sanity_check0()
    293  if (page_count != ext->oe_nr_pages) {  in osc_extent_sanity_check0()
   1102  unsigned int page_count = 0;  in osc_extent_make_ready() local
   1114  ++page_count;  in osc_extent_make_ready()
   1137  LASSERT(page_count == ext->oe_nr_pages);  in osc_extent_make_ready()
   1966  unsigned int page_count = 0;  in get_write_extents() local
   1972  if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,  in get_write_extents()
   1974  return page_count;  in get_write_extents()
    [all …]

osc_page.c
    529  if (page_count(vmpage) - page_mapcount(vmpage) > 2)  in lru_page_busy()
    795  int page_count = desc->bd_iov_count;  in unstable_page_accounting() local
    800  for (i = 0; i < page_count; i++) {  in unstable_page_accounting()
    845  int page_count = desc->bd_iov_count;  in osc_dec_unstable_pages() local
    848  LASSERT(page_count >= 0);  in osc_dec_unstable_pages()
    851  unstable_count = atomic_long_sub_return(page_count,  in osc_dec_unstable_pages()
    855  unstable_count = atomic_long_sub_return(page_count,  in osc_dec_unstable_pages()
    872  long page_count = desc->bd_iov_count;  in osc_inc_unstable_pages() local
    879  atomic_long_add(page_count, &cli->cl_unstable_count);  in osc_inc_unstable_pages()
    880  atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);  in osc_inc_unstable_pages()
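
Two different things answer to page_count in this file: line 529 calls the kernel's page_count() helper, which reads a struct page's reference count, while the later hits are local variables mirroring bd_iov_count. The line-529 busy test in isolation (the threshold of 2 is the driver's own heuristic for the holds expected from the page cache and LRU):

#include <linux/mm.h>

/* "Busy" means the page holds references beyond its mappings plus the
 * couple of holds expected from the page cache and LRU. */
static bool lru_page_busy_sketch(struct page *vmpage)
{
        return page_count(vmpage) - page_mapcount(vmpage) > 2;
}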

osc_request.c
   1001  static void handle_short_read(int nob_read, u32 page_count,  in handle_short_read() argument
   1009  LASSERT(page_count > 0);  in handle_short_read()
   1017  page_count--;  in handle_short_read()
   1023  page_count--;  in handle_short_read()
   1028  while (page_count-- > 0) {  in handle_short_read()
   1038  u32 page_count, struct brw_page **pga)  in check_write_rcs() argument
   1132  (long)pga[i]->pg->flags, page_count(pga[i]->pg),  in osc_checksum_bulk()
   1155  struct lov_stripe_md *lsm, u32 page_count,  in osc_brw_prep_request() argument
   1188  for (niocount = i = 1; i < page_count; i++) {  in osc_brw_prep_request()
   1211  desc = ptlrpc_prep_bulk_imp(req, page_count,  in osc_brw_prep_request()
    [all …]
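
The loop at line 1188 counts remote niobufs: it starts niocount at 1 and bumps it each time two neighbouring brw_pages cannot be merged into one contiguous chunk. A sketch of that counting; the contiguity test used here (file offsets abutting) is an assumption about the merge condition, and brw_page_sketch is not lustre's real struct:

#include <linux/types.h>

struct brw_page_sketch {        /* illustrative, not lustre's brw_page */
        u64 off;                /* file offset of this page's data */
        u32 count;              /* bytes carried by this page */
};

/* Requires page_count >= 1, as the real code LASSERTs. */
static u32 count_niobufs(const struct brw_page_sketch *pga, u32 page_count)
{
        u32 i, niocount;

        for (niocount = i = 1; i < page_count; i++)
                if (pga[i].off != pga[i - 1].off + pga[i - 1].count)
                        niocount++;     /* gap: a new niobuf starts here */
        return niocount;
}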

/drivers/iommu/

tegra-gart.c
     58  u32 page_count; /* total remappable size */  member
     90  iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
    157  gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;  in gart_iova_range_valid()
    240  gart->page_count * GART_PAGE_SIZE - 1;  in gart_iommu_domain_alloc()
    413  gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);  in tegra_gart_probe()
    415  gart->savedata = vmalloc(sizeof(u32) * gart->page_count);  in tegra_gart_probe()
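
tegra_gart_probe() sizes page_count straight off the remap window (line 413), and every range check is then derived from it (lines 90, 157, 240). The arithmetic, assuming the driver's 4 KiB GART pages:

#include <linux/types.h>

enum { GART_PAGE_SHIFT_SK = 12 };               /* 4 KiB GART pages, assumed */
#define GART_PAGE_SIZE_SK (1UL << GART_PAGE_SHIFT_SK)

/* Window size -> remappable page count: 32 MiB >> 12 = 8192 pages. */
static u32 gart_pages(u64 window_bytes)
{
        return window_bytes >> GART_PAGE_SHIFT_SK;
}

/* Last valid iova, inclusive, matching lines 157 and 240. */
static u64 gart_last_iova(u64 iovmm_base, u32 page_count)
{
        return iovmm_base + (u64)page_count * GART_PAGE_SIZE_SK - 1;
}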

/drivers/gpu/drm/

drm_bufs.c
    823  dma->page_count += byte_count >> PAGE_SHIFT;  in drm_legacy_addbufs_agp()
    858  int page_count;  in drm_legacy_addbufs_pci() local
    926  temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),  in drm_legacy_addbufs_pci()
    937  dma->pagelist, dma->page_count * sizeof(*dma->pagelist));  in drm_legacy_addbufs_pci()
    939  dma->page_count + (count << page_order));  in drm_legacy_addbufs_pci()
    944  page_count = 0;  in drm_legacy_addbufs_pci()
    963  dma->page_count + page_count,  in drm_legacy_addbufs_pci()
    965  temp_pagelist[dma->page_count + page_count++]  in drm_legacy_addbufs_pci()
   1024  if (dma->page_count) {  in drm_legacy_addbufs_pci()
   1031  dma->page_count += entry->seg_count << page_order;  in drm_legacy_addbufs_pci()
    [all …]
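
drm_legacy_addbufs_pci() grows the DMA pagelist with the standard allocate-larger/copy/swap sequence: kmalloc_array for the old count plus count << page_order new slots (line 926), memcpy of the existing entries (line 937), then the new pages are appended. The grow step in isolation (the drm_device plumbing is omitted):

#include <linux/slab.h>
#include <linux/string.h>

/* Allocate a pagelist big enough for old_count + extra entries and
 * carry the old contents over; on failure the caller keeps the old
 * list, as drm_legacy_addbufs_pci() does. */
static unsigned long *grow_pagelist(const unsigned long *old,
                                    int old_count, int extra)
{
        unsigned long *tmp = kmalloc_array(old_count + extra, sizeof(*tmp),
                                           GFP_KERNEL);

        if (!tmp)
                return NULL;
        memcpy(tmp, old, old_count * sizeof(*tmp));
        return tmp;             /* caller fills slots old_count onward */
}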

/drivers/gpu/drm/udl/

udl_dmabuf.c
     82  int page_count;  in udl_map_dma_buf() local
    100  page_count = obj->base.size / PAGE_SIZE;  in udl_map_dma_buf()
    101  obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);  in udl_map_dma_buf()

/drivers/gpu/drm/ttm/

ttm_agp_backend.c
     62  mem->page_count = 0;  in ttm_agp_bind()
     69  mem->pages[mem->page_count++] = page;  in ttm_agp_bind()
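
ttm_agp_bind() zeroes page_count and then repopulates mem->pages one entry at a time, so the counter always equals the slots actually filled even when some source pages are skipped. The fill pattern, with an illustrative stand-in struct:

#include <linux/types.h>

struct page;                            /* used only as an opaque pointer */

struct agp_mem_sketch {                 /* stand-in for struct agp_memory */
        struct page **pages;
        size_t page_count;
};

/* Skipped (NULL) source pages never bump page_count, so the count
 * stays in step with the array, as in the ttm_agp_bind() lines above. */
static void fill_pages_sketch(struct agp_mem_sketch *mem,
                              struct page **src, size_t n)
{
        size_t i;

        mem->page_count = 0;
        for (i = 0; i < n; i++) {
                if (!src[i])
                        continue;       /* e.g. a placeholder dummy page */
                mem->pages[mem->page_count++] = src[i];
        }
}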

/drivers/block/

rbd.c
    270  u32 page_count;  member
   2169  obj_request->page_count);  in rbd_obj_request_destroy()
   2584  unsigned int page_count;  in rbd_img_request_fill() local
   2587  page_count = (u32)calc_pages_for(offset, length);  in rbd_img_request_fill()
   2588  obj_request->page_count = page_count;  in rbd_img_request_fill()
   2590  page_count--; /* more on last page */  in rbd_img_request_fill()
   2591  pages += page_count;  in rbd_img_request_fill()
   2625  u32 page_count;  in rbd_osd_copyup_callback() local
   2641  page_count = obj_request->copyup_page_count;  in rbd_osd_copyup_callback()
   2642  rbd_assert(page_count);  in rbd_osd_copyup_callback()
    [all …]
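
rbd sizes its page arrays with Ceph's calc_pages_for(offset, length) (line 2587), which counts the pages a byte range touches: last page index plus one, minus first page index. The arithmetic, reproduced here under the assumption that the helper follows the usual Ceph definition:

#include <linux/mm.h>                   /* PAGE_SIZE, PAGE_SHIFT */
#include <linux/types.h>

/* Pages touched by the byte range [off, off + len). With 4 KiB pages,
 * off = 4000 and len = 200 straddle one boundary, so this returns 2. */
static inline u32 pages_for_range(u64 off, u64 len)
{
        return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
               (off >> PAGE_SHIFT);
}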

/drivers/staging/lustre/lustre/llite/

rw26.c
    125  if (page_count(vmpage) > 3)  in ll_releasepage()
    216  int page_count = pv->ldp_nr;  in ll_direct_rw_pages() local
    224  for (i = 0; i < page_count; i++) {  in ll_direct_rw_pages()
    320  struct page **pages, int page_count)  in ll_direct_IO_26_seg() argument
    324  .ldp_nr = page_count,  in ll_direct_IO_26_seg()