
Searched refs:page_count (Results 1 – 25 of 76) sorted by relevance


/drivers/media/pci/ivtv/
ivtv-udma.c
33 dma_page->page_count = dma_page->last - dma_page->first + 1; in ivtv_udma_get_page_info()
34 if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset; in ivtv_udma_get_page_info()
48 for (i = 0; i < dma_page->page_count; i++) { in ivtv_udma_fill_sg_list()
49 unsigned int len = (i == dma_page->page_count - 1) ? in ivtv_udma_fill_sg_list()
112 if (dma->SG_length || dma->page_count) { in ivtv_udma_setup()
114 dma->SG_length, dma->page_count); in ivtv_udma_setup()
120 if (user_dma.page_count <= 0) { in ivtv_udma_setup()
122 user_dma.page_count, size_in_bytes, user_dma.offset); in ivtv_udma_setup()
129 user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL); in ivtv_udma_setup()
132 if (user_dma.page_count != err) { in ivtv_udma_setup()
[all …]
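
The ivtv-udma.c hits center on ivtv_udma_get_page_info(), which converts a user virtual address range into a page span: page_count = last - first + 1, with the single-page correction visible at line 34. Below is a minimal standalone sketch of that arithmetic; the struct and field names follow the excerpt, and the values in main() are arbitrary.

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_SHIFT 12

    struct dma_page_info {
        unsigned long first;    /* index of the first page the buffer touches */
        unsigned long last;     /* index of the last page */
        unsigned long offset;   /* byte offset into the first page */
        unsigned long tail;     /* bytes used in the last page */
        int page_count;
    };

    static void get_page_info(struct dma_page_info *p,
                              unsigned long uaddr, unsigned long size)
    {
        p->first = uaddr >> PAGE_SHIFT;
        p->last = (uaddr + size - 1) >> PAGE_SHIFT;
        p->offset = uaddr & (PAGE_SIZE - 1);
        p->tail = ((uaddr + size - 1) & (PAGE_SIZE - 1)) + 1;
        p->page_count = (int)(p->last - p->first + 1);
        /* single-page buffer: start and end fall in the same page, so the
         * tail must not include the leading offset (cf. line 34 above) */
        if (p->page_count == 1)
            p->tail -= p->offset;
    }

    int main(void)
    {
        struct dma_page_info p;

        get_page_info(&p, 0x1800, 0x400);   /* 1 KiB starting mid-page */
        printf("pages=%d offset=%lu tail=%lu\n", p.page_count, p.offset, p.tail);
        return 0;
    }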
ivtv-yuv.c
67 if (dma->SG_length || dma->page_count) { in ivtv_yuv_prep_user_dma()
70 dma->SG_length, dma->page_count); in ivtv_yuv_prep_user_dma()
79 …y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], … in ivtv_yuv_prep_user_dma()
81 if (y_pages == y_dma.page_count) { in ivtv_yuv_prep_user_dma()
83 uv_dma.uaddr, uv_dma.page_count, 0, 1, in ivtv_yuv_prep_user_dma()
88 if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) { in ivtv_yuv_prep_user_dma()
91 if (y_pages == y_dma.page_count) { in ivtv_yuv_prep_user_dma()
94 "expecting %d\n", uv_pages, uv_dma.page_count); in ivtv_yuv_prep_user_dma()
106 "expecting %d\n", y_pages, y_dma.page_count); in ivtv_yuv_prep_user_dma()
123 dma->page_count = y_pages + uv_pages; in ivtv_yuv_prep_user_dma()
[all …]
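
ivtv_yuv_prep_user_dma() pins two user ranges (the Y and UV planes) with get_user_pages() and treats anything short of the requested page_count as failure, releasing every page it did pin. A standalone model of that all-or-nothing pattern, with malloc() standing in for page pinning:

    #include <stdlib.h>

    /* stand-in for get_user_pages(): "pins" up to n pages, returns how many */
    static int pin_pages(void **pages, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            pages[i] = malloc(4096);
            if (!pages[i])
                break;
        }
        return i;
    }

    /* returns the combined page_count on success, -1 on a partial pin */
    static int prep_user_dma(void **map, int y_count, int uv_count)
    {
        int y_pages = pin_pages(&map[0], y_count);
        int uv_pages = 0;
        int i;

        if (y_pages == y_count)   /* only try UV once Y is fully pinned */
            uv_pages = pin_pages(&map[y_count], uv_count);

        if (y_pages != y_count || uv_pages != uv_count) {
            /* partial pin: release whatever was obtained and bail out */
            for (i = 0; i < y_pages; i++)
                free(map[i]);
            for (i = 0; i < uv_pages; i++)
                free(map[y_count + i]);
            return -1;
        }
        return y_count + uv_count;   /* becomes dma->page_count, cf. line 123 */
    }

    int main(void)
    {
        void *map[8];

        return prep_user_dma(map, 6, 2) < 0;
    }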
/drivers/firewire/
core-iso.c
42 int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) in fw_iso_buffer_alloc() argument
46 buffer->page_count = 0; in fw_iso_buffer_alloc()
48 buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), in fw_iso_buffer_alloc()
53 for (i = 0; i < page_count; i++) { in fw_iso_buffer_alloc()
58 buffer->page_count = i; in fw_iso_buffer_alloc()
59 if (i < page_count) { in fw_iso_buffer_alloc()
75 for (i = 0; i < buffer->page_count; i++) { in fw_iso_buffer_map_dma()
84 if (i < buffer->page_count) in fw_iso_buffer_map_dma()
91 int page_count, enum dma_data_direction direction) in fw_iso_buffer_init() argument
95 ret = fw_iso_buffer_alloc(buffer, page_count); in fw_iso_buffer_init()
[all …]
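
fw_iso_buffer_alloc() records partial progress: if an allocation fails midway, buffer->page_count is set to the number of pages actually obtained (line 58) so cleanup frees exactly those. The kernel delegates the rollback to a destroy helper; this standalone sketch inlines it, with malloc() in place of alloc_page():

    #include <stdlib.h>

    struct iso_buffer {
        void **pages;
        int page_count;
    };

    static int iso_buffer_alloc(struct iso_buffer *buffer, int page_count)
    {
        int i;

        buffer->page_count = 0;
        buffer->pages = malloc(page_count * sizeof(buffer->pages[0]));
        if (!buffer->pages)
            return -1;

        for (i = 0; i < page_count; i++) {
            buffer->pages[i] = malloc(4096);
            if (!buffer->pages[i])
                break;
        }
        buffer->page_count = i;     /* pages actually allocated so far */
        if (i < page_count) {       /* partial failure: roll back cleanly */
            while (i-- > 0)
                free(buffer->pages[i]);
            free(buffer->pages);
            buffer->pages = NULL;
            buffer->page_count = 0;
            return -1;
        }
        return 0;
    }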
/drivers/char/agp/
generic.c
197 if (curr->page_count != 0) { in agp_free_memory()
202 for (i = 0; i < curr->page_count; i++) { in agp_free_memory()
207 for (i = 0; i < curr->page_count; i++) { in agp_free_memory()
234 size_t page_count, u32 type) in agp_allocate_memory() argument
245 if ((cur_memory + page_count > bridge->max_memory_agp) || in agp_allocate_memory()
246 (cur_memory + page_count < page_count)) in agp_allocate_memory()
250 new = agp_generic_alloc_user(page_count, type); in agp_allocate_memory()
257 new = bridge->driver->alloc_by_type(page_count, type); in agp_allocate_memory()
263 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; in agp_allocate_memory()
271 if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) { in agp_allocate_memory()
[all …]
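
The guard at lines 245-246 of generic.c checks both the aperture budget and unsigned wraparound: if cur_memory + page_count overflows, the sum comes out smaller than page_count. Reduced to a standalone predicate:

    #include <stdbool.h>
    #include <stddef.h>

    static bool request_fits(size_t cur_memory, size_t page_count,
                             size_t max_memory_agp)
    {
        if ((cur_memory + page_count > max_memory_agp) ||
            (cur_memory + page_count < page_count))   /* sum wrapped around */
            return false;
        return true;
    }

    int main(void)
    {
        /* (size_t)-1 + 2 wraps to 1, so this request is rejected */
        return request_fits((size_t)-1, 2, 100) ? 1 : 0;
    }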
i460-agp.c
311 if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { in i460_insert_memory_small_io_page()
317 while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) { in i460_insert_memory_small_io_page()
327 for (i = 0, j = io_pg_start; i < mem->page_count; i++) { in i460_insert_memory_small_io_page()
346 for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) in i460_remove_memory_small_io_page()
415 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; in i460_insert_memory_large_io_page()
417 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; in i460_insert_memory_large_io_page()
473 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; in i460_remove_memory_large_io_page()
475 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; in i460_remove_memory_large_io_page()
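
The i460 start/end arithmetic at lines 415-417 maps a run of mem->page_count kernel pages onto large I/O page descriptors, each holding I460_KPAGES_PER_IOPAGE kernel pages. The same index/offset split as a standalone sketch (the KPAGES_PER_IOPAGE value here is illustrative, not the i460's):

    #include <stdio.h>

    #define KPAGES_PER_IOPAGE 64   /* illustrative: kernel pages per large I/O page */

    int main(void)
    {
        int pg_start = 100, page_count = 300;

        int start_idx    = pg_start / KPAGES_PER_IOPAGE;
        int start_offset = pg_start % KPAGES_PER_IOPAGE;
        int end_idx      = (pg_start + page_count - 1) / KPAGES_PER_IOPAGE;
        int end_offset   = (pg_start + page_count - 1) % KPAGES_PER_IOPAGE;

        /* descriptors start_idx..end_idx are touched: the first from
         * start_offset onward, the last up to and including end_offset */
        printf("descriptors %d..%d, offsets %d..%d\n",
               start_idx, end_idx, start_offset, end_offset);
        return 0;
    }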
sgi-agp.c
173 if ((pg_start + mem->page_count) > num_entries) in sgi_tioca_insert_memory()
178 while (j < (pg_start + mem->page_count)) { in sgi_tioca_insert_memory()
189 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in sgi_tioca_insert_memory()
217 for (i = pg_start; i < (mem->page_count + pg_start); i++) { in sgi_tioca_remove_memory()
nvidia-agp.c
210 if (mem->page_count == 0) in nvidia_insert_memory()
213 if ((pg_start + mem->page_count) > in nvidia_insert_memory()
217 for (j = pg_start; j < (pg_start + mem->page_count); j++) { in nvidia_insert_memory()
226 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in nvidia_insert_memory()
250 if (mem->page_count == 0) in nvidia_remove_memory()
253 for (i = pg_start; i < (mem->page_count + pg_start); i++) in nvidia_remove_memory()
intel-gtt.c
126 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); in intel_gtt_unmap_memory()
218 if ((pg_start + mem->page_count) in i810_insert_dcache_entries()
225 for (i = pg_start; i < (pg_start + mem->page_count); i++) { in i810_insert_dcache_entries()
270 new->page_count = pg_count; in alloc_agpphysmem_i8xx()
281 if (curr->page_count == 4) in intel_i810_free_by_type()
900 if (mem->page_count == 0) in intel_fake_agp_insert_entries()
903 if (pg_start + mem->page_count > intel_private.gtt_total_entries) in intel_fake_agp_insert_entries()
918 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); in intel_fake_agp_insert_entries()
926 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, in intel_fake_agp_insert_entries()
953 if (mem->page_count == 0) in intel_fake_agp_remove_entries()
[all …]
ati-agp.c
280 if (mem->page_count == 0) in ati_insert_memory()
283 if ((pg_start + mem->page_count) > num_entries) in ati_insert_memory()
287 while (j < (pg_start + mem->page_count)) { in ati_insert_memory()
301 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in ati_insert_memory()
326 if (mem->page_count == 0) in ati_remove_memory()
329 for (i = pg_start; i < (mem->page_count + pg_start); i++) { in ati_remove_memory()
uninorth-agp.c
165 if (mem->page_count == 0) in uninorth_insert_memory()
171 if ((pg_start + mem->page_count) > num_entries) in uninorth_insert_memory()
175 for (i = 0; i < mem->page_count; ++i) { in uninorth_insert_memory()
184 for (i = 0; i < mem->page_count; i++) { in uninorth_insert_memory()
214 if (mem->page_count == 0) in uninorth_remove_memory()
218 for (i = 0; i < mem->page_count; ++i) { in uninorth_remove_memory()
agp.h
199 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
203 struct agp_memory *memory, size_t page_count);
220 struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
efficeon-agp.c
240 int i, count = mem->page_count, num_entries; in efficeon_insert_memory()
248 if ((pg_start + mem->page_count) > num_entries) in efficeon_insert_memory()
289 int i, count = mem->page_count, num_entries; in efficeon_remove_memory()
295 if ((pg_start + mem->page_count) > num_entries) in efficeon_remove_memory()
ali-agp.c
128 int i, page_count; in m1541_cache_flush() local
133 page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order; in m1541_cache_flush()
134 for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { in m1541_cache_flush()
amd-k7-agp.c
295 if ((pg_start + mem->page_count) > num_entries) in amd_insert_memory()
299 while (j < (pg_start + mem->page_count)) { in amd_insert_memory()
312 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in amd_insert_memory()
335 for (i = pg_start; i < (mem->page_count + pg_start); i++) { in amd_remove_memory()
sworks-agp.c
331 if ((pg_start + mem->page_count) > num_entries) { in serverworks_insert_memory()
336 while (j < (pg_start + mem->page_count)) { in serverworks_insert_memory()
349 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in serverworks_insert_memory()
374 for (i = pg_start; i < (mem->page_count + pg_start); i++) { in serverworks_remove_memory()
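
The nvidia, ati, amd-k7, sgi, and sworks excerpts all follow one insert_memory() shape: return early on an empty request, bounds-check pg_start + page_count against the GATT size, verify the target slots are free, then write one translation entry per page. A condensed standalone model (the GATT is a plain array here and the entry encoding is invented):

    #include <errno.h>
    #include <stdint.h>

    #define NUM_ENTRIES 1024
    static uint32_t gatt[NUM_ENTRIES];   /* 0 means "slot free" */

    static int insert_memory(const uint64_t *page_addrs, int page_count, int pg_start)
    {
        int i, j;

        if (page_count == 0)
            return 0;
        if (pg_start + page_count > NUM_ENTRIES)
            return -EINVAL;
        for (j = pg_start; j < pg_start + page_count; j++)
            if (gatt[j] != 0)            /* slot already occupied */
                return -EBUSY;
        for (i = 0, j = pg_start; i < page_count; i++, j++)
            gatt[j] = (uint32_t)(page_addrs[i] >> 12) | 1;   /* addr | valid */
        return 0;
    }

    static int remove_memory(int page_count, int pg_start)
    {
        int i;

        if (page_count == 0)
            return 0;
        for (i = pg_start; i < page_count + pg_start; i++)
            gatt[i] = 0;                 /* cf. the remove_memory() loops above */
        return 0;
    }

    int main(void)
    {
        uint64_t addrs[2] = { 0x10000, 0x20000 };

        if (insert_memory(addrs, 2, 10) != 0)
            return 1;
        return remove_memory(2, 10);
    }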
/drivers/target/
target_core_rd.c
86 u32 i, j, page_count = 0, sg_per_table; in rd_release_sgl_table() local
96 page_count++; in rd_release_sgl_table()
103 return page_count; in rd_release_sgl_table()
108 u32 page_count; in rd_release_device_space() local
113 page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, in rd_release_device_space()
118 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, in rd_release_device_space()
119 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); in rd_release_device_space()
228 u32 page_count; in rd_release_prot_space() local
233 page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, in rd_release_prot_space()
238 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, in rd_release_prot_space()
[all …]
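
rd_release_sgl_table() walks every scatterlist table, frees each backing page, and returns the number of pages released so the caller can log the freed byte total (page_count * PAGE_SIZE, line 119). A simplified standalone version with stand-in types:

    #include <stdlib.h>

    struct sg_entry { void *page; };
    struct sg_table { struct sg_entry *sg; int nents; };

    static unsigned int release_sgl_tables(struct sg_table *tables, int table_count)
    {
        unsigned int page_count = 0;
        int i, j;

        for (i = 0; i < table_count; i++) {
            for (j = 0; j < tables[i].nents; j++) {
                if (tables[i].sg[j].page) {
                    free(tables[i].sg[j].page);
                    page_count++;
                }
            }
            free(tables[i].sg);
        }
        return page_count;   /* caller logs page_count * PAGE_SIZE bytes freed */
    }

    int main(void)
    {
        struct sg_table t = { NULL, 0 };

        return (int)release_sgl_tables(&t, 1);
    }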
/drivers/staging/lustre/lustre/llite/
lloop.c
195 u32 page_count = 0; in do_bio_lustrebacked() local
225 pages[page_count] = bvec.bv_page; in do_bio_lustrebacked()
226 offsets[page_count] = offset; in do_bio_lustrebacked()
227 page_count++; in do_bio_lustrebacked()
230 LASSERT(page_count <= LLOOP_MAX_SEGMENTS); in do_bio_lustrebacked()
235 page_count); in do_bio_lustrebacked()
237 pvec->ldp_size = page_count << PAGE_CACHE_SHIFT; in do_bio_lustrebacked()
238 pvec->ldp_nr = page_count; in do_bio_lustrebacked()
296 unsigned int page_count = 0; in loop_get_bio() local
315 page_count, (*bio)->bi_vcnt); in loop_get_bio()
[all …]
rw26.c
139 if (page_count(vmpage) > 3) in ll_releasepage()
239 int page_count = pv->ldp_nr; in ll_direct_rw_pages() local
247 for (i = 0; i < page_count; i++) { in ll_direct_rw_pages()
337 struct page **pages, int page_count) in ll_direct_IO_26_seg() argument
340 .ldp_nr = page_count, in ll_direct_IO_26_seg()
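
Note that the rw26.c match at line 139, page_count(vmpage), is the mm layer's page reference-count accessor from <linux/mm.h>, not one of the driver-local page_count fields and variables matched elsewhere in these results.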
/drivers/gpu/drm/nouveau/
nouveau_bo.c
624 u32 page_count = new_mem->num_pages; in nvc0_bo_move_copy() local
627 page_count = new_mem->num_pages; in nvc0_bo_move_copy()
628 while (page_count) { in nvc0_bo_move_copy()
629 int line_count = (page_count > 8191) ? 8191 : page_count; in nvc0_bo_move_copy()
647 page_count -= line_count; in nvc0_bo_move_copy()
662 u32 page_count = new_mem->num_pages; in nvc0_bo_move_m2mf() local
665 page_count = new_mem->num_pages; in nvc0_bo_move_m2mf()
666 while (page_count) { in nvc0_bo_move_m2mf()
667 int line_count = (page_count > 2047) ? 2047 : page_count; in nvc0_bo_move_m2mf()
686 page_count -= line_count; in nvc0_bo_move_m2mf()
[all …]
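
Both nouveau move paths split a transfer into hardware-sized bursts: each copy command handles at most 8191 lines on one engine and 2047 on the other, so the loop peels off min(page_count, limit) pages per iteration. A minimal standalone model of the chunking, with a stub in place of the command submission:

    #include <stdio.h>

    #define LINE_MAX 8191   /* per-command limit; 2047 on the other engine */

    static void submit_copy(int line_count)
    {
        printf("copy burst: %d lines\n", line_count);   /* stand-in for the HW push */
    }

    static void move_pages(unsigned int page_count)
    {
        while (page_count) {
            int line_count = (page_count > LINE_MAX) ? LINE_MAX : page_count;

            submit_copy(line_count);
            page_count -= line_count;
        }
    }

    int main(void)
    {
        move_pages(20000);   /* 8191 + 8191 + 3618 */
        return 0;
    }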
/drivers/staging/lustre/lustre/osc/
osc_cache.c
178 int page_count; in osc_extent_sanity_check0() local
242 page_count = 0; in osc_extent_sanity_check0()
245 ++page_count; in osc_extent_sanity_check0()
249 if (page_count != ext->oe_nr_pages) in osc_extent_sanity_check0()
1032 int page_count = 0; in osc_extent_make_ready() local
1044 ++page_count; in osc_extent_make_ready()
1067 LASSERT(page_count == ext->oe_nr_pages); in osc_extent_make_ready()
1822 int page_count = 0; in get_write_extents() local
1830 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, in get_write_extents()
1832 return page_count; in get_write_extents()
[all …]
osc_request.c
1094 static void handle_short_read(int nob_read, u32 page_count, in handle_short_read() argument
1102 LASSERT (page_count > 0); in handle_short_read()
1110 page_count--; in handle_short_read()
1116 page_count--; in handle_short_read()
1121 while (page_count-- > 0) { in handle_short_read()
1131 u32 page_count, struct brw_page **pga) in check_write_rcs() argument
1222 (long)pga[i]->pg->flags, page_count(pga[i]->pg), in osc_checksum_bulk()
1247 struct lov_stripe_md *lsm, u32 page_count, in osc_brw_prep_request() argument
1280 for (niocount = i = 1; i < page_count; i++) { in osc_brw_prep_request()
1303 desc = ptlrpc_prep_bulk_imp(req, page_count, in osc_brw_prep_request()
[all …]
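
handle_short_read() reconciles a short read with its page list: pages read in full consume their byte counts, the page containing the read boundary is zeroed past it, and every later page is zeroed entirely. A simplified standalone version (plain buffers instead of kmap'd pages):

    #include <string.h>

    struct brw_page { char *buf; unsigned int count; };   /* bytes expected per page */

    static void handle_short_read(int nob_read, unsigned int page_count,
                                  struct brw_page *pga)
    {
        unsigned int i = 0;

        /* skip the pages that were read in full */
        while (nob_read > 0 && i < page_count && pga[i].count <= (unsigned)nob_read) {
            nob_read -= pga[i].count;
            i++;
        }
        /* zero the unread tail of the partially filled page, if any */
        if (nob_read > 0 && i < page_count) {
            memset(pga[i].buf + nob_read, 0, pga[i].count - nob_read);
            i++;
        }
        /* zero every page past the read boundary */
        for (; i < page_count; i++)
            memset(pga[i].buf, 0, pga[i].count);
    }

    int main(void)
    {
        char a[8] = "aaaaaaaa", b[8] = "bbbbbbbb";
        struct brw_page pga[2] = { { a, 8 }, { b, 8 } };

        handle_short_read(10, 2, pga);   /* only 10 of 16 bytes arrived */
        return b[2] != 0;                /* bytes 10..15 are now zero */
    }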
/drivers/iommu/
tegra-gart.c
58 u32 page_count; /* total remappable size */ member
80 iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
147 gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1; in gart_iova_range_valid()
170 gart->page_count * GART_PAGE_SIZE - 1; in gart_iommu_attach_dev()
386 gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT); in tegra_gart_probe()
388 gart->savedata = vmalloc(sizeof(u32) * gart->page_count); in tegra_gart_probe()
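
tegra-gart derives page_count from the size of the remap window (line 386) and uses it to bound every iova it is asked to touch. A condensed standalone model of that range check (constants illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define GART_PAGE_SHIFT 12
    #define GART_PAGE_SIZE  (1UL << GART_PAGE_SHIFT)

    struct gart {
        uint64_t iovmm_base;   /* start of the remappable window */
        uint32_t page_count;   /* total remappable size, in GART pages */
    };

    static bool gart_iova_range_valid(const struct gart *g, uint64_t iova, size_t bytes)
    {
        uint64_t gart_start = g->iovmm_base;
        uint64_t gart_end   = gart_start + (uint64_t)g->page_count * GART_PAGE_SIZE - 1;
        uint64_t req_end    = iova + bytes - 1;

        return iova >= gart_start && req_end <= gart_end;
    }

    int main(void)
    {
        struct gart g = { 0x58000000, 0x8000 };   /* 128 MiB window */

        return gart_iova_range_valid(&g, 0x58001000, 4096) ? 0 : 1;
    }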
/drivers/block/
rbd.c
264 u32 page_count; member
2065 obj_request->page_count); in rbd_obj_request_destroy()
2288 obj_request->page_count = 0; in rbd_img_obj_end_request()
2477 unsigned int page_count; in rbd_img_request_fill() local
2480 page_count = (u32)calc_pages_for(offset, length); in rbd_img_request_fill()
2481 obj_request->page_count = page_count; in rbd_img_request_fill()
2483 page_count--; /* more on last page */ in rbd_img_request_fill()
2484 pages += page_count; in rbd_img_request_fill()
2520 u32 page_count; in rbd_osd_copyup_callback() local
2536 page_count = obj_request->copyup_page_count; in rbd_osd_copyup_callback()
[all …]
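
rbd sizes its page array with calc_pages_for() (line 2480), which counts how many pages an (offset, length) byte range touches. The equivalent arithmetic as a standalone function (assumes length > 0):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static uint32_t calc_pages_for(uint64_t offset, uint64_t length)
    {
        uint64_t first = offset / PAGE_SIZE;                  /* first page touched */
        uint64_t last  = (offset + length - 1) / PAGE_SIZE;   /* last page touched */

        return (uint32_t)(last - first + 1);
    }

    int main(void)
    {
        /* 8 KiB starting 1 KiB into a page spans three pages */
        printf("%u\n", calc_pages_for(1024, 8192));
        return 0;
    }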
/drivers/gpu/drm/
drm_bufs.c
742 dma->page_count += byte_count >> PAGE_SHIFT; in drm_legacy_addbufs_agp()
777 int page_count; in drm_legacy_addbufs_pci() local
845 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) * in drm_legacy_addbufs_pci()
855 dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); in drm_legacy_addbufs_pci()
857 dma->page_count + (count << page_order)); in drm_legacy_addbufs_pci()
862 page_count = 0; in drm_legacy_addbufs_pci()
881 dma->page_count + page_count, in drm_legacy_addbufs_pci()
883 temp_pagelist[dma->page_count + page_count++] in drm_legacy_addbufs_pci()
942 if (dma->page_count) { in drm_legacy_addbufs_pci()
949 dma->page_count += entry->seg_count << page_order; in drm_legacy_addbufs_pci()
[all …]
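
drm_legacy_addbufs_pci() grows the DMA pagelist by allocating a larger temporary array, copying the existing dma->page_count entries into it, and filling the new slots as segments are allocated (lines 845-883). The kernel keeps the old list until the new buffers are wired up; this sketch collapses the grow-by-copy step into one standalone function:

    #include <stdlib.h>
    #include <string.h>

    /* returns the enlarged list, or NULL (leaving the old list intact) */
    static unsigned long *grow_pagelist(unsigned long **pagelist, int page_count,
                                        int extra_pages)
    {
        unsigned long *temp_pagelist;

        temp_pagelist = malloc((page_count + extra_pages) * sizeof(**pagelist));
        if (!temp_pagelist)
            return NULL;
        /* keep the existing entries; the caller appends the new pages */
        memcpy(temp_pagelist, *pagelist, page_count * sizeof(**pagelist));
        free(*pagelist);
        *pagelist = temp_pagelist;
        return temp_pagelist;
    }

    int main(void)
    {
        unsigned long *pagelist = calloc(4, sizeof(*pagelist));

        if (!pagelist || !grow_pagelist(&pagelist, 4, 8))
            return 1;
        free(pagelist);
        return 0;
    }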
/drivers/gpu/drm/ttm/
ttm_agp_backend.c
63 mem->page_count = 0; in ttm_agp_bind()
70 mem->pages[mem->page_count++] = page; in ttm_agp_bind()
