
Searched refs:pgoff (Results 1 – 25 of 66) sorted by relevance


/drivers/dax/
device.c
54 __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, in dax_pgoff_to_phys() argument
65 pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1; in dax_pgoff_to_phys()
66 if (pgoff < dax_range->pgoff || pgoff > pgoff_end) in dax_pgoff_to_phys()
68 phys = PFN_PHYS(pgoff - dax_range->pgoff) + range->start; in dax_pgoff_to_phys()
82 pgoff_t pgoff; in dax_set_mapping() local
88 pgoff = linear_page_index(vmf->vma, in dax_set_mapping()
99 page->index = pgoff + i; in dax_set_mapping()
123 phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE); in __dev_dax_pte_fault()
125 dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff); in __dev_dax_pte_fault()
142 pgoff_t pgoff; in __dev_dax_pmd_fault() local
[all …]
super.c
149 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, in dax_direct_access() argument
163 avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages, in dax_direct_access()
171 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, in dax_copy_from_iter() argument
187 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, in dax_copy_to_iter() argument
203 int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, in dax_zero_page_range() argument
216 return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages); in dax_zero_page_range()
220 size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, in dax_recovery_write() argument
225 return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter); in dax_recovery_write()
dax-private.h
75 unsigned long pgoff; member
91 phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size);
bus.c
731 rc = sprintf(buf, "%#lx\n", dax_range->pgoff); in pgoff_show()
808 unsigned long pgoff = 0; in alloc_dev_dax_range() local
835 pgoff += PHYS_PFN(range_len(&ranges[i].range)); in alloc_dev_dax_range()
838 .pgoff = pgoff, in alloc_dev_dax_range()
974 if (dax_range->pgoff > last->pgoff) in adjust_ok()
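
The device.c hits above all feed one translation: find the registered range whose pgoff window contains the requested pgoff, then turn the in-window offset back into a physical address. A minimal userspace sketch of that arithmetic, with PAGE_SHIFT/PFN_PHYS/PHYS_PFN redefined locally and a made-up two-range table (the real lookup lives in dax_pgoff_to_phys()):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)
    #define PHYS_PFN(x) ((uint64_t)(x) >> PAGE_SHIFT)

    /* hypothetical stand-in for the dev_dax_range of dax-private.h */
    struct range { uint64_t pgoff, start, len; };

    static uint64_t pgoff_to_phys(const struct range *r, int nr, uint64_t pgoff)
    {
        for (int i = 0; i < nr; i++) {
            uint64_t pgoff_end = r[i].pgoff + PHYS_PFN(r[i].len) - 1;
            if (pgoff < r[i].pgoff || pgoff > pgoff_end)
                continue;        /* not in this range's pgoff window */
            return PFN_PHYS(pgoff - r[i].pgoff) + r[i].start;
        }
        return (uint64_t)-1;     /* no range covers this pgoff */
    }

    int main(void)
    {
        const struct range r[] = {
            { .pgoff = 0,   .start = 0x100000000ULL, .len = 1 << 20 },
            { .pgoff = 256, .start = 0x200000000ULL, .len = 1 << 20 },
        };
        printf("%#llx\n", (unsigned long long)pgoff_to_phys(r, 2, 260));
        return 0;
    }
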
/drivers/md/
dm-linear.c
158 static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff) in linear_dax_pgoff() argument
161 sector_t sector = linear_map_sector(ti, *pgoff << PAGE_SECTORS_SHIFT); in linear_dax_pgoff()
163 *pgoff = (get_start_sect(lc->dev->bdev) + sector) >> PAGE_SECTORS_SHIFT; in linear_dax_pgoff()
167 static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, in linear_dax_direct_access() argument
171 struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff); in linear_dax_direct_access()
173 return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn); in linear_dax_direct_access()
176 static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, in linear_dax_zero_page_range() argument
179 struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff); in linear_dax_zero_page_range()
181 return dax_zero_page_range(dax_dev, pgoff, nr_pages); in linear_dax_zero_page_range()
184 static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff, in linear_dax_recovery_write() argument
[all …]
dm-stripe.c
298 static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff) in stripe_dax_pgoff() argument
305 stripe_map_sector(sc, *pgoff * PAGE_SECTORS, &stripe, &dev_sector); in stripe_dax_pgoff()
309 *pgoff = (get_start_sect(bdev) + dev_sector) >> PAGE_SECTORS_SHIFT; in stripe_dax_pgoff()
313 static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, in stripe_dax_direct_access() argument
317 struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff); in stripe_dax_direct_access()
319 return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn); in stripe_dax_direct_access()
322 static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, in stripe_dax_zero_page_range() argument
325 struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff); in stripe_dax_zero_page_range()
327 return dax_zero_page_range(dax_dev, pgoff, nr_pages); in stripe_dax_zero_page_range()
330 static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff, in stripe_dax_recovery_write() argument
[all …]
dm-log-writes.c
883 pgoff_t *pgoff) in log_writes_dax_pgoff() argument
887 *pgoff += (get_start_sect(lc->dev->bdev) >> PAGE_SECTORS_SHIFT); in log_writes_dax_pgoff()
891 static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, in log_writes_dax_direct_access() argument
895 struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff); in log_writes_dax_direct_access()
897 return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn); in log_writes_dax_direct_access()
900 static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, in log_writes_dax_zero_page_range() argument
903 struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff); in log_writes_dax_zero_page_range()
905 return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT); in log_writes_dax_zero_page_range()
909 pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) in log_writes_dax_recovery_write() argument
911 struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff); in log_writes_dax_recovery_write()
[all …]
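
All three device-mapper targets above do the same pgoff stacking: convert the caller's pgoff to a sector on the underlying block device, add the partition start, and shift back to a pgoff before forwarding to dax_direct_access()/dax_zero_page_range(). A hedged sketch of the linear target's offset math only (start sectors are made up; PAGE_SECTORS_SHIFT is 3 with 4K pages and 512-byte sectors):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SECTORS_SHIFT 3 /* PAGE_SHIFT(12) - SECTOR_SHIFT(9) */

    /* mirrors linear_dax_pgoff(): rewrite *pgoff for the lower device */
    static void linear_remap_pgoff(uint64_t target_start_sector,
                                   uint64_t bdev_start_sector,
                                   uint64_t *pgoff)
    {
        uint64_t sector = target_start_sector +
                          (*pgoff << PAGE_SECTORS_SHIFT);
        *pgoff = (bdev_start_sector + sector) >> PAGE_SECTORS_SHIFT;
    }

    int main(void)
    {
        uint64_t pgoff = 10; /* page 10 of the mapped target */

        linear_remap_pgoff(2048, 8192, &pgoff); /* made-up start sectors */
        printf("lower-device pgoff = %llu\n", (unsigned long long)pgoff);
        return 0;
    }
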
/drivers/w1/slaves/
w1_ds250x.c
49 int pgoff = pageno * W1_PAGE_SIZE; in w1_ds2502_read_page() local
63 buf[1] = pgoff & 0xff; in w1_ds2502_read_page()
64 buf[2] = pgoff >> 8; in w1_ds2502_read_page()
71 w1_read_block(sl->master, &data->eprom[pgoff], W1_PAGE_SIZE); in w1_ds2502_read_page()
74 if (w1_calc_crc8(&data->eprom[pgoff], W1_PAGE_SIZE) != crc8) in w1_ds2502_read_page()
88 int pgoff, epoff; in w1_ds2505_read_page() local
97 epoff = pgoff = pageno * W1_PAGE_SIZE; in w1_ds2505_read_page()
105 buf[1] = pgoff & 0xff; in w1_ds2505_read_page()
106 buf[2] = pgoff >> 8; in w1_ds2505_read_page()
121 pgoff = (redir ^ 0xff) * W1_PAGE_SIZE; in w1_ds2505_read_page()
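
In the w1 slaves, pgoff is not a page-cache index at all: it is a byte offset into the device EPROM, computed as pageno * W1_PAGE_SIZE and split little-endian into the two address bytes of the read command (DS2505 additionally recomputes it from redirection bytes). A standalone sketch of the packing; the 0xF0 command byte is an assumption for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define W1_PAGE_SIZE 32   /* matches the DS250x drivers */

    int main(void)
    {
        int pageno = 5;
        int pgoff = pageno * W1_PAGE_SIZE; /* byte offset of the page */
        uint8_t buf[3];

        buf[0] = 0xF0;         /* assumed read-memory command byte */
        buf[1] = pgoff & 0xff; /* address low byte */
        buf[2] = pgoff >> 8;   /* address high byte */
        printf("pgoff=%#x -> addr bytes %02x %02x\n", pgoff, buf[1], buf[2]);
        return 0;
    }
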
/drivers/dma-buf/
udmabuf.c
35 pgoff_t pgoff = vmf->pgoff; in udmabuf_vm_fault() local
37 if (pgoff >= ubuf->pagecount) in udmabuf_vm_fault()
39 vmf->page = ubuf->pages[pgoff]; in udmabuf_vm_fault()
178 pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit; in udmabuf_create() local
226 pgoff = list[i].offset >> PAGE_SHIFT; in udmabuf_create()
230 pgoff = list[i].offset >> huge_page_shift(hpstate); in udmabuf_create()
238 hpage = find_get_page_flags(mapping, pgoff, in udmabuf_create()
252 pgoff++; in udmabuf_create()
256 pgoff + pgidx); in udmabuf_create()
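
udmabuf.c shows the canonical fault-handler use of pgoff: vmf->pgoff already indexes pages within the mapping, so the handler only bounds-checks it against pagecount and returns the backing page. A hedged userspace model with plain structs standing in for vm_fault and the page list:

    #include <stddef.h>
    #include <stdio.h>

    struct fake_ubuf { size_t pagecount; const char **pages; };
    struct fake_vmf  { size_t pgoff; const char *page; };

    /* mirrors udmabuf_vm_fault(): index the page list by vmf->pgoff */
    static int fault(struct fake_ubuf *ubuf, struct fake_vmf *vmf)
    {
        if (vmf->pgoff >= ubuf->pagecount)
            return -1; /* the kernel returns VM_FAULT_SIGBUS here */
        vmf->page = ubuf->pages[vmf->pgoff];
        return 0;
    }

    int main(void)
    {
        const char *pages[] = { "pg0", "pg1", "pg2" };
        struct fake_ubuf ubuf = { 3, pages };
        struct fake_vmf vmf = { .pgoff = 2, .page = NULL };

        if (!fault(&ubuf, &vmf))
            printf("faulted in %s\n", vmf.page);
        return 0;
    }
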
/drivers/video/fbdev/core/
fb_defio.c
45 unsigned long pgoff = offset >> PAGE_SHIFT; in fb_deferred_io_pageref_get() local
48 if (WARN_ON_ONCE(pgoff >= info->npagerefs)) in fb_deferred_io_pageref_get()
52 pageref = &info->pagerefs[pgoff]; in fb_deferred_io_pageref_get()
64 pageref->offset = pgoff << PAGE_SHIFT; in fb_deferred_io_pageref_get()
100 offset = vmf->pgoff << PAGE_SHIFT; in fb_deferred_io_fault()
116 page->index = vmf->pgoff; /* for page_mkclean() */ in fb_deferred_io_fault()
/drivers/vfio/
iova_bitmap.c
41 unsigned long pgoff; member
204 mapped->pgoff = offset_in_page(addr); in iova_bitmap_get()
301 bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff; in iova_bitmap_mapped_remaining()
406 mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; in iova_bitmap_set()
408 mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; in iova_bitmap_set()
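
iova_bitmap keeps pgoff as the sub-page byte offset of the pinned user bitmap (offset_in_page(addr)) and subtracts or scales it whenever a mapped page count is converted back to usable bytes or bits. A small sketch of the byte accounting, with offset_in_page() reimplemented locally and a made-up address:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    /* local reimplementation of the kernel's offset_in_page() */
    #define offset_in_page(a) ((unsigned long)(a) & (PAGE_SIZE - 1))

    int main(void)
    {
        uintptr_t addr = 0x7f0000001234; /* made-up pinned user address */
        unsigned long npages = 4;        /* pages pinned for the bitmap */

        unsigned long pgoff = offset_in_page(addr);
        unsigned long bytes = (npages << PAGE_SHIFT) - pgoff;
        printf("pgoff=%lu usable bytes=%lu\n", pgoff, bytes);
        return 0;
    }
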
/drivers/infiniband/core/
ib_core_uverbs.c
118 unsigned long pgoff) in rdma_user_mmap_entry_get_pgoff() argument
122 if (pgoff > U32_MAX) in rdma_user_mmap_entry_get_pgoff()
127 entry = xa_load(&ucontext->mmap_xa, pgoff); in rdma_user_mmap_entry_get_pgoff()
134 if (!entry || entry->start_pgoff != pgoff || entry->driver_removed || in rdma_user_mmap_entry_get_pgoff()
141 pgoff, entry->npages); in rdma_user_mmap_entry_get_pgoff()
umem.c
84 unsigned long va, pgoff; in ib_umem_find_best_pgsz() local
107 pgoff = umem->address & ~PAGE_MASK; in ib_umem_find_best_pgsz()
113 mask |= (sg_dma_address(sg) + pgoff) ^ va; in ib_umem_find_best_pgsz()
114 va += sg_dma_len(sg) - pgoff; in ib_umem_find_best_pgsz()
121 pgoff = 0; in ib_umem_find_best_pgsz()
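
The umem.c hit is subtler: pgoff is the offset of the start of the user buffer within its first page, and ib_umem_find_best_pgsz() XORs each DMA segment address (plus that initial pgoff) against the expected virtual address, accumulating every bit where they disagree; the low bits of the resulting mask bound the largest page size that can back the region. A hedged two-segment model of just the mask accumulation, with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK (~((1ULL << 12) - 1))

    struct seg { uint64_t dma_addr, len; }; /* stand-in for the scatterlist */

    int main(void)
    {
        struct seg sg[] = { { 0x10000000, 0x200000 },
                            { 0x10200000, 0x200000 } }; /* made up */
        uint64_t va = 0x7f0040000000ULL;  /* made-up mapped virtual address */
        uint64_t pgoff = va & ~PAGE_MASK; /* offset inside the first page */
        uint64_t mask = 0;

        for (int i = 0; i < 2; i++) {
            mask |= (sg[i].dma_addr + pgoff) ^ va; /* disagreeing bits */
            va += sg[i].len - pgoff;
            pgoff = 0; /* only the first segment starts mid-page */
        }
        /* the lowest set bit of mask caps the usable page size */
        printf("mask=%#llx\n", (unsigned long long)mask);
        return 0;
    }
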
/drivers/android/
binder_alloc.c
1168 pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK; in binder_alloc_get_page() local
1173 *pgoffp = pgoff; in binder_alloc_get_page()
1193 pgoff_t pgoff; in binder_alloc_clear_buf() local
1196 buffer_offset, &pgoff); in binder_alloc_clear_buf()
1197 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_clear_buf()
1198 memset_page(page, pgoff, 0, size); in binder_alloc_clear_buf()
1230 pgoff_t pgoff; in binder_alloc_copy_user_to_buffer() local
1234 buffer_offset, &pgoff); in binder_alloc_copy_user_to_buffer()
1235 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_copy_user_to_buffer()
1236 kptr = kmap_local_page(page) + pgoff; in binder_alloc_copy_user_to_buffer()
[all …]
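
binder_alloc treats pgoff as the intra-page offset (buffer_space_offset & ~PAGE_MASK) and clamps every memset or copy chunk to PAGE_SIZE - pgoff so no operation crosses a page boundary. A runnable sketch of that chunking loop over plain offsets, with made-up buffer numbers:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE ((size_t)4096)

    int main(void)
    {
        size_t offset = 5000, bytes = 9000; /* made-up span to clear */

        while (bytes) {
            size_t pgoff = offset & (PAGE_SIZE - 1);  /* offset in page */
            size_t size = bytes < PAGE_SIZE - pgoff   /* min_t() clamp: */
                        ? bytes : PAGE_SIZE - pgoff;  /* stop at page edge */
            printf("page %zu: clear %zu bytes at pgoff %zu\n",
                   offset / PAGE_SIZE, size, pgoff);
            bytes -= size;
            offset += size;
        }
        return 0;
    }
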
/drivers/nvdimm/
pmem.c
264 __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, in __pmem_direct_access() argument
268 resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset; in __pmem_direct_access()
269 sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT; in __pmem_direct_access()
316 static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, in pmem_dax_zero_page_range() argument
322 PFN_PHYS(pgoff) >> SECTOR_SHIFT, in pmem_dax_zero_page_range()
327 pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, in pmem_dax_direct_access() argument
332 return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn); in pmem_dax_direct_access()
348 static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, in pmem_recovery_write() argument
359 if (!is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) >> SECTOR_SHIFT, len)) in pmem_recovery_write()
372 pmem_off = PFN_PHYS(pgoff) + pmem->data_offset; in pmem_recovery_write()
pmem.h
32 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
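
pmem.c converts the same pgoff two ways at once: PFN_PHYS(pgoff) + data_offset gives the byte offset into the device (skipping the metadata at data_offset), while PFN_PHYS(pgoff) >> SECTOR_SHIFT gives the sector for the badblocks check. A sketch of that double conversion, with a made-up data_offset:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define SECTOR_SHIFT 9
    #define PFN_PHYS(x)  ((uint64_t)(x) << PAGE_SHIFT)

    int main(void)
    {
        uint64_t pgoff = 100;            /* page index into the namespace */
        uint64_t data_offset = 0x200000; /* made-up metadata size */

        uint64_t offset = PFN_PHYS(pgoff) + data_offset;   /* device bytes */
        uint64_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT; /* badblocks */
        printf("offset=%#llx sector=%llu\n",
               (unsigned long long)offset, (unsigned long long)sector);
        return 0;
    }
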
/drivers/hwtracing/intel_th/
msu.c
69 unsigned long pgoff; member
983 unsigned long pgoff) in msc_buffer_contig_get_page() argument
985 if (pgoff >= msc->nr_pages) in msc_buffer_contig_get_page()
988 return virt_to_page(msc->base + (pgoff << PAGE_SHIFT)); in msc_buffer_contig_get_page()
1112 win->pgoff = prev->pgoff + prev->nr_blocks; in msc_buffer_win_alloc()
1395 static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff) in msc_buffer_get_page() argument
1402 return msc_buffer_contig_get_page(msc, pgoff); in msc_buffer_get_page()
1405 if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks) in msc_buffer_get_page()
1411 pgoff -= win->pgoff; in msc_buffer_get_page()
1417 if (pgoff < pgsz) in msc_buffer_get_page()
[all …]
/drivers/gpu/drm/omapdrm/
omap_gem.c
351 pgoff_t pgoff; in omap_gem_fault_1d() local
354 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; in omap_gem_fault_1d()
357 omap_gem_cpu_sync_page(obj, pgoff); in omap_gem_fault_1d()
358 pfn = page_to_pfn(omap_obj->pages[pgoff]); in omap_gem_fault_1d()
361 pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff; in omap_gem_fault_1d()
381 pgoff_t pgoff, base_pgoff; in omap_gem_fault_2d() local
403 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; in omap_gem_fault_2d()
409 base_pgoff = round_down(pgoff, m << n_shift); in omap_gem_fault_2d()
414 vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT); in omap_gem_fault_2d()
430 int off = pgoff % m; in omap_gem_fault_2d()
[all …]
/drivers/xen/
gntalloc.c
83 uint16_t pgoff:12; /* Bits 0-11: Offset of the byte to clear */ member
180 tmp[gref->notify.pgoff] = 0; in __del_gref()
377 int pgoff; in gntalloc_ioctl_unmap_notify() local
384 pgoff = op.index & (PAGE_SIZE - 1); in gntalloc_ioctl_unmap_notify()
417 gref->notify.pgoff = pgoff; in gntalloc_ioctl_unmap_notify()
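
gntalloc stores pgoff in a 12-bit bitfield because a byte offset within a 4K page needs exactly 12 bits; the ioctl recovers it as op.index & (PAGE_SIZE - 1) and __del_gref() later zeroes that single notification byte. A compact model of the packing; the struct layout here is illustrative, not the kernel ABI:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* illustrative only: 12 bits hold any byte offset within a 4K page */
    struct notify { uint16_t pgoff:12; uint16_t flags:4; };

    int main(void)
    {
        unsigned long index = 0x3abc; /* made-up ioctl unmap-notify index */
        struct notify n = { .pgoff = index & (PAGE_SIZE - 1) };
        char page[PAGE_SIZE];

        memset(page, 0xff, sizeof(page));
        page[n.pgoff] = 0; /* clear the single notification byte */
        printf("notify byte cleared at pgoff %#x\n", n.pgoff);
        return 0;
    }
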
/drivers/char/
mem.c
323 unsigned long pgoff, in get_unmapped_area_mem() argument
326 if (!valid_mmap_phys_addr_range(pgoff, len)) in get_unmapped_area_mem()
328 return pgoff << PAGE_SHIFT; in get_unmapped_area_mem()
552 unsigned long pgoff, unsigned long flags) in get_unmapped_area_zero() argument
562 return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags); in get_unmapped_area_zero()
566 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); in get_unmapped_area_zero()
/drivers/s390/block/
dcssblk.c
34 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
49 pgoff_t pgoff, size_t nr_pages) in dcssblk_dax_zero_page_range() argument
54 rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, in dcssblk_dax_zero_page_range()
919 __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, in __dcssblk_direct_access() argument
922 resource_size_t offset = pgoff * PAGE_SIZE; in __dcssblk_direct_access()
936 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, in dcssblk_dax_direct_access() argument
942 return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn); in dcssblk_dax_direct_access()
/drivers/comedi/
comedi_buf.c
206 unsigned int pgoff = offset_in_page(offset); in comedi_buf_map_access() local
211 int l = min_t(int, len - done, PAGE_SIZE - pgoff); in comedi_buf_map_access()
212 void *b = bm->page_list[pg].virt_addr + pgoff; in comedi_buf_map_access()
221 pgoff = 0; in comedi_buf_map_access()
/drivers/misc/ocxl/
sysfs.c
115 if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT)) in global_mmio_fault()
118 offset = vmf->pgoff; in global_mmio_fault()
/drivers/vfio/platform/
vfio_platform_common.c
548 u64 req_len, pgoff, req_start; in vfio_platform_mmap_mmio() local
551 pgoff = vma->vm_pgoff & in vfio_platform_mmap_mmio()
553 req_start = pgoff << PAGE_SHIFT; in vfio_platform_mmap_mmio()
559 vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff; in vfio_platform_mmap_mmio()
/drivers/vfio/fsl-mc/
vfio_fsl_mc.c
363 u64 pgoff, base; in vfio_fsl_mc_mmap_mmio() local
366 pgoff = vma->vm_pgoff & in vfio_fsl_mc_mmap_mmio()
368 base = pgoff << PAGE_SHIFT; in vfio_fsl_mc_mmap_mmio()
378 vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff; in vfio_fsl_mc_mmap_mmio()
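
The two vfio snippets above (platform and fsl-mc) split the user's vm_pgoff identically: mask off the region-index bits to get the page offset within the region, shift to bytes to validate the request, then rewrite vm_pgoff so it indexes physical pages of the region. A hedged sketch; the 28-bit in-region field is an assumption modeled on VFIO's offset encoding, and the addresses are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    /* assumption: low 28 bits of vm_pgoff index pages inside one region */
    #define REGION_PGOFF_MASK ((1ULL << 28) - 1)

    int main(void)
    {
        uint64_t vm_pgoff = (2ULL << 28) | 5; /* region 2, page 5, made up */
        uint64_t region_addr = 0xfe000000ULL; /* made-up MMIO base */

        uint64_t pgoff = vm_pgoff & REGION_PGOFF_MASK;
        uint64_t req_start = pgoff << PAGE_SHIFT; /* validated vs size */
        uint64_t new_pgoff = (region_addr >> PAGE_SHIFT) + pgoff;

        printf("in-region start=%#llx new vm_pgoff=%#llx\n",
               (unsigned long long)req_start, (unsigned long long)new_pgoff);
        return 0;
    }
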