
Searched refs:pfn (Results 1 – 25 of 92) sorted by relevance
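For readers skimming these results: a pfn (page frame number) is a physical address with the low PAGE_SHIFT bits stripped, and most of the hits below are conversions between pfns, struct page pointers, and physical or virtual addresses. A minimal sketch of the core relationship, using long-stable helpers; phys_to_page_checked() is an illustrative name, not a kernel API:

#include <linux/mm.h>   /* pfn_valid(), pfn_to_page() */
#include <linux/pfn.h>  /* PFN_DOWN() */

/* Map a physical address to its struct page, if the frame has one. */
static struct page *phys_to_page_checked(phys_addr_t paddr)
{
        unsigned long pfn = PFN_DOWN(paddr);    /* paddr >> PAGE_SHIFT */

        if (!pfn_valid(pfn))            /* no struct page backs this frame */
                return NULL;
        return pfn_to_page(pfn);
}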


/drivers/gpu/drm/i915/
i915_mm.c
34 unsigned long pfn; member
44 set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); in remap_pfn()
45 r->pfn++; in remap_pfn()
61 unsigned long addr, unsigned long pfn, unsigned long size, in remap_io_mapping() argument
73 r.pfn = pfn; in remap_io_mapping()
79 zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT); in remap_io_mapping()
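The i915 helper above fills a VMA's page tables from an advancing pfn, one PTE per call. A hedged sketch of that callback shape, mirroring the snippet's struct and logic; note that the pte_fn_t signature expected by apply_to_page_range() has varied across kernel versions:

struct remap_pfn {
        struct mm_struct *mm;
        unsigned long pfn;
        pgprot_t prot;
};

/* Install one special PTE, then advance to the next frame.
 * pte_mkspecial() tells the core VM there is no struct page to
 * refcount behind this mapping. */
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
        struct remap_pfn *r = data;

        set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
        r->pfn++;
        return 0;
}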
/drivers/base/
node.c
373 static int __ref get_nid_for_pfn(unsigned long pfn) in get_nid_for_pfn() argument
377 if (!pfn_valid_within(pfn)) in get_nid_for_pfn()
381 return early_pfn_to_nid(pfn); in get_nid_for_pfn()
383 page = pfn_to_page(pfn); in get_nid_for_pfn()
386 return pfn_to_nid(pfn); in get_nid_for_pfn()
393 unsigned long pfn, sect_start_pfn, sect_end_pfn; in register_mem_sect_under_node() local
403 for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { in register_mem_sect_under_node()
410 if (!pfn_present(pfn)) { in register_mem_sect_under_node()
411 pfn = round_down(pfn + PAGES_PER_SECTION, in register_mem_sect_under_node()
416 page_nid = get_nid_for_pfn(pfn); in register_mem_sect_under_node()
[all …]
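get_nid_for_pfn() above must cope with early boot, when struct pages are not yet initialized and early_pfn_to_nid() is the only option. Once initialization is done, the lookup reduces to a sketch like this (nid_for_pfn() is an illustrative name):

/* Resolve the NUMA node owning a frame, after memmap init. */
static int nid_for_pfn(unsigned long pfn)
{
        if (!pfn_valid(pfn))
                return NUMA_NO_NODE;
        return page_to_nid(pfn_to_page(pfn));
}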
memory.c
127 unsigned long i, pfn; in show_mem_removable() local
134 pfn = section_nr_to_pfn(mem->start_section_nr + i); in show_mem_removable()
135 ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); in show_mem_removable()
192 unsigned long pfn = start_pfn; in pages_correctly_reserved() local
199 for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) { in pages_correctly_reserved()
200 if (WARN_ON_ONCE(!pfn_valid(pfn))) in pages_correctly_reserved()
202 page = pfn_to_page(pfn); in pages_correctly_reserved()
210 pfn_to_section_nr(pfn), j); in pages_correctly_reserved()
530 u64 pfn; in store_soft_offline_page() local
533 if (kstrtoull(buf, 0, &pfn) < 0) in store_soft_offline_page()
[all …]
dma-mapping.c
255 unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); in dma_common_mmap() local
265 pfn + off, in dma_common_mmap()
312 unsigned long pfn; in dma_common_contiguous_remap() local
318 for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++) in dma_common_contiguous_remap()
319 pages[i] = pfn_to_page(pfn + i); in dma_common_contiguous_remap()
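Both dma-mapping helpers above lean on the same invariant: a physically contiguous buffer occupies consecutive pfns, so its i-th page is pfn_to_page(base_pfn + i). A sketch under that assumption (nth_contig_page() is illustrative, and cpu_addr must be a direct-mapped kernel address, as in the snippet):

/* i-th page of a physically contiguous buffer starting at cpu_addr. */
static struct page *nth_contig_page(void *cpu_addr, unsigned long i)
{
        unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));

        return pfn_to_page(pfn + i);    /* holds only while contiguous */
}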
/drivers/vfio/
vfio_iommu_type1.c
170 static bool is_invalid_reserved_pfn(unsigned long pfn) in is_invalid_reserved_pfn() argument
172 if (pfn_valid(pfn)) { in is_invalid_reserved_pfn()
174 struct page *tail = pfn_to_page(pfn); in is_invalid_reserved_pfn()
198 static int put_pfn(unsigned long pfn, int prot) in put_pfn() argument
200 if (!is_invalid_reserved_pfn(pfn)) { in put_pfn()
201 struct page *page = pfn_to_page(pfn); in put_pfn()
210 static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) in vaddr_get_pfn() argument
217 *pfn = page_to_pfn(page[0]); in vaddr_get_pfn()
226 *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in vaddr_get_pfn()
227 if (is_invalid_reserved_pfn(*pfn)) in vaddr_get_pfn()
[all …]
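The vfio path above handles VM_PFNMAP areas, where there is no struct page to pin; for mappings set up by remap_pfn_range(), vma->vm_pgoff holds the pfn of the first page, so a user address converts to a pfn with plain offset arithmetic. A sketch of that step (helper name illustrative):

/* vaddr -> pfn inside a VM_PFNMAP vma whose vm_pgoff is the base pfn. */
static unsigned long pfnmap_vaddr_to_pfn(struct vm_area_struct *vma,
                                         unsigned long vaddr)
{
        return ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}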
/drivers/xen/
balloon.c
286 unsigned long pfn = res->start >> PAGE_SHIFT; in additional_memory_resource() local
288 if (pfn > limit) { in additional_memory_resource()
290 pfn, limit); in additional_memory_resource()
342 unsigned long pfn, i; in reserve_additional_memory() local
344 pfn = PFN_DOWN(resource->start); in reserve_additional_memory()
346 if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { in reserve_additional_memory()
464 unsigned long pfn = page_to_pfn(page); in increase_reservation() local
466 set_phys_to_machine(pfn, frame_list[i]); in increase_reservation()
472 (unsigned long)__va(pfn << PAGE_SHIFT), in increase_reservation()
542 unsigned long pfn = page_to_pfn(page); in decrease_reservation() local
[all …]
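increase_reservation() above turns a pfn back into a kernel virtual address with __va(), which is valid only for frames in the direct map (lowmem). A sketch of the round trip (lowmem_pfn_to_virt() is illustrative):

/* pfn -> kernel virtual address, direct-mapped frames only. */
static void *lowmem_pfn_to_virt(unsigned long pfn)
{
        void *vaddr = __va(pfn << PAGE_SHIFT);

        WARN_ON(PFN_DOWN(__pa(vaddr)) != pfn);  /* sanity: round-trips */
        return vaddr;
}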
grant-table.c
393 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) in gnttab_grant_foreign_transfer() argument
400 gnttab_grant_foreign_transfer_ref(ref, domid, pfn); in gnttab_grant_foreign_transfer()
407 unsigned long pfn) in gnttab_grant_foreign_transfer_ref() argument
409 gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer); in gnttab_grant_foreign_transfer_ref()
636 xen_pfn_t *pfn; in gnttab_setup_auto_xlat_frames() local
650 pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL); in gnttab_setup_auto_xlat_frames()
651 if (!pfn) { in gnttab_setup_auto_xlat_frames()
656 pfn[i] = XEN_PFN_DOWN(addr) + i; in gnttab_setup_auto_xlat_frames()
659 xen_auto_xlat_grant_frames.pfn = pfn; in gnttab_setup_auto_xlat_frames()
670 kfree(xen_auto_xlat_grant_frames.pfn); in gnttab_free_auto_xlat_frames()
[all …]
/drivers/char/
mem.c
56 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) in valid_mmap_phys_addr_range() argument
63 static inline int page_is_allowed(unsigned long pfn) in page_is_allowed() argument
65 return devmem_is_allowed(pfn); in page_is_allowed()
67 static inline int range_is_allowed(unsigned long pfn, unsigned long size) in range_is_allowed() argument
69 u64 from = ((u64)pfn) << PAGE_SHIFT; in range_is_allowed()
74 if (!devmem_is_allowed(pfn)) in range_is_allowed()
77 pfn++; in range_is_allowed()
82 static inline int page_is_allowed(unsigned long pfn) in page_is_allowed() argument
86 static inline int range_is_allowed(unsigned long pfn, unsigned long size) in range_is_allowed() argument
243 unsigned long pfn, unsigned long size, pgprot_t *vma_prot) in phys_mem_access_prot_allowed() argument
[all …]
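One detail in range_is_allowed() above is easy to miss: the explicit u64 cast before the shift. On 32-bit kernels with more than 4 GiB of physical address space, pfn << PAGE_SHIFT can overflow unsigned long, so the widening has to happen first. As a sketch (helper name illustrative):

/* Overflow-safe pfn -> physical byte range on 32-bit builds. */
static bool phys_range_from_pfn(unsigned long pfn, size_t size,
                                u64 *from, u64 *to)
{
        *from = ((u64)pfn) << PAGE_SHIFT;       /* widen, then shift */
        *to = *from + size;
        return *to >= *from;    /* reject wraparound of the end address */
}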
mspec.c
197 unsigned long pfn; in mspec_fault() local
223 pfn = paddr >> PAGE_SHIFT; in mspec_fault()
230 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); in mspec_fault()
/drivers/iommu/
iova.c
27 unsigned long pfn,
307 private_find_iova(struct iova_domain *iovad, unsigned long pfn) in private_find_iova() argument
317 if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { in private_find_iova()
321 if (pfn < iova->pfn_lo) in private_find_iova()
323 else if (pfn > iova->pfn_lo) in private_find_iova()
345 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) in find_iova() argument
352 iova = private_find_iova(iovad, pfn); in find_iova()
383 free_iova(struct iova_domain *iovad, unsigned long pfn) in free_iova() argument
385 struct iova *iova = find_iova(iovad, pfn); in free_iova()
443 free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) in free_iova_fast() argument
[all …]
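private_find_iova() above is a range lookup in an rbtree keyed by pfn_lo; allocated ranges never overlap, so a single pfn selects at most one node. A simplified sketch of the descent (structured a little differently from the snippet, same effect):

/* Find the allocated IOVA range containing pfn, or NULL. */
static struct iova *iova_tree_find(struct rb_node *node, unsigned long pfn)
{
        while (node) {
                struct iova *iova = rb_entry(node, struct iova, node);

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_hi)
                        node = node->rb_right;
                else
                        return iova;    /* pfn_lo <= pfn <= pfn_hi */
        }
        return NULL;
}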
/drivers/misc/
vmw_balloon.c
445 static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, in vmballoon_send_lock_page() argument
451 pfn32 = (u32)pfn; in vmballoon_send_lock_page()
452 if (pfn32 != pfn) in vmballoon_send_lock_page()
457 *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target); in vmballoon_send_lock_page()
461 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); in vmballoon_send_lock_page()
470 unsigned long pfn = page_to_pfn(b->page); in vmballoon_send_batched_lock() local
475 status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages, in vmballoon_send_batched_lock()
478 status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages, in vmballoon_send_batched_lock()
484 pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status); in vmballoon_send_batched_lock()
493 static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn, in vmballoon_send_unlock_page() argument
[all …]
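vmballoon_send_lock_page() above guards against frame numbers that do not fit the hypervisor's 32-bit protocol field by checking that a round trip through u32 is lossless. The idiom, as a sketch:

/* True if the frame number can be expressed in 32 bits. */
static bool pfn_fits_u32(unsigned long pfn)
{
        return (u32)pfn == pfn;         /* truncation-free round trip */
}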
/drivers/infiniband/hw/mlx5/
mem.c
56 u64 pfn; in mlx5_ib_cont_pages() local
80 pfn = sg_dma_address(sg) >> page_shift; in mlx5_ib_cont_pages()
83 tmp = (unsigned long)pfn; in mlx5_ib_cont_pages()
87 base = pfn; in mlx5_ib_cont_pages()
90 if (base + p != pfn) { in mlx5_ib_cont_pages()
95 base = pfn; in mlx5_ib_cont_pages()
/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c
628 static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add) in ehea_update_busmap() argument
641 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; in ehea_update_busmap()
671 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages) in ehea_add_sect_bmap() argument
676 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT); in ehea_add_sect_bmap()
681 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages) in ehea_rem_sect_bmap() argument
686 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT); in ehea_rem_sect_bmap()
691 static int ehea_is_hugepage(unsigned long pfn) in ehea_is_hugepage() argument
695 if (pfn & EHEA_HUGEPAGE_PFN_MASK) in ehea_is_hugepage()
698 page_order = compound_order(pfn_to_page(pfn)); in ehea_is_hugepage()
709 unsigned long pfn, start_pfn, end_pfn, nr_pages; in ehea_create_busmap_callback() local
[all …]
/drivers/scsi/csiostor/
csio_mb.c
281 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn)); in csio_mb_ldst()
449 FW_IQ_CMD_PFN_V(iq_params->pfn) | in csio_mb_iq_alloc()
505 FW_IQ_CMD_PFN_V(iq_params->pfn) | in csio_mb_iq_write()
628 FW_IQ_CMD_PFN_V(iq_params->pfn) | in csio_mb_iq_free()
663 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | in csio_mb_eq_ofld_alloc()
711 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | in csio_mb_eq_ofld_write()
814 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | in csio_mb_eq_ofld_free()
1158 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); in csio_mb_debug_cmd_handler()
1159 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); in csio_mb_debug_cmd_handler()
1192 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); in csio_mb_issue()
[all …]
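A note on the csiostor hits: in this driver, pfn is the adapter's PCI physical function number (hence the FW_*_PFN_V macros), not a page frame number; the identifier search simply matches both uses.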
/drivers/edac/
cell_edac.c
38 unsigned long address, pfn, offset, syndrome; in cell_edac_count_ce() local
47 pfn = address >> PAGE_SHIFT; in cell_edac_count_ce()
53 csrow->first_page + pfn, offset, syndrome, in cell_edac_count_ce()
61 unsigned long address, pfn, offset; in cell_edac_count_ue() local
70 pfn = address >> PAGE_SHIFT; in cell_edac_count_ue()
75 csrow->first_page + pfn, offset, 0, in cell_edac_count_ue()
fsl_ddr_edac.c
282 u32 pfn; in fsl_mc_check() local
315 pfn = err_addr >> PAGE_SHIFT; in fsl_mc_check()
319 if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page)) in fsl_mc_check()
352 fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn); in fsl_mc_check()
360 pfn, err_addr & ~PAGE_MASK, syndrome, in fsl_mc_check()
366 pfn, err_addr & ~PAGE_MASK, syndrome, in fsl_mc_check()
i3000_edac.c
237 unsigned long pfn, offset; in i3000_process_error_info() local
254 pfn = deap_pfn(info->edeap, info->deap); in i3000_process_error_info()
258 row = edac_mc_find_csrow_by_page(mci, pfn); in i3000_process_error_info()
262 pfn, offset, 0, in i3000_process_error_info()
267 pfn, offset, info->derrsyn, in i3000_process_error_info()
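All three EDAC drivers above decompose a faulting physical address the same way: the high bits select the frame, the low PAGE_SHIFT bits are the byte offset within it. As a sketch (helper name illustrative):

/* Split a physical error address into (frame, offset-within-frame). */
static void split_err_addr(u64 err_addr, unsigned long *pfn,
                           unsigned long *offset)
{
        *pfn = err_addr >> PAGE_SHIFT;
        *offset = err_addr & ~PAGE_MASK;        /* low PAGE_SHIFT bits */
}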
/drivers/gpu/drm/gma500/
gem.c
172 unsigned long pfn; in psb_gem_fault() local
205 pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT; in psb_gem_fault()
207 pfn = page_to_pfn(r->pages[page_offset]); in psb_gem_fault()
208 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); in psb_gem_fault()
/drivers/net/ethernet/mellanox/mlx5/core/
uar.c
187 phys_addr_t pfn; in mlx5_alloc_map_uar() local
198 pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; in mlx5_alloc_map_uar()
201 uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); in mlx5_alloc_map_uar()
204 uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); in mlx5_alloc_map_uar()
209 uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); in mlx5_alloc_map_uar()
/drivers/gpu/drm/ttm/
ttm_bo_vm.c
102 unsigned long pfn; in ttm_bo_vm_fault() local
234 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; in ttm_bo_vm_fault()
246 pfn = page_to_pfn(page); in ttm_bo_vm_fault()
251 __pfn_to_pfn_t(pfn, PFN_DEV)); in ttm_bo_vm_fault()
253 ret = vm_insert_pfn(&cvma, address, pfn); in ttm_bo_vm_fault()
/drivers/media/v4l2-core/
videobuf2-dma-contig.c
454 static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) in vb2_dc_pfn_to_dma() argument
456 return (dma_addr_t)__arch_pfn_to_dma(dev, pfn); in vb2_dc_pfn_to_dma()
459 static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) in vb2_dc_pfn_to_dma() argument
461 return (dma_addr_t)__pfn_to_bus(pfn); in vb2_dc_pfn_to_dma()
464 static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) in vb2_dc_pfn_to_dma() argument
466 return (dma_addr_t)__pfn_to_phys(pfn); in vb2_dc_pfn_to_dma()
469 static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) in vb2_dc_pfn_to_dma() argument
472 return (dma_addr_t)(pfn) << PAGE_SHIFT; in vb2_dc_pfn_to_dma()
/drivers/md/
dm-linear.c
151 void **kaddr, pfn_t *pfn, long size) in dm_linear_direct_access() argument
163 *pfn = dax.pfn; in dm_linear_direct_access()
/drivers/gpu/drm/omapdrm/
omap_gem.c
395 unsigned long pfn; in fault_1d() local
404 pfn = page_to_pfn(omap_obj->pages[pgoff]); in fault_1d()
407 pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff; in fault_1d()
411 pfn, pfn << PAGE_SHIFT); in fault_1d()
414 __pfn_to_pfn_t(pfn, PFN_DEV)); in fault_1d()
426 unsigned long pfn; in fault_2d() local
502 pfn = entry->paddr >> PAGE_SHIFT; in fault_2d()
505 pfn, pfn << PAGE_SHIFT); in fault_2d()
509 __pfn_to_pfn_t(pfn, PFN_DEV)); in fault_2d()
510 pfn += priv->usergart[fmt].stride_pfn; in fault_2d()
/drivers/acpi/
osl.c
277 #define should_use_kmap(pfn) 0 argument
279 #define should_use_kmap(pfn) page_is_ram(pfn) argument
284 unsigned long pfn; in acpi_map() local
286 pfn = pg_off >> PAGE_SHIFT; in acpi_map()
287 if (should_use_kmap(pfn)) { in acpi_map()
290 return (void __iomem __force *)kmap(pfn_to_page(pfn)); in acpi_map()
297 unsigned long pfn; in acpi_unmap() local
299 pfn = pg_off >> PAGE_SHIFT; in acpi_unmap()
300 if (should_use_kmap(pfn)) in acpi_unmap()
301 kunmap(pfn_to_page(pfn)); in acpi_unmap()
/drivers/acpi/apei/
ghes.c
152 static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) in ghes_ioremap_pfn_nmi() argument
158 pfn << PAGE_SHIFT, PAGE_KERNEL); in ghes_ioremap_pfn_nmi()
163 static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) in ghes_ioremap_pfn_irq() argument
170 paddr = pfn << PAGE_SHIFT; in ghes_ioremap_pfn_irq()
399 unsigned long pfn; in ghes_handle_memory_failure() local
408 pfn = mem_err->physical_addr >> PAGE_SHIFT; in ghes_handle_memory_failure()
409 if (!pfn_valid(pfn)) { in ghes_handle_memory_failure()
424 memory_failure_queue(pfn, 0, flags); in ghes_handle_memory_failure()
