/drivers/gpu/drm/i915/selftests/
    scatterlist.c
        47   unsigned long pfn, n;   in expect_pfn_sg() local
        49   pfn = pt->start;   in expect_pfn_sg()
        54   if (page_to_pfn(page) != pfn) {   in expect_pfn_sg()
        56   __func__, who, pfn, page_to_pfn(page));   in expect_pfn_sg()
        69   pfn += npages;   in expect_pfn_sg()
        71   if (pfn != pt->end) {   in expect_pfn_sg()
        73   __func__, who, pt->end, pfn);   in expect_pfn_sg()
        85   unsigned long pfn;   in expect_pfn_sg_page_iter() local
        87   pfn = pt->start;   in expect_pfn_sg_page_iter()
        91   if (page != pfn_to_page(pfn)) {   in expect_pfn_sg_page_iter()
        [all …]
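The selftest walks a scatterlist and verifies that each page's frame number extends a single run that starts at pt->start and must land exactly on pt->end. A minimal user-space sketch of that invariant, with a hypothetical pfns[] array standing in for the scatterlist walk:

/* Sketch of the contiguity check the selftest performs: walk a buffer's
 * page-frame numbers and verify they form one unbroken run. */
#include <stdio.h>

static int expect_contiguous(const unsigned long *pfns, unsigned long n,
                             unsigned long start)
{
    unsigned long expected = start;

    for (unsigned long i = 0; i < n; i++) {
        if (pfns[i] != expected) {
            fprintf(stderr, "pfn mismatch at %lu: expected %lu, found %lu\n",
                    i, expected, pfns[i]);
            return -1;
        }
        expected++; /* the next page must be the next frame */
    }
    return 0;
}

int main(void)
{
    unsigned long pfns[] = { 100, 101, 102, 103 };
    return expect_contiguous(pfns, 4, 100) ? 1 : 0;
}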
/drivers/ras/
    cec.c
        186   static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)   in __find_elem() argument
        196   if (this_pfn < pfn)   in __find_elem()
        198   else if (this_pfn > pfn)   in __find_elem()
        200   else if (this_pfn == pfn) {   in __find_elem()
        223   static int find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)   in find_elem() argument
        231   return __find_elem(ca, pfn, to);   in find_elem()
        271   u64 pfn;   in del_lru_elem() local
        277   pfn = del_lru_elem_unlocked(ca);   in del_lru_elem()
        280   return pfn;   in del_lru_elem()
        284   int cec_add_elem(u64 pfn)   in cec_add_elem() argument
        [all …]
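__find_elem() keeps the collected-error array sorted by pfn so a lookup is a binary search, and callers also learn the insertion slot when the pfn is absent. A simplified user-space sketch of that pattern (the real array packs a decay counter into each u64, which this sketch omits):

#include <stdint.h>
#include <stdio.h>

/* Binary-search a sorted array for pfn; on a miss, *to is where it belongs. */
static int find_elem(const uint64_t *arr, unsigned int n, uint64_t pfn,
                     unsigned int *to)
{
    unsigned int min = 0, max = n ? n - 1 : 0;

    if (!n)
        goto out;

    while (min <= max) {
        unsigned int mid = (min + max) / 2;
        uint64_t this_pfn = arr[mid];

        if (this_pfn < pfn)
            min = mid + 1;
        else if (this_pfn > pfn) {
            if (!mid)
                break;           /* avoid unsigned underflow */
            max = mid - 1;
        } else {
            *to = mid;
            return (int)mid;     /* found */
        }
    }
out:
    *to = min;                   /* slot where pfn would be inserted */
    return -1;
}

int main(void)
{
    uint64_t ca[] = { 3, 9, 27, 81 };
    unsigned int to;

    printf("27 found at %d\n", find_elem(ca, 4, 27, &to));  /* 2 */
    find_elem(ca, 4, 10, &to);
    printf("10 missing, insert slot %u\n", to);             /* 2 */
    return 0;
}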
/drivers/gpu/drm/i915/
    i915_mm.c
        34   unsigned long pfn;   member
        44   set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));   in remap_pfn()
        45   r->pfn++;   in remap_pfn()
        61   unsigned long addr, unsigned long pfn, unsigned long size,   in remap_io_mapping() argument
        73   r.pfn = pfn;   in remap_io_mapping()
        79   zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);   in remap_io_mapping()
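remap_pfn() installs one PTE per page while advancing a pfn cursor, so remap_io_mapping() can compute how much to unwind on failure directly from the cursor: (r.pfn - pfn) << PAGE_SHIFT bytes. A sketch of that pattern, where install_pte() is a hypothetical stand-in for the set_pte_at()-based callback:

#include <stdio.h>

#define PAGE_SHIFT 12

struct remap_state {
    unsigned long pfn;   /* next frame to install */
};

/* hypothetical per-pte hook; fails at frame 0x1002 to show the unwind */
static int install_pte(unsigned long addr, struct remap_state *r)
{
    if (r->pfn == 0x1002)
        return -1;
    printf("map addr %#lx -> pfn %#lx\n", addr, r->pfn);
    r->pfn++;
    return 0;
}

int main(void)
{
    struct remap_state r = { .pfn = 0x1000 };
    unsigned long start_pfn = r.pfn, addr = 0x40000000UL;

    for (int i = 0; i < 4; i++) {
        if (install_pte(addr + ((unsigned long)i << PAGE_SHIFT), &r)) {
            /* frames already written = cursor progress since the start */
            unsigned long undo = (r.pfn - start_pfn) << PAGE_SHIFT;
            printf("failed; zap %#lx bytes already mapped\n", undo);
            return 1;
        }
    }
    return 0;
}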
/drivers/base/
    node.c
        393   static int __ref get_nid_for_pfn(unsigned long pfn)   in get_nid_for_pfn() argument
        395   if (!pfn_valid_within(pfn))   in get_nid_for_pfn()
        399   return early_pfn_to_nid(pfn);   in get_nid_for_pfn()
        401   return pfn_to_nid(pfn);   in get_nid_for_pfn()
        408   unsigned long pfn, sect_start_pfn, sect_end_pfn;   in register_mem_sect_under_node() local
        418   for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {   in register_mem_sect_under_node()
        425   if (!pfn_present(pfn)) {   in register_mem_sect_under_node()
        426   pfn = round_down(pfn + PAGES_PER_SECTION,   in register_mem_sect_under_node()
        431   page_nid = get_nid_for_pfn(pfn);   in register_mem_sect_under_node()
        455   unsigned long pfn, sect_start_pfn, sect_end_pfn;   in unregister_mem_sect_under_nodes() local
        [all …]
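register_mem_sect_under_node() scans a section pfn by pfn, but when a frame has no backing it skips straight to the next PAGES_PER_SECTION boundary rather than probing every frame in the hole. A sketch of that skip, with illustrative constants and a fake pfn_present():

#include <stdio.h>
#include <stdbool.h>

#define PAGES_PER_SECTION 0x8000UL   /* illustrative; arch-dependent in reality */
#define round_down(x, y) ((x) & ~((y) - 1))

static bool pfn_present(unsigned long pfn)
{
    return pfn < 0x10000UL || pfn >= 0x20000UL;   /* fake hole in the middle */
}

int main(void)
{
    for (unsigned long pfn = 0; pfn < 0x30000UL; pfn++) {
        if (!pfn_present(pfn)) {
            /* skip the rest of this section; the loop ++ lands on the boundary */
            pfn = round_down(pfn + PAGES_PER_SECTION, PAGES_PER_SECTION) - 1;
            continue;
        }
        /* ... inspect pfn's node here ... */
    }
    printf("scan done\n");
    return 0;
}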
    memory.c
        128   unsigned long i, pfn;   in show_mem_removable() local
        138   pfn = section_nr_to_pfn(mem->start_section_nr + i);   in show_mem_removable()
        139   ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);   in show_mem_removable()
        197   unsigned long pfn = start_pfn;   in pages_correctly_reserved() local
        204   for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {   in pages_correctly_reserved()
        205   if (WARN_ON_ONCE(!pfn_valid(pfn)))   in pages_correctly_reserved()
        207   page = pfn_to_page(pfn);   in pages_correctly_reserved()
        215   pfn_to_section_nr(pfn), j);   in pages_correctly_reserved()
        552   u64 pfn;   in store_soft_offline_page() local
        555   if (kstrtoull(buf, 0, &pfn) < 0)   in store_soft_offline_page()
        [all …]
/drivers/vfio/
    vfio_iommu_type1.c
        108   unsigned long pfn; /* Host pfn */   member
        115   static int put_pfn(unsigned long pfn, int prot);
        213   unsigned long pfn)   in vfio_add_to_pfn_list() argument
        222   vpfn->pfn = pfn;   in vfio_add_to_pfn_list()
        250   ret = put_pfn(vpfn->pfn, dma->prot);   in vfio_iova_put_vfio_pfn()
        299   static bool is_invalid_reserved_pfn(unsigned long pfn)   in is_invalid_reserved_pfn() argument
        301   if (pfn_valid(pfn)) {   in is_invalid_reserved_pfn()
        303   struct page *tail = pfn_to_page(pfn);   in is_invalid_reserved_pfn()
        327   static int put_pfn(unsigned long pfn, int prot)   in put_pfn() argument
        329   if (!is_invalid_reserved_pfn(pfn)) {   in put_pfn()
        [all …]
/drivers/xen/
    balloon.c
        284   unsigned long pfn = res->start >> PAGE_SHIFT;   in additional_memory_resource() local
        286   if (pfn > limit) {   in additional_memory_resource()
        288   pfn, limit);   in additional_memory_resource()
        340   unsigned long pfn, i;   in reserve_additional_memory() local
        342   pfn = PFN_DOWN(resource->start);   in reserve_additional_memory()
        344   if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {   in reserve_additional_memory()
        466   unsigned long pfn = page_to_pfn(page);   in increase_reservation() local
        468   set_phys_to_machine(pfn, frame_list[i]);   in increase_reservation()
        474   (unsigned long)__va(pfn << PAGE_SHIFT),   in increase_reservation()
        545   unsigned long pfn = page_to_pfn(page);   in decrease_reservation() local
        [all …]
    grant-table.c
        394   int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)   in gnttab_grant_foreign_transfer() argument
        401   gnttab_grant_foreign_transfer_ref(ref, domid, pfn);   in gnttab_grant_foreign_transfer()
        408   unsigned long pfn)   in gnttab_grant_foreign_transfer_ref() argument
        410   gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);   in gnttab_grant_foreign_transfer_ref()
        637   xen_pfn_t *pfn;   in gnttab_setup_auto_xlat_frames() local
        651   pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);   in gnttab_setup_auto_xlat_frames()
        652   if (!pfn) {   in gnttab_setup_auto_xlat_frames()
        657   pfn[i] = XEN_PFN_DOWN(addr) + i;   in gnttab_setup_auto_xlat_frames()
        660   xen_auto_xlat_grant_frames.pfn = pfn;   in gnttab_setup_auto_xlat_frames()
        671   kfree(xen_auto_xlat_grant_frames.pfn);   in gnttab_free_auto_xlat_frames()
        [all …]
/drivers/char/
    mem.c
        57   static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)   in valid_mmap_phys_addr_range() argument
        64   static inline int page_is_allowed(unsigned long pfn)   in page_is_allowed() argument
        66   return devmem_is_allowed(pfn);   in page_is_allowed()
        68   static inline int range_is_allowed(unsigned long pfn, unsigned long size)   in range_is_allowed() argument
        70   u64 from = ((u64)pfn) << PAGE_SHIFT;   in range_is_allowed()
        75   if (!devmem_is_allowed(pfn))   in range_is_allowed()
        78   pfn++;   in range_is_allowed()
        83   static inline int page_is_allowed(unsigned long pfn)   in page_is_allowed() argument
        87   static inline int range_is_allowed(unsigned long pfn, unsigned long size)   in range_is_allowed() argument
        272   unsigned long pfn, unsigned long size, pgprot_t *vma_prot)   in phys_mem_access_prot_allowed() argument
        [all …]
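range_is_allowed() converts the starting pfn back to a byte address and then asks the per-frame policy hook about every page the range touches, advancing pfn one frame per page. A user-space sketch with a fake devmem_is_allowed() policy:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int devmem_is_allowed(unsigned long pfn)
{
    return pfn < 0x100;   /* pretend only the first 1 MiB is open */
}

static int range_is_allowed(unsigned long pfn, unsigned long size)
{
    uint64_t from = (uint64_t)pfn << PAGE_SHIFT;
    uint64_t to = from + size;

    /* every frame the byte range touches must pass the policy check */
    for (uint64_t cursor = from; cursor < to; cursor += PAGE_SIZE) {
        if (!devmem_is_allowed(pfn))
            return 0;
        pfn++;
    }
    return 1;
}

int main(void)
{
    printf("%d\n", range_is_allowed(0xfe, 3 * PAGE_SIZE));  /* 0: crosses 0x100 */
    printf("%d\n", range_is_allowed(0x10, PAGE_SIZE));      /* 1 */
    return 0;
}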
/drivers/misc/
    vmw_balloon.c
        452   static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,   in vmballoon_send_lock_page() argument
        458   pfn32 = (u32)pfn;   in vmballoon_send_lock_page()
        459   if (pfn32 != pfn)   in vmballoon_send_lock_page()
        464   *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);   in vmballoon_send_lock_page()
        468   pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);   in vmballoon_send_lock_page()
        477   unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));   in vmballoon_send_batched_lock() local
        482   status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,   in vmballoon_send_batched_lock()
        485   status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,   in vmballoon_send_batched_lock()
        491   pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);   in vmballoon_send_batched_lock()
        500   static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,   in vmballoon_send_unlock_page() argument
        [all …]
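The non-batched balloon protocol carries the frame number in 32 bits, so vmballoon_send_lock_page() first checks that the pfn round-trips through u32 unchanged. The guard in isolation, as a sketch:

#include <stdint.h>
#include <stdio.h>

/* A 64-bit pfn may only be sent if it survives truncation to u32. */
static int pfn_fits_protocol(unsigned long pfn)
{
    uint32_t pfn32 = (uint32_t)pfn;

    return pfn32 == pfn;   /* false once the frame needs more than 32 bits */
}

int main(void)
{
    printf("%d\n", pfn_fits_protocol(0x12345UL));       /* 1 */
    printf("%d\n", pfn_fits_protocol(0x123456789UL));   /* 0 (on 64-bit long) */
    return 0;
}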
/drivers/iommu/
    iova.c
        28    unsigned long pfn,
        346   private_find_iova(struct iova_domain *iovad, unsigned long pfn)   in private_find_iova() argument
        356   if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {   in private_find_iova()
        360   if (pfn < iova->pfn_lo)   in private_find_iova()
        362   else if (pfn > iova->pfn_lo)   in private_find_iova()
        384   struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)   in find_iova() argument
        391   iova = private_find_iova(iovad, pfn);   in find_iova()
        422   free_iova(struct iova_domain *iovad, unsigned long pfn)   in free_iova() argument
        424   struct iova *iova = find_iova(iovad, pfn);   in free_iova()
        480   free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)   in free_iova_fast() argument
        [all …]
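private_find_iova() descends a tree keyed by range start and returns the node whose [pfn_lo, pfn_hi] interval contains the requested frame. A sketch with plain pointers standing in for the kernel rbtree:

#include <stdio.h>

struct iova {
    unsigned long pfn_lo, pfn_hi;
    struct iova *left, *right;
};

/* Walk down by pfn_lo; stop at the node whose interval contains pfn. */
static struct iova *find_iova(struct iova *node, unsigned long pfn)
{
    while (node) {
        if (pfn >= node->pfn_lo && pfn <= node->pfn_hi)
            return node;                     /* containment hit */
        node = (pfn < node->pfn_lo) ? node->left : node->right;
    }
    return NULL;                             /* pfn falls in a gap */
}

int main(void)
{
    struct iova lo   = { 0x100, 0x1ff, NULL, NULL };
    struct iova hi   = { 0x400, 0x4ff, NULL, NULL };
    struct iova root = { 0x200, 0x2ff, &lo, &hi };

    printf("%p\n", (void *)find_iova(&root, 0x250));  /* &root */
    printf("%p\n", (void *)find_iova(&root, 0x300));  /* NULL: in a gap */
    return 0;
}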
/drivers/net/ethernet/ibm/ehea/
    ehea_qmr.c
        628   static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)   in ehea_update_busmap() argument
        641   start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;   in ehea_update_busmap()
        671   int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)   in ehea_add_sect_bmap() argument
        676   ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);   in ehea_add_sect_bmap()
        681   int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)   in ehea_rem_sect_bmap() argument
        686   ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);   in ehea_rem_sect_bmap()
        691   static int ehea_is_hugepage(unsigned long pfn)   in ehea_is_hugepage() argument
        695   if (pfn & EHEA_HUGEPAGE_PFN_MASK)   in ehea_is_hugepage()
        698   page_order = compound_order(pfn_to_page(pfn));   in ehea_is_hugepage()
        709   unsigned long pfn, start_pfn, end_pfn, nr_pages;   in ehea_create_busmap_callback() local
        [all …]
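ehea_is_hugepage() first rejects any frame whose low bits are set, since a huge page can only start on a huge-page-sized boundary, and only then consults compound_order(). A sketch of the alignment half of that test (the shift values are illustrative, not EHEA's actual constants):

#include <stdio.h>

#define PAGE_SHIFT        12
#define HUGEPAGE_SHIFT    24                                       /* 16 MiB */
#define HUGEPAGE_PFN_MASK ((1UL << (HUGEPAGE_SHIFT - PAGE_SHIFT)) - 1)

static int starts_hugepage(unsigned long pfn)
{
    /* any low bit set means pfn is inside, not at the start of, a huge page */
    return (pfn & HUGEPAGE_PFN_MASK) == 0;
}

int main(void)
{
    printf("%d\n", starts_hugepage(0x4000));   /* 1: 16 MiB aligned */
    printf("%d\n", starts_hugepage(0x4001));   /* 0 */
    return 0;
}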
/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
    pno.c
        205   struct brcmf_pno_net_param_le pfn;   in brcmf_pno_add_ssid() local
        208   pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);   in brcmf_pno_add_ssid()
        209   pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);   in brcmf_pno_add_ssid()
        210   pfn.wsec = cpu_to_le32(0);   in brcmf_pno_add_ssid()
        211   pfn.infra = cpu_to_le32(1);   in brcmf_pno_add_ssid()
        212   pfn.flags = 0;   in brcmf_pno_add_ssid()
        214   pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);   in brcmf_pno_add_ssid()
        215   pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);   in brcmf_pno_add_ssid()
        216   memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);   in brcmf_pno_add_ssid()
        219   err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));   in brcmf_pno_add_ssid()
/drivers/scsi/csiostor/
    csio_mb.c
        281    (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn));   in csio_mb_ldst()
        449    FW_IQ_CMD_PFN_V(iq_params->pfn) |   in csio_mb_iq_alloc()
        505    FW_IQ_CMD_PFN_V(iq_params->pfn) |   in csio_mb_iq_write()
        628    FW_IQ_CMD_PFN_V(iq_params->pfn) |   in csio_mb_iq_free()
        663    FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |   in csio_mb_eq_ofld_alloc()
        711    FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |   in csio_mb_eq_ofld_write()
        814    FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |   in csio_mb_eq_ofld_free()
        1158   uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);   in csio_mb_debug_cmd_handler()
        1159   uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);   in csio_mb_debug_cmd_handler()
        1192   uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);   in csio_mb_issue()
        [all …]
/drivers/infiniband/hw/mlx5/
    mem.c
        54   u64 len, pfn;   in mlx5_ib_cont_pages() local
        78   pfn = sg_dma_address(sg) >> page_shift;   in mlx5_ib_cont_pages()
        79   if (base + p != pfn) {   in mlx5_ib_cont_pages()
        83   tmp = (unsigned long)(pfn | p);   in mlx5_ib_cont_pages()
        87   base = pfn;   in mlx5_ib_cont_pages()
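mlx5_ib_cont_pages() rates each run of DMA addresses by OR-ing the run's starting frame with its offset-in-run and taking the lowest set bit; the minimum across runs bounds the largest aligned block size the MR can use. A simplified sketch (the kernel version also folds in the first frame's alignment and the trailing run), with __builtin_ctzl standing in for find_first_bit():

#include <stdio.h>

static unsigned int max_block_shift(const unsigned long *pfns, unsigned long n)
{
    unsigned int m = sizeof(unsigned long) * 8;   /* optimistic: word width */
    unsigned long base = pfns[0], p = 0;

    for (unsigned long i = 0; i < n; i++, p++) {
        if (base + p != pfns[i]) {           /* run broken: rate the break */
            unsigned long tmp = pfns[i] | p;
            unsigned int bit = tmp ? (unsigned int)__builtin_ctzl(tmp) : m;

            if (bit < m)
                m = bit;
            base = pfns[i];
            p = 0;
        }
    }
    return m;
}

int main(void)
{
    unsigned long contig[] = { 0x1000, 0x1001, 0x1002, 0x1003 };
    unsigned long split[]  = { 0x1000, 0x2001 };

    printf("shift %u\n", max_block_shift(contig, 4)); /* unbroken: stays wide */
    printf("shift %u\n", max_block_shift(split, 2));  /* 0: run breaks oddly */
    return 0;
}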
/drivers/gpu/drm/gma500/
    gem.c
        143   unsigned long pfn;   in psb_gem_fault() local
        175   pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;   in psb_gem_fault()
        177   pfn = page_to_pfn(r->pages[page_offset]);   in psb_gem_fault()
        178   ret = vm_insert_pfn(vma, vmf->address, pfn);   in psb_gem_fault()
/drivers/edac/
    cell_edac.c
        38   unsigned long address, pfn, offset, syndrome;   in cell_edac_count_ce() local
        47   pfn = address >> PAGE_SHIFT;   in cell_edac_count_ce()
        53   csrow->first_page + pfn, offset, syndrome,   in cell_edac_count_ce()
        61   unsigned long address, pfn, offset;   in cell_edac_count_ue() local
        70   pfn = address >> PAGE_SHIFT;   in cell_edac_count_ue()
        75   csrow->first_page + pfn, offset, 0,   in cell_edac_count_ue()
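Both handlers split the faulting physical address the same way: the high bits select the page frame, the low bits the offset within it, and csrow->first_page rebases the frame into the row. A sketch with an invented first_page value:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
    unsigned long address = 0x12345678UL;
    unsigned long pfn = address >> PAGE_SHIFT;      /* 0x12345: which frame */
    unsigned long offset = address & ~PAGE_MASK;    /* 0x678: where inside it */
    unsigned long first_page = 0x1000;              /* hypothetical row base */

    printf("frame %#lx offset %#lx -> row page %#lx\n",
           pfn, offset, first_page + pfn);
    return 0;
}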
    fsl_ddr_edac.c
        281   u32 pfn;   in fsl_mc_check() local
        314   pfn = err_addr >> PAGE_SHIFT;   in fsl_mc_check()
        318   if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))   in fsl_mc_check()
        351   fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);   in fsl_mc_check()
        359   pfn, err_addr & ~PAGE_MASK, syndrome,   in fsl_mc_check()
        365   pfn, err_addr & ~PAGE_MASK, syndrome,   in fsl_mc_check()
    i3000_edac.c
        235   unsigned long pfn, offset;   in i3000_process_error_info() local
        252   pfn = deap_pfn(info->edeap, info->deap);   in i3000_process_error_info()
        256   row = edac_mc_find_csrow_by_page(mci, pfn);   in i3000_process_error_info()
        260   pfn, offset, 0,   in i3000_process_error_info()
        265   pfn, offset, info->derrsyn,   in i3000_process_error_info()
/drivers/dax/
    device.c
        256   pfn_t pfn;   in __dev_dax_pte_fault() local
        279   pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);   in __dev_dax_pte_fault()
        281   rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);   in __dev_dax_pte_fault()
        298   pfn_t pfn;   in __dev_dax_pmd_fault() local
        335   pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);   in __dev_dax_pmd_fault()
        337   return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,   in __dev_dax_pmd_fault()
        349   pfn_t pfn;   in __dev_dax_pud_fault() local
        387   pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);   in __dev_dax_pud_fault()
        389   return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,   in __dev_dax_pud_fault()
/drivers/media/v4l2-core/
    videobuf2-dma-contig.c
        455   static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)   in vb2_dc_pfn_to_dma() argument
        457   return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);   in vb2_dc_pfn_to_dma()
        460   static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)   in vb2_dc_pfn_to_dma() argument
        462   return (dma_addr_t)__pfn_to_bus(pfn);   in vb2_dc_pfn_to_dma()
        465   static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)   in vb2_dc_pfn_to_dma() argument
        467   return (dma_addr_t)__pfn_to_phys(pfn);   in vb2_dc_pfn_to_dma()
        470   static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)   in vb2_dc_pfn_to_dma() argument
        473   return (dma_addr_t)(pfn) << PAGE_SHIFT;   in vb2_dc_pfn_to_dma()
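vb2_dc_pfn_to_dma() is compiled in one of several flavours depending on what the architecture provides, falling back to the identity mapping pfn << PAGE_SHIFT when no bus translation exists. A sketch of that ladder, with hypothetical HAVE_* macros standing in for the kernel config symbols:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint64_t dma_addr_t;

#if defined(HAVE_ARCH_PFN_TO_DMA)
static dma_addr_t pfn_to_dma(unsigned long pfn) { return arch_pfn_to_dma(pfn); }
#elif defined(HAVE_PFN_TO_BUS)
static dma_addr_t pfn_to_dma(unsigned long pfn) { return pfn_to_bus(pfn); }
#else
/* last resort: assume the bus sees physical addresses directly */
static dma_addr_t pfn_to_dma(unsigned long pfn)
{
    return (dma_addr_t)pfn << PAGE_SHIFT;
}
#endif

int main(void)
{
    printf("%#llx\n", (unsigned long long)pfn_to_dma(0x1234));  /* 0x1234000 */
    return 0;
}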
/drivers/gpu/drm/omapdrm/
    omap_gem.c
        379   unsigned long pfn;   in fault_1d() local
        387   pfn = page_to_pfn(omap_obj->pages[pgoff]);   in fault_1d()
        390   pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;   in fault_1d()
        394   pfn, pfn << PAGE_SHIFT);   in fault_1d()
        396   return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));   in fault_1d()
        408   unsigned long pfn;   in fault_2d() local
        483   pfn = entry->dma_addr >> PAGE_SHIFT;   in fault_2d()
        486   pfn, pfn << PAGE_SHIFT);   in fault_2d()
        489   vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));   in fault_2d()
        490   pfn += priv->usergart[fmt].stride_pfn;   in fault_2d()
/drivers/gpu/drm/ttm/
    ttm_bo_vm.c
        103   unsigned long pfn;   in ttm_bo_vm_fault() local
        237   pfn = bdev->driver->io_mem_pfn(bo, page_offset);   in ttm_bo_vm_fault()
        249   pfn = page_to_pfn(page);   in ttm_bo_vm_fault()
        254   __pfn_to_pfn_t(pfn, PFN_DEV));   in ttm_bo_vm_fault()
        256   ret = vm_insert_pfn(&cvma, address, pfn);   in ttm_bo_vm_fault()
/drivers/acpi/
    osl.c
        273   #define should_use_kmap(pfn) 0   argument
        275   #define should_use_kmap(pfn) page_is_ram(pfn)   argument
        280   unsigned long pfn;   in acpi_map() local
        282   pfn = pg_off >> PAGE_SHIFT;   in acpi_map()
        283   if (should_use_kmap(pfn)) {   in acpi_map()
        286   return (void __iomem __force *)kmap(pfn_to_page(pfn));   in acpi_map()
        293   unsigned long pfn;   in acpi_unmap() local
        295   pfn = pg_off >> PAGE_SHIFT;   in acpi_unmap()
        296   if (should_use_kmap(pfn))   in acpi_unmap()
        297   kunmap(pfn_to_page(pfn));   in acpi_unmap()
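acpi_map() picks its mapping primitive by frame type: frames that should_use_kmap() says are RAM go through kmap(), anything else is treated as MMIO and ioremapped, and acpi_unmap() must mirror the same choice when tearing down. A user-space sketch where every helper is a stand-in, not a kernel call:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT 12

static bool page_is_ram(unsigned long pfn)   { return pfn < 0x80000; /* fake: first 2 GiB */ }
static void *kmap_stub(unsigned long pfn)    { printf("kmap pfn %#lx\n", pfn); return NULL; }
static void *ioremap_stub(unsigned long pfn) { printf("ioremap pfn %#lx\n", pfn); return NULL; }

static void *acpi_map_sketch(unsigned long long phys)
{
    unsigned long pfn = (unsigned long)(phys >> PAGE_SHIFT);

    /* mirrors should_use_kmap(): RAM goes through kmap, the rest is MMIO */
    return page_is_ram(pfn) ? kmap_stub(pfn) : ioremap_stub(pfn);
}

int main(void)
{
    acpi_map_sketch(0x1000000ULL);    /* low RAM -> kmap path */
    acpi_map_sketch(0xfed00000ULL);   /* MMIO-looking address -> ioremap path */
    return 0;
}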
/drivers/net/ethernet/mellanox/mlx5/core/
    uar.c
        103   phys_addr_t pfn;   in alloc_uars_page() local
        137   pfn = uar2pfn(mdev, up->index);   in alloc_uars_page()
        139   up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);   in alloc_uars_page()
        145   up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);   in alloc_uars_page()