
Searched refs:pfn (Results 1 – 25 of 131) sorted by relevance


/drivers/gpu/drm/i915/selftests/
scatterlist.c
48 unsigned long pfn, n; in expect_pfn_sg() local
50 pfn = pt->start; in expect_pfn_sg()
55 if (page_to_pfn(page) != pfn) { in expect_pfn_sg()
57 __func__, who, pfn, page_to_pfn(page)); in expect_pfn_sg()
70 pfn += npages; in expect_pfn_sg()
72 if (pfn != pt->end) { in expect_pfn_sg()
74 __func__, who, pt->end, pfn); in expect_pfn_sg()
86 unsigned long pfn; in expect_pfn_sg_page_iter() local
88 pfn = pt->start; in expect_pfn_sg_page_iter()
92 if (page != pfn_to_page(pfn)) { in expect_pfn_sg_page_iter()
[all …]
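
The selftest walks the scatterlist and checks that every page continues one contiguous pfn run from pt->start to pt->end. A minimal standalone sketch of that invariant, with the pages flattened into a plain pfn array (names and the flat array are illustrative, not the driver's types):

	#include <stddef.h>

	/* Return 0 iff pfns[0..n) is exactly the contiguous run [start, end). */
	static int expect_pfn_run(const unsigned long *pfns, size_t n,
				  unsigned long start, unsigned long end)
	{
		unsigned long expected = start;
		size_t i;

		for (i = 0; i < n; i++) {
			if (pfns[i] != expected)
				return -1;	/* mirrors the "pfn mismatch" error path */
			expected++;		/* the selftest advances by npages per sg entry */
		}
		return expected == end ? 0 : -1; /* run must end where the table says */
	}
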
/drivers/ras/
cec.c
186 static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to) in __find_elem() argument
196 if (this_pfn < pfn) in __find_elem()
198 else if (this_pfn > pfn) in __find_elem()
200 else if (this_pfn == pfn) { in __find_elem()
223 static int find_elem(struct ce_array *ca, u64 pfn, unsigned int *to) in find_elem() argument
231 return __find_elem(ca, pfn, to); in find_elem()
271 u64 pfn; in del_lru_elem() local
277 pfn = del_lru_elem_unlocked(ca); in del_lru_elem()
280 return pfn; in del_lru_elem()
321 static int cec_add_elem(u64 pfn) in cec_add_elem() argument
[all …]
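
__find_elem() is a plain binary search over ce_array's sorted pfn list. A standalone model, assuming a pre-masked array (the real driver stores a hit counter in each element's low bits and strips it before comparing):

	typedef unsigned long long u64;

	/* Binary search for pfn; returns its index, or -1 and leaves *to unset. */
	static int find_pfn(const u64 *array, int n, u64 pfn, unsigned int *to)
	{
		int min = 0, max = n - 1;

		while (min <= max) {
			int i = (min + max) >> 1;
			u64 this_pfn = array[i];

			if (this_pfn < pfn)
				min = i + 1;
			else if (this_pfn > pfn)
				max = i - 1;
			else {
				*to = (unsigned int)i;
				return i;
			}
		}
		return -1;
	}
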
/drivers/gpu/drm/i915/
i915_mm.c
34 unsigned long pfn; member
48 return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT); in sgt_pfn()
61 r->pfn++; /* track insertions in case we need to unwind later */ in remap_sg()
78 set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); in remap_pfn()
79 r->pfn++; in remap_pfn()
95 unsigned long addr, unsigned long pfn, unsigned long size, in remap_io_mapping() argument
105 r.pfn = pfn; in remap_io_mapping()
111 zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT); in remap_io_mapping()
149 zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT); in remap_io_sg()
i915_scatterlist.h
26 unsigned long pfn; member
42 s.pfn = page_to_pfn(sg_page(s.sgp)); in __sgt_iter()
103 ((__pp) = (__iter).pfn == 0 ? NULL : \
104 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
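
__sgt_iter() caches the pfn of the current sg entry's first page, and the macro at lines 103–104 derives each page as pfn_to_page(pfn + (curr >> PAGE_SHIFT)): base frame plus the byte offset converted to pages. The arithmetic as a hedged standalone sketch, assuming PAGE_SHIFT is 12:

	struct sgt_iter_model {
		unsigned long pfn;	/* frame number of the entry's first page */
		unsigned int curr;	/* byte offset within the entry */
	};

	/* Frame currently under the iterator; pfn == 0 is the "empty" sentinel. */
	static unsigned long sgt_iter_pfn(const struct sgt_iter_model *it)
	{
		return it->pfn ? it->pfn + (it->curr >> 12) : 0;
	}
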
/drivers/virtio/
virtio_mem.c
274 static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
276 static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
924 unsigned long pfn; in virtio_mem_sbm_notify_going_offline() local
930 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + in virtio_mem_sbm_notify_going_offline()
932 virtio_mem_fake_offline_going_offline(pfn, nr_pages); in virtio_mem_sbm_notify_going_offline()
940 unsigned long pfn; in virtio_mem_sbm_notify_cancel_offline() local
946 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + in virtio_mem_sbm_notify_cancel_offline()
948 virtio_mem_fake_offline_cancel_offline(pfn, nr_pages); in virtio_mem_sbm_notify_cancel_offline()
954 unsigned long pfn, in virtio_mem_bbm_notify_going_offline() argument
964 virtio_mem_fake_offline_going_offline(pfn, nr_pages); in virtio_mem_bbm_notify_going_offline()
[all …]
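
virtio_mem converts a memory-block id to a physical address and then to the first pfn of the range with PFN_DOWN(), which is simply a right shift by PAGE_SHIFT. A sketch of the conversion; the block size and shift below are assumptions for illustration, not the driver's constants:

	#define MB_SHIFT		27	/* assume 128 MiB memory blocks */
	#define PAGE_SHIFT_MODEL	12	/* assume 4 KiB pages */

	/* First pfn of memory block 'mb_id', plus an optional sub-block offset. */
	static unsigned long mb_first_pfn(unsigned long mb_id, unsigned long long offset)
	{
		unsigned long long phys = ((unsigned long long)mb_id << MB_SHIFT) + offset;

		return (unsigned long)(phys >> PAGE_SHIFT_MODEL); /* PFN_DOWN(phys) */
	}
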
/drivers/xen/
mem-reservation.c
37 unsigned long pfn = page_to_pfn(page); in __xenmem_reservation_va_mapping_update() local
48 set_phys_to_machine(pfn, frames[i]); in __xenmem_reservation_va_mapping_update()
51 (unsigned long)__va(pfn << PAGE_SHIFT), in __xenmem_reservation_va_mapping_update()
65 unsigned long pfn = page_to_pfn(page); in __xenmem_reservation_va_mapping_reset() local
75 (unsigned long)__va(pfn << PAGE_SHIFT), in __xenmem_reservation_va_mapping_reset()
79 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); in __xenmem_reservation_va_mapping_reset()
balloon.c
302 unsigned long pfn, i; in reserve_additional_memory() local
304 pfn = PFN_DOWN(resource->start); in reserve_additional_memory()
306 if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { in reserve_additional_memory()
677 unsigned long pfn, extra_pfn_end; in balloon_add_regions() local
694 for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) in balloon_add_regions()
695 balloon_append(pfn_to_page(pfn)); in balloon_add_regions()
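
Both Xen paths walk pfn ranges one frame at a time: mem-reservation updates the phys-to-machine entry per pfn, and balloon_add_regions() appends pfn_to_page(pfn) for every frame in the extra region. The loop shape, with the per-frame action as a callback:

	/* Apply 'fn' to every frame in [start_pfn, end_pfn), as the balloon does
	 * with balloon_append(pfn_to_page(pfn)). */
	static void for_each_pfn(unsigned long start_pfn, unsigned long end_pfn,
				 void (*fn)(unsigned long pfn))
	{
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++)
			fn(pfn);
	}
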
/drivers/gpu/drm/i915/gt/
shmem_utils.c
97 unsigned long pfn; in __shmem_rw() local
99 for (pfn = off >> PAGE_SHIFT; len; pfn++) { in __shmem_rw()
105 page = shmem_read_mapping_page_gfp(file->f_mapping, pfn, in __shmem_rw()
132 unsigned long pfn; in shmem_read_to_iosys_map() local
134 for (pfn = off >> PAGE_SHIFT; len; pfn++) { in shmem_read_to_iosys_map()
140 page = shmem_read_mapping_page_gfp(file->f_mapping, pfn, in shmem_read_to_iosys_map()
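
In __shmem_rw() the variable called pfn is really a file page index: the loop starts at off >> PAGE_SHIFT, fetches one shmem page per iteration, and clamps each copy to the bytes left in the current page. A standalone sketch of the loop bounds, assuming 4 KiB pages:

	static void walk_file_pages(unsigned long off, unsigned long len)
	{
		unsigned long idx;	/* the "pfn" in __shmem_rw() */

		for (idx = off >> 12; len; idx++) {
			unsigned long this = 4096 - (off & 4095); /* room left in page */

			if (this > len)
				this = len;
			/* ... copy 'this' bytes to/from the page at index idx ... */
			off += this;
			len -= this;
		}
	}
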
/drivers/virt/geniezone/
gzvm_mmu.c
130 u64 gfn, u64 *pfn) in gzvm_vm_allocate_guest_page() argument
149 *pfn = page_to_pfn(page); in gzvm_vm_allocate_guest_page()
157 u64 pfn; in handle_single_demand_page() local
159 ret = gzvm_vm_allocate_guest_page(vm, &vm->memslot[memslot_id], gfn, &pfn); in handle_single_demand_page()
163 ret = gzvm_arch_map_guest(vm->vm_id, memslot_id, pfn, gfn, 1); in handle_single_demand_page()
172 u64 pfn, __gfn; in handle_block_demand_page() local
193 ret = gzvm_vm_allocate_guest_page(vm, memslot, __gfn, &pfn); in handle_block_demand_page()
200 vm->demand_page_buffer[i] = pfn; in handle_block_demand_page()
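
The demand-paging path resolves one pfn per guest frame (gfn) and, in the block case, batches them into demand_page_buffer before a single map call. A skeleton of that pattern; allocate_pfn() is a hypothetical stand-in for gzvm_vm_allocate_guest_page():

	typedef unsigned long long u64;

	static int demand_page_block(u64 start_gfn, int npages, u64 *pfn_buf,
				     int (*allocate_pfn)(u64 gfn, u64 *pfn))
	{
		int i, ret;

		for (i = 0; i < npages; i++) {
			ret = allocate_pfn(start_gfn + i, &pfn_buf[i]);
			if (ret)
				return ret;	/* leave the partial buffer unmapped */
		}
		return 0;	/* caller maps pfn_buf[0..npages) in one call */
	}
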
/drivers/dax/
device.c
76 static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn, in dax_set_mapping() argument
92 struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i); in dax_set_mapping()
108 pfn_t pfn; in __dev_dax_pte_fault() local
129 pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); in __dev_dax_pte_fault()
131 dax_set_mapping(vmf, pfn, fault_size); in __dev_dax_pte_fault()
133 return vmf_insert_mixed(vmf->vma, vmf->address, pfn); in __dev_dax_pte_fault()
143 pfn_t pfn; in __dev_dax_pmd_fault() local
172 pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); in __dev_dax_pmd_fault()
174 dax_set_mapping(vmf, pfn, fault_size); in __dev_dax_pmd_fault()
176 return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE); in __dev_dax_pmd_fault()
[all …]
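
Both dax fault handlers translate the faulting file offset into a physical address, wrap it as a pfn_t tagged PFN_DEV|PFN_MAP, and insert it at PTE or PMD granularity. The address math in isolation, assuming a linear device layout (dax_pgoff_to_phys() is the real helper; PAGE_SHIFT assumed 12):

	/* Physical address backing file page 'pgoff' of a linearly-mapped region. */
	static unsigned long long dax_fault_phys(unsigned long long range_start,
						 unsigned long pgoff)
	{
		return range_start + ((unsigned long long)pgoff << 12);
	}

	/* The pfn handed to vmf_insert_mixed()/vmf_insert_pfn_pmd() is then: */
	static unsigned long dax_fault_pfn(unsigned long long phys)
	{
		return (unsigned long)(phys >> 12);	/* pfn_t flag bits omitted */
	}
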
/drivers/iommu/
iova.c
22 unsigned long pfn,
347 private_find_iova(struct iova_domain *iovad, unsigned long pfn) in private_find_iova() argument
356 if (pfn < iova->pfn_lo) in private_find_iova()
358 else if (pfn > iova->pfn_hi) in private_find_iova()
381 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) in find_iova() argument
388 iova = private_find_iova(iovad, pfn); in find_iova()
420 free_iova(struct iova_domain *iovad, unsigned long pfn) in free_iova() argument
426 iova = private_find_iova(iovad, pfn); in free_iova()
496 free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) in free_iova_fast() argument
498 if (iova_rcache_insert(iovad, pfn, size)) in free_iova_fast()
[all …]
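
private_find_iova() descends an rbtree whose nodes are [pfn_lo, pfn_hi] ranges; the per-node decision is a pure interval comparison. A model of that comparator (struct name is illustrative):

	struct iova_model {
		unsigned long pfn_lo, pfn_hi;
	};

	/* <0: search left subtree, >0: search right, 0: pfn is inside this node. */
	static int iova_cmp(const struct iova_model *iova, unsigned long pfn)
	{
		if (pfn < iova->pfn_lo)
			return -1;
		if (pfn > iova->pfn_hi)
			return 1;
		return 0;
	}
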
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmmgp100.c
82 for (; ptes; ptes--, map->pfn++) { in gp100_vmm_pgt_pfn()
85 if (!(*map->pfn & NVKM_VMM_PFN_V)) in gp100_vmm_pgt_pfn()
88 if (!(*map->pfn & NVKM_VMM_PFN_W)) in gp100_vmm_pgt_pfn()
91 if (!(*map->pfn & NVKM_VMM_PFN_A)) in gp100_vmm_pgt_pfn()
94 if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) { in gp100_vmm_pgt_pfn()
95 addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT; in gp100_vmm_pgt_pfn()
105 data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4; in gp100_vmm_pgt_pfn()
176 .pfn = gp100_vmm_pgt_pfn,
319 for (; ptes; ptes--, map->pfn++) { in gp100_vmm_pd0_pfn()
322 if (!(*map->pfn & NVKM_VMM_PFN_V)) in gp100_vmm_pd0_pfn()
[all …]
vmm.c
258 nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes) in nvkm_vmm_unref_ptes() argument
265 if (pfn) { in nvkm_vmm_unref_ptes()
364 nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes) in nvkm_vmm_ref_ptes() argument
394 nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes) in nvkm_vmm_sparse_unref_ptes() argument
402 return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes); in nvkm_vmm_sparse_unref_ptes()
406 nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes) in nvkm_vmm_sparse_ref_ptes() argument
409 return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes); in nvkm_vmm_sparse_ref_ptes()
502 u64 addr, u64 size, const char *name, bool ref, bool pfn, in nvkm_vmm_iter() argument
503 bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32), in nvkm_vmm_iter()
563 if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) { in nvkm_vmm_iter()
[all …]
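
In nouveau's MMU code each *map->pfn entry is a packed word: validity (NVKM_VMM_PFN_V), writability (_W), accessed (_A) and placement (_VRAM) flags in the low bits, with the address in the high bits behind NVKM_VMM_PFN_ADDR_SHIFT. A decode sketch with illustrative bit positions, not the driver's actual layout:

	#define PFN_V		(1ULL << 0)	/* entry is valid */
	#define PFN_W		(1ULL << 1)	/* writable */
	#define PFN_VRAM	(1ULL << 3)	/* backed by VRAM, not host RAM */

	/* Returns 1 and fills addr/writable when the packed word is valid. */
	static int decode_pfn_word(unsigned long long word,
				   unsigned long long *addr, int *writable)
	{
		if (!(word & PFN_V))
			return 0;		/* skipped, like the driver's continue */
		*writable = !!(word & PFN_W);
		*addr = word >> 12;		/* stand-in for PFN_ADDR_SHIFT */
		return 1;
	}
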
/drivers/char/
mem.c
57 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) in valid_mmap_phys_addr_range() argument
64 static inline int page_is_allowed(unsigned long pfn) in page_is_allowed() argument
66 return devmem_is_allowed(pfn); in page_is_allowed()
68 static inline int range_is_allowed(unsigned long pfn, unsigned long size) in range_is_allowed() argument
70 u64 from = ((u64)pfn) << PAGE_SHIFT; in range_is_allowed()
75 if (!devmem_is_allowed(pfn)) in range_is_allowed()
78 pfn++; in range_is_allowed()
83 static inline int page_is_allowed(unsigned long pfn) in page_is_allowed() argument
87 static inline int range_is_allowed(unsigned long pfn, unsigned long size) in range_is_allowed() argument
265 unsigned long pfn, unsigned long size, pgprot_t *vma_prot) in phys_mem_access_prot_allowed() argument
[all …]
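
range_is_allowed() refuses a /dev/mem mapping unless every frame the mapping would touch passes devmem_is_allowed(). A standalone model with the policy as a callback, assuming 4 KiB pages:

	static int range_allowed(unsigned long pfn, unsigned long size,
				 int (*allowed)(unsigned long pfn))
	{
		unsigned long long from = (unsigned long long)pfn << 12;
		unsigned long long to = from + size;
		unsigned long long cursor = from;

		while (cursor < to) {
			if (!allowed(pfn))
				return 0;	/* one forbidden frame rejects the map */
			cursor += 4096;
			pfn++;
		}
		return 1;
	}
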
/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c
608 static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add) in ehea_update_busmap() argument
621 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; in ehea_update_busmap()
651 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages) in ehea_add_sect_bmap() argument
656 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT); in ehea_add_sect_bmap()
661 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages) in ehea_rem_sect_bmap() argument
666 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT); in ehea_rem_sect_bmap()
671 static int ehea_is_hugepage(unsigned long pfn) in ehea_is_hugepage() argument
673 if (pfn & EHEA_HUGEPAGE_PFN_MASK) in ehea_is_hugepage()
676 if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT) in ehea_is_hugepage()
686 unsigned long pfn, start_pfn, end_pfn, nr_pages; in ehea_create_busmap_callback() local
[all …]
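
ehea tracks memory in fixed-size firmware sections, so ehea_update_busmap() first converts the pfn range into section indices: section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE. The index math, parameterized rather than hard-coding the section size:

	static unsigned long pfn_to_section_idx(unsigned long pfn,
						unsigned long long sectsize)
	{
		return (unsigned long)(((unsigned long long)pfn * 4096) / sectsize);
	}

	/* One-past-last section covered by [pfn, pfn + nr_pages), nr_pages > 0. */
	static unsigned long section_span_end(unsigned long pfn, unsigned long nr_pages,
					      unsigned long long sectsize)
	{
		return pfn_to_section_idx(pfn + nr_pages - 1, sectsize) + 1;
	}
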
/drivers/vdpa/vdpa_user/
iova_domain.c
139 unsigned long pfn = PFN_DOWN(orig); in do_bounce() local
147 page = pfn_to_page(pfn); in do_bounce()
154 pfn++; in do_bounce()
233 unsigned long pfn, bounce_pfns; in vduse_domain_free_kernel_bounce_pages() local
237 for (pfn = 0; pfn < bounce_pfns; pfn++) { in vduse_domain_free_kernel_bounce_pages()
238 map = &domain->bounce_maps[pfn]; in vduse_domain_free_kernel_bounce_pages()
557 unsigned long pfn, bounce_pfns; in vduse_domain_create() local
579 for (pfn = 0; pfn < bounce_pfns; pfn++) { in vduse_domain_create()
580 map = &domain->bounce_maps[pfn]; in vduse_domain_create()
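
The VDUSE domain keeps exactly one bounce map per page of the bounce range, so an address translates to its slot by its pfn index (PFN_DOWN). An indexing sketch with an illustrative struct, assuming 4 KiB pages:

	struct bounce_map_model {
		void *bounce_page;
	};

	/* Slot for 'iova': one entry per frame, indexed by PFN_DOWN(iova). */
	static struct bounce_map_model *
	iova_to_bounce_map(struct bounce_map_model *maps, unsigned long long iova)
	{
		return &maps[iova >> 12];
	}
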
/drivers/vfio/
vfio_iommu_type1.c
130 unsigned long pfn; /* Host pfn */ member
154 static int put_pfn(unsigned long pfn, int prot);
368 unsigned long pfn) in vfio_add_to_pfn_list() argument
377 vpfn->pfn = pfn; in vfio_add_to_pfn_list()
406 ret = put_pfn(vpfn->pfn, dma->prot); in vfio_iova_put_vfio_pfn()
454 static bool is_invalid_reserved_pfn(unsigned long pfn) in is_invalid_reserved_pfn() argument
456 if (pfn_valid(pfn)) in is_invalid_reserved_pfn()
457 return PageReserved(pfn_to_page(pfn)); in is_invalid_reserved_pfn()
462 static int put_pfn(unsigned long pfn, int prot) in put_pfn() argument
464 if (!is_invalid_reserved_pfn(pfn)) { in put_pfn()
[all …]
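
put_pfn() releases a page reference only when the pfn maps a real, non-reserved page; is_invalid_reserved_pfn() treats anything without a valid struct page as reserved. The decision, with the two kernel predicates as callbacks:

	/* 1 when a reference should be dropped: pfn_valid() && !PageReserved(). */
	static int should_put_page(unsigned long pfn,
				   int (*valid)(unsigned long pfn),
				   int (*reserved)(unsigned long pfn))
	{
		if (valid(pfn))
			return !reserved(pfn);
		return 0;	/* invalid pfns were never refcounted to begin with */
	}
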
/drivers/base/
memory.c
58 static inline unsigned long pfn_to_block_id(unsigned long pfn) in pfn_to_block_id() argument
60 return memory_block_id(pfn_to_section_nr(pfn)); in pfn_to_block_id()
575 u64 pfn; in soft_offline_page_store() local
578 if (kstrtoull(buf, 0, &pfn) < 0) in soft_offline_page_store()
580 pfn >>= PAGE_SHIFT; in soft_offline_page_store()
581 ret = soft_offline_page(pfn, 0); in soft_offline_page_store()
591 u64 pfn; in hard_offline_page_store() local
594 if (kstrtoull(buf, 0, &pfn) < 0) in hard_offline_page_store()
596 pfn >>= PAGE_SHIFT; in hard_offline_page_store()
597 ret = memory_failure(pfn, MF_SW_SIMULATED); in hard_offline_page_store()
[all …]
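
The soft/hard offline sysfs handlers parse a physical address (not a pfn) from userspace and shift it down by PAGE_SHIFT before calling soft_offline_page() or memory_failure(). A userspace-flavored model of the parsing step:

	#include <stdlib.h>

	/* Parse a physical address and reduce it to a frame number. */
	static int parse_phys_to_pfn(const char *buf, unsigned long long *pfn)
	{
		char *end;
		unsigned long long phys = strtoull(buf, &end, 0); /* kstrtoull(.., 0, ..) */

		if (end == buf)
			return -1;
		*pfn = phys >> 12;	/* pfn >>= PAGE_SHIFT */
		return 0;
	}
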
node.c
747 static int __ref get_nid_for_pfn(unsigned long pfn) in get_nid_for_pfn() argument
751 return early_pfn_to_nid(pfn); in get_nid_for_pfn()
753 return pfn_to_nid(pfn); in get_nid_for_pfn()
790 unsigned long pfn; in register_mem_block_under_node_early() local
792 for (pfn = start_pfn; pfn <= end_pfn; pfn++) { in register_mem_block_under_node_early()
799 if (!pfn_in_present_section(pfn)) { in register_mem_block_under_node_early()
800 pfn = round_down(pfn + PAGES_PER_SECTION, in register_mem_block_under_node_early()
809 page_nid = get_nid_for_pfn(pfn); in register_mem_block_under_node_early()
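
When register_mem_block_under_node_early() hits a pfn outside any present section, it jumps to the last pfn of that section so the loop's pfn++ lands at the start of the next one. The skip arithmetic, assuming PAGES_PER_SECTION is a power of two:

	/* round_down(pfn + pages_per_section, pages_per_section) - 1 */
	static unsigned long skip_to_section_end(unsigned long pfn,
						 unsigned long pages_per_section)
	{
		return ((pfn + pages_per_section) & ~(pages_per_section - 1)) - 1;
	}
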
/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.c
201 struct brcmf_pno_net_param_le pfn; in brcmf_pno_add_ssid() local
204 pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN); in brcmf_pno_add_ssid()
205 pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY); in brcmf_pno_add_ssid()
206 pfn.wsec = cpu_to_le32(0); in brcmf_pno_add_ssid()
207 pfn.infra = cpu_to_le32(1); in brcmf_pno_add_ssid()
208 pfn.flags = 0; in brcmf_pno_add_ssid()
210 pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT); in brcmf_pno_add_ssid()
211 pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len); in brcmf_pno_add_ssid()
212 memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len); in brcmf_pno_add_ssid()
215 err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn)); in brcmf_pno_add_ssid()
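
Note the different meaning here: in brcmfmac, pfn is a Preferred Network (PNO) record for the firmware, not a page frame number. Every multi-byte field is converted to little-endian before the iovar is sent; a portable stand-in for that conversion (cpu_to_le32() is the kernel helper):

	#include <stdint.h>
	#include <string.h>

	/* Value that, when stored natively, has little-endian byte layout. */
	static uint32_t to_le32(uint32_t v)
	{
		unsigned char b[4] = {
			(unsigned char)v, (unsigned char)(v >> 8),
			(unsigned char)(v >> 16), (unsigned char)(v >> 24),
		};
		uint32_t le;

		memcpy(&le, b, sizeof(le));
		return le;
	}
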
/drivers/nvdimm/
pmem.c
68 unsigned long pfn_start, pfn_end, pfn; in pmem_mkpage_present() local
76 for (pfn = pfn_start; pfn < pfn_end; pfn++) { in pmem_mkpage_present()
77 struct page *page = pfn_to_page(pfn); in pmem_mkpage_present()
85 clear_mce_nospec(pfn); in pmem_mkpage_present()
245 pfn_t *pfn) in __pmem_direct_access() argument
256 if (pfn) in __pmem_direct_access()
257 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); in __pmem_direct_access()
306 void **kaddr, pfn_t *pfn) in pmem_dax_direct_access() argument
310 return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn); in pmem_dax_direct_access()
435 unsigned long pfn, unsigned long nr_pages, int mf_flags) in pmem_pagemap_memory_failure() argument
[all …]
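
__pmem_direct_access() reports the frame backing a page offset into the device: the physical base plus pgoff << PAGE_SHIFT, shifted down to a pfn. A schematic of the math (the driver also folds in a data offset and pfn_t flag bits, omitted here):

	static unsigned long pmem_pgoff_to_pfn(unsigned long long phys_addr,
					       unsigned long pgoff)
	{
		unsigned long long offset = (unsigned long long)pgoff << 12;

		return (unsigned long)((phys_addr + offset) >> 12);
	}
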
/drivers/gpu/drm/ttm/
ttm_bo_vm.c
190 unsigned long pfn; in ttm_bo_vm_fault_reserved() local
247 pfn = ttm_bo_io_mem_pfn(bo, page_offset); in ttm_bo_vm_fault_reserved()
255 pfn = page_to_pfn(page); in ttm_bo_vm_fault_reserved()
266 ret = vmf_insert_pfn_prot(vma, address, pfn, prot); in ttm_bo_vm_fault_reserved()
298 unsigned long pfn; in ttm_bo_vm_dummy_page() local
310 pfn = page_to_pfn(page); in ttm_bo_vm_dummy_page()
315 ret = vmf_insert_pfn_prot(vma, address, pfn, prot); in ttm_bo_vm_dummy_page()
/drivers/edac/
cell_edac.c
38 unsigned long address, pfn, offset, syndrome; in cell_edac_count_ce() local
47 pfn = address >> PAGE_SHIFT; in cell_edac_count_ce()
53 csrow->first_page + pfn, offset, syndrome, in cell_edac_count_ce()
61 unsigned long address, pfn, offset; in cell_edac_count_ue() local
70 pfn = address >> PAGE_SHIFT; in cell_edac_count_ue()
75 csrow->first_page + pfn, offset, 0, in cell_edac_count_ue()
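
cell_edac splits the failing address into the pfn used for the csrow-relative report and the offset within that frame. The decomposition, assuming 4 KiB pages:

	static void split_fault_address(unsigned long address,
					unsigned long *pfn, unsigned long *offset)
	{
		*pfn = address >> 12;		/* address >> PAGE_SHIFT */
		*offset = address & 4095;	/* address & ~PAGE_MASK */
	}
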
/drivers/scsi/csiostor/
csio_mb.c
281 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn)); in csio_mb_ldst()
450 FW_IQ_CMD_PFN_V(iq_params->pfn) | in csio_mb_iq_alloc()
507 FW_IQ_CMD_PFN_V(iq_params->pfn) | in csio_mb_iq_write()
632 FW_IQ_CMD_PFN_V(iq_params->pfn) | in csio_mb_iq_free()
667 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | in csio_mb_eq_ofld_alloc()
715 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | in csio_mb_eq_ofld_write()
818 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) | in csio_mb_eq_ofld_free()
1162 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); in csio_mb_debug_cmd_handler()
1163 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); in csio_mb_debug_cmd_handler()
1196 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); in csio_mb_issue()
[all …]
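
Another overloaded name: in csiostor, hw->pfn is the PCIe physical function number, used both in firmware command fields (FW_*_CMD_PFN_V) and to select the per-function mailbox register block via PF_REG(). Offset math with hypothetical base and stride values standing in for PF_REG():

	/* Register address for function 'pfn': base + pfn * stride + reg. */
	static unsigned int pf_reg(unsigned int pfn, unsigned int reg)
	{
		const unsigned int pf0_base = 0x1e000;	/* hypothetical */
		const unsigned int pf_stride = 0x400;	/* hypothetical */

		return pf0_base + pfn * pf_stride + reg;
	}
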
/drivers/hv/
hv_balloon.c
593 unsigned long pfn) in has_pfn_is_backed() argument
598 if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn)) in has_pfn_is_backed()
603 if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn)) in has_pfn_is_backed()
613 unsigned long pfn = start_pfn, count = 0; in hv_page_offline_check() local
617 while (pfn < start_pfn + nr_pages) { in hv_page_offline_check()
624 while ((pfn >= has->start_pfn) && in hv_page_offline_check()
625 (pfn < has->end_pfn) && in hv_page_offline_check()
626 (pfn < start_pfn + nr_pages)) { in hv_page_offline_check()
628 if (has_pfn_is_backed(has, pfn)) in hv_page_offline_check()
630 pfn++; in hv_page_offline_check()
[all …]
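
has_pfn_is_backed() accepts a pfn only if it falls inside the hot-add region's covered range and outside every recorded gap; hv_page_offline_check() then counts backed frames across the request. The predicate, modelled with a flat gap array instead of the driver's list:

	struct gap_model {
		unsigned long start_pfn, end_pfn;
	};

	static int pfn_is_backed(unsigned long pfn,
				 unsigned long covered_start, unsigned long covered_end,
				 const struct gap_model *gaps, int ngaps)
	{
		int i;

		if (pfn < covered_start || pfn >= covered_end)
			return 0;
		for (i = 0; i < ngaps; i++)
			if (pfn >= gaps[i].start_pfn && pfn < gaps[i].end_pfn)
				return 0;	/* inside a gap: not backed */
		return 1;
	}
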
