/arch/parisc/kernel/

pa7300lc.c
    33  u32 hpa;    in pa7300lc_lpmc()  [local]
    38  hpa = cpu_hpa();    in pa7300lc_lpmc()
    43  gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS),    in pa7300lc_lpmc()
    44  gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR),    in pa7300lc_lpmc()
    45  gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM));    in pa7300lc_lpmc()

drivers.c
    228  unsigned long hpa;    [member]
    238  if (pdev->hpa.start == d->hpa) {    in find_device()
    246  static struct parisc_device *find_device_by_addr(unsigned long hpa)    in find_device_by_addr()  [argument]
    249  .hpa = hpa,    in find_device_by_addr()
    494  alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)    in alloc_pa_dev()  [argument]
    503  if (find_device_by_addr(hpa) != NULL)    in alloc_pa_dev()
    506  status = pdc_iodc_read(&bytecnt, hpa, 0, &iodc_data, 32);    in alloc_pa_dev()
    523  dev->hpa.start = hpa;    in alloc_pa_dev()
    528  if (hpa == 0xf4000000 || hpa == 0xf8000000) {    in alloc_pa_dev()
    529  dev->hpa.end = hpa + 0x03ffffff;    in alloc_pa_dev()
    [all …]

processor.c
    102  txn_addr = dev->hpa.start; /* for legacy PDC */    in processor_probe()
    129  status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);    in processor_probe()
    135  &dev->hpa.start);    in processor_probe()
    148  &dev->hpa.start, cpu_info.cpu_num, NR_CPUS);    in processor_probe()
    167  p->hpa = dev->hpa.start; /* save CPU hpa */    in processor_probe()
    387  if (0 == cpuinfo->hpa)    in show_cpuinfo()

firmware.c
    389  int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,    in pdc_iodc_read()  [argument]
    396  retval = mem_pdc_call(PDC_IODC, PDC_IODC_READ, __pa(pdc_result), hpa,    in pdc_iodc_read()
    704  int pdc_lan_station_id(char *lan_addr, unsigned long hpa)    in pdc_lan_station_id()  [argument]
    711  __pa(pdc_result), hpa);    in pdc_lan_station_id()
    912  int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa)    in pdc_pci_irt_size()  [argument]
    919  __pa(pdc_result), hpa);    in pdc_pci_irt_size()
    936  int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl)    in pdc_pci_irt()  [argument]
    946  __pa(pdc_result), hpa, __pa(tbl));    in pdc_pci_irt()
    962  unsigned int pdc_pci_config_read(void *hpa, unsigned long cfg_addr)
    971  __pa(pdc_result), hpa, cfg_addr&~3UL, 4UL);
    [all …]

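Taken together with the alloc_pa_dev() lines above, these PDC wrappers are how parisc asks firmware whether a module answers at a given hard physical address. A minimal sketch of that probe pattern follows; the helper name probe_module_at() and its error handling are illustrative and not taken from the kernel:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/pdc.h>            /* pdc_iodc_read(), PDC_OK */

    /* Ask PDC for the first 32 bytes of IODC data of the module whose
     * register set starts at hpa; a failure means nothing answers there. */
    static int probe_module_at(unsigned long hpa)
    {
            u8 iodc_data[32];
            unsigned long bytecnt;

            if (pdc_iodc_read(&bytecnt, hpa, 0, &iodc_data, 32) != PDC_OK)
                    return -ENODEV;

            /* iodc_data now holds the firmware's description of the module */
            return 0;
    }
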
smp.c
    206  gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);    in ipi_send()
    354  printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);    in smp_boot_one_cpu()
    365  gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);    in smp_boot_one_cpu()

/arch/s390/kvm/

vsie.c
    646  static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)    in pin_guest_page()  [argument]
    653  *hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);    in pin_guest_page()
    658  static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)    in unpin_guest_page()  [argument]
    660  kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);    in unpin_guest_page()
    669  hpa_t hpa;    in unpin_blocks()  [local]
    671  hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;    in unpin_blocks()
    672  if (hpa) {    in unpin_blocks()
    673  unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);    in unpin_blocks()
    679  hpa = scb_s->itdba;    in unpin_blocks()
    680  if (hpa) {    in unpin_blocks()
    [all …]

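The vsie.c fragments describe both halves of pinning a guest page: the pin step records the host address of the page plus the offset of the gpa inside it, and the unpin step recovers the pfn from that stored address. A condensed sketch reconstructed from those fragments; the _sketch suffixes mark it as an illustration rather than the file's exact code:

    #include <linux/errno.h>
    #include <linux/kvm_host.h>     /* gfn_to_page(), kvm_release_pfn_dirty() */
    #include <linux/mm.h>           /* page_to_virt() */

    /* Pin the guest page backing gpa and report its host address. */
    static int pin_guest_page_sketch(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
    {
            struct page *page = gfn_to_page(kvm, gpa_to_gfn(gpa));

            if (is_error_page(page))
                    return -EINVAL;
            /* host address of the page plus the offset of gpa within it */
            *hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
            return 0;
    }

    /* Release the pin; the pfn is recovered from the stored host address,
     * as the unpin_guest_page() fragment above does. */
    static void unpin_guest_page_sketch(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
    {
            kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
            mark_page_dirty(kvm, gpa_to_gfn(gpa));
    }
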
/arch/powerpc/mm/book3s64/

iommu_api.c
    349  unsigned long ua, unsigned int pageshift, unsigned long *hpa)    in mm_iommu_ua_to_hpa()  [argument]
    361  *hpa = mem->dev_hpa + (ua - mem->ua);    in mm_iommu_ua_to_hpa()
    366  *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);    in mm_iommu_ua_to_hpa()
    373  unsigned long ua, unsigned int pageshift, unsigned long *hpa)    in mm_iommu_ua_to_hpa_rm()  [argument]
    385  *hpa = mem->dev_hpa + (ua - mem->ua);    in mm_iommu_ua_to_hpa_rm()
    393  *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);    in mm_iommu_ua_to_hpa_rm()
    422  bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,    in mm_iommu_is_devmem()  [argument]
    434  if ((mem->dev_hpa <= hpa) && (hpa < end)) {    in mm_iommu_is_devmem()
    441  *size = min(1UL << pageshift, end - hpa);    in mm_iommu_is_devmem()

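mm_iommu_ua_to_hpa() and its real-mode twin translate a preregistered userspace address into the host physical address backing it, either device memory via mem->dev_hpa or a pinned page. A hedged sketch of how a caller such as the KVM TCE code further down drives it; mm_iommu_lookup() is assumed to return a region registered earlier, and ua_to_hpa_sketch() is a made-up name:

    #include <linux/errno.h>
    #include <asm/mmu_context.h>    /* mm_iommu_lookup(), mm_iommu_ua_to_hpa() */

    /* Resolve a preregistered userspace address to the host physical address
     * backing it, for a mapping of 1 << pageshift bytes. */
    static long ua_to_hpa_sketch(struct mm_struct *mm, unsigned long ua,
                                 unsigned int pageshift, unsigned long *hpa)
    {
            struct mm_iommu_table_group_mem_t *mem;

            mem = mm_iommu_lookup(mm, ua, 1ULL << pageshift);
            if (!mem)
                    return -ENXIO;  /* region was never preregistered */

            /* fails if the region was registered with a smaller page size */
            return mm_iommu_ua_to_hpa(mem, ua, pageshift, hpa);
    }
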
/arch/powerpc/kvm/

book3s_64_vio_hv.c
    120  unsigned long hpa = 0;    in kvmppc_rm_tce_validate()  [local]
    128  if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))    in kvmppc_rm_tce_validate()
    222  unsigned long entry, unsigned long *hpa,    in iommu_tce_xchg_no_kill_rm()  [argument]
    227  ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);    in iommu_tce_xchg_no_kill_rm()
    258  unsigned long hpa = 0;    in kvmppc_rm_clear_tce()  [local]
    261  iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);    in kvmppc_rm_clear_tce()
    291  unsigned long hpa = 0;    in kvmppc_rm_tce_iommu_do_unmap()  [local]
    294  if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))    in kvmppc_rm_tce_iommu_do_unmap()
    306  iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);    in kvmppc_rm_tce_iommu_do_unmap()
    335  unsigned long hpa = 0;    in kvmppc_rm_tce_iommu_do_map()  [local]
    [all …]

book3s_64_vio.c
    379  unsigned long hpa = 0;    in kvmppc_tce_validate()  [local]
    384  if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {    in kvmppc_tce_validate()
    432  unsigned long hpa = 0;    in kvmppc_clear_tce()  [local]
    435  iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);    in kvmppc_clear_tce()
    464  unsigned long hpa = 0;    in kvmppc_tce_iommu_do_unmap()  [local]
    467  if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,    in kvmppc_tce_iommu_do_unmap()
    476  iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);    in kvmppc_tce_iommu_do_unmap()
    505  unsigned long hpa;    in kvmppc_tce_iommu_do_map()  [local]
    518  if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))    in kvmppc_tce_iommu_do_map()
    524  ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);    in kvmppc_tce_iommu_do_map()

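The kvmppc_clear_tce() lines above show the unmap idiom: exchanging a TCE entry with a DMA_NONE direction clears it, and the returned hpa/direction pair describes what had been mapped there. A minimal sketch of that idiom; the wrapper name is invented, and the real code additionally flushes the TCE cache (iommu_tce_kill()) once per batch of entries:

    #include <linux/dma-direction.h>        /* enum dma_data_direction, DMA_NONE */
    #include <asm/iommu.h>                  /* struct iommu_table, iommu_tce_xchg_no_kill() */

    /* Unmap one TCE; on return hpa/dir describe the previous mapping. */
    static void clear_tce_sketch(struct mm_struct *mm, struct iommu_table *tbl,
                                 unsigned long entry)
    {
            unsigned long hpa = 0;
            enum dma_data_direction dir = DMA_NONE;

            iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
            /* the TCE cache still has to be flushed, e.g. via iommu_tce_kill() */
    }
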
book3s_hv_nested.c
    909  unsigned long hpa, unsigned long mask)    in kvmhv_update_nest_rmap_rc()  [argument]
    926  if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {    in kvmhv_update_nest_rmap_rc()
    938  unsigned long hpa, unsigned long nbytes)    in kvmhv_update_nest_rmap_rc_list()  [argument]
    948  hpa &= mask;    in kvmhv_update_nest_rmap_rc_list()
    951  kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);    in kvmhv_update_nest_rmap_rc_list()
    955  unsigned long hpa, unsigned long mask)    in kvmhv_remove_nest_rmap()  [argument]
    971  if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))    in kvmhv_remove_nest_rmap()
    976  unsigned long hpa, unsigned long mask)    in kvmhv_remove_nest_rmap_list()  [argument]
    983  kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);    in kvmhv_remove_nest_rmap_list()
    991  unsigned long gpa, unsigned long hpa,    in kvmhv_remove_nest_rmap_range()  [argument]
    [all …]

/arch/parisc/include/asm/

pdc.h
    36  int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
    62  int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa);
    63  int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl);

ropes.h
    240  static inline int agp_mode_mercury(void __iomem *hpa) {    in agp_mode_mercury()  [argument]
    243  bus_mode = readl(hpa + 0x0620);    in agp_mode_mercury()
    258  extern void *iosapic_register(unsigned long hpa);

parisc-device.h
    8  struct resource hpa; /* Hard Physical Address */    [member]

hardware.h
    108  extern struct parisc_device *alloc_pa_dev(unsigned long hpa,

processor.h
    85  unsigned long hpa; /* Host Physical address */    [member]

pdcpat.h
    369  extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa);

/arch/powerpc/platforms/powernv/

pci-ioda-tce.c
    148  unsigned long *hpa, enum dma_data_direction *direction,    in pnv_tce_xchg()  [argument]
    152  unsigned long newtce = *hpa | proto_tce, oldtce;    in pnv_tce_xchg()
    156  BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));    in pnv_tce_xchg()
    161  *hpa = 0;    in pnv_tce_xchg()
    176  *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);    in pnv_tce_xchg()
    228  unsigned long hpa = be64_to_cpu(tmp[i]);    in pnv_pci_ioda2_table_do_free_pages()  [local]
    230  if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))    in pnv_pci_ioda2_table_do_free_pages()
    233  pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,    in pnv_pci_ioda2_table_do_free_pages()

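The pnv_tce_xchg() lines show how powernv encodes a TCE: the host physical address of the page with the TCE_PCI_READ/TCE_PCI_WRITE permission bits or'ed into its low bits, so masking those bits off recovers the hpa. A small sketch of that encoding; the helper names are illustrative, and asm/tce.h is assumed to provide the permission bits:

    #include <linux/types.h>
    #include <asm/tce.h>            /* TCE_PCI_READ, TCE_PCI_WRITE */

    /* Build a TCE for a page at hpa with the requested permissions. */
    static inline u64 tce_encode_sketch(unsigned long hpa, bool write)
    {
            return hpa | TCE_PCI_READ | (write ? TCE_PCI_WRITE : 0);
    }

    /* Strip the permission bits to get the host physical address back. */
    static inline unsigned long tce_decode_hpa_sketch(u64 tce)
    {
            return tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
    }
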
/arch/x86/kvm/mmu/

tdp_mmu.h
    97  hpa_t hpa = mmu->root_hpa;    in is_tdp_mmu()  [local]
    99  if (WARN_ON(!VALID_PAGE(hpa)))    in is_tdp_mmu()
    107  sp = to_shadow_page(hpa);    in is_tdp_mmu()

mmu_audit.c
    98  hpa_t hpa;    in audit_mappings()  [local]
    119  hpa = pfn << PAGE_SHIFT;    in audit_mappings()
    120  if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)    in audit_mappings()
    123  hpa, *sptep);    in audit_mappings()

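audit_mappings() recomputes a guest translation and checks that the pfn it obtains, shifted up by PAGE_SHIFT, matches the address bits of the shadow PTE. A sketch of just that comparison; PT64_BASE_ADDR_MASK is internal to the mmu code, so an equivalent mask is re-derived here under the assumption of 52 physical address bits:

    #include <linux/types.h>
    #include <linux/kvm_types.h>    /* hpa_t, kvm_pfn_t */
    #include <asm/page.h>           /* PAGE_SHIFT, PAGE_SIZE */

    /* Stand-in for the mmu-internal PT64_BASE_ADDR_MASK: bits 12..51 of an
     * spte hold the physical address of the page it points at. */
    #define SPTE_ADDR_MASK_SKETCH   (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE - 1))

    /* Does this shadow PTE point at the page frame the audit expects? */
    static bool spte_matches_pfn_sketch(u64 spte, kvm_pfn_t pfn)
    {
            hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;

            return (spte & SPTE_ADDR_MASK_SKETCH) == hpa;
    }
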
/arch/powerpc/include/asm/

mmu_context.h
    43  unsigned long ua, unsigned int pageshift, unsigned long *hpa);
    45  unsigned long ua, unsigned int pageshift, unsigned long *hpa);
    47  extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
    52  static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,    in mm_iommu_is_devmem()  [argument]

iommu.h
    53  unsigned long *hpa,
    212  unsigned long entry, unsigned long *hpa,
    216  unsigned long entry, unsigned long *hpa,

kvm_book3s_64.h
    640  unsigned long hpa, unsigned long nbytes);
    643  unsigned long gpa, unsigned long hpa,

/arch/ia64/hp/common/

sba_iommu.c
    286  sba_dump_tlb(char *hpa)    in sba_dump_tlb()  [argument]
    288  DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);    in sba_dump_tlb()
    289  DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));    in sba_dump_tlb()
    290  DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));    in sba_dump_tlb()
    291  DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));    in sba_dump_tlb()
    292  DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));    in sba_dump_tlb()
    1782  static void __init ioc_init(unsigned long hpa, struct ioc *ioc)    in ioc_init()  [argument]
    1789  ioc->ioc_hpa = ioremap(hpa, 0x1000);    in ioc_init()
    1825  hpa, ioc->iov_size >> 20, ioc->ibase);    in ioc_init()
    1983  u64 hpa, length;    in acpi_sba_ioc_add()  [local]
    [all …]

/arch/parisc/include/uapi/asm/

pdc.h
    381  unsigned int hpa; /* HPA base address */    [member]
    630  unsigned long hpa; /* mod's register set address */    [member]

/arch/powerpc/kernel/

iommu.c
    1076  unsigned long entry, unsigned long *hpa,    in iommu_tce_xchg_no_kill()  [argument]
    1082  ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);    in iommu_tce_xchg_no_kill()
    1085  !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,    in iommu_tce_xchg_no_kill()
    1087  SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));    in iommu_tce_xchg_no_kill()