/arch/s390/kernel/ |
D | vdso.c |
     98  unsigned long segment_table, page_table, page_frame; in vdso_alloc_per_cpu() local
    108  page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA); in vdso_alloc_per_cpu()
    110  if (!segment_table || !page_table || !page_frame) in vdso_alloc_per_cpu()
    115  clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY, in vdso_alloc_per_cpu()
    118  *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table; in vdso_alloc_per_cpu()
    119  *(unsigned long *) page_table = _PAGE_RO + page_frame; in vdso_alloc_per_cpu()
    121  psal = (u32 *) (page_table + 256*sizeof(unsigned long)); in vdso_alloc_per_cpu()
    139  free_page(page_table); in vdso_alloc_per_cpu()
    146  unsigned long segment_table, page_table, page_frame; in vdso_free_per_cpu() local
    155  page_table = *(unsigned long *) segment_table; in vdso_free_per_cpu()
    [all …]
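The vdso_alloc_per_cpu() hits above show a two-level mapping being built by hand: a segment-table entry is pointed at a freshly zeroed page table, whose first entry is pointed at the page frame with a read-only bit. Below is a minimal user-space model of that chaining; the FAKE_* constants are illustrative stand-ins, not the real s390 _SEGMENT_ENTRY/_PAGE_RO values, and aligned_alloc() stands in for get_zeroed_page().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_PAGE_SIZE     4096UL
#define FAKE_SEGMENT_ENTRY 0x1UL   /* stand-in for the real _SEGMENT_ENTRY */
#define FAKE_PAGE_RO       0x2UL   /* stand-in for the real _PAGE_RO */

int main(void)
{
	/* get_zeroed_page(GFP_KERNEL | GFP_DMA) stands in for an aligned,
	 * zeroed page allocation here. */
	unsigned long *segment_table = aligned_alloc(FAKE_PAGE_SIZE, FAKE_PAGE_SIZE);
	unsigned long *page_table    = aligned_alloc(FAKE_PAGE_SIZE, FAKE_PAGE_SIZE);
	unsigned long *page_frame    = aligned_alloc(FAKE_PAGE_SIZE, FAKE_PAGE_SIZE);

	if (!segment_table || !page_table || !page_frame)
		return 1;
	memset(segment_table, 0, FAKE_PAGE_SIZE);
	memset(page_table, 0, FAKE_PAGE_SIZE);

	/* Chain the levels the way lines 118-119 of the excerpt do: each
	 * upper-level entry is the lower level's address plus type bits. */
	segment_table[0] = (unsigned long)page_table + FAKE_SEGMENT_ENTRY;
	page_table[0]    = (unsigned long)page_frame + FAKE_PAGE_RO;

	/* Walking the chain and masking the low bits recovers the frame. */
	unsigned long pt = segment_table[0] & ~(FAKE_PAGE_SIZE - 1);
	unsigned long pf = *(unsigned long *)pt & ~(FAKE_PAGE_SIZE - 1);
	printf("frame at %#lx, expected %#lx\n", pf, (unsigned long)page_frame);

	free(segment_table);
	free(page_table);
	free(page_frame);
	return 0;
}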
|
/arch/unicore32/kernel/ |
D | hibernate.c |
     53  pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC); in resume_one_page_table_init() local
     54  if (!page_table) in resume_one_page_table_init()
     57  set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE)); in resume_one_page_table_init()
     59  BUG_ON(page_table != pte_offset_kernel(pmd, 0)); in resume_one_page_table_init()
     61  return page_table; in resume_one_page_table_init()
|
/arch/x86/power/ |
D | hibernate_32.c |
     64  pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC); in resume_one_page_table_init() local
     65  if (!page_table) in resume_one_page_table_init()
     68  set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); in resume_one_page_table_init()
     70  BUG_ON(page_table != pte_offset_kernel(pmd, 0)); in resume_one_page_table_init()
     72  return page_table; in resume_one_page_table_init()
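The unicore32 and x86 resume_one_page_table_init() excerpts above share one pattern: allocate a safe zeroed page for the pte table, install its address plus attribute bits into the pmd with set_pmd(), then BUG_ON() if pte_offset_kernel() does not hand the same table back. A sketch of that allocate-install-verify round trip, with FAKE_* stand-ins and the table's virtual address standing in for __pa():

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_PAGE_SIZE          4096UL
#define FAKE_PAGE_KERNEL_TABLE  0x63UL  /* stand-in attribute bits */

typedef unsigned long pmd_t;
typedef unsigned long pte_t;

/* Models pte_offset_kernel(pmd, 0): mask off the attribute bits. */
static pte_t *fake_pte_offset(pmd_t *pmd)
{
	return (pte_t *)(*pmd & ~(FAKE_PAGE_SIZE - 1));
}

static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	/* get_safe_page(GFP_ATOMIC) stands in for an aligned, zeroed page. */
	pte_t *page_table = aligned_alloc(FAKE_PAGE_SIZE, FAKE_PAGE_SIZE);
	if (!page_table)
		return NULL;
	memset(page_table, 0, FAKE_PAGE_SIZE);

	/* set_pmd(): install the table plus attribute bits; the kernel
	 * installs __pa(page_table), the virtual address stands in here. */
	*pmd = (unsigned long)page_table | FAKE_PAGE_KERNEL_TABLE;

	/* The BUG_ON() in both excerpts checks exactly this round trip. */
	assert(page_table == fake_pte_offset(pmd));
	return page_table;
}

int main(void)
{
	pmd_t pmd = 0;
	return resume_one_page_table_init(&pmd) ? 0 : 1;
}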
|
/arch/sparc/mm/ |
D | iommu.c |
     99  iommu->page_table = (iopte_t *)tmp; in sbus_iommu_init()
    102  memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t)); in sbus_iommu_init()
    105  iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4; in sbus_iommu_init()
    124  impl, vers, iommu->page_table, in sbus_iommu_init()
    188  iopte0 = &iommu->page_table[ioptex]; in iommu_get_one()
    308  iopte_val(iommu->page_table[ioptex + i]) = 0; in iommu_release_one()
    345  iopte_t *iopte = iommu->page_table; in iommu_map_dma_area()
    410  iopte_t *iopte = iommu->page_table; in iommu_unmap_dma_area()
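The sbus_iommu_init() hits show the usual bring-up order: take the IOPTE table, clear every entry, then program the base register with the table's physical address shifted right by four (the hardware takes the base in 16-byte units). A user-space model, with a plain struct standing in for the memory-mapped register block and the virtual address for __pa():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_IOMMU_NPTES 1024          /* stand-in for IOMMU_NPTES */
typedef uint32_t iopte_t;

struct fake_iommu_regs { unsigned long base; };

int main(void)
{
	struct fake_iommu_regs regs = { 0 };
	iopte_t *page_table = aligned_alloc(16, FAKE_IOMMU_NPTES * sizeof(iopte_t));
	if (!page_table)
		return 1;

	/* memset(iommu->page_table, 0, ...): start with every IOPTE invalid. */
	memset(page_table, 0, FAKE_IOMMU_NPTES * sizeof(iopte_t));

	/* regs->base = __pa(page_table) >> 4: the register holds the table
	 * base in 16-byte units, hence the shift. */
	regs.base = (unsigned long)page_table >> 4;

	printf("IOPTE table at %p, base register %#lx\n",
	       (void *)page_table, regs.base);
	free(page_table);
	return 0;
}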
|
D | io-unit.c |
     63  iounit->page_table = xpt; in iounit_iommu_init()
     66  for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); in iounit_iommu_init()
    133  iounit->page_table[scan] = iopte; in iounit_get_area()
    227  iopte = (iopte_t *)(iounit->page_table + i); in iounit_map_dma_area()
|
/arch/tile/mm/ |
D | migrate.h | 28 extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
|
D | init.c |
    120  static void __init assign_pte(pmd_t *pmd, pte_t *page_table) in assign_pte() argument
    122  phys_addr_t pa = __pa(page_table); in assign_pte()
    128  if (page_table != (pte_t *)pmd_page_vaddr(*pmd)) in assign_pte()
|
D | fault.c |
    188  unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT; in get_current_pgd()
    191  return (pgd_t *) __va(ctx.page_table); in get_current_pgd()
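get_current_pgd() above recovers a usable pointer from the physical page-table address the hypervisor reports: shift down by PAGE_SHIFT for the pfn, or feed it to __va() for a virtual address. A sketch of that arithmetic, with FAKE_PAGE_OFFSET as an assumed linear-map base:

#include <stdio.h>

#define FAKE_PAGE_SHIFT   12
#define FAKE_PAGE_OFFSET  0xC0000000UL  /* stand-in linear-map base */

static unsigned long fake_va(unsigned long pa)
{
	return pa + FAKE_PAGE_OFFSET;   /* models __va() */
}

int main(void)
{
	unsigned long ctx_page_table = 0x12345000UL;  /* physical, from the hv */

	unsigned long pgd_pfn = ctx_page_table >> FAKE_PAGE_SHIFT;
	unsigned long pgd     = fake_va(ctx_page_table);

	printf("pgd pfn %#lx, pgd virtual %#lx\n", pgd_pfn, pgd);
	return 0;
}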
|
/arch/sparc/kernel/ |
D | iommu.c |
    225  iommu->page_table = (iopte_t *)page_address(page); in iommu_table_init()
    228  iopte_make_dummy(iommu, &iommu->page_table[i]); in iommu_table_init()
    252  return iommu->page_table + entry; in alloc_npages()
    318  ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); in dma_4u_alloc_coherent()
    388  ((base - iommu->page_table) << IO_PAGE_SHIFT)); in dma_4u_map_page()
    497  base = iommu->page_table + in dma_4u_unmap_page()
    593  base = iommu->page_table + entry; in dma_4u_map_sg()
    660  base = iommu->page_table + entry; in dma_4u_map_sg()
    688  base = iommu->page_table + in fetch_sg_ctx()
    729  base = iommu->page_table + entry; in dma_4u_unmap_sg()
    [all …]
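Most of the dma_4u_*() hits above are one computation in two directions: a slot's DMA address is the table base plus the slot index shifted by IO_PAGE_SHIFT, and unmap reverses it to get back to the iopte. A model of that index/address round trip; the base, shift, and table size are illustrative stand-ins:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_IO_PAGE_SHIFT 13           /* sparc64 uses 8K IO pages */
#define FAKE_TBL_BASE      0x80000000UL /* stand-in DMA window base */
#define FAKE_NPTES         1024

typedef unsigned long iopte_t;

int main(void)
{
	iopte_t *page_table = calloc(FAKE_NPTES, sizeof(iopte_t));
	if (!page_table)
		return 1;

	iopte_t *slot = &page_table[42];   /* as returned by alloc_npages() */

	/* dma_4u_alloc_coherent()/dma_4u_map_page() style address formation:
	 * base + (slot index << IO_PAGE_SHIFT). */
	unsigned long dma_addr = FAKE_TBL_BASE +
		((unsigned long)(slot - page_table) << FAKE_IO_PAGE_SHIFT);

	/* dma_4u_unmap_page() style reversal back to the table slot. */
	unsigned long entry = (dma_addr - FAKE_TBL_BASE) >> FAKE_IO_PAGE_SHIFT;
	assert(page_table + entry == slot);

	printf("slot 42 -> dma %#lx -> entry %lu\n", dma_addr, entry);
	free(page_table);
	return 0;
}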
|
D | ldc.c |
    104  struct ldc_mtable_entry *page_table; member
   1041  iommu->page_table = table; in ldc_iommu_init()
   1053  iommu->page_table = NULL; in ldc_iommu_init()
   1073  free_pages((unsigned long) iommu->page_table, order); in ldc_iommu_release()
   1074  iommu->page_table = NULL; in ldc_iommu_release()
   1964  return iommu->page_table + entry; in alloc_npages()
   2013  struct ldc_mtable_entry *page_table; member
   2028  sp->page_table[sp->pte_idx].mte = sp->mte_base | pa; in fill_cookies()
   2115  state.page_table = iommu->page_table; in ldc_map_sg()
   2119  state.pte_idx = (base - iommu->page_table); in ldc_map_sg()
   [all …]
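The fill_cookies() and ldc_map_sg() hits show how LDC fills its mapping table: pte_idx is the slot's offset from the table start (line 2119), and each entry's mte field is the base bits OR'ed with the page's physical address (line 2028). A simplified stand-in model of those two steps; the structures and FAKE_MTE_BASE value are not the real LDC types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_NENTRIES 64
#define FAKE_MTE_BASE 0xf0UL             /* stand-in permission/base bits */

struct fake_mtable_entry { uint64_t mte; };

struct fake_fill_state {
	struct fake_mtable_entry *page_table;
	unsigned long pte_idx;
	unsigned long mte_base;
};

int main(void)
{
	struct fake_mtable_entry *table = calloc(FAKE_NENTRIES, sizeof(*table));
	if (!table)
		return 1;

	struct fake_mtable_entry *base = &table[10];  /* as from alloc_npages() */

	struct fake_fill_state sp = {
		.page_table = table,
		.pte_idx    = (unsigned long)(base - table),  /* line 2119 style */
		.mte_base   = FAKE_MTE_BASE,
	};

	unsigned long pa = 0x12345000UL;    /* page's physical address */
	sp.page_table[sp.pte_idx].mte = sp.mte_base | pa;  /* line 2028 style */

	printf("entry %lu mte = %#llx\n", sp.pte_idx,
	       (unsigned long long)sp.page_table[sp.pte_idx].mte);
	free(table);
	return 0;
}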
|
D | psycho_common.c | 427 upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); in psycho_iommu_init()
|
D | pci_fire.c | 62 upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase); in pci_fire_pbm_iommu_init()
|
D | sbus.c | 625 upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); in sbus_iommu_init()
|
D | pci_schizo.c | 1204 upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); in schizo_pbm_iommu_init()
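The four psycho/fire/sbus/schizo entries above all end IOMMU bring-up the same way: upa_writeq() stores the page table's physical address into the TSB base register, with pci_fire additionally OR'ing in 0x7UL of low control bits. A sketch with a volatile variable standing in for the device register and a static buffer's address for __pa():

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_tsbbase_reg;

/* Models upa_writeq(val, reg): a 64-bit store to a device register. */
static void fake_writeq(uint64_t val, volatile uint64_t *reg)
{
	*reg = val;
}

int main(void)
{
	static uint64_t page_table[512] __attribute__((aligned(4096)));

	uint64_t pa = (uint64_t)(unsigned long)page_table;  /* stands in for __pa() */

	/* psycho/sbus/schizo style: the bare physical address. */
	fake_writeq(pa, &fake_tsbbase_reg);

	/* pci_fire style: physical address with low control bits set. */
	fake_writeq(pa | 0x7UL, &fake_tsbbase_reg);

	printf("tsbbase = %#llx\n", (unsigned long long)fake_tsbbase_reg);
	return 0;
}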
|
/arch/sparc/include/asm/ |
D | io-unit.h | 46 iopte_t *page_table; member
|
D | iommu_64.h | 30 iopte_t *page_table; member
|
D | iommu_32.h | 103 iopte_t *page_table; member
|
/arch/x86/mm/ |
D | init_32.c |
    112  pte_t *page_table = NULL; in one_page_table_init() local
    116  page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); in one_page_table_init()
    118  if (!page_table) in one_page_table_init()
    119  page_table = in one_page_table_init()
    122  page_table = (pte_t *)alloc_low_page(); in one_page_table_init()
    124  paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); in one_page_table_init()
    125  set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); in one_page_table_init()
    126  BUG_ON(page_table != pte_offset_kernel(pmd, 0)); in one_page_table_init()
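one_page_table_init() above adds two wrinkles to the hibernate pattern: the pte table may come from the bootmem allocator with a fall-back to alloc_low_page() (lines 116-122), and the paravirt layer is told about the new pte page before it is installed. A hedged sketch of that flow; the fake_* allocators and hook are stand-ins for the real facilities:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_PAGE_SIZE  4096UL
#define FAKE_PAGE_TABLE 0x67UL              /* stand-in for _PAGE_TABLE bits */

typedef unsigned long pmd_t;
typedef unsigned long pte_t;

static pte_t *fake_bootmem_alloc(void)     /* models alloc_bootmem_pages() */
{
	return aligned_alloc(FAKE_PAGE_SIZE, FAKE_PAGE_SIZE);
}

static pte_t *fake_low_alloc(void)         /* models alloc_low_page() */
{
	return aligned_alloc(FAKE_PAGE_SIZE, FAKE_PAGE_SIZE);
}

static void fake_paravirt_alloc_pte(unsigned long pfn)
{
	(void)pfn;  /* models the paravirt_alloc_pte() notification hook */
}

static pte_t *one_page_table_init(pmd_t *pmd)
{
	/* Primary allocator first, fall back to the low-memory allocator,
	 * mirroring the fallback visible at lines 116-122 of the excerpt. */
	pte_t *page_table = fake_bootmem_alloc();
	if (!page_table)
		page_table = fake_low_alloc();
	if (!page_table)
		return NULL;
	memset(page_table, 0, FAKE_PAGE_SIZE);

	/* Tell the paravirt layer about the new pte page (line 124). */
	fake_paravirt_alloc_pte((unsigned long)page_table / FAKE_PAGE_SIZE);

	/* set_pmd() plus the BUG_ON() round-trip check (lines 125-126);
	 * the virtual address stands in for __pa() in this model. */
	*pmd = (unsigned long)page_table | FAKE_PAGE_TABLE;
	assert(page_table == (pte_t *)(*pmd & ~(FAKE_PAGE_SIZE - 1)));
	return page_table;
}

int main(void)
{
	pmd_t pmd = 0;
	return one_page_table_init(&pmd) ? 0 : 1;
}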
|
/arch/tile/include/hv/ |
D | hypervisor.h |
    662  int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
    676  HV_PhysAddr page_table; member
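hv_install_context() takes the page table as a physical address together with a template access PTE and an ASID, and the context record keeps the same page_table field for later retrieval (as get_current_pgd() in fault.c above does). A simplified model of that call shape; all fake_* types stand in for the real HV_* ones:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t fake_physaddr_t;       /* stands in for HV_PhysAddr */
typedef uint64_t fake_pte_t;            /* stands in for HV_PTE */
typedef uint32_t fake_asid_t;           /* stands in for HV_ASID */

struct fake_context {
	fake_physaddr_t page_table;     /* mirrors the 'page_table' member */
	fake_asid_t asid;
};

static struct fake_context current_ctx;

/* Models the hypercall: record the base and ASID for later retrieval. */
static int fake_install_context(fake_physaddr_t page_table,
				fake_pte_t access, fake_asid_t asid)
{
	(void)access;
	current_ctx.page_table = page_table;
	current_ctx.asid = asid;
	return 0;
}

int main(void)
{
	fake_install_context(0x12345000ULL, 0, 7);
	printf("context page_table %#llx, asid %u\n",
	       (unsigned long long)current_ctx.page_table, current_ctx.asid);
	return 0;
}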
|