/arch/powerpc/kernel/

iommu.c
    175  struct iommu_table *tbl,  in iommu_range_alloc() [argument]
    208  pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);  in iommu_range_alloc()
    211  pool = &(tbl->large_pool);  in iommu_range_alloc()
    213  pool = &(tbl->pools[pool_nr]);  in iommu_range_alloc()
    233  if (limit + tbl->it_offset > mask) {  in iommu_range_alloc()
    234  limit = mask - tbl->it_offset + 1;  in iommu_range_alloc()
    241  pool = &(tbl->pools[0]);  in iommu_range_alloc()
    249  n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,  in iommu_range_alloc()
    250  dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),  in iommu_range_alloc()
    259  } else if (pass <= tbl->nr_pools) {  in iommu_range_alloc()
    [all …]
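Note: the iommu_range_alloc() hits above show the PowerPC DMA allocator spreading concurrent callers across sub-pools by masking a per-CPU hash with (nr_pools - 1), which only works when the pool count is a power of two. A minimal standalone sketch of that idiom, with hypothetical stand-in names rather than the kernel's own types:

/*
 * Pool selection as in the iommu_range_alloc() hits: a per-CPU hash
 * masked with (nr_pools - 1) picks one of several allocation pools.
 * All names here are illustrative stand-ins.
 */
#include <stdio.h>

#define NR_POOLS 4          /* must be a power of two for the mask trick */

struct pool { unsigned long start, end, hint; };

struct map_table {
	unsigned long nr_pools;
	struct pool pools[NR_POOLS];
	struct pool large_pool;  /* large allocations go here instead */
};

static struct pool *pick_pool(struct map_table *tbl,
			      unsigned int cpu_hash, int largealloc)
{
	if (largealloc)
		return &tbl->large_pool;
	/* works because nr_pools is a power of two */
	return &tbl->pools[cpu_hash & (tbl->nr_pools - 1)];
}

int main(void)
{
	struct map_table t = { .nr_pools = NR_POOLS };

	for (unsigned int h = 0; h < 8; h++)
		printf("hash %u -> pool %ld\n", h,
		       (long)(pick_pool(&t, h, 0) - t.pools));
	return 0;
}

Keeping the pool count a power of two turns the modulo into a single AND, and per-CPU pool selection avoids every CPU contending on one shared allocation hint.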
dma-iommu.c
    90   struct iommu_table *tbl = get_iommu_table_base(dev);  in dma_iommu_dma_supported() [local]
    98   if (!tbl) {  in dma_iommu_dma_supported()
    103  if (tbl->it_offset > (mask >> tbl->it_page_shift)) {  in dma_iommu_dma_supported()
    106  mask, tbl->it_offset << tbl->it_page_shift);  in dma_iommu_dma_supported()
    117  struct iommu_table *tbl = get_iommu_table_base(dev);  in dma_iommu_get_required_mask() [local]
    129  if (!tbl)  in dma_iommu_get_required_mask()
    132  mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +  in dma_iommu_get_required_mask()
    133  tbl->it_page_shift - 1);  in dma_iommu_get_required_mask()
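The dma_iommu_get_required_mask() hits compute the DMA mask a device needs from the highest page the table can map. A rough sketch of that arithmetic, assuming a 64-bit unsigned long and using a stand-in for the kernel's fls_long() (1-based index of the highest set bit):

/*
 * The highest address the window reaches is (it_offset + it_size)
 * IOMMU pages, so the required mask is the bit covering that address.
 */
#include <stdio.h>

static int fls_long(unsigned long x)
{
	/* assumes 64-bit unsigned long */
	return x ? 64 - __builtin_clzl(x) : 0;
}

static unsigned long long required_mask(unsigned long it_offset,
					unsigned long it_size,
					unsigned int it_page_shift)
{
	return 1ULL << (fls_long(it_offset + it_size) + it_page_shift - 1);
}

int main(void)
{
	/* e.g. a 2 GiB window of 4 KiB pages starting at offset 0 */
	printf("0x%llx\n", required_mask(0, 1UL << 19, 12));  /* 0x80000000 */
	return 0;
}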
/arch/powerpc/platforms/powernv/

pci-ioda-tce.c
    48   void pnv_pci_setup_iommu_table(struct iommu_table *tbl,  in pnv_pci_setup_iommu_table() [argument]
    52   tbl->it_blocksize = 16;  in pnv_pci_setup_iommu_table()
    53   tbl->it_base = (unsigned long)tce_mem;  in pnv_pci_setup_iommu_table()
    54   tbl->it_page_shift = page_shift;  in pnv_pci_setup_iommu_table()
    55   tbl->it_offset = dma_offset >> tbl->it_page_shift;  in pnv_pci_setup_iommu_table()
    56   tbl->it_index = 0;  in pnv_pci_setup_iommu_table()
    57   tbl->it_size = tce_size >> 3;  in pnv_pci_setup_iommu_table()
    58   tbl->it_busno = 0;  in pnv_pci_setup_iommu_table()
    59   tbl->it_type = TCE_PCI;  in pnv_pci_setup_iommu_table()
    83   static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)  in pnv_tce() [argument]
    [all …]
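The pnv_pci_setup_iommu_table() hits show the unit conversions behind the iommu_table fields: the DMA offset is stored in pages and the TCE area size in 8-byte entries (each TCE is a __be64, hence the ">> 3"). A pared-down sketch with a hypothetical struct, not the kernel's full iommu_table:

#include <stdint.h>
#include <stdio.h>

struct iommu_table_sketch {
	unsigned long it_base;       /* kernel VA of the TCE array */
	unsigned long it_offset;     /* window start, in IOMMU pages */
	unsigned long it_size;       /* number of TCE entries */
	unsigned int  it_page_shift; /* log2 of the IOMMU page size */
};

static void setup_table(struct iommu_table_sketch *tbl, void *tce_mem,
			uint64_t tce_size, uint64_t dma_offset,
			unsigned int page_shift)
{
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> page_shift;  /* bytes -> pages */
	tbl->it_size = tce_size >> 3;               /* bytes -> 8-byte TCEs */
}

int main(void)
{
	static uint64_t tces[512];
	struct iommu_table_sketch tbl;

	setup_table(&tbl, tces, sizeof(tces), 2UL << 30, 16 /* 64K pages */);
	printf("offset=%lu pages, %lu entries\n", tbl.it_offset, tbl.it_size);
	return 0;
}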
pci.h
    327  extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
    330  extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
    331  extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
    334  extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
    336  extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
    340  bool alloc_userspace_copy, struct iommu_table *tbl);
    341  extern void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
    344  struct iommu_table *tbl,
    346  extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
    348  extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
pci-ioda.c
    1387  static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,  in pnv_pci_p7ioc_tce_invalidate() [argument]
    1391  &tbl->it_group_list, struct iommu_table_group_link,  in pnv_pci_p7ioc_tce_invalidate()
    1398  start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);  in pnv_pci_p7ioc_tce_invalidate()
    1399  end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +  in pnv_pci_p7ioc_tce_invalidate()
    1424  static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,  in pnv_ioda1_tce_build() [argument]
    1429  int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,  in pnv_ioda1_tce_build()
    1433  pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);  in pnv_ioda1_tce_build()
    1440  static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index,  in pnv_ioda_tce_xchg_no_kill() [argument]
    1444  return pnv_tce_xchg(tbl, index, hpa, direction, !realmode);  in pnv_ioda_tce_xchg_no_kill()
    1448  static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,  in pnv_ioda1_tce_free() [argument]
    [all …]
npu-dma.c
    129  struct iommu_table *tbl)  in pnv_npu_set_window() [argument]
    135  const unsigned long size = tbl->it_indirect_levels ?  in pnv_npu_set_window()
    136  tbl->it_level_size : tbl->it_size;  in pnv_npu_set_window()
    137  const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;  in pnv_npu_set_window()
    138  const __u64 win_size = tbl->it_size << tbl->it_page_shift;  in pnv_npu_set_window()
    147  IOMMU_PAGE_SIZE(tbl));  in pnv_npu_set_window()
    152  tbl->it_indirect_levels + 1,  in pnv_npu_set_window()
    153  __pa(tbl->it_base),  in pnv_npu_set_window()
    155  IOMMU_PAGE_SIZE(tbl));  in pnv_npu_set_window()
    164  tbl, &npe->table_group);  in pnv_npu_set_window()
    [all …]
/arch/powerpc/include/asm/

iommu.h
    40   int (*set)(struct iommu_table *tbl,
    51   int (*xchg_no_kill)(struct iommu_table *tbl,
    57   void (*tce_kill)(struct iommu_table *tbl,
    62   __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
    64   void (*clear)(struct iommu_table *tbl,
    67   unsigned long (*get)(struct iommu_table *tbl, long index);
    68   void (*flush)(struct iommu_table *tbl);
    69   void (*free)(struct iommu_table *tbl);
    119  #define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \  [macro argument]
    120  ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
    [all …]
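iommu.h declares these operations as function pointers in a per-table ops vector; accessors like IOMMU_TABLE_USERSPACE_ENTRY_RO() are thin macros over that indirection. A toy version of the pattern with simplified stand-in types:

#include <stdio.h>

struct table;

struct table_ops {
	unsigned long (*get)(struct table *tbl, long index);
	void (*flush)(struct table *tbl);
};

struct table {
	const struct table_ops *ops;
	unsigned long entries[16];
};

static unsigned long demo_get(struct table *tbl, long index)
{
	return tbl->entries[index];
}

static void demo_flush(struct table *tbl)
{
	(void)tbl;  /* a hardware flush would go here */
}

static const struct table_ops demo_ops = {
	.get = demo_get,
	.flush = demo_flush,
};

/* macro-style accessor, mirroring IOMMU_TABLE_USERSPACE_ENTRY_RO() */
#define TABLE_ENTRY(tbl, i) ((tbl)->ops->get((tbl), (i)))

int main(void)
{
	struct table t = { .ops = &demo_ops, .entries = { [3] = 42 } };

	printf("%lu\n", TABLE_ENTRY(&t, 3));
	t.ops->flush(&t);
	return 0;
}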
/arch/powerpc/kvm/

book3s_64_vio_hv.c
    122  long shift = stit->tbl->it_page_shift;  in kvmppc_rm_tce_validate()
    171  u64 *tbl;  in kvmppc_rm_tce_put() [local]
    183  tbl = kvmppc_page_address(page);  in kvmppc_rm_tce_put()
    185  tbl[idx % TCES_PER_PAGE] = tce;  in kvmppc_rm_tce_put()
    221  struct iommu_table *tbl,  in iommu_tce_xchg_no_kill_rm() [argument]
    227  ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);  in iommu_tce_xchg_no_kill_rm()
    231  __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);  in iommu_tce_xchg_no_kill_rm()
    243  static void iommu_tce_kill_rm(struct iommu_table *tbl,  in iommu_tce_kill_rm() [argument]
    246  if (tbl->it_ops->tce_kill)  in iommu_tce_kill_rm()
    247  tbl->it_ops->tce_kill(tbl, entry, pages, true);  in iommu_tce_kill_rm()
    [all …]
book3s_64_vio.c
    53   iommu_tce_table_put(stit->tbl);  in kvm_spapr_tce_iommu_table_free()
    85   if (table_group->tables[i] != stit->tbl)  in kvm_spapr_tce_release_iommu_group()
    101  struct iommu_table *tbl = NULL;  in kvm_spapr_tce_attach_iommu_group() [local]
    144  tbl = iommu_tce_table_get(tbltmp);  in kvm_spapr_tce_attach_iommu_group()
    148  if (!tbl)  in kvm_spapr_tce_attach_iommu_group()
    153  if (tbl != stit->tbl)  in kvm_spapr_tce_attach_iommu_group()
    158  iommu_tce_table_put(tbl);  in kvm_spapr_tce_attach_iommu_group()
    173  iommu_tce_table_put(tbl);  in kvm_spapr_tce_attach_iommu_group()
    177  stit->tbl = tbl;  in kvm_spapr_tce_attach_iommu_group()
    381  long shift = stit->tbl->it_page_shift;  in kvmppc_tce_validate()
    [all …]
/arch/powerpc/platforms/pseries/

iommu.c
    59   struct iommu_table *tbl;  in iommu_pseries_alloc_group() [local]
    66   tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);  in iommu_pseries_alloc_group()
    67   if (!tbl)  in iommu_pseries_alloc_group()
    70   INIT_LIST_HEAD_RCU(&tbl->it_group_list);  in iommu_pseries_alloc_group()
    71   kref_init(&tbl->it_kref);  in iommu_pseries_alloc_group()
    73   table_group->tables[0] = tbl;  in iommu_pseries_alloc_group()
    85   struct iommu_table *tbl;  in iommu_pseries_free_group() [local]
    90   tbl = table_group->tables[0];  in iommu_pseries_free_group()
    97   iommu_tce_table_put(tbl);  in iommu_pseries_free_group()
    102  static int tce_build_pSeries(struct iommu_table *tbl, long index,  in tce_build_pSeries() [argument]
    [all …]
vio.c
    519  struct iommu_table *tbl = get_iommu_table_base(dev);  in vio_dma_iommu_map_page() [local]
    522  if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))  in vio_dma_iommu_map_page()
    524  ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),  in vio_dma_iommu_map_page()
    531  vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));  in vio_dma_iommu_map_page()
    543  struct iommu_table *tbl = get_iommu_table_base(dev);  in vio_dma_iommu_unmap_page() [local]
    545  iommu_unmap_page(tbl, dma_handle, size, direction, attrs);  in vio_dma_iommu_unmap_page()
    546  vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));  in vio_dma_iommu_unmap_page()
    554  struct iommu_table *tbl = get_iommu_table_base(dev);  in vio_dma_iommu_map_sg() [local]
    560  alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));  in vio_dma_iommu_map_sg()
    564  ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),  in vio_dma_iommu_map_sg()
    [all …]
/arch/sparc/kernel/

pci_sun4v.c
    188  struct iommu_map_table *tbl;  in dma_4v_alloc_coherent() [local]
    215  tbl = &iommu->tbl;  in dma_4v_alloc_coherent()
    217  tbl = &iommu->atu->tbl;  in dma_4v_alloc_coherent()
    219  entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,  in dma_4v_alloc_coherent()
    225  *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));  in dma_4v_alloc_coherent()
    251  iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);  in dma_4v_alloc_coherent()
    328  struct iommu_map_table *tbl;  in dma_4v_free_coherent() [local]
    340  tbl = &iommu->tbl;  in dma_4v_free_coherent()
    343  tbl = &atu->tbl;  in dma_4v_free_coherent()
    346  entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);  in dma_4v_free_coherent()
    [all …]
iommu.c
    52   struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);  in iommu_flushall()
    105  iommu->tbl.table_map_base = dma_offset;  in iommu_table_init()
    111  iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);  in iommu_table_init()
    112  if (!iommu->tbl.map)  in iommu_table_init()
    115  iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,  in iommu_table_init()
    150  kfree(iommu->tbl.map);  in iommu_table_init()
    151  iommu->tbl.map = NULL;  in iommu_table_init()
    162  entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,  in alloc_npages()
    230  *dma_addrp = (iommu->tbl.table_map_base +  in dma_4u_alloc_coherent()
    256  iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);  in dma_4u_free_coherent()
    [all …]
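The sparc hits convert between a DMA virtual address and a table entry index through table_map_base and IO_PAGE_SHIFT. A small sketch of that round trip (the shift and base values are illustrative):

#include <stdio.h>

#define IO_PAGE_SHIFT 13  /* 8K IOMMU pages, as on sparc64 */

static unsigned long entry_to_dvma(unsigned long base, unsigned long entry)
{
	return base + (entry << IO_PAGE_SHIFT);
}

static unsigned long dvma_to_entry(unsigned long base, unsigned long dvma)
{
	return (dvma - base) >> IO_PAGE_SHIFT;
}

int main(void)
{
	unsigned long base = 0xc0000000UL;
	unsigned long dvma = entry_to_dvma(base, 5);

	printf("dvma=0x%lx entry=%lu\n", dvma, dvma_to_entry(base, dvma));
	return 0;
}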
iommu-common.c
    223  static struct iommu_pool *get_pool(struct iommu_map_table *tbl,  in get_pool() [argument]
    227  unsigned long largepool_start = tbl->large_pool.start;  in get_pool()
    228  bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);  in get_pool()
    232  p = &tbl->large_pool;  in get_pool()
    234  unsigned int pool_nr = entry / tbl->poolsize;  in get_pool()
    236  BUG_ON(pool_nr >= tbl->nr_pools);  in get_pool()
    237  p = &tbl->pools[pool_nr];  in get_pool()
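get_pool() above maps an allocated entry back to its owning pool: the large pool when the table has one and the entry lies past its start, otherwise pools[entry / poolsize]. A reduced sketch, with assert() standing in for BUG_ON():

#include <assert.h>
#include <stdio.h>

#define HAS_LARGE_POOL 0x1

struct pool { unsigned long start; };

struct map_table {
	unsigned int flags;
	unsigned long poolsize;
	unsigned int nr_pools;
	struct pool pools[4];
	struct pool large_pool;
};

static struct pool *get_pool(struct map_table *tbl, unsigned long entry)
{
	unsigned long largepool_start = tbl->large_pool.start;

	if ((tbl->flags & HAS_LARGE_POOL) && entry >= largepool_start)
		return &tbl->large_pool;

	unsigned int pool_nr = entry / tbl->poolsize;
	assert(pool_nr < tbl->nr_pools);  /* stand-in for BUG_ON() */
	return &tbl->pools[pool_nr];
}

int main(void)
{
	struct map_table t = {
		.flags = HAS_LARGE_POOL, .poolsize = 256, .nr_pools = 4,
		.large_pool = { .start = 1024 },
	};

	printf("entry 700  -> pool %ld\n", (long)(get_pool(&t, 700) - t.pools));
	printf("entry 2000 -> large pool? %d\n",
	       get_pool(&t, 2000) == &t.large_pool);
	return 0;
}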
/arch/powerpc/platforms/pasemi/

iommu.c
    76   static int iobmap_build(struct iommu_table *tbl, long index,  in iobmap_build() [argument]
    87   bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;  in iobmap_build()
    89   ip = ((u32 *)tbl->it_base) + index;  in iobmap_build()
    105  static void iobmap_free(struct iommu_table *tbl, long index,  in iobmap_free() [argument]
    113  bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;  in iobmap_free()
    115  ip = ((u32 *)tbl->it_base) + index;  in iobmap_free()
/arch/arm64/kernel/

head.S
    151  .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
    152  add \tmp1, \tbl, #PAGE_SIZE
    158  str \tmp2, [\tbl, \tmp1, lsl #3]
    159  add \tbl, \tbl, #PAGE_SIZE // next level table page
    178  .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
    181  str \tmp1, [\tbl, \index, lsl #3]
    241  .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
    243  add \rtbl, \tbl, #PAGE_SIZE
    247  populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
    248  mov \tbl, \sv
    [all …]
/arch/arm64/crypto/

aes-neon.S
    72   tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b
    94   tbl \in\().16b, {\in\().16b}, v14.16b
    104  tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
    129  tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
    131  tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
    133  tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b
    135  tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b
    199  tbl \in0\().16b, {\in0\().16b}, v14.16b
    200  tbl \in1\().16b, {\in1\().16b}, v14.16b
    214  tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
    [all …]
chacha-neon-core.S
    59   tbl v3.16b, {v3.16b}, v12.16b
    88   tbl v3.16b, {v3.16b}, v12.16b
    331  tbl v12.16b, {v12.16b}, v31.16b
    333  tbl v13.16b, {v13.16b}, v31.16b
    335  tbl v14.16b, {v14.16b}, v31.16b
    337  tbl v15.16b, {v15.16b}, v31.16b
    465  tbl v15.16b, {v15.16b}, v31.16b
    467  tbl v12.16b, {v12.16b}, v31.16b
    469  tbl v13.16b, {v13.16b}, v31.16b
    471  tbl v14.16b, {v14.16b}, v31.16b
    [all …]
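In both crypto files above, the AArch64 TBL instruction is used as a 16-byte table lookup, i.e. an arbitrary byte shuffle: AES uses it for S-box lookups and ShiftRows, ChaCha for its rotate-by-8 step. A rough C-intrinsics analogue of the ChaCha trick (compiles on AArch64 only, illustrative):

#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
	/* index vector: result byte i = input byte idx[i] */
	static const uint8_t rot8_idx[16] = {
		3, 0, 1, 2,  7, 4, 5, 6,  11, 8, 9, 10,  15, 12, 13, 14,
	};
	uint32_t in[4] = { 0x11223344, 0xaabbccdd, 0x01020304, 0xdeadbeef };
	uint32_t out[4];

	uint8x16_t v = vreinterpretq_u8_u32(vld1q_u32(in));
	uint8x16_t idx = vld1q_u8(rot8_idx);

	/* one TBL rotates every 32-bit lane left by 8 bits */
	v = vqtbl1q_u8(v, idx);

	vst1q_u32(out, vreinterpretq_u32_u8(v));
	for (int i = 0; i < 4; i++)
		printf("0x%08x -> 0x%08x\n", in[i], out[i]);
	return 0;
}

A single TBL replaces the shift/shift/OR sequence a generic rotate would need, which is why the listings above keep the index vector resident in v12 or v31 and reuse it across rounds.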
/arch/x86/boot/compressed/

acpi.c
    42   efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables + i;  in __efi_get_rsdp_addr() [local]
    44   guid = tbl->guid;  in __efi_get_rsdp_addr()
    45   table = tbl->table;  in __efi_get_rsdp_addr()
    52   efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables + i;  in __efi_get_rsdp_addr() [local]
    54   guid = tbl->guid;  in __efi_get_rsdp_addr()
    55   table = tbl->table;  in __efi_get_rsdp_addr()
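__efi_get_rsdp_addr() walks the EFI configuration table array comparing GUIDs until it finds the ACPI table. A simplified sketch of that scan; the struct layout and GUID bytes below are placeholders, not the real efi_config_table_64_t:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef struct { uint8_t b[16]; } guid_t;

struct config_table {
	guid_t guid;
	uint64_t table;  /* physical address of the vendor table */
};

static uint64_t find_table(const struct config_table *tables,
			   unsigned long count, const guid_t *want)
{
	for (unsigned long i = 0; i < count; i++) {
		const struct config_table *tbl = tables + i;

		if (!memcmp(&tbl->guid, want, sizeof(*want)))
			return tbl->table;
	}
	return 0;
}

int main(void)
{
	const guid_t acpi20 = { .b = { 0x71, 0xe8 } };  /* placeholder bytes */
	struct config_table tables[2] = {
		{ .guid = { .b = { 0x01 } }, .table = 0x1000 },
		{ .guid = acpi20,            .table = 0x2000 },
	};

	printf("RSDP at 0x%llx\n",
	       (unsigned long long)find_table(tables, 2, &acpi20));
	return 0;
}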
/arch/arc/kernel/

setup.c
    120  const struct id_to_str *tbl;  in decode_arc_core() [local]
    124  for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {  in decode_arc_core()
    125  if (cpu->core.family == tbl->id) {  in decode_arc_core()
    126  cpu->release = tbl->str;  in decode_arc_core()
    133  else if (tbl->str)  in decode_arc_core()
    156  for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {  in decode_arc_core()
    157  if (uarch.maj == tbl->id) {  in decode_arc_core()
    158  cpu->release = tbl->str;  in decode_arc_core()
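decode_arc_core() walks sentinel-terminated id_to_str tables to turn a family ID into a release string. A minimal sketch of the pattern, with made-up table contents:

#include <stdio.h>

struct id_to_str {
	int id;             /* 0 terminates the table */
	const char *str;
};

static const struct id_to_str legacy_rel[] = {
	{ 0x34, "1.0" },
	{ 0x35, "2.0" },
	{ 0x00, NULL },     /* sentinel */
};

static const char *lookup_release(const struct id_to_str *tbl, int family)
{
	for (; tbl->id != 0; tbl++)
		if (tbl->id == family)
			return tbl->str;
	return "unknown";
}

int main(void)
{
	printf("%s\n", lookup_release(legacy_rel, 0x35));
	return 0;
}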
/arch/sparc/include/asm/

iommu_64.h
    49   struct iommu_map_table tbl;  [member]
    56   struct iommu_map_table tbl;  [member]
/arch/powerpc/sysdev/

dart_iommu.c
    163  static void dart_flush(struct iommu_table *tbl)  in dart_flush() [argument]
    172  static int dart_build(struct iommu_table *tbl, long index,  in dart_build() [argument]
    183  orig_dp = dp = ((unsigned int*)tbl->it_base) + index;  in dart_build()
    209  static void dart_free(struct iommu_table *tbl, long index, long npages)  in dart_free() [argument]
    221  orig_dp = dp = ((unsigned int *)tbl->it_base) + index;  in dart_free()
/arch/mips/kernel/syscalls/

Makefile
    8    syscalln32 := $(srctree)/$(src)/syscall_n32.tbl
    9    syscalln64 := $(srctree)/$(src)/syscall_n64.tbl
    10   syscallo32 := $(srctree)/$(src)/syscall_o32.tbl
/arch/nds32/kernel/

ex-scall.S
    33   #define tbl $r8  [macro]
    63   la tbl, sys_call_table ! load syscall table pointer
    65   add $p1, tbl, $p1
/arch/parisc/include/asm/

pdc.h
    63   int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl);
    75   struct pdc_memory_table *tbl, unsigned long entries);