/arch/powerpc/kernel/ |
D | iommu.c |
    174  struct iommu_table *tbl, in iommu_range_alloc() argument
    208  pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
    211  pool = &(tbl->large_pool); in iommu_range_alloc()
    213  pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
    233  if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
    234  limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
    241  pool = &(tbl->pools[0]); in iommu_range_alloc()
    256  n = iommu_area_alloc(tbl->it_map, limit, start, npages, in iommu_range_alloc()
    257  tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT, in iommu_range_alloc()
    266  } else if (pass <= tbl->nr_pools) { in iommu_range_alloc()
    [all …]
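The iommu_range_alloc() hits above show two ideas: the allocator picks one of the table's bitmap pools from a per-CPU hash masked by (nr_pools - 1), and it clamps the search limit when the device's DMA mask ends below the top of the table. The following is a minimal stand-alone sketch of that arithmetic only, with made-up values; the real code works in IOMMU pages and takes the hash from iommu_pool_hash.

/* Illustrative sketch (not kernel code): pool selection and limit clamping
 * as suggested by the iommu_range_alloc() lines above. All values are made up. */
#include <stdio.h>

#define NR_POOLS     4          /* assumed pool count (power of two)     */
#define IT_OFFSET    0x1000UL   /* assumed table offset, in IOMMU pages  */
#define TABLE_PAGES  0x10000UL  /* assumed table size, in IOMMU pages    */

int main(void)
{
        unsigned long cpu_hash = 0x1234abcdUL;        /* stand-in for the per-CPU hash     */
        unsigned int pool_nr = cpu_hash & (NR_POOLS - 1);

        unsigned long mask = 0x7fffUL;                /* device DMA limit, in IOMMU pages  */
        unsigned long limit = TABLE_PAGES;            /* default: search the whole table   */

        /* If the device cannot address the end of the table, shrink the search window. */
        if (limit + IT_OFFSET > mask)
                limit = mask - IT_OFFSET + 1;

        printf("pool %u, search limit %lu pages\n", pool_nr, limit);
        return 0;
}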
|
D | dma-iommu.c |
    78   struct iommu_table *tbl = get_iommu_table_base(dev); in dma_iommu_dma_supported() local
    80   if (!tbl) { in dma_iommu_dma_supported()
    86   if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) { in dma_iommu_dma_supported()
    89   mask, tbl->it_offset << IOMMU_PAGE_SHIFT); in dma_iommu_dma_supported()
    97   struct iommu_table *tbl = get_iommu_table_base(dev); in dma_iommu_get_required_mask() local
    99   if (!tbl) in dma_iommu_get_required_mask()
    102  mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); in dma_iommu_get_required_mask()
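dma_iommu_get_required_mask() derives the required DMA mask from the highest IOMMU page the table can reach (it_offset + it_size). The operator in the line 102 hit shows as '<', which would evaluate to 0 or 1; the computation only makes sense as a left shift, so the sketch below uses '<<'. The final fill-in step (mask += mask - 1) is the usual way such a mask is completed and is not among the hits above, so treat it as an assumption. fls_long_x() is a userspace stand-in for the kernel's fls_long().

/* Illustrative userspace sketch of the required-mask computation. Table geometry is made up. */
#include <stdint.h>
#include <stdio.h>

static unsigned int fls_long_x(unsigned long x)       /* stand-in for the kernel's fls_long() */
{
        return x ? 8 * sizeof(long) - __builtin_clzl(x) : 0;
}

int main(void)
{
        unsigned long it_offset = 0;          /* assumed, in IOMMU pages */
        unsigned long it_size   = 0x10000;    /* assumed, in IOMMU pages */
        uint64_t mask;

        mask = 1ULL << (fls_long_x(it_offset + it_size) - 1);   /* highest page the table reaches */
        mask += mask - 1;                                        /* assumed fill-in of lower bits  */

        printf("required mask: 0x%llx\n", (unsigned long long)mask);
        return 0;
}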
|
D | vio.c |
    1157  struct iommu_table *tbl; in vio_build_iommu_table() local
    1165  tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); in vio_build_iommu_table()
    1166  if (tbl == NULL) in vio_build_iommu_table()
    1170  &tbl->it_index, &offset, &size); in vio_build_iommu_table()
    1173  tbl->it_size = size >> IOMMU_PAGE_SHIFT; in vio_build_iommu_table()
    1175  tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; in vio_build_iommu_table()
    1176  tbl->it_busno = 0; in vio_build_iommu_table()
    1177  tbl->it_type = TCE_VB; in vio_build_iommu_table()
    1178  tbl->it_blocksize = 16; in vio_build_iommu_table()
    1180  return iommu_init_table(tbl, -1); in vio_build_iommu_table()
    [all …]
|
/arch/x86/kernel/ |
D | tce_64.c |
    49   void tce_build(struct iommu_table *tbl, unsigned long index, in tce_build() argument
    60   tp = ((u64*)tbl->it_base) + index; in tce_build()
    75   void tce_free(struct iommu_table *tbl, long index, unsigned int npages) in tce_free() argument
    79   tp = ((u64*)tbl->it_base) + index; in tce_free()
    98   static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) in tce_table_setparms() argument
    104  tbl->it_busno = dev->bus->number; in tce_table_setparms()
    107  tbl->it_size = table_size_to_number_of_entries(specified_table_size); in tce_table_setparms()
    113  bitmapsz = tbl->it_size / BITS_PER_BYTE; in tce_table_setparms()
    121  tbl->it_map = (unsigned long*)bmppages; in tce_table_setparms()
    123  memset(tbl->it_map, 0, bitmapsz); in tce_table_setparms()
    [all …]
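tce_build() and tce_free() treat tbl->it_base as a flat array of 64-bit TCEs and write or clear npages consecutive entries starting at index. Below is a toy stand-alone model of that update loop; the flag bit and page shift are hypothetical placeholders, since the real TCE encoding is not shown in the hits above.

/* Toy model of a TCE table update: it_base is an array of 64-bit entries,
 * one per IOMMU page. TCE_VALID_FLAG is a hypothetical placeholder bit. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TCE_PAGE_SHIFT  12              /* assumed IOMMU page size of 4 KiB */
#define TCE_VALID_FLAG  0x1ULL          /* hypothetical "entry valid" bit   */

static uint64_t tce_table[256];         /* stand-in for tbl->it_base        */

static void toy_tce_build(unsigned long index, unsigned int npages, uint64_t paddr)
{
        uint64_t *tp = tce_table + index;

        while (npages--) {
                /* store the page-aligned physical address plus flag bits */
                *tp++ = (paddr & ~((1ULL << TCE_PAGE_SHIFT) - 1)) | TCE_VALID_FLAG;
                paddr += 1ULL << TCE_PAGE_SHIFT;
        }
}

static void toy_tce_free(unsigned long index, unsigned int npages)
{
        memset(tce_table + index, 0, npages * sizeof(uint64_t));
}

int main(void)
{
        toy_tce_build(4, 2, 0x1234000);
        printf("entry 4 = 0x%llx\n", (unsigned long long)tce_table[4]);
        toy_tce_free(4, 2);
        return 0;
}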
|
D | pci-calgary_64.c |
    174  static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
    175  static void calgary_tce_cache_blast(struct iommu_table *tbl);
    176  static void calgary_dump_error_regs(struct iommu_table *tbl);
    177  static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
    178  static void calioc2_tce_cache_blast(struct iommu_table *tbl);
    179  static void calioc2_dump_error_regs(struct iommu_table *tbl);
    180  static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl);
    197  static inline int translation_enabled(struct iommu_table *tbl) in translation_enabled() argument
    200  return (tbl != NULL); in translation_enabled()
    203  static void iommu_range_reserve(struct iommu_table *tbl, in iommu_range_reserve() argument
    [all …]
|
/arch/unicore32/mm/ |
D | proc-macros.S |
    97   .macro va2pa, va, pa, tbl, msk, off, err=990f
    100  adr \tbl, 910f @ tbl <- table of 1st page table
    107  add \tbl, \tbl, \off << #3 @ cmove table pointer
    108  ldw \msk, [\tbl+], #0 @ get the mask
    109  ldw pc, [\tbl+], #4
    113  cntlo \tbl, \msk @ use tbl as temp reg
    114  mov \off, \off >> \tbl
    116  adr \tbl, 920f @ tbl <- table of 2nd pt
    130  andn \tbl, \va, \msk
    132  or \pa, \pa, \tbl
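The last two hits of the va2pa macro combine the translated frame base with the untranslated low bits of the virtual address: andn computes va & ~msk, and the final or merges it into \pa. A tiny C illustration of that combine step, assuming \pa already holds the frame base for the matching level and using made-up 32-bit values:

/* Sketch of the final combine step in va2pa: keep the offset bits from the
 * virtual address, take everything else from the table entry. Values are illustrative. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t msk   = 0xfffff000u;    /* bits covered by this translation level */
        uint32_t va    = 0xc0123abcu;    /* example virtual address                */
        uint32_t entry = 0x30456000u;    /* example frame base from the table      */

        uint32_t pa = (entry & msk) | (va & ~msk);   /* the andn + or in the assembly */
        printf("pa = 0x%08x\n", pa);
        return 0;
}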
|
/arch/powerpc/platforms/pseries/ |
D | iommu.c |
    55   static void tce_invalidate_pSeries_sw(struct iommu_table *tbl, in tce_invalidate_pSeries_sw() argument
    58   u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; in tce_invalidate_pSeries_sw()
    67   if (tbl->it_busno) { in tce_invalidate_pSeries_sw()
    71   start |= tbl->it_busno; in tce_invalidate_pSeries_sw()
    72   end |= tbl->it_busno; in tce_invalidate_pSeries_sw()
    84   static int tce_build_pSeries(struct iommu_table *tbl, long index, in tce_build_pSeries() argument
    98   tces = tcep = ((u64 *)tbl->it_base) + index; in tce_build_pSeries()
    109  if (tbl->it_type & TCE_PCI_SWINV_CREATE) in tce_build_pSeries()
    110  tce_invalidate_pSeries_sw(tbl, tces, tcep - 1); in tce_build_pSeries()
    115  static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) in tce_free_pSeries() argument
    [all …]
|
/arch/powerpc/include/asm/ |
D | iommu.h |
    94   extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
    99   extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
    102  extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
    106  extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
    110  extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
    113  extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
    115  extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
    120  extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
|
D | time.h |
    65   unsigned long tbl; in get_tbl() local
    66   asm volatile("mfspr %0, 0x3dd" : "=r" (tbl)); in get_tbl()
    67   return tbl; in get_tbl()
|
D | machdep.h |
    65   int (*tce_build)(struct iommu_table *tbl,
    71   void (*tce_free)(struct iommu_table *tbl,
    74   unsigned long (*tce_get)(struct iommu_table *tbl,
    76   void (*tce_flush)(struct iommu_table *tbl);
|
/arch/powerpc/platforms/powernv/ |
D | pci.c |
    336  static int pnv_tce_build(struct iommu_table *tbl, long index, long npages, in pnv_tce_build() argument
    349  tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset; in pnv_tce_build()
    359  if (tbl->it_type & TCE_PCI_SWINV_CREATE) in pnv_tce_build()
    360  pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); in pnv_tce_build()
    365  static void pnv_tce_free(struct iommu_table *tbl, long index, long npages) in pnv_tce_free() argument
    369  tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset; in pnv_tce_free()
    374  if (tbl->it_type & TCE_PCI_SWINV_FREE) in pnv_tce_free()
    375  pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); in pnv_tce_free()
    378  static unsigned long pnv_tce_get(struct iommu_table *tbl, long index) in pnv_tce_get() argument
    380  return ((u64 *)tbl->it_base)[index - tbl->it_offset]; in pnv_tce_get()
    [all …]
|
D | pci-ioda.c |
    444  static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl, in pnv_pci_ioda1_tce_invalidate() argument
    447  u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; in pnv_pci_ioda1_tce_invalidate()
    454  if (tbl->it_busno) { in pnv_pci_ioda1_tce_invalidate()
    458  start |= tbl->it_busno; in pnv_pci_ioda1_tce_invalidate()
    459  end |= tbl->it_busno; in pnv_pci_ioda1_tce_invalidate()
    460  } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) { in pnv_pci_ioda1_tce_invalidate()
    485  struct iommu_table *tbl, in pnv_pci_ioda2_tce_invalidate() argument
    489  u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; in pnv_pci_ioda2_tce_invalidate()
    497  inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64)); in pnv_pci_ioda2_tce_invalidate()
    499  inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64)); in pnv_pci_ioda2_tce_invalidate()
    [all …]
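The lines at 497 and 499 convert pointers into the TCE table back into absolute IOMMU page numbers: the byte offset from it_base divided by the 8-byte entry size, plus it_offset. A tiny stand-alone illustration of that conversion with made-up addresses:

/* Pointer-to-page-number conversion as in pnv_pci_ioda2_tce_invalidate(). Values are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t it_base   = 0x100000;        /* assumed address of the TCE table           */
        uint64_t it_offset = 0x800;           /* assumed first IOMMU page covered by table  */
        uint64_t startp    = it_base + 0x40;  /* pointer to the first updated 64-bit entry  */

        /* entry index within the table, then the absolute IOMMU page number */
        uint64_t inc = it_offset + ((startp - it_base) / sizeof(uint64_t));

        printf("first page to invalidate: 0x%llx\n", (unsigned long long)inc);
        return 0;
}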
|
D | pci.h |
    154  extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
    160  extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
|
/arch/x86/include/asm/ |
D | tce.h |
    41   extern void tce_build(struct iommu_table *tbl, unsigned long index,
    43   extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
    45   extern void __init free_tce_table(void *tbl);
|
D | calgary.h |
    47   void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
    48   void (*tce_cache_blast)(struct iommu_table *tbl);
    49   void (*dump_error_regs)(struct iommu_table *tbl);
|
/arch/powerpc/kvm/ |
D | book3s_64_vio_hv.c |
    57   u64 *tbl; in kvmppc_h_put_tce() local
    65   tbl = (u64 *)page_address(page); in kvmppc_h_put_tce()
    69   tbl[idx % TCES_PER_PAGE] = tce; in kvmppc_h_put_tce()
|
/arch/powerpc/platforms/pasemi/ |
D | iommu.c |
    86   static int iobmap_build(struct iommu_table *tbl, long index, in iobmap_build() argument
    97   bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT; in iobmap_build()
    99   ip = ((u32 *)tbl->it_base) + index; in iobmap_build()
    115  static void iobmap_free(struct iommu_table *tbl, long index, in iobmap_free() argument
    123  bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT; in iobmap_free()
    125  ip = ((u32 *)tbl->it_base) + index; in iobmap_free()
|
/arch/blackfin/kernel/ |
D | cplbinfo.c |
    33   struct cplb_entry *tbl; member
    55   addr = cdata->tbl[pos].addr; in cplbinfo_show()
    56   data = cdata->tbl[pos].data; in cplbinfo_show()
    73   cdata->tbl = icplb_tbl[cpu]; in cplbinfo_seq_init()
    77   cdata->tbl = dcplb_tbl[cpu]; in cplbinfo_seq_init()
|
/arch/powerpc/platforms/wsp/ |
D | wsp_pci.c |
    231  static int tce_build_wsp(struct iommu_table *tbl, long index, long npages, in tce_build_wsp() argument
    235  struct wsp_dma_table *ptbl = container_of(tbl, in tce_build_wsp()
    263  tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT); in tce_build_wsp()
    271  static void tce_free_wsp(struct iommu_table *tbl, long index, long npages) in tce_free_wsp() argument
    273  struct wsp_dma_table *ptbl = container_of(tbl, in tce_free_wsp()
    308  struct wsp_dma_table *tbl; in wsp_pci_create_dma32_table() local
    330  tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid); in wsp_pci_create_dma32_table()
    331  if (!tbl) in wsp_pci_create_dma32_table()
    333  tbl->phb = phb; in wsp_pci_create_dma32_table()
    342  tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000)); in wsp_pci_create_dma32_table()
    [all …]
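tce_build_wsp() and tce_free_wsp() use container_of() to recover the wrapping wsp_dma_table from the embedded iommu_table they are handed. A small userspace sketch of that pattern, using the classic simplified form of the macro and simplified toy types (the real member name in wsp_dma_table is not shown in the hits, so it is made up here):

/* Userspace sketch of the container_of() pattern seen in tce_build_wsp(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_iommu_table { unsigned long it_offset, it_size; };

struct toy_wsp_dma_table {
        int id;
        struct toy_iommu_table table;   /* embedded member; name is hypothetical */
};

static void use_table(struct toy_iommu_table *tbl)
{
        /* recover the wrapper from a pointer to its embedded member */
        struct toy_wsp_dma_table *ptbl =
                container_of(tbl, struct toy_wsp_dma_table, table);
        printf("wrapper id = %d\n", ptbl->id);
}

int main(void)
{
        struct toy_wsp_dma_table t = { .id = 7 };
        use_table(&t.table);
        return 0;
}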
|
/arch/powerpc/boot/ |
D | cuboot-c2k.c |
    36   struct mv64x60_cpu2pci_win *tbl; in c2k_bridge_setup() local
    90   tbl = mv64x60_cpu2pci_io; in c2k_bridge_setup()
    93   tbl = mv64x60_cpu2pci_mem; in c2k_bridge_setup()
    112  pci_base_hi, pci_base_lo, cpu_base, size, tbl); in c2k_bridge_setup()
|
/arch/arc/kernel/ |
D | setup.c |
    117  const struct cpuinfo_data *tbl; in arc_cpu_mumbojumbo() local
    130  for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) { in arc_cpu_mumbojumbo()
    131  if ((core->family >= tbl->info.id) && in arc_cpu_mumbojumbo()
    132  (core->family <= tbl->up_range)) { in arc_cpu_mumbojumbo()
    135  tbl->info.str, in arc_cpu_mumbojumbo()
    141  if (tbl->info.id == 0) in arc_cpu_mumbojumbo()
|
/arch/powerpc/sysdev/ |
D | dart_iommu.c |
    154  static void dart_flush(struct iommu_table *tbl) in dart_flush() argument
    163  static int dart_build(struct iommu_table *tbl, long index, in dart_build() argument
    174  dp = ((unsigned int*)tbl->it_base) + index; in dart_build()
    204  static void dart_free(struct iommu_table *tbl, long index, long npages) in dart_free() argument
    215  dp = ((unsigned int *)tbl->it_base) + index; in dart_free()
|
/arch/arm64/crypto/ |
D | aes-neon.S |
    53   tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b
    90   tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
    120  tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
    121  tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
    136  tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
    138  tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
    140  tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b
    142  tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b
    238  tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
    239  tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
    [all …]
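Here tbl is not a variable but the AArch64 TBL instruction: each byte of the index vector selects a byte from the table formed by the listed source registers, and an out-of-range index yields zero. In the hits above it is used both for lookups through the v16-v19 table and, per the source comments, for the ShiftRows byte permutation held in v13. A scalar C model of the instruction's semantics, with an illustrative ShiftRows-style permutation (the actual constant lives in v13 in the assembly):

/* Scalar model of AArch64 TBL: dst[i] = table[idx[i]], or 0 if idx[i] is out of range. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void tbl_model(uint8_t *dst, const uint8_t *idx, size_t n,
                      const uint8_t *table, size_t table_len)
{
        for (size_t i = 0; i < n; i++)
                dst[i] = (idx[i] < table_len) ? table[idx[i]] : 0;
}

int main(void)
{
        /* Index vector playing the role of v13: a byte shuffle of the 16-byte state. */
        const uint8_t perm[16] = { 0, 5, 10, 15, 4, 9, 14, 3,
                                   8, 13, 2, 7, 12, 1, 6, 11 };
        uint8_t state[16], out[16];

        for (int i = 0; i < 16; i++)
                state[i] = (uint8_t)i;

        tbl_model(out, perm, 16, state, 16);   /* like: tbl vOut.16b, {vState.16b}, v13.16b */
        for (int i = 0; i < 16; i++)
                printf("%02x ", out[i]);
        printf("\n");
        return 0;
}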
|
/arch/x86/syscalls/ |
D | Makefile |
    8    syscall32 := $(srctree)/$(src)/syscall_32.tbl
    9    syscall64 := $(srctree)/$(src)/syscall_64.tbl
|
/arch/x86/tools/ |
D | gen-insn-attr-x86.awk |
    154  function print_table(tbl,name,fmt,n)
    159  if (tbl[id])
    160  print " [" id "] = " tbl[id] ","
|