
Searched refs:tbl (Results 1 – 25 of 59) sorted by relevance


/arch/powerpc/kernel/
iommu.c
164 struct iommu_table *tbl, in iommu_range_alloc() argument
198 pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
201 pool = &(tbl->large_pool); in iommu_range_alloc()
203 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
223 if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
224 limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
231 pool = &(tbl->pools[0]); in iommu_range_alloc()
241 1 << tbl->it_page_shift); in iommu_range_alloc()
243 boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift); in iommu_range_alloc()
246 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
[all …]
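
The iommu_range_alloc() hits above show powerpc's per-CPU pool scheme: a per-CPU hash masked by nr_pools - 1 picks a sub-pool (so nr_pools must be a power of two), requests above a size threshold go to a dedicated large pool, and pool 0 serves as the fallback. A minimal sketch of that selection, with all names and values invented for illustration:

    /* Sketch: hashed pool selection; assumes nr_pools is a power of two. */
    struct pool { unsigned long start, end, hint; };

    struct table {
            unsigned int nr_pools;          /* power of two */
            struct pool pools[4];
            struct pool large_pool;         /* requests above the threshold */
    };

    static struct pool *pick_pool(struct table *t, unsigned long npages,
                                  unsigned int cpu_hash, unsigned long large_thresh)
    {
            if (npages >= large_thresh)
                    return &t->large_pool;                  /* one shared large pool */
            return &t->pools[cpu_hash & (t->nr_pools - 1)]; /* cheap modulo */
    }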
dma-iommu.c
123 struct iommu_table *tbl = get_iommu_table_base(dev); in dma_iommu_dma_supported() local
131 if (!tbl) { in dma_iommu_dma_supported()
136 if (tbl->it_offset > (mask >> tbl->it_page_shift)) { in dma_iommu_dma_supported()
139 mask, tbl->it_offset << tbl->it_page_shift); in dma_iommu_dma_supported()
150 struct iommu_table *tbl = get_iommu_table_base(dev); in dma_iommu_get_required_mask() local
153 if (!tbl) in dma_iommu_get_required_mask()
163 mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1); in dma_iommu_get_required_mask()
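
dma_iommu_get_required_mask() derives the smallest all-ones mask covering the table's span: fls_long() gives the position of the most significant set bit of it_offset + it_size, the shift rebuilds that power of two, and (just past the excerpt) mask += mask - 1 fills in the lower bits. A worked, self-contained sketch using a GCC builtin in place of the kernel's fls_long(), with an example value:

    #include <stdio.h>

    int main(void)
    {
            unsigned long span = 0x10000;          /* it_offset + it_size, example */
            int msb = 64 - __builtin_clzl(span);   /* fls_long(); assumes 64-bit long */
            unsigned long long mask = 1ULL << (msb - 1);
            mask += mask - 1;                      /* extend to all-ones: 0x1ffff */
            printf("required mask = %#llx\n", mask);
            return 0;
    }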
/arch/powerpc/platforms/powernv/
pci-ioda-tce.c
20 void pnv_pci_setup_iommu_table(struct iommu_table *tbl, in pnv_pci_setup_iommu_table() argument
24 tbl->it_blocksize = 16; in pnv_pci_setup_iommu_table()
25 tbl->it_base = (unsigned long)tce_mem; in pnv_pci_setup_iommu_table()
26 tbl->it_page_shift = page_shift; in pnv_pci_setup_iommu_table()
27 tbl->it_offset = dma_offset >> tbl->it_page_shift; in pnv_pci_setup_iommu_table()
28 tbl->it_index = 0; in pnv_pci_setup_iommu_table()
29 tbl->it_size = tce_size >> 3; in pnv_pci_setup_iommu_table()
30 tbl->it_busno = 0; in pnv_pci_setup_iommu_table()
31 tbl->it_type = TCE_PCI; in pnv_pci_setup_iommu_table()
55 static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc) in pnv_tce() argument
[all …]
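
pnv_pci_setup_iommu_table() is mostly unit conversion: each TCE is an 8-byte __be64, so the byte count becomes an entry count via >> 3, and the bus DMA offset is stored in IOMMU-page units via >> page_shift. A self-contained sketch with example values (all numbers invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned long tce_size   = 0x8000;      /* bytes of TCE memory */
            unsigned long dma_offset = 0x80000000;  /* bus address of the window */
            unsigned int  page_shift = 12;          /* 4K IOMMU pages */

            printf("it_size   = %#lx entries\n", tce_size >> 3);  /* 8-byte TCEs */
            printf("it_offset = %#lx pages\n", dma_offset >> page_shift);
            return 0;
    }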
pci.h
225 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
228 extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
229 extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
232 extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
234 extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
238 bool alloc_userspace_copy, struct iommu_table *tbl);
239 extern void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
242 struct iommu_table *tbl,
244 extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
246 extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
npu-dma.c
128 struct iommu_table *tbl) in pnv_npu_set_window() argument
134 const unsigned long size = tbl->it_indirect_levels ? in pnv_npu_set_window()
135 tbl->it_level_size : tbl->it_size; in pnv_npu_set_window()
136 const __u64 start_addr = tbl->it_offset << tbl->it_page_shift; in pnv_npu_set_window()
137 const __u64 win_size = tbl->it_size << tbl->it_page_shift; in pnv_npu_set_window()
146 IOMMU_PAGE_SIZE(tbl)); in pnv_npu_set_window()
151 tbl->it_indirect_levels + 1, in pnv_npu_set_window()
152 __pa(tbl->it_base), in pnv_npu_set_window()
154 IOMMU_PAGE_SIZE(tbl)); in pnv_npu_set_window()
163 tbl, &npe->table_group); in pnv_npu_set_window()
[all …]
pci-ioda.c
1444 struct iommu_table *tbl; in pnv_pci_ioda2_release_dma_pe() local
1447 tbl = pe->table_group.tables[0]; in pnv_pci_ioda2_release_dma_pe()
1457 iommu_tce_table_put(tbl); in pnv_pci_ioda2_release_dma_pe()
1890 static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, in pnv_pci_p7ioc_tce_invalidate() argument
1894 &tbl->it_group_list, struct iommu_table_group_link, in pnv_pci_p7ioc_tce_invalidate()
1901 start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset); in pnv_pci_p7ioc_tce_invalidate()
1902 end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset + in pnv_pci_p7ioc_tce_invalidate()
1927 static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, in pnv_ioda1_tce_build() argument
1932 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, in pnv_ioda1_tce_build()
1936 pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false); in pnv_ioda1_tce_build()
[all …]
/arch/x86/kernel/
tce_64.c
36 void tce_build(struct iommu_table *tbl, unsigned long index, in tce_build() argument
47 tp = ((u64*)tbl->it_base) + index; in tce_build()
62 void tce_free(struct iommu_table *tbl, long index, unsigned int npages) in tce_free() argument
66 tp = ((u64*)tbl->it_base) + index; in tce_free()
85 static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) in tce_table_setparms() argument
91 tbl->it_busno = dev->bus->number; in tce_table_setparms()
94 tbl->it_size = table_size_to_number_of_entries(specified_table_size); in tce_table_setparms()
100 bitmapsz = tbl->it_size / BITS_PER_BYTE; in tce_table_setparms()
108 tbl->it_map = (unsigned long*)bmppages; in tce_table_setparms()
110 memset(tbl->it_map, 0, bitmapsz); in tce_table_setparms()
[all …]
pci-calgary_64.c
161 static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
162 static void calgary_tce_cache_blast(struct iommu_table *tbl);
163 static void calgary_dump_error_regs(struct iommu_table *tbl);
164 static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
165 static void calioc2_tce_cache_blast(struct iommu_table *tbl);
166 static void calioc2_dump_error_regs(struct iommu_table *tbl);
167 static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl);
184 static inline int translation_enabled(struct iommu_table *tbl) in translation_enabled() argument
187 return (tbl != NULL); in translation_enabled()
190 static void iommu_range_reserve(struct iommu_table *tbl, in iommu_range_reserve() argument
[all …]
/arch/powerpc/include/asm/
iommu.h
40 int (*set)(struct iommu_table *tbl,
51 int (*xchg_no_kill)(struct iommu_table *tbl,
57 void (*tce_kill)(struct iommu_table *tbl,
62 __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
64 void (*clear)(struct iommu_table *tbl,
67 unsigned long (*get)(struct iommu_table *tbl, long index);
68 void (*flush)(struct iommu_table *tbl);
69 void (*free)(struct iommu_table *tbl);
119 #define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \ argument
120 ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
[all …]
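
iommu.h is the heart of the powerpc design: struct iommu_table delegates everything to an iommu_table_ops callback table (set/clear manage TCE ranges, get reads one entry back, xchg_no_kill plus tce_kill split the atomic exchange from the hardware invalidate), and IOMMU_TABLE_USERSPACE_ENTRY_RO is simply a read-only (alloc = false) call through the useraddrptr hook. As an illustrative sketch only, not the kernel's actual wiring, a platform could populate the table with the pnv_* helpers declared in pci.h above:

    /* Sketch only: powernv's real ops tables wrap these helpers rather
     * than plugging them in directly. */
    static struct iommu_table_ops my_tce_ops = {
            .set         = pnv_tce_build,       /* install npages TCEs */
            .clear       = pnv_tce_free,        /* tear them back down */
            .get         = pnv_tce_get,         /* read a single entry */
            .useraddrptr = pnv_tce_useraddrptr, /* backs the _RO macro */
    };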
time.h
56 unsigned long tbl; in get_tbl() local
57 asm volatile("mfspr %0, 0x3dd" : "=r" (tbl)); in get_tbl()
58 return tbl; in get_tbl()
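
Here tbl is not an IOMMU table at all: get_tbl() reads the lower 32 bits of the PowerPC timebase (SPR 0x3dd on the CPU this branch targets). Because the 64-bit timebase is exposed as two 32-bit halves on 32-bit parts, a consistent read rereads the upper half until it is stable. A sketch of that classic loop, assuming a get_tbu() counterpart that reads the upper half:

    static inline unsigned long long get_tb64(void)
    {
            unsigned long hi, lo;

            do {
                    hi = get_tbu();         /* upper 32 bits (assumed helper) */
                    lo = get_tbl();         /* lower 32 bits, as above */
            } while (hi != get_tbu());      /* retry if TBL carried into TBU */

            return ((unsigned long long)hi << 32) | lo;
    }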
/arch/powerpc/kvm/
book3s_64_vio_hv.c
126 long shift = stit->tbl->it_page_shift; in kvmppc_rm_tce_validate()
175 u64 *tbl; in kvmppc_rm_tce_put() local
184 tbl = kvmppc_page_address(page); in kvmppc_rm_tce_put()
186 tbl[idx % TCES_PER_PAGE] = tce; in kvmppc_rm_tce_put()
222 struct iommu_table *tbl, in iommu_tce_xchg_no_kill_rm() argument
228 ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true); in iommu_tce_xchg_no_kill_rm()
232 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); in iommu_tce_xchg_no_kill_rm()
244 extern void iommu_tce_kill_rm(struct iommu_table *tbl, in iommu_tce_kill_rm() argument
247 if (tbl->it_ops->tce_kill) in iommu_tce_kill_rm()
248 tbl->it_ops->tce_kill(tbl, entry, pages, true); in iommu_tce_kill_rm()
[all …]
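
kvmppc_rm_tce_put() treats the guest-visible TCE table as a series of page-sized chunks: the entry index selects a backing page (the lookup sits outside the excerpt) and idx % TCES_PER_PAGE selects the slot inside it. A self-contained sketch of that addressing, assuming 4K pages of 8-byte entries:

    enum { PAGE_SZ = 4096, TCES_PER_PAGE = PAGE_SZ / 8 };

    static void tce_put(unsigned long long **pages, unsigned long idx,
                        unsigned long long tce)
    {
            unsigned long long *page = pages[idx / TCES_PER_PAGE]; /* which chunk */
            page[idx % TCES_PER_PAGE] = tce;                       /* slot within it */
    }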
book3s_64_vio.c
54 iommu_tce_table_put(stit->tbl); in kvm_spapr_tce_iommu_table_free()
85 if (table_group->tables[i] != stit->tbl) in kvm_spapr_tce_release_iommu_group()
99 struct iommu_table *tbl = NULL; in kvm_spapr_tce_attach_iommu_group() local
140 tbl = iommu_tce_table_get(tbltmp); in kvm_spapr_tce_attach_iommu_group()
144 if (!tbl) in kvm_spapr_tce_attach_iommu_group()
148 if (tbl != stit->tbl) in kvm_spapr_tce_attach_iommu_group()
153 iommu_tce_table_put(tbl); in kvm_spapr_tce_attach_iommu_group()
165 iommu_tce_table_put(tbl); in kvm_spapr_tce_attach_iommu_group()
169 stit->tbl = tbl; in kvm_spapr_tce_attach_iommu_group()
370 long shift = stit->tbl->it_page_shift; in kvmppc_tce_validate()
[all …]
/arch/powerpc/platforms/pseries/
iommu.c
46 struct iommu_table *tbl; in iommu_pseries_alloc_group() local
53 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node); in iommu_pseries_alloc_group()
54 if (!tbl) in iommu_pseries_alloc_group()
57 INIT_LIST_HEAD_RCU(&tbl->it_group_list); in iommu_pseries_alloc_group()
58 kref_init(&tbl->it_kref); in iommu_pseries_alloc_group()
60 table_group->tables[0] = tbl; in iommu_pseries_alloc_group()
72 struct iommu_table *tbl; in iommu_pseries_free_group() local
77 tbl = table_group->tables[0]; in iommu_pseries_free_group()
84 iommu_tce_table_put(tbl); in iommu_pseries_free_group()
89 static int tce_build_pSeries(struct iommu_table *tbl, long index, in tce_build_pSeries() argument
[all …]
vio.c
518 struct iommu_table *tbl = get_iommu_table_base(dev); in vio_dma_iommu_map_page() local
521 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) in vio_dma_iommu_map_page()
523 ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev), in vio_dma_iommu_map_page()
530 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_map_page()
542 struct iommu_table *tbl = get_iommu_table_base(dev); in vio_dma_iommu_unmap_page() local
544 iommu_unmap_page(tbl, dma_handle, size, direction, attrs); in vio_dma_iommu_unmap_page()
545 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_unmap_page()
553 struct iommu_table *tbl = get_iommu_table_base(dev); in vio_dma_iommu_map_sg() local
559 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); in vio_dma_iommu_map_sg()
563 ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev), in vio_dma_iommu_map_sg()
[all …]
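
The vio.c hits all follow one CMO (Cooperative Memory Overcommit) accounting pattern: charge the device's entitlement for the IOMMU-page-rounded size before mapping, and refund exactly the same rounded amount on unmap or on a failed map, so the books always balance. A self-contained sketch of that invariant with invented helpers standing in for vio_cmo_alloc()/vio_cmo_dealloc():

    static unsigned long entitlement = 1UL << 20;   /* bytes available, example */

    static unsigned long round_up_to(unsigned long v, unsigned long unit)
    {
            return (v + unit - 1) / unit * unit;
    }

    static int cmo_charge(unsigned long bytes)
    {
            if (bytes > entitlement)
                    return -1;              /* over entitlement: fail the map */
            entitlement -= bytes;
            return 0;
    }

    static void cmo_refund(unsigned long bytes)
    {
            entitlement += bytes;           /* must mirror the charge exactly */
    }

    static int map_one(unsigned long size, unsigned long iommu_page)
    {
            unsigned long charged = round_up_to(size, iommu_page);

            if (cmo_charge(charged))
                    return -1;
            /* ... program the IOMMU; on failure, cmo_refund(charged) ... */
            return 0;
    }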
/arch/unicore32/mm/
proc-macros.S
94 .macro va2pa, va, pa, tbl, msk, off, err=990f
97 adr \tbl, 910f @ tbl <- table of 1st page table
104 add \tbl, \tbl, \off << #3 @ cmove table pointer
105 ldw \msk, [\tbl+], #0 @ get the mask
106 ldw pc, [\tbl+], #4
110 cntlo \tbl, \msk @ use tbl as temp reg
111 mov \off, \off >> \tbl
113 adr \tbl, 920f @ tbl <- table of 2nd pt
127 andn \tbl, \va, \msk
129 or \pa, \pa, \tbl
/arch/sparc/kernel/
pci_sun4v.c
187 struct iommu_map_table *tbl; in dma_4v_alloc_coherent() local
214 tbl = &iommu->tbl; in dma_4v_alloc_coherent()
216 tbl = &iommu->atu->tbl; in dma_4v_alloc_coherent()
218 entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, in dma_4v_alloc_coherent()
224 *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); in dma_4v_alloc_coherent()
250 iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); in dma_4v_alloc_coherent()
327 struct iommu_map_table *tbl; in dma_4v_free_coherent() local
339 tbl = &iommu->tbl; in dma_4v_free_coherent()
342 tbl = &atu->tbl; in dma_4v_free_coherent()
345 entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT); in dma_4v_free_coherent()
[all …]
iommu.c
52 struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl); in iommu_flushall()
105 iommu->tbl.table_map_base = dma_offset; in iommu_table_init()
111 iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node); in iommu_table_init()
112 if (!iommu->tbl.map) in iommu_table_init()
115 iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT, in iommu_table_init()
150 kfree(iommu->tbl.map); in iommu_table_init()
151 iommu->tbl.map = NULL; in iommu_table_init()
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages()
230 *dma_addrp = (iommu->tbl.table_map_base + in dma_4u_alloc_coherent()
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4u_free_coherent()
[all …]
iommu-common.c
227 static struct iommu_pool *get_pool(struct iommu_map_table *tbl, in get_pool() argument
231 unsigned long largepool_start = tbl->large_pool.start; in get_pool()
232 bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0); in get_pool()
236 p = &tbl->large_pool; in get_pool()
238 unsigned int pool_nr = entry / tbl->poolsize; in get_pool()
240 BUG_ON(pool_nr >= tbl->nr_pools); in get_pool()
241 p = &tbl->pools[pool_nr]; in get_pool()
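
get_pool() is the inverse of the allocation-time hash: given an entry being freed, the regular pool is recovered by integer division (entry / poolsize), while anything at or past large_pool.start belongs to the large pool. A self-contained sketch with illustrative sizes:

    #include <assert.h>

    enum { POOLSIZE = 256, NR_POOLS = 4,
           LARGEPOOL_START = POOLSIZE * NR_POOLS };

    static unsigned int pool_of(unsigned long entry, int has_large_pool)
    {
            if (has_large_pool && entry >= LARGEPOOL_START)
                    return NR_POOLS;                /* index of the large pool */
            unsigned int nr = entry / POOLSIZE;     /* fixed-size regular pools */
            assert(nr < NR_POOLS);                  /* mirrors the BUG_ON above */
            return nr;
    }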
/arch/x86/include/asm/
tce.h
28 extern void tce_build(struct iommu_table *tbl, unsigned long index,
30 extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
32 extern void __init free_tce_table(void *tbl);
calgary.h
34 void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
35 void (*tce_cache_blast)(struct iommu_table *tbl);
36 void (*dump_error_regs)(struct iommu_table *tbl);
/arch/powerpc/platforms/pasemi/
iommu.c
76 static int iobmap_build(struct iommu_table *tbl, long index, in iobmap_build() argument
87 bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT; in iobmap_build()
89 ip = ((u32 *)tbl->it_base) + index; in iobmap_build()
105 static void iobmap_free(struct iommu_table *tbl, long index, in iobmap_free() argument
113 bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT; in iobmap_free()
115 ip = ((u32 *)tbl->it_base) + index; in iobmap_free()
/arch/arm64/crypto/
aes-neon.S
72 tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b
94 tbl \in\().16b, {\in\().16b}, v14.16b
104 tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
129 tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
131 tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
133 tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b
135 tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b
199 tbl \in0\().16b, {\in0\().16b}, v14.16b
200 tbl \in1\().16b, {\in1\().16b}, v14.16b
214 tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
[all …]
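
In aes-neon.S, tbl is the AArch64 TBL instruction rather than a variable: each byte of the index vector selects a byte from a table of up to four consecutive vector registers, with out-of-range indexes producing zero, which implements both the 64-byte S-box lookup over {v16-v19} and the ShiftRows byte permutations. The same operation through NEON intrinsics, as a sketch:

    #include <arm_neon.h>

    /* 64-byte table lookup, like "tbl vd.16b, {v16.16b-v19.16b}, vn.16b".
     * Index bytes 0-63 pick a table byte; 64 and up yield 0. AArch64 only. */
    static uint8x16_t lookup64(uint8x16x4_t table, uint8x16_t idx)
    {
            return vqtbl4q_u8(table, idx);
    }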
/arch/arm64/kernel/
head.S
155 .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
156 add \tmp1, \tbl, #PAGE_SIZE
162 str \tmp2, [\tbl, \tmp1, lsl #3]
163 add \tbl, \tbl, #PAGE_SIZE // next level table page
182 .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
185 str \tmp1, [\tbl, \index, lsl #3]
245 .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
246 add \rtbl, \tbl, #PAGE_SIZE
250 populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
251 mov \tbl, \sv
[all …]
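
head.S builds the early page tables with these macros: create_table_entry links one level to the next (each descriptor slot is 8 bytes, hence the lsl #3 scaling), and populate_entries fills a run of slots with descriptors that step by a fixed increment. A rough C rendering of populate_entries, for orientation only:

    /* Fill slots index..eindex with descriptors advancing by `inc`,
     * as "str \tmp1, [\tbl, \index, lsl #3]" does, 8 bytes per slot. */
    static void populate_entries(unsigned long long *tbl, unsigned long long desc,
                                 unsigned int index, unsigned int eindex,
                                 unsigned long long inc)
    {
            for (; index <= eindex; index++, desc += inc)
                    tbl[index] = desc;
    }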
/arch/x86/boot/compressed/
acpi.c
42 efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables + i; in __efi_get_rsdp_addr() local
44 guid = tbl->guid; in __efi_get_rsdp_addr()
45 table = tbl->table; in __efi_get_rsdp_addr()
52 efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables + i; in __efi_get_rsdp_addr() local
54 guid = tbl->guid; in __efi_get_rsdp_addr()
55 table = tbl->table; in __efi_get_rsdp_addr()
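
__efi_get_rsdp_addr() walks the firmware's configuration-table array twice, once for the 64-bit layout and once for the 32-bit one, doing the same thing each time: compare every entry's GUID against the ACPI table GUID and take the matching entry's table pointer. A simplified, self-contained sketch of that scan (types are stand-ins for the EFI ones):

    #include <string.h>
    #include <stddef.h>

    typedef struct { unsigned char b[16]; } guid_t;
    struct cfg_table { guid_t guid; unsigned long long table; };

    static unsigned long long find_table(const struct cfg_table *tables, size_t n,
                                         const guid_t *want)
    {
            for (size_t i = 0; i < n; i++)
                    if (!memcmp(&tables[i].guid, want, sizeof(*want)))
                            return tables[i].table; /* address of the match */
            return 0;                               /* not found */
    }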
/arch/arc/kernel/
setup.c
115 const struct id_to_str *tbl; in decode_arc_core() local
125 for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) { in decode_arc_core()
126 if (cpu->core.family == tbl->id) { in decode_arc_core()
127 cpu->release = tbl->str; in decode_arc_core()
134 else if (tbl->str) in decode_arc_core()
158 for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) { in decode_arc_core()
159 if (uarch.maj == tbl->id) { in decode_arc_core()
160 cpu->release = tbl->str; in decode_arc_core()
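
decode_arc_core() resolves a hardware ID to a release string by scanning sentinel-terminated tables: arc_legacy_rel ends at id == 0 and arc_cpu_rel at id == 0xFF. The pattern, as a self-contained sketch:

    struct id_to_str { int id; const char *str; };

    static const char *lookup(const struct id_to_str *tbl, int id, int sentinel)
    {
            for (; tbl->id != sentinel; tbl++)      /* stop at the sentinel */
                    if (tbl->id == id)
                            return tbl->str;
            return "unknown";                       /* no match found */
    }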
