/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H

#include <linux/bitops.h>
#include <linux/iommu.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	ARM_MALI_LPAE,
	AMD_IOMMU_V1,
	APPLE_DART,
	IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all:  Synchronously invalidate the entire TLB context.
 * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
 *                  (sometimes referred to as the "walk cache") for a virtual
 *                  address range.
 * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
 *                  single page. IOMMUs that cannot batch TLB invalidation
 *                  operations efficiently will typically issue them here, but
 *                  others may decide to update the iommu_iotlb_gather structure
 *                  and defer the invalidation until iommu_iotlb_sync() instead.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_flush_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
			       void *cookie);
	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
			     unsigned long iova, size_t granule, void *cookie);
};
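
/*
 * Illustrative sketch only: one way a driver might wire up these callbacks.
 * The "my_iommu" names and MMIO helpers are hypothetical, not part of this
 * API; remember that all three callbacks may run in atomic context.
 *
 *	static void my_iommu_tlb_flush_all(void *cookie)
 *	{
 *		struct my_iommu *smmu = cookie;
 *
 *		my_iommu_invalidate_all(smmu);	// hypothetical MMIO helper
 *	}
 *
 *	static void my_iommu_tlb_flush_walk(unsigned long iova, size_t size,
 *					    size_t granule, void *cookie)
 *	{
 *		my_iommu_invalidate_range(cookie, iova, size, granule);
 *	}
 *
 *	static const struct iommu_flush_ops my_iommu_flush_ops = {
 *		.tlb_flush_all	= my_iommu_tlb_flush_all,
 *		.tlb_flush_walk	= my_iommu_tlb_flush_walk,
 *		// .tlb_add_page is optional and may be left NULL
 *	};
 */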

/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @coherent_walk: A flag to indicate whether or not page table walks made
 *                 by the IOMMU are coherent with the CPU caches.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs
	 *	extend the format to support up to 35 bits of PA, where PA
	 *	bits 32, 33 and 34 are encoded in PTE bits 9, 4 and 5
	 *	respectively.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
	 *	extend the translation table base to support up to 35 bits of
	 *	PA; the encoding is the same as for IO_PGTABLE_QUIRK_ARM_MTK_EXT.
	 *
	 * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
	 *	for use in the upper half of a split address space.
	 *
	 * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
	 *	attributes set in the TCR for a non-coherent page-table walker.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS			BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS		BIT(1)
	#define IO_PGTABLE_QUIRK_ARM_MTK_EXT		BIT(3)
	#define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT	BIT(4)
	#define IO_PGTABLE_QUIRK_ARM_TTBR1		BIT(5)
	#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA		BIT(6)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	bool				coherent_walk;
	const struct iommu_flush_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr;
			struct {
				u32	ips:3;
				u32	tg:2;
				u32	sh:2;
				u32	orgn:2;
				u32	irgn:2;
				u32	tsz:6;
			} tcr;
			u64	mair;
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			struct {
				u32	ps:3;
				u32	tg:2;
				u32	sh:2;
				u32	orgn:2;
				u32	irgn:2;
				u32	sl:2;
				u32	tsz:6;
			} vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr;
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;

		struct {
			u64	transtab;
			u64	memattr;
		} arm_mali_lpae_cfg;

		struct {
			u64	ttbr[4];
			u32	n_ttbrs;
		} apple_dart_cfg;
	};
};
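
/*
 * Illustrative sketch only: a driver might populate the configuration along
 * these lines before allocating the tables. The "smmu" device and
 * my_iommu_flush_ops are hypothetical placeholders.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.quirks		= IO_PGTABLE_QUIRK_ARM_NS,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &my_iommu_flush_ops,
 *		.iommu_dev	= smmu->dev,
 *	};
 */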

/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @map_pages:    Map a physically contiguous range of pages of the same size.
 * @map_sg:       Map a scatter-gather list of physically contiguous memory
 *                chunks. On success, the number of bytes mapped is returned
 *                via the @mapped argument.
 * @unmap:        Unmap a physically contiguous memory region.
 * @unmap_pages:  Unmap a range of virtually contiguous pages of the same size.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova,
		      struct scatterlist *sg, unsigned int nents, int prot,
		      gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *gather);
	size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};
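
/*
 * Illustrative sketch only: mapping and unmapping through the ops table,
 * assuming "ops" was obtained from alloc_io_pgtable_ops() below and the
 * page size is permitted by cfg->pgsize_bitmap.
 *
 *	size_t mapped = 0;
 *	struct iommu_iotlb_gather gather;
 *
 *	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 16, IOMMU_READ,
 *			     GFP_KERNEL, &mapped);
 *	...
 *	iommu_iotlb_gather_init(&gather);
 *	ops->unmap_pages(ops, iova, SZ_4K, 16, &gather);
 *	iommu_iotlb_sync(domain, &gather);	// domain is driver-specific
 */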

/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
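
/*
 * Illustrative sketch only: the typical allocate/use/free lifecycle, with
 * the (hypothetical) driver instance passed as the cookie so the cfg->tlb
 * callbacks can reach it.
 *
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, smmu);
 *	if (!ops)
 *		return -ENOMEM;
 *
 *	// ... install cfg.arm_lpae_s1_cfg.ttbr etc. in hardware, then map ...
 *
 *	free_io_pgtable_ops(ops);	// tables must no longer be live
 */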


/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
		iop->cfg.tlb->tlb_flush_all(iop->cookie);
}

static inline void
io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
			  size_t size, size_t granule)
{
	if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk)
		iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}

static inline void
io_pgtable_tlb_add_page(struct io_pgtable *iop,
			struct iommu_iotlb_gather *gather, unsigned long iova,
			size_t granule)
{
	if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
		iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
}
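
/*
 * Illustrative sketch only: a format implementation typically recovers its
 * struct io_pgtable from the ops pointer and issues TLB maintenance when
 * tearing down live entries. The function below is a loose, hypothetical
 * paraphrase of an unmap path, not code from any real format.
 *
 *	static size_t my_fmt_unmap_pages(struct io_pgtable_ops *ops,
 *					 unsigned long iova, size_t pgsize,
 *					 size_t pgcount,
 *					 struct iommu_iotlb_gather *gather)
 *	{
 *		struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *
 *		// ... clear the relevant PTEs ...
 *		io_pgtable_tlb_add_page(iop, gather, iova, pgsize);
 *		return pgsize * pgcount;
 *	}
 */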

/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};

extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;
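
/*
 * Illustrative sketch only: the core allocator (io-pgtable.c) dispatches on
 * the format via a table of these init functions. A new format would
 * provide something along the lines of the hypothetical example below.
 *
 *	static struct io_pgtable *my_fmt_alloc(struct io_pgtable_cfg *cfg,
 *					       void *cookie)
 *	{
 *		// validate cfg, allocate the root table, fill in iop->ops
 *	}
 *
 *	struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
 *		.alloc	= my_fmt_alloc,
 *		.free	= my_fmt_free,
 *	};
 */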

#endif /* __IO_PGTABLE_H */