/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H

#include <linux/bitops.h>
#include <linux/iommu.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	ARM_MALI_LPAE,
	IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all:  Synchronously invalidate the entire TLB context.
 * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
 *                  (sometimes referred to as the "walk cache") for a virtual
 *                  address range.
 * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
 *                  address range.
 * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
 *                  single page.  IOMMUs that cannot batch TLB invalidation
 *                  operations efficiently will typically issue them here, but
 *                  others may decide to update the iommu_iotlb_gather structure
 *                  and defer the invalidation until iommu_tlb_sync() instead.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_flush_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
			       void *cookie);
	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
			       void *cookie);
	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
			     unsigned long iova, size_t granule, void *cookie);
};

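/*
 * Example: a minimal sketch of a driver wiring up these callbacks, where
 * foo_hw_inv_ctx() stands in for a hypothetical hardware invalidation
 * command and the cookie is whatever was passed to alloc_io_pgtable_ops().
 * The walk/leaf/add_page hooks would follow the same pattern.
 *
 *	static void foo_tlb_flush_all(void *cookie)
 *	{
 *		struct foo_domain *dom = cookie;
 *
 *		foo_hw_inv_ctx(dom);
 *	}
 *
 *	static const struct iommu_flush_ops foo_flush_ops = {
 *		.tlb_flush_all	= foo_tlb_flush_all,
 *		.tlb_flush_walk	= foo_tlb_flush_walk,
 *		.tlb_flush_leaf	= foo_tlb_flush_leaf,
 *	};
 */
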
/**
 * struct iommu_pgtable_ops - IOMMU callbacks for page table memory management.
 *
 * @alloc_pgtable: Allocate page table memory, and return a page-aligned
 *                 cacheable linear mapping address of the start of a physically
 *                 contiguous region of memory.
 * @free_pgtable: Free page table memory.
 */
struct iommu_pgtable_ops {
	void *(*alloc_pgtable)(void *cookie, int order, gfp_t gfp_mask);
	void (*free_pgtable)(void *cookie, void *virt, int order);
};

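/*
 * Example: a minimal sketch of a driver overriding page table allocation,
 * here simply forwarding to the buddy allocator; a real driver might
 * instead carve tables out of a private pool. All foo_* names are
 * hypothetical.
 *
 *	static void *foo_alloc_pgtable(void *cookie, int order, gfp_t gfp_mask)
 *	{
 *		return (void *)__get_free_pages(gfp_mask | __GFP_ZERO, order);
 *	}
 *
 *	static void foo_free_pgtable(void *cookie, void *virt, int order)
 *	{
 *		free_pages((unsigned long)virt, order);
 *	}
 *
 *	static const struct iommu_pgtable_ops foo_pgtable_ops = {
 *		.alloc_pgtable	= foo_alloc_pgtable,
 *		.free_pgtable	= foo_free_pgtable,
 *	};
 */
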
/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @coherent_walk: A flag to indicate whether or not page table walks made
 *                 by the IOMMU are coherent with the CPU caches.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_pgtable_ops: IOMMU page table memory management callbacks (optional;
 *                     defaults to the buddy allocator if not present).
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 *	(unmapped) entries but the hardware might do so anyway, perform
	 *	TLB maintenance when mapping as well as when unmapping.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
	 *	to support up to 35 bits of PA, where PA bits 32, 33 and 34 are
	 *	encoded in bits 9, 4 and 5 of the PTE respectively.
	 *
	 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
	 *	on unmap, for DMA domains using the flush queue mechanism for
	 *	delayed invalidation.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	#define IO_PGTABLE_QUIRK_ARM_MTK_EXT	BIT(3)
	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(4)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	bool				coherent_walk;
	const struct iommu_flush_ops	*tlb;
	const struct iommu_pgtable_ops	*iommu_pgtable_ops;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr[2];
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;

		struct {
			u64	transtab;
			u64	memattr;
		} arm_mali_lpae_cfg;
	};
};

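/*
 * Example: a minimal sketch of populating a configuration for a 48-bit
 * stage 1 LPAE walker with 4K/2M/1G pages, assuming <linux/sizes.h> for
 * the SZ_* macros; foo_flush_ops and the foo_smmu device are hypothetical.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &foo_flush_ops,
 *		.iommu_dev	= foo_smmu->dev,
 *	};
 */
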
/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *gather);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};

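/*
 * Example: a minimal sketch of a driver's map/unmap paths calling through
 * these ops; dom is a hypothetical driver domain holding the ops pointer
 * returned by alloc_io_pgtable_ops(), and gather is the caller's
 * iommu_iotlb_gather.
 *
 *	if (dom->pgtbl_ops->map(dom->pgtbl_ops, iova, paddr, SZ_4K,
 *				IOMMU_READ | IOMMU_WRITE))
 *		return -ENOMEM;
 *
 *	unmapped = dom->pgtbl_ops->unmap(dom->pgtbl_ops, iova, SZ_4K,
 *					 &gather);
 */
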
/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);

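/*
 * Example: a minimal sketch of allocating and later freeing a set of
 * tables, reusing the cfg from the example above; dom is a hypothetical
 * driver domain passed back as the cookie.
 *
 *	dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
 *	if (!dom->pgtbl_ops)
 *		return -ENOMEM;
 *	...
 *	free_io_pgtable_ops(dom->pgtbl_ops);
 */
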
/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);

/**
 * io_pgtable_alloc_pages - Allocate memory for page tables using an IOMMU
 *                          driver's provided callback, or the buddy allocator.
 *
 * @cfg:      The page table configuration. This will be used to determine if
 *            the page table memory should be allocated through the IOMMU
 *            driver's callback, or the buddy allocator.
 * @cookie:   An opaque pointer used by the IOMMU driver's callback.
 * @order:    The page allocation order (the allocation covers 2^order pages).
 * @gfp_mask: The GFP mask to be used with the allocation.
 *
 * Returns a cacheable linear mapping address to a physically contiguous region
 * of memory. The start of the region must be page-aligned.
 */
void *io_pgtable_alloc_pages(struct io_pgtable_cfg *cfg, void *cookie,
			     int order, gfp_t gfp_mask);

/**
 * io_pgtable_free_pages - Free memory for page tables using an IOMMU
 *                         driver's provided callback, or the buddy allocator.
 *
 * @cfg:    The page table configuration. This will be used to determine if
 *          the page table memory should be freed through the IOMMU
 *          driver's callback, or the buddy allocator.
 * @cookie: An opaque pointer used by the IOMMU driver's callback.
 * @virt:   The virtual address of the memory to free.
 * @order:  The page allocation order of the original allocation.
 */
void io_pgtable_free_pages(struct io_pgtable_cfg *cfg, void *cookie, void *virt,
			   int order);

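/*
 * Example: a minimal sketch of how a format implementation might use
 * these helpers for a table spanning a single page (order 0); cfg and
 * cookie come from the io_pgtable being set up.
 *
 *	void *table = io_pgtable_alloc_pages(cfg, cookie, 0,
 *					     GFP_ATOMIC | __GFP_ZERO);
 *	if (!table)
 *		return NULL;
 *	...
 *	io_pgtable_free_pages(cfg, cookie, table, 0);
 */
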
/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

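/*
 * Example: because the ops structure is embedded in struct io_pgtable, a
 * format implementation recovers its state from the ops pointer handed to
 * every callback. A sketch of a hypothetical map callback:
 *
 *	static int foo_map(struct io_pgtable_ops *ops, unsigned long iova,
 *			   phys_addr_t paddr, size_t size, int prot)
 *	{
 *		struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *		struct io_pgtable_cfg *cfg = &iop->cfg;
 *		...
 *	}
 */
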
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	iop->cfg.tlb->tlb_flush_all(iop->cookie);
}

static inline void
io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
			  size_t size, size_t granule)
{
	iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}

static inline void
io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
			  size_t size, size_t granule)
{
	iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
}

static inline void
io_pgtable_tlb_add_page(struct io_pgtable *iop,
			struct iommu_iotlb_gather *gather, unsigned long iova,
			size_t granule)
{
	if (iop->cfg.tlb->tlb_add_page)
		iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
}

/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};

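/*
 * Example: a minimal sketch of the alloc/free pair a new format would
 * export (cf. the io_pgtable_arm_*_init_fns declarations below); all
 * foo_* names are hypothetical, with struct foo_pgtable embedding the
 * struct io_pgtable as its iop member.
 *
 *	static struct io_pgtable *foo_alloc(struct io_pgtable_cfg *cfg,
 *					    void *cookie)
 *	{
 *		struct foo_pgtable *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		if (!data)
 *			return NULL;
 *		data->iop.ops = (struct io_pgtable_ops) {
 *			.map		= foo_map,
 *			.unmap		= foo_unmap,
 *			.iova_to_phys	= foo_iova_to_phys,
 *		};
 *		return &data->iop;
 *	}
 *
 *	struct io_pgtable_init_fns io_pgtable_foo_init_fns = {
 *		.alloc	= foo_alloc,
 *		.free	= foo_free,
 *	};
 */
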
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;

#endif /* __IO_PGTABLE_H */