/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * IOMMU user API definitions
 */

#ifndef _UAPI_IOMMU_H
#define _UAPI_IOMMU_H

#include <linux/types.h>

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types; can be expanded, e.g. to add IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_DMA_UNRECOV = 1,	/* unrecoverable fault */
	IOMMU_FAULT_PAGE_REQ,		/* page request fault */
};

enum iommu_fault_reason {
	IOMMU_FAULT_REASON_UNKNOWN = 0,

	/* Could not access the PASID table (fetch caused external abort) */
	IOMMU_FAULT_REASON_PASID_FETCH,

	/* PASID entry is invalid or has configuration errors */
	IOMMU_FAULT_REASON_BAD_PASID_ENTRY,

	/*
	 * PASID is out of range (e.g. exceeds the maximum PASID
	 * supported by the IOMMU) or disabled.
	 */
	IOMMU_FAULT_REASON_PASID_INVALID,

	/*
	 * An external abort occurred fetching (or updating) a translation
	 * table descriptor
	 */
	IOMMU_FAULT_REASON_WALK_EABT,

	/*
	 * Could not access the page table entry (Bad address),
	 * actual translation fault
	 */
	IOMMU_FAULT_REASON_PTE_FETCH,

	/* Protection flag check failed */
	IOMMU_FAULT_REASON_PERMISSION,

	/* Access flag check failed */
	IOMMU_FAULT_REASON_ACCESS,

	/* Output address of a translation stage caused Address Size fault */
	IOMMU_FAULT_REASON_OOR_ADDRESS,
};

/**
 * struct iommu_fault_unrecoverable - Unrecoverable fault data
 * @reason: reason of the fault, from &enum iommu_fault_reason
 * @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values)
 * @pasid: Process Address Space ID
 * @perm: permissions requested by the incoming transaction
 *        (IOMMU_FAULT_PERM_* values)
 * @addr: offending page address
 * @fetch_addr: address that caused a fetch abort, if any
 */
struct iommu_fault_unrecoverable {
	__u32	reason;
#define IOMMU_FAULT_UNRECOV_PASID_VALID		(1 << 0)
#define IOMMU_FAULT_UNRECOV_ADDR_VALID		(1 << 1)
#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID	(1 << 2)
	__u32	flags;
	__u32	pasid;
	__u32	perm;
	__u64	addr;
	__u64	fetch_addr;
};
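
/*
 * Illustrative sketch, not part of the UAPI definitions above: a user-space
 * consumer of reported faults must test the IOMMU_FAULT_UNRECOV_* bits in
 * @flags before trusting the optional fields. The helper name below is
 * hypothetical, not an interface provided by this header.
 */
static inline int example_unrecov_fault_pasid(const struct iommu_fault_unrecoverable *event,
					      __u32 *pasid)
{
	/* @pasid is only meaningful when the PASID_VALID flag is set */
	if (!(event->flags & IOMMU_FAULT_UNRECOV_PASID_VALID))
		return -1;
	*pasid = event->pasid;
	return 0;
}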

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in the group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA	(1 << 2)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 3)
	__u32	flags;
	__u32	pasid;
	__u32	grpid;
	__u32	perm;
	__u64	addr;
	__u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @padding: reserved for future use (should be zero)
 * @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 * @padding2: sets the fault size to allow for future extensions
 */
struct iommu_fault {
	__u32	type;
	__u32	padding;
	union {
		struct iommu_fault_unrecoverable event;
		struct iommu_fault_page_request prm;
		__u8 padding2[56];
	};
};
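
/*
 * Illustrative sketch, not part of the UAPI definitions above: a consumer
 * reading generic fault records uses @type to pick the valid union member.
 * The helper name is hypothetical; it returns the faulting address when one
 * was reported, or 0 otherwise.
 */
static inline __u64 example_fault_address(const struct iommu_fault *fault)
{
	switch (fault->type) {
	case IOMMU_FAULT_DMA_UNRECOV:
		/* @event is valid; its address is optional */
		if (fault->event.flags & IOMMU_FAULT_UNRECOV_ADDR_VALID)
			return fault->event.addr;
		return 0;
	case IOMMU_FAULT_PAGE_REQ:
		/* @prm is valid and carries the page address */
		return fault->prm.addr;
	default:
		/* unknown type, possibly from a newer kernel */
		return 0;
	}
}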

/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @argsz: User filled size of this data
 * @version: API version of this structure
 * @flags: encodes whether the corresponding fields are valid
 *         (IOMMU_PAGE_RESP_* values)
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	__u32	argsz;
#define IOMMU_PAGE_RESP_VERSION_1	1
	__u32	version;
#define IOMMU_PAGE_RESP_PASID_VALID	(1 << 0)
	__u32	flags;
	__u32	pasid;
	__u32	grpid;
	__u32	code;
};
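
/*
 * Illustrative sketch, not part of the UAPI definitions above: building the
 * response to a page request. Per the documentation of
 * IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID, the response carries the request's
 * PASID only when that flag was set in the request. The helper name and the
 * caller-supplied success decision are hypothetical consumer-side details.
 */
static inline struct iommu_page_response
example_build_page_response(const struct iommu_fault_page_request *prm,
			    int handled)
{
	struct iommu_page_response resp = {
		.argsz	 = sizeof(resp),
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.grpid	 = prm->grpid,
		.code	 = handled ? IOMMU_PAGE_RESP_SUCCESS :
				     IOMMU_PAGE_RESP_INVALID,
	};

	if (prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid  = prm->pasid;
	}
	return resp;
}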

/* defines the granularity of the invalidation */
enum iommu_inv_granularity {
	IOMMU_INV_GRANU_DOMAIN,	/* domain-selective invalidation */
	IOMMU_INV_GRANU_PASID,	/* PASID-selective invalidation */
	IOMMU_INV_GRANU_ADDR,	/* page-selective invalidation */
	IOMMU_INV_GRANU_NR,	/* number of invalidation granularities */
};

/**
 * struct iommu_inv_addr_info - Address Selective Invalidation Structure
 *
 * @flags: indicates the granularity of the address-selective invalidation
 * - If the PASID bit is set, the @pasid field is populated and the invalidation
 *   relates to cache entries tagged with this PASID and matching the address
 *   range.
 * - If the ARCHID bit is set, @archid is populated and the invalidation relates
 *   to cache entries tagged with this architecture-specific ID and matching
 *   the address range.
 * - Both PASID and ARCHID can be set, as they may tag different caches.
 * - If neither PASID nor ARCHID is set, global address invalidation applies.
 * - The LEAF flag indicates whether only the leaf PTE caching needs to be
 *   invalidated; other paging structure caches can be preserved.
 * @pasid: process address space ID
 * @archid: architecture-specific ID
 * @addr: first stage/level input address
 * @granule_size: page/block size of the mapping in bytes
 * @nb_granules: number of contiguous granules to be invalidated
 */
struct iommu_inv_addr_info {
#define IOMMU_INV_ADDR_FLAGS_PASID	(1 << 0)
#define IOMMU_INV_ADDR_FLAGS_ARCHID	(1 << 1)
#define IOMMU_INV_ADDR_FLAGS_LEAF	(1 << 2)
	__u32	flags;
	__u32	archid;
	__u64	pasid;
	__u64	addr;
	__u64	granule_size;
	__u64	nb_granules;
};
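
/*
 * Illustrative sketch, not part of the UAPI definitions above: the range
 * covered by an address-selective invalidation is @nb_granules contiguous
 * blocks of @granule_size bytes starting at @addr. The helper name is
 * hypothetical; it returns the first byte past the invalidated range.
 */
static inline __u64 example_inv_range_end(const struct iommu_inv_addr_info *info)
{
	return info->addr + info->granule_size * info->nb_granules;
}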

/**
 * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
 *
 * @flags: indicates the granularity of the PASID-selective invalidation
 * - If the PASID bit is set, the @pasid field is populated and the invalidation
 *   relates to cache entries tagged with this PASID.
 * - If the ARCHID bit is set, @archid is populated and the invalidation
 *   relates to cache entries tagged with this architecture-specific ID.
 * - Both PASID and ARCHID can be set, as they may tag different caches.
 * - At least one of PASID or ARCHID must be set.
 * @pasid: process address space ID
 * @archid: architecture-specific ID
 */
struct iommu_inv_pasid_info {
#define IOMMU_INV_PASID_FLAGS_PASID	(1 << 0)
#define IOMMU_INV_PASID_FLAGS_ARCHID	(1 << 1)
	__u32	flags;
	__u32	archid;
	__u64	pasid;
};

/**
 * struct iommu_cache_invalidate_info - First level/stage invalidation
 *     information
 * @argsz: User filled size of this data
 * @version: API version of this structure
 * @cache: bitfield that selects which caches to invalidate
 * @granularity: defines the lowest granularity used for the invalidation:
 *     domain > PASID > addr
 * @padding: reserved for future use (should be zero)
 * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
 * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
 *
 * Not all the combinations of cache/granularity are valid:
 *
 * +--------------+---------------+---------------+---------------+
 * | type /       |   DEV_IOTLB   |     IOTLB     |      PASID    |
 * | granularity  |               |               |      cache    |
 * +==============+===============+===============+===============+
 * | DOMAIN       |       N/A     |       Y       |       Y       |
 * +--------------+---------------+---------------+---------------+
 * | PASID        |       Y       |       Y       |       Y       |
 * +--------------+---------------+---------------+---------------+
 * | ADDR         |       Y       |       Y       |       N/A     |
 * +--------------+---------------+---------------+---------------+
 *
 * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
 * @version and @cache.
 *
 * If multiple cache types are invalidated simultaneously, they all
 * must support the used granularity.
 */
struct iommu_cache_invalidate_info {
	__u32	argsz;
#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
	__u32	version;
/* IOMMU paging structure cache */
#define IOMMU_CACHE_INV_TYPE_IOTLB	(1 << 0) /* IOMMU IOTLB */
#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB	(1 << 1) /* Device IOTLB */
#define IOMMU_CACHE_INV_TYPE_PASID	(1 << 2) /* PASID cache */
#define IOMMU_CACHE_INV_TYPE_NR		(3)
	__u8	cache;
	__u8	granularity;
	__u8	padding[6];
	union {
		struct iommu_inv_pasid_info pasid_info;
		struct iommu_inv_addr_info addr_info;
	} granu;
};
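
/*
 * Illustrative sketch, not part of the UAPI definitions above: filling a
 * cache invalidation request for an address-selective (ADDR granularity)
 * IOTLB invalidation of a PASID-tagged range, one of the valid
 * cache/granularity combinations in the table above. How the structure is
 * then passed to the kernel is outside the scope of this header, and the
 * helper name is hypothetical.
 */
static inline struct iommu_cache_invalidate_info
example_build_iotlb_inv(__u64 pasid, __u64 addr, __u64 granule_size,
			__u64 nb_granules)
{
	struct iommu_cache_invalidate_info inv = {
		.argsz		= sizeof(inv),
		.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
		.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
		.granularity	= IOMMU_INV_GRANU_ADDR,
		.granu.addr_info = {
			.flags		= IOMMU_INV_ADDR_FLAGS_PASID,
			.pasid		= pasid,
			.addr		= addr,
			.granule_size	= granule_size,
			.nb_granules	= nb_granules,
		},
	};
	return inv;
}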

/**
 * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
 * SVA binding.
 *
 * @flags:	VT-d PASID table entry attributes
 * @pat:	Page attribute table data to compute effective memory type
 * @emt:	Extended memory type
 *
 * Only guest vIOMMU selectable and effective options are passed down to
 * the host IOMMU.
 */
struct iommu_gpasid_bind_data_vtd {
#define IOMMU_SVA_VTD_GPASID_SRE	(1 << 0) /* supervisor request */
#define IOMMU_SVA_VTD_GPASID_EAFE	(1 << 1) /* extended access enable */
#define IOMMU_SVA_VTD_GPASID_PCD	(1 << 2) /* page-level cache disable */
#define IOMMU_SVA_VTD_GPASID_PWT	(1 << 3) /* page-level write through */
#define IOMMU_SVA_VTD_GPASID_EMTE	(1 << 4) /* extended mem type enable */
#define IOMMU_SVA_VTD_GPASID_CD		(1 << 5) /* PASID-level cache disable */
#define IOMMU_SVA_VTD_GPASID_LAST	(1 << 6)
	__u64 flags;
	__u32 pat;
	__u32 emt;
};

#define IOMMU_SVA_VTD_GPASID_MTS_MASK	(IOMMU_SVA_VTD_GPASID_CD | \
					 IOMMU_SVA_VTD_GPASID_EMTE | \
					 IOMMU_SVA_VTD_GPASID_PCD |  \
					 IOMMU_SVA_VTD_GPASID_PWT)

/**
 * struct iommu_gpasid_bind_data - Information about device and guest PASID binding
 * @argsz:	User filled size of this data
 * @version:	Version of this data structure
 * @format:	PASID table entry format
 * @flags:	Additional information on guest bind request
 * @gpgd:	Guest page directory base of the guest mm to bind
 * @hpasid:	Process address space ID used for the guest mm in host IOMMU
 * @gpasid:	Process address space ID used for the guest mm in guest IOMMU
 * @addr_width:	Guest virtual address width
 * @padding:	Reserved for future use (should be zero)
 * @vtd:	Intel VT-d specific data
 *
 * The guest to host PASID mapping can be identity or non-identity, where the
 * guest has its own PASID space. For a non-identity mapping, a guest to host
 * PASID lookup is needed when the VM programs a guest PASID into an assigned
 * device. The VMM may trap such PASID programming and then ask the host IOMMU
 * driver to convert the guest PASID to a host PASID based on this bind data.
 */
struct iommu_gpasid_bind_data {
	__u32 argsz;
#define IOMMU_GPASID_BIND_VERSION_1	1
	__u32 version;
#define IOMMU_PASID_FORMAT_INTEL_VTD	1
#define IOMMU_PASID_FORMAT_LAST		2
	__u32 format;
	__u32 addr_width;
#define IOMMU_SVA_GPASID_VAL	(1 << 0) /* guest PASID valid */
	__u64 flags;
	__u64 gpgd;
	__u64 hpasid;
	__u64 gpasid;
	__u8  padding[8];
	/* Vendor specific data */
	union {
		struct iommu_gpasid_bind_data_vtd vtd;
	} vendor;
};
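
/*
 * Illustrative sketch, not part of the UAPI definitions above: filling a
 * guest PASID bind request in the Intel VT-d format with a non-identity
 * guest PASID, as described in the comment above. All values are
 * placeholders supplied by the caller and the helper name is hypothetical;
 * how the structure reaches the host IOMMU driver is outside this header.
 */
static inline struct iommu_gpasid_bind_data
example_build_gpasid_bind(__u64 gpgd, __u64 hpasid, __u64 gpasid,
			  __u32 addr_width)
{
	struct iommu_gpasid_bind_data bind = {
		.argsz		= sizeof(bind),
		.version	= IOMMU_GPASID_BIND_VERSION_1,
		.format		= IOMMU_PASID_FORMAT_INTEL_VTD,
		.addr_width	= addr_width,
		.flags		= IOMMU_SVA_GPASID_VAL,	/* @gpasid is valid */
		.gpgd		= gpgd,
		.hpasid		= hpasid,
		.gpasid		= gpasid,
		/* vendor.vtd left zeroed: no VT-d specific attributes set */
	};
	return bind;
}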

#endif /* _UAPI_IOMMU_H */