/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
#include <linux/io-pgtable.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS	32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)
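
/*
 * Illustrative sketch (not part of the driver): decoding a raw value read
 * from the range capability register with the helpers above. The function
 * name and out-parameters are hypothetical.
 */
static inline void example_decode_range_cap(u32 range, u8 *bus,
					    u8 *first_dev, u8 *last_dev)
{
	*bus       = MMIO_GET_BUS(range);	/* bits 15:8  */
	*first_dev = MMIO_GET_FD(range);	/* bits 23:16 */
	*last_dev  = MMIO_GET_LD(range);	/* bits 31:24 */
}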

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK 0x01ULL
#define MMIO_EXCL_ALLOW_MASK  0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET   0x0000
#define MMIO_CMD_BUF_OFFSET     0x0008
#define MMIO_EVT_BUF_OFFSET     0x0010
#define MMIO_CONTROL_OFFSET     0x0018
#define MMIO_EXCL_BASE_OFFSET   0x0020
#define MMIO_EXCL_LIMIT_OFFSET  0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
#define MMIO_MSI_ADDR_LO_OFFSET	0x015C
#define MMIO_MSI_ADDR_HI_OFFSET	0x0160
#define MMIO_MSI_DATA_OFFSET	0x0164
#define MMIO_INTCAPXT_EVT_OFFSET	0x0170
#define MMIO_INTCAPXT_PPR_OFFSET	0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET	0x0180
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_GA_HEAD_OFFSET	0x2040
#define MMIO_GA_TAIL_OFFSET	0x2048
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000

/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)
#define FEATURE_GAM_VAPIC	(1ULL<<21)
#define FEATURE_EPHSUP		(1ULL<<50)
#define FEATURE_SNP		(1ULL<<63)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)

/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Currently, hardware only implements up to 16-bit PASIDs
 * even though the spec says it could have up to 20 bits.
 */
#define PASID_MASK		0x0000ffff

/* MMIO status bits */
#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK	(1 << 0)
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK	(1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_TYPE_INV_PPR_REQ	0x9
#define EVENT_TYPE_RMP_FAULT	0xd
#define EVENT_TYPE_RMP_HW_ERR	0xe
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK_LO	0xffff
#define EVENT_DOMID_MASK_HI	0xf0000
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10
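
/*
 * Illustrative sketch (not part of the driver): an event log entry is
 * EVENT_ENTRY_SIZE (16) bytes; viewed as four 32-bit words, the common
 * fields decode with the masks/shifts above. The helper and the exact
 * word layout shown are assumptions for illustration.
 */
static inline void example_parse_event(u32 *event, int *type, u16 *devid,
				       int *flags)
{
	*devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	*type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	*flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
}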

/* feature control bits */
#define CONTROL_IOMMU_EN        0x00ULL
#define CONTROL_HT_TUN_EN       0x01ULL
#define CONTROL_EVT_LOG_EN      0x02ULL
#define CONTROL_EVT_INT_EN      0x03ULL
#define CONTROL_COMWAIT_EN      0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN       0x08ULL
#define CONTROL_RESPASSPW_EN    0x09ULL
#define CONTROL_COHERENT_EN     0x0aULL
#define CONTROL_ISOC_EN         0x0bULL
#define CONTROL_CMDBUF_EN       0x0cULL
#define CONTROL_PPRLOG_EN       0x0dULL
#define CONTROL_PPRINT_EN       0x0eULL
#define CONTROL_PPR_EN          0x0fULL
#define CONTROL_GT_EN           0x10ULL
#define CONTROL_GA_EN           0x11ULL
#define CONTROL_GAM_EN          0x19ULL
#define CONTROL_GALOG_EN        0x1CULL
#define CONTROL_GAINT_EN        0x1DULL
#define CONTROL_XT_EN           0x32ULL
#define CONTROL_INTCAPXT_EN     0x33ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6

/* command specific defines */
#define CMD_COMPL_WAIT          0x01
#define CMD_INV_DEV_ENTRY       0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK			0xf
#define PPR_STATUS_SHIFT		12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID         0x00
#define DEV_ENTRY_TRANSLATION   0x01
#define DEV_ENTRY_PPR           0x34
#define DEV_ENTRY_IR            0x3d
#define DEV_ENTRY_IW            0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX            0x67
#define DEV_ENTRY_SYSMGT1       0x68
#define DEV_ENTRY_SYSMGT2       0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS     0xb8
#define DEV_ENTRY_EINT_PASS     0xb9
#define DEV_ENTRY_NMI_PASS      0xba
#define DEV_ENTRY_LINT0_PASS    0xbe
#define DEV_ENTRY_LINT1_PASS    0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE    8192
#define CMD_BUFFER_UNINITIALIZED 1
#define CMD_BUFFER_ENTRIES 512
#define MMIO_CMD_SIZE_SHIFT 56
#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))
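
/*
 * Illustrative sketch (not part of the driver): a PPR log entry is two
 * 64-bit words. The first carries the fields decoded above (note how
 * PPR_PASID() stitches the split PASID halves back together), the second
 * the faulting address. The helper and its out-parameters are hypothetical;
 * the word layout is an assumption for illustration.
 */
static inline void example_parse_ppr(u64 *raw, u64 *address, u32 *pasid,
				     u16 *devid, u16 *tag, u16 *flags)
{
	*devid   = PPR_DEVID(raw[0]);
	*pasid   = PPR_PASID(raw[0]);	/* PASID2:PASID1 recombined */
	*tag     = PPR_TAG(raw[0]);
	*flags   = PPR_FLAGS(raw[0]);
	*address = raw[1];
}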

#define PPR_REQ_FAULT		0x01

/* Constants for GA Log handling */
#define GA_LOG_ENTRIES		512
#define GA_LOG_SIZE_SHIFT	56
#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE		8
#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)

#define GA_TAG(x)		((u32)((x) & 0xffffffffULL))
#define GA_DEVID(x)		((u16)(((x) >> 32) & 0xffffULL))
#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)

#define GA_GUEST_NR		0x1
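
/*
 * Illustrative sketch (not part of the driver): a GA log entry is one
 * 64-bit word whose request type selects the interpretation of the rest.
 * For a guest interrupt request (GA_GUEST_NR above) the tag and devid
 * decode as shown. The helper name is hypothetical.
 */
static inline bool example_parse_ga_guest(u64 entry, u32 *ga_tag, u16 *devid)
{
	if (GA_REQ_TYPE(entry) != GA_GUEST_NR)
		return false;
	*ga_tag = GA_TAG(entry);
	*devid  = GA_DEVID(entry);
	return true;
}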

#define IOMMU_IN_ADDR_BIT_SIZE  52
#define IOMMU_OUT_ADDR_BIT_SIZE 52

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * 512GB pages are not supported due to a hardware bug.
 */
#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))

/* Bit value definitions for DTE irq remapping fields */
#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE    1ULL

/*
 * AMD IOMMU hardware only supports 512 IRTEs despite
 * the architectural limit of 2048 entries.
 */
#define DTE_INTTAB_ALIGNMENT    128
#define DTE_INTTABLEN_VALUE     9ULL
#define DTE_INTTABLEN           (DTE_INTTABLEN_VALUE << 1)
#define DTE_INTTABLEN_MASK      (0xfULL << 1)
#define MAX_IRQS_PER_TABLE      (1 << DTE_INTTABLEN_VALUE)
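
/*
 * Worked example for the encoding above: an IntTabLen value of 9 means
 * 2^9 = 512 interrupt table entries (MAX_IRQS_PER_TABLE). The value is
 * shifted left by one because the field starts at bit 1 of the DTE word,
 * bit 0 being the remap-enable bit (cf. DTE_IRQ_REMAP_ENABLE above).
 */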

#define PAGE_MODE_NONE    0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
#define PAGE_MODE_4_LEVEL 0x04
#define PAGE_MODE_5_LEVEL 0x05
#define PAGE_MODE_6_LEVEL 0x06
#define PAGE_MODE_7_LEVEL 0x07

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
				   (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))
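
/*
 * Worked example for the PM_* helpers above: at level 1,
 * PM_LEVEL_SHIFT(1) = 21, so PM_LEVEL_INDEX(1, a) selects bits 29:21 of
 * the address and PM_ALIGNED(1, a) tests for 2 MB (1 << 21) alignment;
 * PM_LEVEL_SIZE(1) is the matching 2 MB - 1 offset mask.
 */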

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))
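
/*
 * Worked example: for a 2 MB page, __ffs(0x200000) = 21, so
 * PAGE_SIZE_LEVEL = (21 - 12) / 9 = 1 and PAGE_SIZE_PTE_COUNT = 1: a
 * single level-1 PTE. For a 32 KB page, __ffs(0x8000) = 15, giving
 * level 0 and 1 << 3 = 8 consecutive level-0 PTEs.
 */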

/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~((pagesize) >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
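
/*
 * Illustrative sketch (not part of the driver): PAGE_SIZE_PTE() flags the
 * mapping size by setting the low address bits to 0111...1b, and
 * PTE_PAGE_SIZE() recovers it from the position of the first zero bit.
 * E.g. a 2 MB page at IOVA 0x40000000 encodes to 0x400ff000 and decodes
 * back to 0x200000. The helper below only illustrates the round trip.
 */
static inline bool example_pte_size_roundtrip(u64 address, u64 pagesize)
{
	u64 pte = PAGE_SIZE_PTE(address, pagesize);

	return PTE_PAGE_SIZE(pte) == pagesize;
}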

/*
 * Bit value definition for I/O PTE fields
 */
#define IOMMU_PTE_PR (1ULL << 0)
#define IOMMU_PTE_U  (1ULL << 59)
#define IOMMU_PTE_FC (1ULL << 60)
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)

/*
 * Bit value definition for DTE fields
 */
#define DTE_FLAG_V  (1ULL << 0)
#define DTE_FLAG_TV (1ULL << 1)
#define DTE_FLAG_IR (1ULL << 61)
#define DTE_FLAG_IW (1ULL << 62)

#define DTE_FLAG_IOTLB	(1ULL << 32)
#define DTE_FLAG_GV	(1ULL << 55)
#define DTE_FLAG_MASK	(0x3ffULL << 32)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)
#define DEV_DOMID_MASK	0xffffULL

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL
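
/*
 * Illustrative sketch (not part of the driver): the guest CR3 table
 * pointer does not fit into one contiguous DTE field, so it is split into
 * the A/B/C pieces above; piece A lands in DTE qword 0 and pieces B and C
 * in qword 1, per the DTE_GCR3_INDEX_* values. The helper is hypothetical
 * and 'dte' stands for the data qwords of a device table entry.
 */
static inline void example_encode_gcr3(u64 *dte, u64 gcr3)
{
	dte[DTE_GCR3_INDEX_A] |= DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
	dte[DTE_GCR3_INDEX_B] |= DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
	dte[DTE_GCR3_INDEX_C] |= DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
}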

#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK 0x03
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02

#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB   24
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR     27

/* IOMMU IVINFO */
#define IOMMU_IVINFO_OFFSET     36
#define IOMMU_IVINFO_EFRSUP     BIT(0)

/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT	6

/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT	2
#define IOMMU_EFR_GASUP_SHIFT	7
#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46

#define MAX_DOMAIN_ID 65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			pr_info("AMD-Vi: " format, ## arg);	\
	} while (0)

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

struct irq_remap_table {
	raw_spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
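
/*
 * Illustrative usage (not part of the driver), assuming struct amd_iommu
 * and amd_iommu_list declared further below in this header:
 *
 *	struct amd_iommu *iommu;
 *
 *	for_each_iommu(iommu)
 *		setup_one_iommu(iommu);	// hypothetical per-IOMMU setup
 */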

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;    /* IO virtual address of the fault */
	u32 pasid;      /* Address space identifier */
	u16 device_id;  /* Originating PCI device id */
	u16 tag;        /* PPR tag */
	u16 flags;      /* Fault flags */
};

struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;

#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED      (1 << 0)

#define io_pgtable_to_data(x) \
	container_of((x), struct amd_io_pgtable, iop)

#define io_pgtable_ops_to_data(x) \
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

#define io_pgtable_ops_to_domain(x) \
	container_of(io_pgtable_ops_to_data(x), \
		     struct protection_domain, iop)

#define io_pgtable_cfg_to_data(x) \
	container_of((x), struct amd_io_pgtable, pgtbl_cfg)

struct amd_io_pgtable {
	struct io_pgtable_cfg	pgtbl_cfg;
	struct io_pgtable	iop;
	int			mode;
	u64			*root;
	atomic64_t		pt_root;    /* pgtable root and pgtable mode */
};

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head dev_list; /* List of all devices in this domain */
	struct iommu_domain domain; /* generic domain handle used by
				       iommu core code */
	struct amd_io_pgtable iop;
	spinlock_t lock;	/* mostly used to lock the page table */
	u16 id;			/* the domain id written to the device table */
	int glx;		/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;		/* Guest CR3 table */
	unsigned long flags;	/* flags to find out type of domain */
	unsigned dev_cnt;	/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	raw_spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	u32 cmd_buf_head;
	u32 cmd_buf_tail;

	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* Base of the GA log, if present */
	u8 *ga_log;

	/* Tail of the GA log, if present */
	u8 *ga_log_tail;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if one, we need to send a completion wait command */
	bool need_sync;

	/* Handle for IOMMU core code */
	struct iommu_device iommu;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
#ifdef CONFIG_IRQ_REMAP
	struct irq_domain *ir_domain;
	struct irq_domain *msi_domain;

	struct amd_irte_ops *irte_ops;
#endif

	u32 flags;
	volatile u64 *cmd_sem;
	u64 cmd_sem_val;

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
	/* DebugFS Info */
	struct dentry *debugfs;
#endif
};

static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
	struct iommu_device *iommu = dev_to_iommu_device(dev);

	return container_of(iommu, struct amd_iommu, iommu);
}

#define ACPIHID_UID_LEN 256
#define ACPIHID_HID_LEN 9

struct acpihid_map_entry {
	struct list_head list;
	u8 uid[ACPIHID_UID_LEN];
	u8 hid[ACPIHID_HID_LEN];
	u16 devid;
	u16 root_devid;
	bool cmd_line;
	struct iommu_group *group;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};

/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	/* Protect against attach/detach races */
	spinlock_t lock;

	struct list_head list;		  /* For domain->dev_list */
	struct llist_node dev_data_list;  /* For global dev_data_list */
	struct protection_domain *domain; /* Domain the device is bound to */
	struct pci_dev *pdev;
	u16 devid;			  /* PCI Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	bool use_vapic;			  /* Enable device to use vapic mode */
	bool defer_attach;

	struct ratelimit_state rs;        /* Ratelimit IOPF messages */
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
extern struct list_head acpihid_map;

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (inclusive) */
	u16 devid_start;
	/* end device id this entry is used for (inclusive) */
	u16 devid_end;

	/* start address to unity map (inclusive) */
	u64 address_start;
	/* end address to unity map (inclusive) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is
 * only read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fall back to supporting only
	 * legacy interrupt remapping via 128-bit IRTEs.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};

#define AMD_IOMMU_GUEST_IR_GA(x)	((x) == AMD_IOMMU_GUEST_IR_VAPIC || \
					 (x) == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	((x) == AMD_IOMMU_GUEST_IR_VAPIC)

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define APICID_TO_IRTE_DEST_LO(x)    ((x) & 0xffffff)
#define APICID_TO_IRTE_DEST_HI(x)    (((x) >> 24) & 0xff)

union irte_ga_lo {
	u64 val;

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_vapic;
};

union irte_ga_hi {
	u64 val;
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 4,
		    destination : 8;
	} fields;
};

struct irte_ga {
	union irte_ga_lo lo;
	union irte_ga_hi hi;
};
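
/*
 * Illustrative sketch (not part of the driver): a 32-bit destination
 * APIC ID is split across the two halves of a 128-bit IRTE with the
 * APICID_TO_IRTE_DEST_* macros above. The helper name is hypothetical;
 * all other fields are left untouched.
 */
static inline void example_irte_ga_set_dest(struct irte_ga *irte,
					    u32 dest_apicid)
{
	irte->lo.fields_remap.destination =
				APICID_TO_IRTE_DEST_LO(dest_apicid);
	irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
}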

struct irq_2_irte {
	u16 devid; /* Device ID for IRTE table */
	u16 index; /* Index into IRTE table */
};

struct amd_ir_data {
	u32 cached_ga_tag;
	struct irq_2_irte irq_2_irte;
	struct msi_msg msi_entry;
	void *entry;    /* Pointer to union irte or struct irte_ga */
	void *ref;      /* Pointer to the actual irte */

	/*
	 * Store information for activating/deactivating the guest
	 * virtual APIC mode during runtime.
	 */
	struct irq_cfg *cfg;
	int ga_vector;
	u64 ga_root_ptr;
	u32 ga_tag;
};

struct amd_irte_ops {
	void (*prepare)(void *, u32, bool, u8, u32, int);
	void (*activate)(void *, u16, u16);
	void (*deactivate)(void *, u16, u16);
	void (*set_affinity)(void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};

#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */