/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2006-2015, Intel Corporation.
 *
 * Authors: Ashok Raj <ashok.raj@intel.com>
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *          David Woodhouse <David.Woodhouse@intel.com>
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

#define VTD_STRIDE_SHIFT        (9)
#define VTD_STRIDE_MASK         (((u64)-1) << VTD_STRIDE_SHIFT)

#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_LARGE_PAGE (1 << 7)
#define DMA_PTE_SNP (1 << 11)

#define CONTEXT_TT_MULTI_LEVEL	0
#define CONTEXT_TT_DEV_IOTLB	1
#define CONTEXT_TT_PASS_THROUGH 2
#define CONTEXT_PASIDE		BIT_ULL(3)

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */
#define	DMAR_VER_REG	0x0	/* Arch version supported by this IOMMU */
#define	DMAR_CAP_REG	0x8	/* Hardware supported capabilities */
#define	DMAR_ECAP_REG	0x10	/* Extended capabilities supported */
#define	DMAR_GCMD_REG	0x18	/* Global command register */
#define	DMAR_GSTS_REG	0x1c	/* Global status register */
#define	DMAR_RTADDR_REG	0x20	/* Root entry table */
#define	DMAR_CCMD_REG	0x28	/* Context command reg */
#define	DMAR_FSTS_REG	0x34	/* Fault Status register */
#define	DMAR_FECTL_REG	0x38	/* Fault control register */
#define	DMAR_FEDATA_REG	0x3c	/* Fault event interrupt data register */
#define	DMAR_FEADDR_REG	0x40	/* Fault event interrupt addr register */
#define	DMAR_FEUADDR_REG 0x44	/* Upper address register */
#define	DMAR_AFLOG_REG	0x58	/* Advanced Fault control */
#define	DMAR_PMEN_REG	0x64	/* Enable Protected Memory Region */
#define	DMAR_PLMBASE_REG 0x68	/* PMRR Low addr */
#define	DMAR_PLMLIMIT_REG 0x6c	/* PMRR low limit */
#define	DMAR_PHMBASE_REG 0x70	/* pmrr high base addr */
#define	DMAR_PHMLIMIT_REG 0x78	/* pmrr high limit */
#define DMAR_IQH_REG	0x80	/* Invalidation queue head register */
#define DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
#define DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
#define DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
#define DMAR_ICS_REG	0x9c	/* Invalidation complete status register */
#define DMAR_IRTA_REG	0xb8    /* Interrupt remapping table addr register */
#define DMAR_PQH_REG	0xc0	/* Page request queue head register */
#define DMAR_PQT_REG	0xc8	/* Page request queue tail register */
#define DMAR_PQA_REG	0xd0	/* Page request queue address register */
#define DMAR_PRS_REG	0xdc	/* Page request status register */
#define DMAR_PECTL_REG	0xe0	/* Page request event control register */
#define	DMAR_PEDATA_REG	0xe4	/* Page request event interrupt data register */
#define	DMAR_PEADDR_REG	0xe8	/* Page request event interrupt addr register */
#define	DMAR_PEUADDR_REG 0xec	/* Page request event Upper address register */
#define DMAR_MTRRCAP_REG 0x100	/* MTRR capability register */
#define DMAR_MTRRDEF_REG 0x108	/* MTRR default type register */
#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
#define DMAR_MTRR_FIX16K_80000_REG 0x128
#define DMAR_MTRR_FIX16K_A0000_REG 0x130
#define DMAR_MTRR_FIX4K_C0000_REG 0x138
#define DMAR_MTRR_FIX4K_C8000_REG 0x140
#define DMAR_MTRR_FIX4K_D0000_REG 0x148
#define DMAR_MTRR_FIX4K_D8000_REG 0x150
#define DMAR_MTRR_FIX4K_E0000_REG 0x158
#define DMAR_MTRR_FIX4K_E8000_REG 0x160
#define DMAR_MTRR_FIX4K_F0000_REG 0x168
#define DMAR_MTRR_FIX4K_F8000_REG 0x170
#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
#define DMAR_MTRR_PHYSMASK0_REG 0x188
#define DMAR_MTRR_PHYSBASE1_REG 0x190
#define DMAR_MTRR_PHYSMASK1_REG 0x198
#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
#define DMAR_MTRR_PHYSBASE8_REG 0x200
#define DMAR_MTRR_PHYSMASK8_REG 0x208
#define DMAR_MTRR_PHYSBASE9_REG 0x210
#define DMAR_MTRR_PHYSMASK9_REG 0x218
#define DMAR_VCCAP_REG		0xe00 /* Virtual command capability register */
#define DMAR_VCMD_REG		0xe10 /* Virtual command register */
#define DMAR_VCRSP_REG		0xe20 /* Virtual command response register */

#define OFFSET_STRIDE		(9)

#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
#define dmar_readl(a) readl(a)
#define dmar_writel(a, v) writel(v, a)

#define DMAR_VER_MAJOR(v)		(((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)		((v) & 0x0f)

/*
 * Decoding Capability Register
 */
#define cap_5lp_support(c)	(((c) >> 60) & 1)
#define cap_pi_support(c)	(((c) >> 59) & 1)
#define cap_fl1gp_support(c)	(((c) >> 56) & 1)
#define cap_read_drain(c)	(((c) >> 55) & 1)
#define cap_write_drain(c)	(((c) >> 54) & 1)
#define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)	(((c) >> 39) & 1)

#define cap_super_page_val(c)	(((c) >> 34) & 0xf)
#define cap_super_offset(c)	(((find_first_bit(&cap_super_page_val(c), 4)) \
					* OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)		(((c) >> 22) & 1)
#define cap_isoch(c)		(((c) >> 23) & 1)
#define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)		(((c) >> 8) & 0x1f)
#define cap_caching_mode(c)	(((c) >> 7) & 1)
#define cap_phmr(c)		(((c) >> 6) & 1)
#define cap_plmr(c)		(((c) >> 5) & 1)
#define cap_rwbf(c)		(((c) >> 4) & 1)
#define cap_afl(c)		(((c) >> 3) & 1)
#define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
/*
 * Extended Capability Register
 */

#define ecap_smpwc(e)		(((e) >> 48) & 0x1)
#define ecap_flts(e)		(((e) >> 47) & 0x1)
#define ecap_slts(e)		(((e) >> 46) & 0x1)
#define ecap_smts(e)		(((e) >> 43) & 0x1)
#define ecap_dit(e)		((e >> 41) & 0x1)
#define ecap_pasid(e)		((e >> 40) & 0x1)
#define ecap_pss(e)		((e >> 35) & 0x1f)
#define ecap_eafs(e)		((e >> 34) & 0x1)
#define ecap_nwfs(e)		((e >> 33) & 0x1)
#define ecap_srs(e)		((e >> 31) & 0x1)
#define ecap_ers(e)		((e >> 30) & 0x1)
#define ecap_prs(e)		((e >> 29) & 0x1)
#define ecap_broken_pasid(e)	((e >> 28) & 0x1)
#define ecap_dis(e)		((e >> 27) & 0x1)
#define ecap_nest(e)		((e >> 26) & 0x1)
#define ecap_mts(e)		((e >> 25) & 0x1)
#define ecap_ecs(e)		((e >> 24) & 0x1)
#define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e)	((e) & 0x1)
#define ecap_qis(e)		((e) & 0x2)
#define ecap_pass_through(e)	((e >> 6) & 0x1)
#define ecap_eim_support(e)	((e >> 4) & 0x1)
#define ecap_ir_support(e)	((e >> 3) & 0x1)
#define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e)	((e >> 7) & 0x1) /* Snooping Control */
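
/*
 * Illustrative sketch (not part of the upstream header): reading the
 * capability and extended capability registers and decoding a few of
 * the fields with the helpers above. Assumes "iommu" points to an
 * initialized struct intel_iommu (declared later in this file):
 *
 *	u64 cap  = dmar_readq(iommu->reg + DMAR_CAP_REG);
 *	u64 ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 *	bool need_clflush = !ecap_coherent(ecap);
 *
 *	pr_info("mgaw %d, %s snooping control\n", (int)cap_mgaw(cap),
 *		ecap_sc_support(ecap) ? "with" : "without");
 */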

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET  60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id)	(((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET  61
#define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
#define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)	(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)	(mask)

/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)

/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
#define DMA_GCMD_CFI (((u32) 1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)

/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)
#define DMA_RTADDR_SMT (((u64)1) << 10)

/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))

/* PRS_REG */
#define DMA_PRS_PPR	((u32)1)

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
do {									\
	cycles_t start_time = get_cycles();				\
	while (1) {							\
		sts = op(iommu->reg + offset);				\
		if (cond)						\
			break;						\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
			panic("DMAR hardware is malfunctioning\n");	\
		cpu_relax();						\
	}								\
} while (0)
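
/*
 * Illustrative sketch of how IOMMU_WAIT_OP() is meant to be used; this
 * roughly mirrors the translation-enable path. Assumes register_lock is
 * held and "sts" is a local u32:
 *
 *	writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 *		      (sts & DMA_GSTS_TES), sts);
 */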

#define QI_LENGTH	256	/* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE,
	QI_ABORT
};

#define QI_CC_TYPE		0x1
#define QI_IOTLB_TYPE		0x2
#define QI_DIOTLB_TYPE		0x3
#define QI_IEC_TYPE		0x4
#define QI_IWD_TYPE		0x5
#define QI_EIOTLB_TYPE		0x6
#define QI_PC_TYPE		0x7
#define QI_DEIOTLB_TYPE		0x8
#define QI_PGRP_RESP_TYPE	0x9
#define QI_PSTRM_RESP_TYPE	0xa

#define QI_IEC_SELECTIVE	(((u64)1) << 4)
#define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
#define QI_IWD_STATUS_WRITE	(((u64)1) << 5)

#define QI_IOTLB_DID(did)	(((u64)did) << 16)
#define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
#define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
#define QI_IOTLB_AM(am)		(((u8)am))

#define QI_CC_FM(fm)		(((u64)fm) << 48)
#define QI_CC_SID(sid)		(((u64)sid) << 32)
#define QI_CC_DID(did)		(((u64)did) << 16)
#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				   ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE	1
#define QI_DEV_IOTLB_MAX_INVS	32

#define QI_PC_PASID(pasid)	(((u64)pasid) << 32)
#define QI_PC_DID(did)		(((u64)did) << 16)
#define QI_PC_GRAN(gran)	(((u64)gran) << 4)

#define QI_PC_ALL_PASIDS	(QI_PC_TYPE | QI_PC_GRAN(0))
#define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))

#define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
#define QI_EIOTLB_AM(am)	(((u64)am))
#define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
#define QI_EIOTLB_DID(did)	(((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran)	(((u64)gran) << 4)

#define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g)	((u64)(g) & 0x1)
#define QI_DEV_EIOTLB_PASID(p)	((u64)((p) & 0xfffff) << 32)
#define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				    ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS	32

/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)
#define QI_PGRP_PDP(p)		(((u64)(p)) << 5)
#define QI_PGRP_RESP_CODE(res)	(((u64)(res)) << 12)
#define QI_PGRP_DID(rid)	(((u64)(rid)) << 16)
#define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)

/* Page group response descriptor QW1 */
#define QI_PGRP_LPIG(x)		(((u64)(x)) << 2)
#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 3)


#define QI_RESP_SUCCESS		0x0
#define QI_RESP_INVALID		0x1
#define QI_RESP_FAILURE		0xf

#define QI_GRAN_NONG_PASID		2
#define QI_GRAN_PSI_PASID		3

#define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))

struct qi_desc {
	u64 qw0;
	u64 qw1;
	u64 qw2;
	u64 qw3;
};
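
/*
 * Illustrative sketch (not upstream code): building a domain-selective
 * IOTLB invalidation descriptor with the QI_IOTLB_* helpers above and
 * submitting it through the queued-invalidation interface declared later
 * in this header. "iommu" and the domain id "did" are assumed valid:
 *
 *	struct qi_desc desc = {};
 *
 *	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) |
 *		   QI_IOTLB_TYPE;
 *	qi_submit_sync(&desc, iommu);
 */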

struct q_inval {
	raw_spinlock_t  q_lock;
	void		*desc;          /* invalidation queue */
	int             *desc_status;   /* desc status */
	int             free_head;      /* first free entry */
	int             free_tail;      /* last free entry */
	int             free_cnt;
};

#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER	8
#define INTR_REMAP_TABLE_REG_SIZE	0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK  0xf

#define INTR_REMAP_TABLE_ENTRIES	65536

struct irq_domain;

struct ir_table {
	struct irte *base;
	unsigned long *bitmap;
};
#endif

struct iommu_flush {
	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			      u8 fm, u64 type);
	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			    unsigned int size_order, u64 type);
};

enum {
	SR_DMAR_FECTL_REG,
	SR_DMAR_FEDATA_REG,
	SR_DMAR_FEADDR_REG,
	SR_DMAR_FEUADDR_REG,
	MAX_SR_DMAR_REGS
};

#define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)

extern int intel_iommu_sm;

#define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_pasid((iommu)->ecap))

struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64     lo;
	u64     hi;
};

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */
	unsigned int	auxd_refcnt;	/* Refcount of auxiliary attaching */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct list_head auxd;		/* link to device's auxiliary list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	int		default_pasid;	/*
					 * The default pasid used for non-SVM
					 * traffic on mediated devices.
					 */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

struct intel_iommu {
	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
	u64 		reg_phys; /* physical address of hw register set */
	u64		reg_size; /* size of hw register set */
	u64		cap;
	u64		ecap;
	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	raw_spinlock_t	register_lock; /* protect register handling */
	int		seq_id;	/* sequence id of the iommu */
	int		agaw; /* agaw of this iommu */
	int		msagaw; /* max sagaw of this iommu */
	unsigned int 	irq, pr_irq;
	u16		segment;     /* PCI segment# */
	unsigned char 	name[13];    /* Device Name */

#ifdef CONFIG_INTEL_IOMMU
	unsigned long 	*domain_ids; /* bitmap of domains */
	struct dmar_domain ***domains; /* ptr to domains */
	spinlock_t	lock; /* protect context, domain ids */
	struct root_entry *root_entry; /* virtual address */

	struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
	struct page_req_dsc *prq;
	unsigned char prq_name[16];    /* Name for PRQ interrupt */
#endif
	struct q_inval  *qi;            /* Queued invalidation info */
	u32 *iommu_state; /* Store iommu states between suspend and resume.*/

#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
	struct irq_domain *ir_domain;
	struct irq_domain *ir_msi_domain;
#endif
	struct iommu_device iommu;  /* IOMMU core code handle */
	int		node;
	u32		flags;      /* Software defined flags */

	struct dmar_drhd_unit *drhd;
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	struct list_head table;	/* link to pasid table */
	struct list_head auxiliary_domains; /* auxiliary domains
					     * attached to this device
					     */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u16 pfsid;		/* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 auxd_enabled:1;	/* Multiple domains per device */
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
};

static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
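
/*
 * Illustrative sketch (not upstream code): descending one level of an
 * I/O page table with the dma_pte helpers above. "pte" is assumed to
 * point into a valid, present page-table page:
 *
 *	struct dma_pte *next;
 *
 *	if (dma_pte_present(pte) && !dma_pte_superpage(pte))
 *		next = phys_to_virt(dma_pte_addr(pte));
 */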

extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);

extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);

extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
			     u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			  unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern int dmar_ir_support(void);

void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
				     void *data), void *data);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);

#ifdef CONFIG_INTEL_IOMMU_SVM
int intel_svm_init(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);

struct svm_dev_ops;

struct intel_svm_dev {
	struct list_head list;
	struct rcu_head rcu;
	struct device *dev;
	struct svm_dev_ops *ops;
	int users;
	u16 did;
	u16 dev_iotlb:1;
	u16 sid, qdep;
};

struct intel_svm {
	struct mmu_notifier notifier;
	struct mm_struct *mm;
	struct intel_iommu *iommu;
	int flags;
	int pasid;
	struct list_head devs;
	struct list_head list;
};

extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#endif

#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
#else
static inline void intel_iommu_debugfs_init(void) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */

extern const struct attribute_group *intel_iommu_groups[];
bool context_present(struct context_entry *context);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc);

#ifdef CONFIG_INTEL_IOMMU
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
extern int intel_iommu_gfx_mapped;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}
#define dmar_disabled	(1)
#define intel_iommu_enabled (0)
#endif

#endif