/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>

/* iova structure */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi; /* Highest allocated pfn */
	unsigned long	pfn_lo; /* Lowest allocated pfn */
};
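
/*
 * Illustration (not part of the header itself): an iova with
 * pfn_lo == 0x100 and pfn_hi == 0x10f describes a contiguous range
 * of 16 pfns, which is exactly what iova_size() below returns.
 */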

struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};

struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (*iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (*iova_entry_dtor)(unsigned long data);

/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
#define IOVA_FQ_TIMEOUT	10
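
/*
 * Note on deferred flushing (summary of the implementation in iova.c):
 * queue_iova() stashes freed ranges in a per-CPU iova_fq. A range is
 * only handed back to the allocator once a TLB flush covering it has
 * finished, triggered either when the queue fills up or when fq_timer
 * fires after IOVA_FQ_TIMEOUT ms; flush_cb() performs the flush, and
 * entry_dtor() is then invoked on each flushed entry's data.
 */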

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long data;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU Flush Queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
	spinlock_t lock;
};

/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached32_node; /* Save last allocated node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct iova_fq __percpu *fq;	/* Flush Queue */

	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
						   have been started */

	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
						   have been finished */

	struct timer_list fq_timer;		/* Timer to regularly empty the
						   flush-queues */
	atomic_t fq_timer_on;			/* 1 when timer is active, 0
						   when not */
};

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}

static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}

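/*
 * Worked example (illustrative numbers only): with granule == SZ_4K,
 * iova_shift() is 12 and iova_mask() is 0xfff. For a dma address of
 * 0x12345678, iova_offset() is 0x678 and iova_pfn() is 0x12345;
 * iova_align(iovad, 0x1001) rounds up to 0x2000, and iova_dma_addr()
 * maps pfn_lo == 0x12345 back to 0x12345000.
 */
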
#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit);
bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
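
/*
 * Typical call sequence (illustrative sketch only; nrpages and
 * limit_pfn stand in for hypothetical driver-side values):
 *
 *	struct iova_domain iovad;
 *	struct iova *iova;
 *
 *	init_iova_domain(&iovad, SZ_4K, 1, DMA_BIT_MASK(32) >> 12);
 *	iova = alloc_iova(&iovad, nrpages, limit_pfn, true);
 *	if (iova) {
 *		... map iova_dma_addr(&iovad, iova) in the IOMMU ...
 *		__free_iova(&iovad, iova);
 *	}
 *	put_iova_domain(&iovad);
 */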
#else
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline void queue_iova(struct iova_domain *iovad,
			      unsigned long pfn, unsigned long pages,
			      unsigned long data)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn,
				    unsigned long pfn_32bit)
{
}

static inline bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return false;
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
#endif

#endif