/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>

/* iova structure */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi; /* Highest allocated pfn */
	unsigned long	pfn_lo; /* Lowest allocated pfn */
};
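
/*
 * Illustrative sketch (not part of the API): pfn_lo/pfn_hi are inclusive
 * bounds, so an IOVA covering the four pages [0x100, 0x103] looks like
 * this, and iova_size() below recovers its length in pages:
 *
 *	struct iova ex = { .pfn_lo = 0x100, .pfn_hi = 0x103 };
 *	iova_size(&ex);		// 0x103 - 0x100 + 1 == 4 pages
 */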

struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};
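
/*
 * Bin-selection sketch (an assumption based on the rcache code in iova.c,
 * illustration only): each rcache serves one power-of-two range size, so
 * with IOVA_RANGE_CACHE_MAX_SIZE == 6 only ranges of up to 2^5 = 32 pages
 * are cached at all.
 *
 *	unsigned int log_size = order_base_2(size);
 *
 *	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
 *		return false;	// too large, fall back to the rbtree
 *	rcache = &iovad->rcaches[log_size];
 */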

struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (*iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (*iova_entry_dtor)(unsigned long data);
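
/*
 * Hedged example (hypothetical driver code; my_flush_iotlb() and
 * my_free_cookie() are made-up names): a driver using the flush queue
 * registers a callback that invalidates its IOTLB, plus an optional
 * destructor for the per-entry cookie handed to queue_iova().
 *
 *	static void my_flush_iotlb(struct iova_domain *iovad)
 *	{
 *		// issue a domain-wide IOTLB invalidation in hardware
 *	}
 *
 *	static void my_free_cookie(unsigned long data)
 *	{
 *		kfree((void *)data);
 *	}
 *
 *	ret = init_iova_flush_queue(iovad, my_flush_iotlb, my_free_cookie);
 */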

/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long data;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU Flush Queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
	spinlock_t lock;
};
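
/*
 * Ring discipline sketch (an assumption based on the queue_iova() path in
 * iova.c, illustration only): head and tail index entries[] modulo
 * IOVA_FQ_SIZE, and queuing an entry while the timer is idle arms fq_timer
 * so the queue is drained after at most IOVA_FQ_TIMEOUT ms.
 *
 *	// full when advancing tail would collide with head
 *	bool full = ((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head;
 *
 *	fq->entries[fq->tail].counter =
 *		atomic64_read(&iovad->fq_flush_start_cnt);
 *	fq->tail = (fq->tail + 1) % IOVA_FQ_SIZE;
 */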

/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached_node;	/* Save last alloced node */
	struct rb_node	*cached32_node; /* Save last 32-bit alloced node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	unsigned long	max32_alloc_size; /* Size of last failed allocation */
	struct iova_fq __percpu *fq;	/* Flush Queue */

	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
						   have been started */

	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
						   have been finished */

	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct timer_list fq_timer;		/* Timer to regularly empty the
						   flush-queues */
	atomic_t fq_timer_on;			/* 1 when timer is active, 0
						   when not */
	struct hlist_node	cpuhp_dead;
	bool best_fit;
};
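
/*
 * Sketch of the flush accounting (an assumption drawn from the iova.c
 * implementation, illustration only): a domain-wide flush bumps
 * fq_flush_start_cnt, invokes the driver's flush_cb, then bumps
 * fq_flush_finish_cnt; a queued entry whose counter is <= the finish count
 * is therefore known to be flushed and safe to reclaim.
 *
 *	atomic64_inc(&iovad->fq_flush_start_cnt);
 *	iovad->flush_cb(iovad);
 *	atomic64_inc(&iovad->fq_flush_finish_cnt);
 */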

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}

static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}
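
/*
 * Worked example (illustration only; assumes a 4 KiB granule, which must be
 * a power of two for __ffs() above to act as log2):
 *
 *	granule = 0x1000	->	iova_shift() == 12, iova_mask() == 0xfff
 *	iova = 0x12345678	->	iova_pfn() == 0x12345,
 *					iova_offset() == 0x678
 *	size = 0x1234		->	iova_align() == 0x2000
 */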

#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
			unsigned long limit_pfn,
			bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
			  unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
		      unsigned long start_pfn);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
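
/*
 * Lifecycle sketch (illustrative use of the declarations above; the
 * granule, start pfn and limit values are made up):
 *
 *	struct iova_domain iovad;
 *	struct iova *iova;
 *
 *	iova_cache_get();			// ref the iova kmem cache
 *	init_iova_domain(&iovad, SZ_4K, 1);	// 4 KiB granule, pfns >= 1
 *
 *	iova = alloc_iova(&iovad, 16, DMA_BIT_MASK(32) >> 12, true);
 *	if (iova) {
 *		// map iova_dma_addr(&iovad, iova) in the IOMMU, use it...
 *		__free_iova(&iovad, iova);
 *	}
 *
 *	put_iova_domain(&iovad);
 *	iova_cache_put();
 */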
#else
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline void queue_iova(struct iova_domain *iovad,
			      unsigned long pfn, unsigned long pages,
			      unsigned long data)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn)
{
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

#endif	/* IS_REACHABLE(CONFIG_IOMMU_IOVA) */

#endif	/* _IOVA_H_ */