// SPDX-License-Identifier: GPL-2.0
/*
 * I/O Address Space ID allocator. There is one global IOASID space, split into
 * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
 * free IOASIDs with ioasid_alloc() and ioasid_put().
 */
#include <linux/ioasid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct ioasid_data {
	ioasid_t id;
	struct ioasid_set *set;	/* the set this ID belongs to */
	void *private;		/* caller data, returned by ioasid_find() */
	struct rcu_head rcu;	/* deferred free of this descriptor */
	refcount_t refs;	/* references held via ioasid_get() */
};

/*
 * struct ioasid_allocator_data - Internal data structure to hold information
 * about an allocator. There are two types of allocators:
 *
 * - The default allocator always has its own XArray to track the IOASIDs
 *   allocated.
 * - Custom allocators may share allocation helpers with different private
 *   data. Custom allocators that share the same helper functions also share
 *   the same XArray.
 *
 * Rules:
 * 1. The default allocator is always available and never dynamically
 *    registered. This prevents race conditions with early boot code that
 *    wants to register custom allocators or allocate IOASIDs.
 * 2. Custom allocators take precedence over the default allocator.
 * 3. When all custom allocators sharing the same helper functions are
 *    unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
 *    freed. Otherwise, outstanding IOASIDs would be lost and orphaned.
 * 4. When switching between custom allocators sharing the same helper
 *    functions, outstanding IOASIDs are preserved.
 * 5. When switching between a custom allocator and the default allocator,
 *    all IOASIDs must be freed to ensure an unadulterated space for the new
 *    allocator.
 *
 * @ops:	allocator helper functions and their data
 * @list:	registered custom allocators
 * @slist:	allocators that share the same ops but have different data
 * @flags:	attributes of the allocator
 * @xa:		xarray holding the IOASID space
 * @rcu:	used for kfree_rcu() when unregistering the allocator
 */
struct ioasid_allocator_data {
	struct ioasid_allocator_ops *ops;
	struct list_head list;
	struct list_head slist;
#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
	unsigned long flags;
	struct xarray xa;
	struct rcu_head rcu;
};

/* Serializes allocator registration/unregistration and ID allocation */
static DEFINE_SPINLOCK(ioasid_allocator_lock);
static LIST_HEAD(allocators_list);

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
static void default_free(ioasid_t ioasid, void *opaque);

static struct ioasid_allocator_ops default_ops = {
	.alloc = default_alloc,
	.free = default_free,
};

static struct ioasid_allocator_data default_allocator = {
	.ops = &default_ops,
	.flags = 0,
	.xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
};

static struct ioasid_allocator_data *active_allocator = &default_allocator;

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
{
	ioasid_t id;

	if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
		pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
		return INVALID_IOASID;
	}

	return id;
}

static void default_free(ioasid_t ioasid, void *opaque)
{
	struct ioasid_data *ioasid_data;

	ioasid_data = xa_erase(&default_allocator.xa, ioasid);
	kfree_rcu(ioasid_data, rcu);
}

/* Allocate and initialize a new custom allocator with its helper functions */
static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *ia_data;

	ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
	if (!ia_data)
		return NULL;

	xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
	INIT_LIST_HEAD(&ia_data->slist);
	ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
	ia_data->ops = ops;

	/* For tracking custom allocators that share the same ops */
	list_add_tail(&ops->list, &ia_data->slist);

	return ia_data;
}

static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
{
	return (a->free == b->free) && (a->alloc == b->alloc);
}

/**
 * ioasid_register_allocator - register a custom allocator
 * @ops: the custom allocator ops to be registered
 *
 * Custom allocators take precedence over the default xarray based allocator.
 * Private data associated with IOASIDs allocated by custom allocators is
 * managed by the IOASID framework, just like data stored in the xarray by
 * the default allocator.
 *
 * There can be multiple allocators registered but only one is active. In case
 * of runtime removal of a custom allocator, the next one is activated based
 * on the registration ordering.
 *
 * Multiple allocators can share the same alloc() function; in this case the
 * IOASID space is shared.
 */
int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *ia_data;
	struct ioasid_allocator_data *pallocator;
	int ret = 0;

	spin_lock(&ioasid_allocator_lock);

	ia_data = ioasid_alloc_allocator(ops);
	if (!ia_data) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * No particular preference, we activate the first one and keep
	 * the later registered allocators in a list in case the first one gets
	 * removed due to hotplug.
	 */
	if (list_empty(&allocators_list)) {
		WARN_ON(active_allocator != &default_allocator);
		/* Use this new allocator if default is not active */
		if (xa_empty(&active_allocator->xa)) {
			rcu_assign_pointer(active_allocator, ia_data);
			list_add_tail(&ia_data->list, &allocators_list);
			goto out_unlock;
		}
		pr_warn("Default allocator active with outstanding IOASID\n");
		ret = -EAGAIN;
		goto out_free;
	}

	/* Check if the allocator is already registered */
	list_for_each_entry(pallocator, &allocators_list, list) {
		if (pallocator->ops == ops) {
			pr_err("IOASID allocator already registered\n");
			ret = -EEXIST;
			goto out_free;
		} else if (use_same_ops(pallocator->ops, ops)) {
			/*
			 * If the new allocator shares the same ops,
			 * then they will share the same IOASID space.
			 * We should put them under the same xarray.
			 */
			list_add_tail(&ops->list, &pallocator->slist);
			goto out_free;
		}
	}
	list_add_tail(&ia_data->list, &allocators_list);

	spin_unlock(&ioasid_allocator_lock);
	return 0;
out_free:
	kfree(ia_data);
out_unlock:
	spin_unlock(&ioasid_allocator_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(ioasid_register_allocator);
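
/*
 * Usage sketch (not part of the original file): a platform driver with a
 * paravirtualized allocation path might register a custom allocator as
 * below. The pv_cmd_alloc_ioasid()/pv_cmd_free_ioasid() hypercall wrappers
 * and the "pv" naming are hypothetical placeholders, not a real API.
 */
#if 0	/* illustrative only, kept out of the build */
static ioasid_t pv_alloc(ioasid_t min, ioasid_t max, void *opaque)
{
	return pv_cmd_alloc_ioasid(min, max);	/* hypothetical hypercall */
}

static void pv_free(ioasid_t ioasid, void *opaque)
{
	pv_cmd_free_ioasid(ioasid);		/* hypothetical hypercall */
}

static struct ioasid_allocator_ops pv_ops = {
	.alloc = pv_alloc,
	.free  = pv_free,
};

static int __init pv_ioasid_init(void)
{
	/* Takes precedence over the default xarray allocator on success */
	return ioasid_register_allocator(&pv_ops);
}
#endif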

/**
 * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
 * @ops: the custom allocator to be removed
 *
 * Remove an allocator from the list and activate the next allocator in
 * the order it was registered, or revert to the default allocator if all
 * custom allocators have been unregistered without outstanding IOASIDs.
 */
void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *pallocator;
	struct ioasid_allocator_ops *sops;

	spin_lock(&ioasid_allocator_lock);
	if (list_empty(&allocators_list)) {
		pr_warn("No custom IOASID allocators active!\n");
		goto exit_unlock;
	}

	list_for_each_entry(pallocator, &allocators_list, list) {
		if (!use_same_ops(pallocator->ops, ops))
			continue;

		if (list_is_singular(&pallocator->slist)) {
			/* No shared helper functions */
			list_del(&pallocator->list);
			/*
			 * All IOASIDs should have been freed before
			 * the last allocator that shares the same ops
			 * is unregistered.
			 */
			WARN_ON(!xa_empty(&pallocator->xa));
			if (list_empty(&allocators_list)) {
				pr_info("No custom IOASID allocators, switch to default.\n");
				rcu_assign_pointer(active_allocator, &default_allocator);
			} else if (pallocator == active_allocator) {
				rcu_assign_pointer(active_allocator,
						list_first_entry(&allocators_list,
								struct ioasid_allocator_data, list));
				pr_info("IOASID allocator changed\n");
			}
			kfree_rcu(pallocator, rcu);
			break;
		}
		/*
		 * Find the matching shared ops to delete,
		 * but keep outstanding IOASIDs
		 */
		list_for_each_entry(sops, &pallocator->slist, list) {
			if (sops == ops) {
				list_del(&ops->list);
				break;
			}
		}
		break;
	}

exit_unlock:
	spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);

/**
 * ioasid_set_data - Set private data for an allocated ioasid
 * @ioasid: the ID to set data for
 * @data:   the private data
 *
 * For an IOASID that is already allocated, private data can be set via this
 * API. Future lookups can be done via ioasid_find().
 */
int ioasid_set_data(ioasid_t ioasid, void *data)
{
	struct ioasid_data *ioasid_data;
	int ret = 0;

	spin_lock(&ioasid_allocator_lock);
	ioasid_data = xa_load(&active_allocator->xa, ioasid);
	if (ioasid_data)
		rcu_assign_pointer(ioasid_data->private, data);
	else
		ret = -ENOENT;
	spin_unlock(&ioasid_allocator_lock);

	/*
	 * Wait for readers to stop accessing the old private data, so the
	 * caller can free it.
	 */
	if (!ret)
		synchronize_rcu();

	return ret;
}
EXPORT_SYMBOL_GPL(ioasid_set_data);
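
/*
 * Usage sketch (not part of the original file): attaching driver state to an
 * already-allocated IOASID. "struct my_pasid_state" is a hypothetical
 * caller-side type; the IOASID itself must have been allocated first.
 */
#if 0	/* illustrative only */
static int my_attach_state(ioasid_t pasid)
{
	struct my_pasid_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = ioasid_set_data(pasid, state);	/* -ENOENT if pasid unknown */
	if (ret)
		kfree(state);
	return ret;
}
#endif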

/**
 * ioasid_alloc - Allocate an IOASID
 * @set: the IOASID set
 * @min: the minimum ID (inclusive)
 * @max: the maximum ID (inclusive)
 * @private: data private to the caller
 *
 * Allocate an ID between @min and @max. The @private pointer is stored
 * internally and can be retrieved with ioasid_find().
 *
 * Return: the allocated ID on success, or %INVALID_IOASID on failure.
 */
ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
		      void *private)
{
	struct ioasid_data *data;
	void *adata;
	ioasid_t id;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return INVALID_IOASID;

	data->set = set;
	data->private = private;
	refcount_set(&data->refs, 1);

	/*
	 * Custom allocator needs allocator data to perform platform specific
	 * operations.
	 */
	spin_lock(&ioasid_allocator_lock);
	adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
	id = active_allocator->ops->alloc(min, max, adata);
	if (id == INVALID_IOASID) {
		pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
		goto exit_free;
	}

	if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
	     xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
		/* Custom allocator needs framework to store and track allocation results */
		pr_err("Failed to alloc ioasid from %d\n", id);
		active_allocator->ops->free(id, active_allocator->ops->pdata);
		goto exit_free;
	}
	data->id = id;

	spin_unlock(&ioasid_allocator_lock);
	return id;
exit_free:
	spin_unlock(&ioasid_allocator_lock);
	kfree(data);
	return INVALID_IOASID;
}
EXPORT_SYMBOL_GPL(ioasid_alloc);
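
/*
 * Usage sketch (not part of the original file): allocating a PASID from a
 * private set. DECLARE_IOASID_SET() comes from <linux/ioasid.h>; the set
 * name, context type and the 20-bit upper bound below are hypothetical
 * (PASIDs are 20 bits wide on PCIe, but the range is up to the caller).
 */
#if 0	/* illustrative only */
static DECLARE_IOASID_SET(my_ioasid_set);

static ioasid_t my_alloc_pasid(struct my_context *ctx)
{
	/* Both bounds are inclusive; 0 is often reserved, so start at 1 */
	return ioasid_alloc(&my_ioasid_set, 1, (1U << 20) - 1, ctx);
}
#endif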

/**
 * ioasid_get - obtain a reference to the IOASID
 * @ioasid: the ID to take a reference to
 */
void ioasid_get(ioasid_t ioasid)
{
	struct ioasid_data *ioasid_data;

	spin_lock(&ioasid_allocator_lock);
	ioasid_data = xa_load(&active_allocator->xa, ioasid);
	if (ioasid_data)
		refcount_inc(&ioasid_data->refs);
	else
		WARN_ON(1);
	spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_get);
366 
367 /**
368  * ioasid_put - Release a reference to an ioasid
369  * @ioasid: the ID to remove
370  *
371  * Put a reference to the IOASID, free it when the number of references drops to
372  * zero.
373  *
374  * Return: %true if the IOASID was freed, %false otherwise.
375  */
bool ioasid_put(ioasid_t ioasid)
{
	bool free = false;
	struct ioasid_data *ioasid_data;

	spin_lock(&ioasid_allocator_lock);
	ioasid_data = xa_load(&active_allocator->xa, ioasid);
	if (!ioasid_data) {
		pr_err("Trying to free unknown IOASID %u\n", ioasid);
		goto exit_unlock;
	}

	free = refcount_dec_and_test(&ioasid_data->refs);
	if (!free)
		goto exit_unlock;

	active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
	/* Custom allocator needs additional steps to free the xa element */
	if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
		ioasid_data = xa_erase(&active_allocator->xa, ioasid);
		kfree_rcu(ioasid_data, rcu);
	}

exit_unlock:
	spin_unlock(&ioasid_allocator_lock);
	return free;
}
EXPORT_SYMBOL_GPL(ioasid_put);
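
/*
 * Usage sketch (not part of the original file): holding an IOASID across a
 * deferred operation with the get/put pair. The final ioasid_put() balances
 * the initial reference set by ioasid_alloc(). Function names are
 * hypothetical.
 */
#if 0	/* illustrative only */
static void my_defer_work(ioasid_t pasid)
{
	ioasid_get(pasid);	/* keep the ID alive while work is pending */
	/* ... queue work that uses the pasid ... */
}

static void my_work_done(ioasid_t pasid)
{
	if (ioasid_put(pasid))	/* true once the last reference is gone */
		pr_debug("pasid %u was freed\n", pasid);
}
#endif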

/**
 * ioasid_find - Find IOASID data
 * @set: the IOASID set
 * @ioasid: the IOASID to find
 * @getter: function to call on the found object
 *
 * The optional getter function allows the caller to take a reference to the
 * found object under the rcu lock. The function can also check if the object
 * is still valid: if @getter returns false, then the object is invalid and
 * NULL is returned.
 *
 * If the IOASID exists, return the private pointer passed to ioasid_alloc().
 * Private data can be NULL if not set. Return an error if the IOASID is not
 * found, or if @set is not NULL and the IOASID does not belong to the set.
 */
void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
		  bool (*getter)(void *))
{
	void *priv;
	struct ioasid_data *ioasid_data;
	struct ioasid_allocator_data *idata;

	rcu_read_lock();
	idata = rcu_dereference(active_allocator);
	ioasid_data = xa_load(&idata->xa, ioasid);
	if (!ioasid_data) {
		priv = ERR_PTR(-ENOENT);
		goto unlock;
	}
	if (set && ioasid_data->set != set) {
		/* data found but does not belong to the set */
		priv = ERR_PTR(-EACCES);
		goto unlock;
	}
	/* Now the IOASID and its set are verified, return the private data */
	priv = rcu_dereference(ioasid_data->private);
	if (getter && !getter(priv))
		priv = NULL;
unlock:
	rcu_read_unlock();

	return priv;
}
EXPORT_SYMBOL_GPL(ioasid_find);
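
/*
 * Usage sketch (not part of the original file): looking up private data with
 * a getter that pins the object before rcu_read_unlock(). "struct my_mm_ctx"
 * and its embedded kref are hypothetical, and my_ioasid_set is assumed
 * declared as in the allocation sketch above; kref_get_unless_zero() fails
 * once the object is already on its way out.
 */
#if 0	/* illustrative only */
static bool my_ctx_get(void *priv)
{
	struct my_mm_ctx *ctx = priv;

	return kref_get_unless_zero(&ctx->kref);
}

static struct my_mm_ctx *my_ctx_lookup(ioasid_t pasid)
{
	void *priv = ioasid_find(&my_ioasid_set, pasid, my_ctx_get);

	/* NULL private data, a dying object and lookup errors all map here */
	if (IS_ERR_OR_NULL(priv))
		return NULL;
	return priv;
}
#endif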

MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
MODULE_LICENSE("GPL");