// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.flags		= RXE_POOL_ATOMIC | RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.flags		= RXE_POOL_NO_ALLOC,
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.flags		= RXE_POOL_ATOMIC,
	},
};

static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

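/* set up the index range for an indexed pool and allocate the bitmap
 * used to hand out indices; fails if the range is smaller than max_elem
 */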
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}

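/* initialize a pool of objects of the given type with room for at most
 * max_elem elements
 */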
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int			err = 0;
	size_t			size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->type		= type;
	pool->max_elem		= max_elem;
	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
	pool->flags		= rxe_type_info[type].flags;
	pool->tree		= RB_ROOT;
	pool->cleanup		= rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	kref_init(&pool->ref_cnt);

	rwlock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key_offset = rxe_type_info[type].key_offset;
		pool->key_size = rxe_type_info[type].key_size;
	}

	pool->state = RXE_POOL_STATE_VALID;

out:
	return err;
}

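/* kref release for the pool itself: mark it invalid and free the
 * index bitmap
 */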
static void rxe_pool_release(struct kref *kref)
{
	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

	pool->state = RXE_POOL_STATE_INVALID;
	kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
	kref_put(&pool->ref_cnt, rxe_pool_release);
}

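/* mark the pool invalid, warn if it still contains elements and drop
 * the reference taken in rxe_pool_init
 */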
void rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	pool->state = RXE_POOL_STATE_INVALID;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	write_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);
}

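/* find a free index in the bitmap, searching forward from the last index
 * handed out and wrapping around; caller must hold the pool lock
 */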
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->max_index - pool->min_index + 1;

	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		index = find_first_zero_bit(pool->table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}

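/* insert an element into the pool's rb-tree ordered by index; a duplicate
 * index is reported and ignored; caller must hold the pool lock
 */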
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}

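/* insert an element into the pool's rb-tree ordered by its key (memcmp
 * order); a duplicate key is reported and ignored; caller must hold the
 * pool lock
 */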
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     (u8 *)new + pool->key_offset, pool->key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}

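/* copy the key into the element and insert it into the key tree */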
void rxe_add_key(void *arg, void *key)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
	insert_key(pool, elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

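/* remove the element from the key tree */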
void rxe_drop_key(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	rb_erase(&elem->node, &pool->tree);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

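/* allocate an index for the element and insert it into the index tree */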
void rxe_add_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	insert_index(pool, elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

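/* return the element's index to the bitmap and remove it from the index tree */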
void rxe_drop_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	clear_bit(elem->index - pool->min_index, pool->table);
	rb_erase(&elem->node, &pool->tree);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

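/* allocate a new element from the pool; takes a reference on the pool and
 * the ib_device and enforces the max_elem limit
 */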
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_entry *elem;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	read_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != RXE_POOL_STATE_VALID) {
		read_unlock_irqrestore(&pool->pool_lock, flags);
		return NULL;
	}
	kref_get(&pool->ref_cnt);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	if (!ib_device_try_get(&pool->rxe->ib_dev))
		goto out_put_pool;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem = kzalloc(rxe_type_info[pool->type].size,
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);
	if (!elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;

out_cnt:
	atomic_dec(&pool->num_elem);
	ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
	rxe_pool_put(pool);
	return NULL;
}

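/* add a caller-allocated element to the pool (the RXE_POOL_NO_ALLOC case);
 * same bookkeeping as rxe_alloc but without allocating memory
 */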
int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	read_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != RXE_POOL_STATE_VALID) {
		read_unlock_irqrestore(&pool->pool_lock, flags);
		return -EINVAL;
	}
	kref_get(&pool->ref_cnt);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	if (!ib_device_try_get(&pool->rxe->ib_dev))
		goto out_put_pool;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return 0;

out_cnt:
	atomic_dec(&pool->num_elem);
	ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
	rxe_pool_put(pool);
	return -EINVAL;
}

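/* kref release for a pool element: run the type-specific cleanup, free the
 * memory if the pool allocated it and drop the pool and device references
 */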
void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;

	if (pool->cleanup)
		pool->cleanup(elem);

	if (!(pool->flags & RXE_POOL_NO_ALLOC))
		kfree(elem);
	atomic_dec(&pool->num_elem);
	ib_device_put(&pool->rxe->ib_dev);
	rxe_pool_put(pool);
}

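/* look up an element by index; returns it with a reference held or NULL
 * if not found or the pool is no longer valid
 */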
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != RXE_POOL_STATE_VALID)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else {
			kref_get(&elem->ref_cnt);
			break;
		}
	}

out:
	read_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}

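/* look up an element by key; returns it with a reference held or NULL
 * if not found or the pool is no longer valid
 */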
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	int cmp;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != RXE_POOL_STATE_VALID)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     key, pool->key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	read_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}