// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
		.elem_offset	= offsetof(struct rxe_ucontext, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
		.elem_offset	= offsetof(struct rxe_pd, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.elem_offset	= offsetof(struct rxe_ah, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.elem_offset	= offsetof(struct rxe_srq, pelem),
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.elem_offset	= offsetof(struct rxe_qp, pelem),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.elem_offset	= offsetof(struct rxe_cq, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mr),
		.elem_offset	= offsetof(struct rxe_mr, pelem),
		.cleanup	= rxe_mr_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mw),
		.elem_offset	= offsetof(struct rxe_mw, pelem),
		.cleanup	= rxe_mw_cleanup,
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.elem_offset	= offsetof(struct rxe_mc_grp, pelem),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.elem_offset	= offsetof(struct rxe_mc_elem, pelem),
	},
};

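/* return the printable name of a pool's element type */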
static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

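/* set up the free-index bitmap for an indexed pool; fails if the
 * [min, max] index range cannot cover max_elem entries
 */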
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->index.max_index = max;
	pool->index.min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->index.table = kmalloc(size, GFP_KERNEL);
	if (!pool->index.table) {
		err = -ENOMEM;
		goto out;
	}

	pool->index.table_size = size;
	bitmap_zero(pool->index.table, max - min + 1);

out:
	return err;
}

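/* initialize a pool holding at most max_elem objects of the given
 * type, wiring up the per-type size, flags, cleanup hook, index range
 * and/or key bounds from rxe_type_info[].
 *
 * A hypothetical caller (sketch only; names such as rxe->qp_pool and
 * RXE_MAX_QP are assumed from elsewhere in the driver, not defined in
 * this file) would look like:
 *
 *	err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP, RXE_MAX_QP);
 *	if (err)
 *		goto err_out;
 */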
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int err = 0;
	size_t size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe = rxe;
	pool->type = type;
	pool->max_elem = max_elem;
	pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
	pool->flags = rxe_type_info[type].flags;
	pool->index.tree = RB_ROOT;
	pool->key.tree = RB_ROOT;
	pool->cleanup = rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	rwlock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key.key_offset = rxe_type_info[type].key_offset;
		pool->key.key_size = rxe_type_info[type].key_size;
	}

out:
	return err;
}

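/* warn about (but do not reclaim) elements still outstanding when the
 * pool is torn down, then free the index bitmap
 */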
void rxe_pool_cleanup(struct rxe_pool *pool)
{
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));

	kfree(pool->index.table);
}

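/* find a free index in the bitmap, resuming the search just past the
 * last index handed out so allocations rotate through the range;
 * caller must hold the pool lock
 */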
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->index.max_index - pool->index.min_index + 1;

	index = find_next_zero_bit(pool->index.table, range, pool->index.last);
	if (index >= range)
		index = find_first_zero_bit(pool->index.table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->index.table);
	pool->index.last = index;
	return index + pool->index.min_index;
}

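/* insert an entry into the pool's red-black tree ordered by index;
 * a duplicate index is rejected since it would indicate an index
 * allocation bug
 */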
static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->index.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, index_node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			return -EINVAL;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->index_node, parent, link);
	rb_insert_color(&new->index_node, &pool->index.tree);

	return 0;
}

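/* insert an entry into the pool's red-black tree ordered by memcmp()
 * of the key bytes stored inside the object; duplicate keys are
 * rejected
 */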
static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->key.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     (u8 *)new + pool->key.key_offset, pool->key.key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			return -EINVAL;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->key_node, parent, link);
	rb_insert_color(&new->key_node, &pool->key.tree);

	return 0;
}

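/* copy the key into the object and link it into the key tree; the
 * _locked variant expects the caller to hold the pool lock, while
 * __rxe_add_key() below takes it itself
 */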
int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
	err = rxe_insert_key(pool, elem);

	return err;
}

int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;
	int err;

	write_lock_irqsave(&pool->pool_lock, flags);
	err = __rxe_add_key_locked(elem, key);
	write_unlock_irqrestore(&pool->pool_lock, flags);

	return err;
}

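/* unlink an entry from the key tree; again the _locked variant relies
 * on the caller holding the pool lock
 */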
void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	rb_erase(&elem->key_node, &pool->key.tree);
}

void __rxe_drop_key(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_key_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

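/* allocate a free index for an entry and insert it into the index
 * tree; the unlocked wrapper below takes the pool lock first
 */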
int __rxe_add_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	elem->index = alloc_index(pool);
	err = rxe_insert_index(pool, elem);

	return err;
}

int __rxe_add_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;
	int err;

	write_lock_irqsave(&pool->pool_lock, flags);
	err = __rxe_add_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);

	return err;
}

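/* return an entry's index to the bitmap and unlink it from the index
 * tree
 */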
void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	clear_bit(elem->index - pool->index.min_index, pool->index.table);
	rb_erase(&elem->index_node, &pool->index.tree);
}

void __rxe_drop_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

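/* variant of rxe_alloc() for atomic context; allocates with
 * GFP_ATOMIC so it may be called with locks held, and returns the
 * containing object rather than the embedded pool entry
 */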
void *rxe_alloc_locked(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_ATOMIC);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

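/* allocate, zero and take the initial reference on a new pool object
 * in process context (GFP_KERNEL); returns NULL if the pool is full
 * or memory is exhausted
 */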
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_KERNEL);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

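/* attach an object that was allocated elsewhere (RXE_POOL_NO_ALLOC
 * pools) to the pool and take the initial reference on it
 */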
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return 0;

out_cnt:
	atomic_dec(&pool->num_elem);
	return -EINVAL;
}

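/* kref release callback: run the type's cleanup hook, free the
 * containing object unless the pool does not own the allocation
 * (RXE_POOL_NO_ALLOC), and drop the element count
 */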
void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	u8 *obj;

	if (pool->cleanup)
		pool->cleanup(elem);

	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
		obj = (u8 *)elem - info->elem_offset;
		kfree(obj);
	}

	atomic_dec(&pool->num_elem);
}

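/* look up an object by index in the rb-tree and take a reference on
 * it; returns NULL if no element with that index exists.
 * rxe_pool_get_index() below is the wrapper that takes the pool lock.
 *
 * A typical consumer pattern (sketch only; the rxe_drop_ref() helper
 * is assumed from rxe.h, not defined in this file) is:
 *
 *	qp = rxe_pool_get_index(&rxe->qp_pool, qpn);
 *	if (!qp)
 *		return;
 *	... use qp ...
 *	rxe_drop_ref(qp);
 */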
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;

	node = pool->index.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, index_node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_index_locked(pool, index);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}

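/* look up an object by its key bytes (memcmp() order) and take a
 * reference on it; rxe_pool_get_key() below wraps this with the pool
 * lock
 */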
void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;
	int cmp;

	node = pool->key.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     key, pool->key.key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_key_locked(pool, key);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}