// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
#define DR_ICM_SYNC_THRESHOLD (64 * 1024 * 1024)

struct mlx5dr_icm_pool;

struct mlx5dr_icm_bucket {
	struct mlx5dr_icm_pool *pool;

	/* Chunks that are not visible to HW, neither directly nor via cache */
	struct list_head free_list;
	unsigned int free_list_count;

	/* Used chunks, HW may be accessing this memory */
	struct list_head used_list;
	unsigned int used_list_count;

	/* HW may be accessing this memory but at some future,
	 * undetermined time, it might cease to do so. Before deciding to call
	 * sync_ste, this list is moved to the sync_list.
	 */
	struct list_head hot_list;
	unsigned int hot_list_count;

	/* Pending sync list; entries from the hot list are moved to this list,
	 * sync_ste is executed and then the sync_list is concatenated onto the free list.
	 */
	struct list_head sync_list;
	unsigned int sync_list_count;

	u32 total_chunks;
	u32 num_of_entries;
	u32 entry_size;
	/* protect the ICM bucket */
	struct mutex mutex;
};

struct mlx5dr_icm_pool {
	struct mlx5dr_icm_bucket *buckets;
	enum mlx5dr_icm_type icm_type;
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	enum mlx5dr_icm_chunk_size num_of_buckets;
	struct list_head icm_mr_list;
	/* protect the ICM MR list */
	struct mutex mr_mutex;
	struct mlx5dr_domain *dmn;
};

struct mlx5dr_icm_dm {
	u32 obj_id;
	enum mlx5_sw_icm_type type;
	phys_addr_t addr;
	size_t length;
};

struct mlx5dr_icm_mr {
	struct mlx5dr_icm_pool *pool;
	struct mlx5_core_mkey mkey;
	struct mlx5dr_icm_dm dm;
	size_t used_length;
	size_t length;
	u64 icm_start_addr;
	struct list_head mr_list;
};

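/* Illustrative sketch (not part of the upstream driver): how a bucket's
 * per-chunk footprint follows from the structures above. The helper name is
 * hypothetical; mlx5dr_icm_pool_chunk_size_to_entries() and the DR_*_SIZE
 * defines come from dr_types.h and are used later in this file.
 */
static inline size_t
dr_icm_example_chunk_bytes(enum mlx5dr_icm_chunk_size chunk_size,
			   enum mlx5dr_icm_type icm_type)
{
	size_t entry_size = (icm_type == DR_ICM_TYPE_STE) ?
			    DR_STE_SIZE : DR_MODIFY_ACTION_SIZE;

	/* chunk byte size = number of entries * entry size, mirroring
	 * mr_req_size in dr_icm_chunks_create() below
	 */
	return mlx5dr_icm_pool_chunk_size_to_entries(chunk_size) * entry_size;
}
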
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
				 u32 pd, u64 length, u64 start_addr, int mode,
				 struct mlx5_core_mkey *mkey)
{
	u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
		MLX5_SET(mkc, mkc, rw, 1);
		MLX5_SET(mkc, mkc, rr, 1);
	}

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, pd);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);

	return mlx5_core_create_mkey(mdev, mkey, in, inlen);
}

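/* Illustrative sketch (hypothetical helper): the mkey context stores the
 * 5-bit access mode in two fields, which is why dr_icm_create_dm_mkey()
 * programs both access_mode_1_0 and access_mode_4_2 above.
 */
static inline void dr_icm_example_split_access_mode(int mode, u8 *lo, u8 *hi)
{
	*lo = mode & 0x3;	 /* bits 1:0 -> access_mode_1_0 */
	*hi = (mode >> 2) & 0x7; /* bits 4:2 -> access_mode_4_2 */
}
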
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5_core_dev *mdev = pool->dmn->mdev;
	enum mlx5_sw_icm_type dm_type;
	struct mlx5dr_icm_mr *icm_mr;
	size_t log_align_base;
	int err;

	icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
	if (!icm_mr)
		return NULL;

	icm_mr->pool = pool;
	INIT_LIST_HEAD(&icm_mr->mr_list);

	icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							       pool->icm_type);

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		dm_type = MLX5_SW_ICM_TYPE_STEERING;
		log_align_base = ilog2(icm_mr->dm.length);
	} else {
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
		/* Align base is 64B */
		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
	}
	icm_mr->dm.type = dm_type;

	err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
				   log_align_base, 0, &icm_mr->dm.addr,
				   &icm_mr->dm.obj_id);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
		goto free_icm_mr;
	}

	/* Register device memory */
	err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
				    icm_mr->dm.length,
				    icm_mr->dm.addr,
				    MLX5_MKC_ACCESS_MODE_SW_ICM,
				    &icm_mr->mkey);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
		goto free_dm;
	}

	icm_mr->icm_start_addr = icm_mr->dm.addr;

	if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
		mlx5dr_err(pool->dmn, "Failed to get aligned ICM mem (asked: %zu)\n",
			   log_align_base);
		goto free_mkey;
	}

	list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);

	return icm_mr;

free_mkey:
	mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
free_dm:
	mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
			       icm_mr->dm.addr, icm_mr->dm.obj_id);
free_icm_mr:
	kvfree(icm_mr);
	return NULL;
}

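/* Illustrative sketch (hypothetical helper): the check in
 * dr_icm_pool_mr_create() builds a mask of the low log_align_base bits;
 * a suitably aligned ICM start address must have all of them clear.
 * STE memory is requested with row-size alignment, modify-header memory
 * with DR_ICM_MODIFY_HDR_ALIGN_BASE (64B) alignment.
 */
static inline bool dr_icm_example_addr_is_aligned(u64 addr, size_t log_align_base)
{
	return !(addr & (BIT(log_align_base) - 1));
}
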
static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
{
	struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev;
	struct mlx5dr_icm_dm *dm = &icm_mr->dm;

	list_del(&icm_mr->mr_list);
	mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
	mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
			       dm->addr, dm->obj_id);
	kvfree(icm_mr);
}

static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
{
	struct mlx5dr_icm_bucket *bucket = chunk->bucket;

	chunk->ste_arr = kvzalloc(bucket->num_of_entries *
				  sizeof(chunk->ste_arr[0]), GFP_KERNEL);
	if (!chunk->ste_arr)
		return -ENOMEM;

	chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries *
				     DR_STE_SIZE_REDUCED, GFP_KERNEL);
	if (!chunk->hw_ste_arr)
		goto out_free_ste_arr;

	chunk->miss_list = kvmalloc(bucket->num_of_entries *
				    sizeof(chunk->miss_list[0]), GFP_KERNEL);
	if (!chunk->miss_list)
		goto out_free_hw_ste_arr;

	return 0;

out_free_hw_ste_arr:
	kvfree(chunk->hw_ste_arr);
out_free_ste_arr:
	kvfree(chunk->ste_arr);
	return -ENOMEM;
}

static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
{
	size_t mr_free_size, mr_req_size, mr_row_size;
	struct mlx5dr_icm_pool *pool = bucket->pool;
	struct mlx5dr_icm_mr *icm_mr = NULL;
	struct mlx5dr_icm_chunk *chunk;
	int i, err = 0;

	mr_req_size = bucket->num_of_entries * bucket->entry_size;
	mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							 pool->icm_type);
	mutex_lock(&pool->mr_mutex);
	if (!list_empty(&pool->icm_mr_list)) {
		icm_mr = list_last_entry(&pool->icm_mr_list,
					 struct mlx5dr_icm_mr, mr_list);

		if (icm_mr)
			mr_free_size = icm_mr->dm.length - icm_mr->used_length;
	}

	if (!icm_mr || mr_free_size < mr_row_size) {
		icm_mr = dr_icm_pool_mr_create(pool);
		if (!icm_mr) {
			err = -ENOMEM;
			goto out_err;
		}
	}

	/* Create memory aligned chunks */
	for (i = 0; i < mr_row_size / mr_req_size; i++) {
		chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
		if (!chunk) {
			err = -ENOMEM;
			goto out_err;
		}

		chunk->bucket = bucket;
		chunk->rkey = icm_mr->mkey.key;
		/* mr start addr is zero based */
		chunk->mr_addr = icm_mr->used_length;
		chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length;
		icm_mr->used_length += mr_req_size;
		chunk->num_of_entries = bucket->num_of_entries;
		chunk->byte_size = chunk->num_of_entries * bucket->entry_size;

		if (pool->icm_type == DR_ICM_TYPE_STE) {
			err = dr_icm_chunk_ste_init(chunk);
			if (err)
				goto out_free_chunk;
		}

		INIT_LIST_HEAD(&chunk->chunk_list);
		list_add(&chunk->chunk_list, &bucket->free_list);
		bucket->free_list_count++;
		bucket->total_chunks++;
	}
	mutex_unlock(&pool->mr_mutex);
	return 0;

out_free_chunk:
	kvfree(chunk);
out_err:
	mutex_unlock(&pool->mr_mutex);
	return err;
}

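/* Illustrative sketch (hypothetical helper): dr_icm_chunks_create() carves one
 * ICM MR row into equally sized chunks, so a single call populates the free
 * list with mr_row_size / mr_req_size chunks of this bucket's size.
 */
static inline size_t
dr_icm_example_chunks_per_row(struct mlx5dr_icm_pool *pool,
			      struct mlx5dr_icm_bucket *bucket)
{
	size_t row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							     pool->icm_type);
	size_t chunk_size = bucket->num_of_entries * bucket->entry_size;

	return row_size / chunk_size;
}
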
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
	kvfree(chunk->miss_list);
	kvfree(chunk->hw_ste_arr);
	kvfree(chunk->ste_arr);
}

static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
{
	struct mlx5dr_icm_bucket *bucket = chunk->bucket;

	list_del(&chunk->chunk_list);
	bucket->total_chunks--;

	if (bucket->pool->icm_type == DR_ICM_TYPE_STE)
		dr_icm_chunk_ste_cleanup(chunk);

	kvfree(chunk);
}

static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool,
			       struct mlx5dr_icm_bucket *bucket,
			       enum mlx5dr_icm_chunk_size chunk_size)
{
	if (pool->icm_type == DR_ICM_TYPE_STE)
		bucket->entry_size = DR_STE_SIZE;
	else
		bucket->entry_size = DR_MODIFY_ACTION_SIZE;

	bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
	bucket->pool = pool;
	mutex_init(&bucket->mutex);
	INIT_LIST_HEAD(&bucket->free_list);
	INIT_LIST_HEAD(&bucket->used_list);
	INIT_LIST_HEAD(&bucket->hot_list);
	INIT_LIST_HEAD(&bucket->sync_list);
}

static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket)
{
	struct mlx5dr_icm_chunk *chunk, *next;

	mutex_destroy(&bucket->mutex);
	list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
	list_splice_tail_init(&bucket->hot_list, &bucket->free_list);

	list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list)
		dr_icm_chunk_destroy(chunk);

	WARN_ON(bucket->total_chunks != 0);

	/* Cleanup of unreturned chunks */
	list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list)
		dr_icm_chunk_destroy(chunk);
}

static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool)
{
	u64 hot_size = 0;
	int chunk_order;

	for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++)
		hot_size += pool->buckets[chunk_order].hot_list_count *
			    mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type);

	return hot_size;
}

static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool,
				     struct mlx5dr_icm_bucket *bucket)
{
	u64 bytes_for_sync;

	bytes_for_sync = dr_icm_hot_mem_size(pool);
	if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count)
		return false;

	return true;
}

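/* Worked example (illustrative, assuming DR_STE_SIZE is 64B as defined in
 * dr_types.h): with DR_ICM_SYNC_THRESHOLD at 64MB, the pool will not issue a
 * steering sync until roughly 64MB / 64B = 1M freed STEs worth of ICM sit on
 * hot lists, and even then only if the requesting bucket itself has hot
 * chunks to reclaim; otherwise dr_icm_chunks_create() is preferred.
 */
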
static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket)
{
	list_splice_tail_init(&bucket->hot_list, &bucket->sync_list);
	bucket->sync_list_count += bucket->hot_list_count;
	bucket->hot_list_count = 0;
}

static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket)
{
	list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
	bucket->free_list_count += bucket->sync_list_count;
	bucket->sync_list_count = 0;
}

static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket)
{
	list_splice_tail_init(&bucket->sync_list, &bucket->hot_list);
	bucket->hot_list_count += bucket->sync_list_count;
	bucket->sync_list_count = 0;
}

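/* Illustrative sketch (hypothetical helper): how the three helpers above are
 * combined, following the same pattern mlx5dr_icm_alloc_chunk() uses below:
 * hot entries are staged on the sync list, the device is asked to sync, and
 * the entries are then either promoted to the free list or, on failure,
 * returned to the hot list.
 */
static inline int dr_icm_example_chill_one_bucket(struct mlx5dr_icm_bucket *bucket)
{
	int err;

	dr_icm_chill_bucket_start(bucket);	/* hot_list -> sync_list */

	err = mlx5dr_cmd_sync_steering(bucket->pool->dmn->mdev);
	if (err) {
		dr_icm_chill_bucket_abort(bucket); /* sync_list -> hot_list */
		return err;
	}

	dr_icm_chill_bucket_end(bucket);	/* sync_list -> free_list */
	return 0;
}
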
static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool,
				       struct mlx5dr_icm_bucket *cb,
				       bool buckets[DR_CHUNK_SIZE_MAX])
{
	struct mlx5dr_icm_bucket *bucket;
	int i;

	for (i = 0; i < pool->num_of_buckets; i++) {
		bucket = &pool->buckets[i];
		if (bucket == cb) {
			dr_icm_chill_bucket_start(bucket);
			continue;
		}

		/* The mutex is released at the end of the process, after
		 * sync_ste has been executed, in dr_icm_chill_buckets_end().
		 */
		if (mutex_trylock(&bucket->mutex)) {
			dr_icm_chill_bucket_start(bucket);
			buckets[i] = true;
		}
	}
}

static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool,
				     struct mlx5dr_icm_bucket *cb,
				     bool buckets[DR_CHUNK_SIZE_MAX])
{
	struct mlx5dr_icm_bucket *bucket;
	int i;

	for (i = 0; i < pool->num_of_buckets; i++) {
		bucket = &pool->buckets[i];
		if (bucket == cb) {
			dr_icm_chill_bucket_end(bucket);
			continue;
		}

		if (!buckets[i])
			continue;

		dr_icm_chill_bucket_end(bucket);
		mutex_unlock(&bucket->mutex);
	}
}

static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool,
				       struct mlx5dr_icm_bucket *cb,
				       bool buckets[DR_CHUNK_SIZE_MAX])
{
	struct mlx5dr_icm_bucket *bucket;
	int i;

	for (i = 0; i < pool->num_of_buckets; i++) {
		bucket = &pool->buckets[i];
		if (bucket == cb) {
			dr_icm_chill_bucket_abort(bucket);
			continue;
		}

		if (!buckets[i])
			continue;

		dr_icm_chill_bucket_abort(bucket);
		mutex_unlock(&bucket->mutex);
	}
}

/* Allocate an ICM chunk; each chunk holds a piece of ICM memory and also
 * host memory used for HW STE management and optimizations.
 */
struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size)
{
	struct mlx5dr_icm_chunk *chunk = NULL; /* NULL silences a 'may be used uninitialized' warning */
	bool buckets[DR_CHUNK_SIZE_MAX] = {};
	struct mlx5dr_icm_bucket *bucket;
	int err;

	if (chunk_size > pool->max_log_chunk_sz)
		return NULL;

	bucket = &pool->buckets[chunk_size];

	mutex_lock(&bucket->mutex);

	/* Take a chunk from the pool if available, otherwise allocate new chunks */
	if (list_empty(&bucket->free_list)) {
		if (dr_icm_reuse_hot_entries(pool, bucket)) {
			dr_icm_chill_buckets_start(pool, bucket, buckets);
			err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
			if (err) {
				dr_icm_chill_buckets_abort(pool, bucket, buckets);
				mlx5dr_err(pool->dmn, "Sync_steering failed\n");
				chunk = NULL;
				goto out;
			}
			dr_icm_chill_buckets_end(pool, bucket, buckets);
		} else {
			dr_icm_chunks_create(bucket);
		}
	}

	if (!list_empty(&bucket->free_list)) {
		chunk = list_last_entry(&bucket->free_list,
					struct mlx5dr_icm_chunk,
					chunk_list);
		if (chunk) {
			list_del_init(&chunk->chunk_list);
			list_add_tail(&chunk->chunk_list, &bucket->used_list);
			bucket->free_list_count--;
			bucket->used_list_count++;
		}
	}
out:
	mutex_unlock(&bucket->mutex);
	return chunk;
}

void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
	struct mlx5dr_icm_bucket *bucket = chunk->bucket;

	if (bucket->pool->icm_type == DR_ICM_TYPE_STE) {
		memset(chunk->ste_arr, 0,
		       bucket->num_of_entries * sizeof(chunk->ste_arr[0]));
		memset(chunk->hw_ste_arr, 0,
		       bucket->num_of_entries * DR_STE_SIZE_REDUCED);
	}

	mutex_lock(&bucket->mutex);
	list_del_init(&chunk->chunk_list);
	list_add_tail(&chunk->chunk_list, &bucket->hot_list);
	bucket->hot_list_count++;
	bucket->used_list_count--;
	mutex_unlock(&bucket->mutex);
}

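/* Illustrative usage sketch (hypothetical caller): allocate a chunk of
 * 2^chunk_size entries, program it, and return it to the pool. Freed chunks
 * land on the bucket hot list and only become allocatable again after a
 * steering sync promotes them back to the free list.
 */
static inline int dr_icm_example_use_chunk(struct mlx5dr_icm_pool *pool,
					   enum mlx5dr_icm_chunk_size chunk_size)
{
	struct mlx5dr_icm_chunk *chunk;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		return -ENOMEM;

	/* ... write STEs / modify-header actions into the chunk ... */

	mlx5dr_icm_free_chunk(chunk);
	return 0;
}
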
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type)
{
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	struct mlx5dr_icm_pool *pool;
	int i;

	if (icm_type == DR_ICM_TYPE_STE)
		max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
	else
		max_log_chunk_sz = dmn->info.max_log_action_icm_sz;

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->buckets = kcalloc(max_log_chunk_sz + 1,
				sizeof(pool->buckets[0]),
				GFP_KERNEL);
	if (!pool->buckets)
		goto free_pool;

	pool->dmn = dmn;
	pool->icm_type = icm_type;
	pool->max_log_chunk_sz = max_log_chunk_sz;
	pool->num_of_buckets = max_log_chunk_sz + 1;
	INIT_LIST_HEAD(&pool->icm_mr_list);

	for (i = 0; i < pool->num_of_buckets; i++)
		dr_icm_bucket_init(pool, &pool->buckets[i], i);

	mutex_init(&pool->mr_mutex);

	return pool;

free_pool:
	kvfree(pool);
	return NULL;
}

void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_mr *icm_mr, *next;
	int i;

	mutex_destroy(&pool->mr_mutex);

	list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list)
		dr_icm_pool_mr_destroy(icm_mr);

	for (i = 0; i < pool->num_of_buckets; i++)
		dr_icm_bucket_cleanup(&pool->buckets[i]);

	kfree(pool->buckets);
	kvfree(pool);
}
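
/* Illustrative lifecycle sketch (hypothetical caller, e.g. domain init/uninit
 * code): one pool is created per ICM type, chunks are allocated and freed
 * while the domain is in use, and the pool is destroyed last. DR_CHUNK_SIZE_1
 * is assumed to be the smallest chunk order defined in dr_types.h.
 */
static inline int dr_icm_example_pool_lifecycle(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_icm_pool *pool;
	struct mlx5dr_icm_chunk *chunk;

	pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!pool)
		return -ENOMEM;

	chunk = mlx5dr_icm_alloc_chunk(pool, DR_CHUNK_SIZE_1);
	if (chunk)
		mlx5dr_icm_free_chunk(chunk);

	mlx5dr_icm_pool_destroy(pool);
	return 0;
}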