// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64

struct mlx5dr_icm_pool {
	enum mlx5dr_icm_type icm_type;
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	struct mlx5dr_domain *dmn;
	/* memory management */
	struct mutex mutex; /* protect the ICM pool and ICM buddy */
	struct list_head buddy_mem_list;
	u64 hot_memory_size;
};

struct mlx5dr_icm_dm {
	u32 obj_id;
	enum mlx5_sw_icm_type type;
	phys_addr_t addr;
	size_t length;
};

struct mlx5dr_icm_mr {
	u32 mkey;
	struct mlx5dr_icm_dm dm;
	struct mlx5dr_domain *dmn;
	size_t length;
	u64 icm_start_addr;
};

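/* Create an mkey describing the SW ICM device memory range
 * [start_addr, start_addr + length); it is later exposed as the chunk rkey
 * for writing STEs/actions into ICM.
 */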
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
				 u32 pd, u64 length, u64 start_addr, int mode,
				 u32 *mkey)
{
	u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
		MLX5_SET(mkc, mkc, rw, 1);
		MLX5_SET(mkc, mkc, rr, 1);
	}

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, pd);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);

	return mlx5_core_create_mkey(mdev, mkey, in, inlen);
}

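/* Per-chunk getters: translate a chunk (buddy + segment) into its MR offset,
 * rkey, absolute ICM address, byte size and number of entries.
 */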
u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
{
	u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

	return (u64)offset * chunk->seg;
}

u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
{
	return chunk->buddy_mem->icm_mr->mkey;
}

u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
{
	u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

	return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
}

u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
{
	return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
			chunk->buddy_mem->pool->icm_type);
}

u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
{
	return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
}

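/* Allocate a max_log_chunk_sz block of SW ICM device memory and register
 * an mkey over it; each such MR backs a single buddy allocator.
 */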
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5_core_dev *mdev = pool->dmn->mdev;
	enum mlx5_sw_icm_type dm_type;
	struct mlx5dr_icm_mr *icm_mr;
	size_t log_align_base;
	int err;

	icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
	if (!icm_mr)
		return NULL;

	icm_mr->dmn = pool->dmn;

	icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							       pool->icm_type);

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		dm_type = MLX5_SW_ICM_TYPE_STEERING;
		log_align_base = ilog2(icm_mr->dm.length);
	} else {
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
		/* Align base is 64B */
		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
	}
	icm_mr->dm.type = dm_type;

	err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
				   log_align_base, 0, &icm_mr->dm.addr,
				   &icm_mr->dm.obj_id);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
		goto free_icm_mr;
	}

	/* Register device memory */
	err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
				    icm_mr->dm.length,
				    icm_mr->dm.addr,
				    MLX5_MKC_ACCESS_MODE_SW_ICM,
				    &icm_mr->mkey);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
		goto free_dm;
	}

	icm_mr->icm_start_addr = icm_mr->dm.addr;

	if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
		mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",
			   log_align_base);
		goto free_mkey;
	}

	return icm_mr;

free_mkey:
	mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
free_dm:
	mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
			       icm_mr->dm.addr, icm_mr->dm.obj_id);
free_icm_mr:
	kvfree(icm_mr);
	return NULL;
}

static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
{
	struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
	struct mlx5dr_icm_dm *dm = &icm_mr->dm;

	mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
	mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
			       dm->addr, dm->obj_id);
	kvfree(icm_mr);
}

static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
{
	/* We support only one type of STE size, both for ConnectX-5 and later
	 * devices. Once the support for match STE which has a larger tag is
	 * added (32B instead of 16B), the STE size for devices later than
	 * ConnectX-5 needs to account for that.
	 */
	return DR_STE_SIZE_REDUCED;
}

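/* Point the chunk at its slice of the buddy's preallocated STE caches
 * (SW STEs, miss lists and HW STE copies), based on the chunk's offset.
 */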
static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
{
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
	int index = offset / DR_STE_SIZE;

	chunk->ste_arr = &buddy->ste_arr[index];
	chunk->miss_list = &buddy->miss_list[index];
	chunk->hw_ste_arr = buddy->hw_ste_arr +
			    index * dr_icm_buddy_get_ste_size(buddy);
}

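/* Clear the chunk's slice of the STE caches so it can be safely reused */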
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
	int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;

	memset(chunk->hw_ste_arr, 0,
	       num_of_entries * dr_icm_buddy_get_ste_size(buddy));
	memset(chunk->ste_arr, 0,
	       num_of_entries * sizeof(chunk->ste_arr[0]));
}

static enum mlx5dr_icm_type
get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk)
{
	return chunk->buddy_mem->pool->icm_type;
}

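/* Free the chunk metadata; the underlying buddy segment is returned to the
 * allocator separately via mlx5dr_buddy_free_mem().
 */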
static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
				 struct mlx5dr_icm_buddy_mem *buddy)
{
	enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);

	buddy->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
	list_del(&chunk->chunk_list);

	if (icm_type == DR_ICM_TYPE_STE)
		dr_icm_chunk_ste_cleanup(chunk);

	kvfree(chunk);
}

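/* For STE pools, preallocate per-buddy arrays of SW STEs, HW STE copies
 * and miss lists, sized to cover the entire buddy.
 */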
static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
{
	int num_of_entries =
		mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);

	buddy->ste_arr = kvcalloc(num_of_entries,
				  sizeof(struct mlx5dr_ste), GFP_KERNEL);
	if (!buddy->ste_arr)
		return -ENOMEM;

	/* Preallocate full STE size on non-ConnectX-5 devices since
	 * we need to support both full and reduced with the same cache.
	 */
	buddy->hw_ste_arr = kvcalloc(num_of_entries,
				     dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
	if (!buddy->hw_ste_arr)
		goto free_ste_arr;

	buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
	if (!buddy->miss_list)
		goto free_hw_ste_arr;

	return 0;

free_hw_ste_arr:
	kvfree(buddy->hw_ste_arr);
free_ste_arr:
	kvfree(buddy->ste_arr);
	return -ENOMEM;
}

static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
{
	kvfree(buddy->ste_arr);
	kvfree(buddy->hw_ste_arr);
	kvfree(buddy->miss_list);
}

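/* Create a new buddy allocator: allocate and register its backing ICM MR,
 * initialize the buddy metadata and, for STE pools, the STE cache.
 */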
static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy;
	struct mlx5dr_icm_mr *icm_mr;

	icm_mr = dr_icm_pool_mr_create(pool);
	if (!icm_mr)
		return -ENOMEM;

	buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL);
	if (!buddy)
		goto free_mr;

	if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz))
		goto err_free_buddy;

	buddy->icm_mr = icm_mr;
	buddy->pool = pool;

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		/* Reduce allocations by preallocating and reusing the STE structures */
		if (dr_icm_buddy_init_ste_cache(buddy))
			goto err_cleanup_buddy;
	}

	/* add it to the -start- of the list in order to search in it first */
	list_add(&buddy->list_node, &pool->buddy_mem_list);

	return 0;

err_cleanup_buddy:
	mlx5dr_buddy_cleanup(buddy);
err_free_buddy:
	kvfree(buddy);
free_mr:
	dr_icm_pool_mr_destroy(icm_mr);
	return -ENOMEM;
}

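/* Destroy all remaining chunks (hot and used), release the backing MR
 * and free the buddy.
 */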
static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
{
	struct mlx5dr_icm_chunk *chunk, *next;

	list_for_each_entry_safe(chunk, next, &buddy->hot_list, chunk_list)
		dr_icm_chunk_destroy(chunk, buddy);

	list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list)
		dr_icm_chunk_destroy(chunk, buddy);

	dr_icm_pool_mr_destroy(buddy->icm_mr);

	mlx5dr_buddy_cleanup(buddy);

	if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
		dr_icm_buddy_cleanup_ste_cache(buddy);

	kvfree(buddy);
}

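/* Carve a chunk out of an already-reserved buddy segment and add it to
 * the buddy's used_list.
 */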
static struct mlx5dr_icm_chunk *
dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
		    enum mlx5dr_icm_chunk_size chunk_size,
		    struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
		    unsigned int seg)
{
	struct mlx5dr_icm_chunk *chunk;
	int offset;

	chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return NULL;

	offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;

	chunk->seg = seg;
	chunk->size = chunk_size;
	chunk->buddy_mem = buddy_mem_pool;

	if (pool->icm_type == DR_ICM_TYPE_STE)
		dr_icm_chunk_ste_init(chunk, offset);

	buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
	INIT_LIST_HEAD(&chunk->chunk_list);

	/* chunk now is part of the used_list */
	list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);

	return chunk;
}

static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
	int allow_hot_size;

	/* sync when hot memory reaches half of the pool size */
	allow_hot_size =
		mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
						   pool->icm_type) / 2;

	return pool->hot_memory_size > allow_hot_size;
}

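/* Sync HW steering, then return all hot chunks to their buddies and destroy
 * STE buddies that became completely unused.
 */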
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
	u32 num_entries;
	int err;

	err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err);
		return err;
	}

	list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
		struct mlx5dr_icm_chunk *chunk, *tmp_chunk;

		list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
			num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
			mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));
			pool->hot_memory_size -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
			dr_icm_chunk_destroy(chunk, buddy);
		}

		if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
			dr_icm_buddy_destroy(buddy);
	}

	return 0;
}

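/* Find a buddy with a free segment of the requested order; if none has room,
 * create a new buddy (first in the list) and retry.
 */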
static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
					 enum mlx5dr_icm_chunk_size chunk_size,
					 struct mlx5dr_icm_buddy_mem **buddy,
					 unsigned int *seg)
{
	struct mlx5dr_icm_buddy_mem *buddy_mem_pool;
	bool new_mem = false;
	int err;

alloc_buddy_mem:
	/* find the next free place from the buddy list */
	list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
		err = mlx5dr_buddy_alloc_mem(buddy_mem_pool,
					     chunk_size, seg);
		if (!err)
			goto found;

		if (WARN_ON(new_mem)) {
			/* We have new memory pool, first in the list */
			mlx5dr_err(pool->dmn,
				   "No memory for order: %d\n",
				   chunk_size);
			goto out;
		}
	}

	/* no more available allocators in that pool, create new */
	err = dr_icm_buddy_create(pool);
	if (err) {
		mlx5dr_err(pool->dmn,
			   "Failed creating buddy for order %d\n",
			   chunk_size);
		goto out;
	}

	/* mark we have new memory, first in list */
	new_mem = true;
	goto alloc_buddy_mem;

found:
	*buddy = buddy_mem_pool;
out:
	return err;
}

/* Allocate an ICM chunk, each chunk holds a piece of ICM memory and
 * also memory used for HW STE management for optimizations.
 */
struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size)
{
	struct mlx5dr_icm_chunk *chunk = NULL;
	struct mlx5dr_icm_buddy_mem *buddy;
	unsigned int seg;
	int ret;

	if (chunk_size > pool->max_log_chunk_sz)
		return NULL;

	mutex_lock(&pool->mutex);
	/* find mem, get back the relevant buddy pool and seg in that mem */
	ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);
	if (ret)
		goto out;

	chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg);
	if (!chunk)
		goto out_err;

	goto out;

out_err:
	mlx5dr_buddy_free_mem(buddy, seg, chunk_size);
out:
	mutex_unlock(&pool->mutex);
	return chunk;
}

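/* Freed chunks are parked on the buddy's hot list; their ICM is reclaimed
 * only after the next HW sync, when it is safe to reuse.
 */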
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
	struct mlx5dr_icm_pool *pool = buddy->pool;

	/* move the memory to the waiting list AKA "hot" */
	mutex_lock(&pool->mutex);
	list_move_tail(&chunk->chunk_list, &buddy->hot_list);
	pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);

	/* Check if we have chunks that are waiting for sync-ste */
	if (dr_icm_pool_is_sync_required(pool))
		dr_icm_pool_sync_all_buddy_pools(pool);

	mutex_unlock(&pool->mutex);
}

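/* Create an ICM pool; the maximum chunk (and buddy) size is taken from
 * the domain capabilities for the given ICM type.
 */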
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type)
{
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	struct mlx5dr_icm_pool *pool;

	if (icm_type == DR_ICM_TYPE_STE)
		max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
	else
		max_log_chunk_sz = dmn->info.max_log_action_icm_sz;

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->dmn = dmn;
	pool->icm_type = icm_type;
	pool->max_log_chunk_sz = max_log_chunk_sz;

	INIT_LIST_HEAD(&pool->buddy_mem_list);

	mutex_init(&pool->mutex);

	return pool;
}

void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;

	list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
		dr_icm_buddy_destroy(buddy);

	mutex_destroy(&pool->mutex);
	kvfree(pool);
}