// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "dr_types.h"

#define DR_ICM_MODIFY_HDR_GRANULARITY_4K 12

/* modify-header arg pool */
enum dr_arg_chunk_size {
	DR_ARG_CHUNK_SIZE_1,
	DR_ARG_CHUNK_SIZE_MIN = DR_ARG_CHUNK_SIZE_1, /* keep updated when changing */
	DR_ARG_CHUNK_SIZE_2,
	DR_ARG_CHUNK_SIZE_3,
	DR_ARG_CHUNK_SIZE_4,
	DR_ARG_CHUNK_SIZE_MAX,
};

/* argument pool area */
struct dr_arg_pool {
	enum dr_arg_chunk_size log_chunk_size;
	struct mlx5dr_domain *dmn;
	struct list_head free_list;
	struct mutex mutex; /* protect arg pool */
};

struct mlx5dr_arg_mgr {
	struct mlx5dr_domain *dmn;
	struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];
};

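/* Allocate a new range of modify-header argument objects for the pool:
 * create a single devx object covering the whole range and carve it into
 * chunks of pool->log_chunk_size, each tracked by its own mlx5dr_arg_obj
 * appended to the pool's free list.
 */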
static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool)
{
	struct mlx5dr_arg_obj *arg_obj, *tmp_arg;
	struct list_head cur_list;
	u16 object_range;
	int num_of_objects;
	u32 obj_id = 0;
	int i, ret;

	INIT_LIST_HEAD(&cur_list);

	/* Use the device's log argument granularity, clamped to at least
	 * DR_ICM_MODIFY_HDR_GRANULARITY_4K and at most the device's max
	 * single allocation size.
	 */
	object_range =
		max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity,
		      DR_ICM_MODIFY_HDR_GRANULARITY_4K);
	object_range =
		min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc,
		      object_range);

	if (pool->log_chunk_size > object_range) {
		mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n",
			   pool->log_chunk_size);
		return -ENOMEM;
	}

	num_of_objects = (1 << (object_range - pool->log_chunk_size));
	/* Only one devx object per range */
	ret = mlx5dr_cmd_create_modify_header_arg(pool->dmn->mdev,
						  object_range,
						  pool->dmn->pdn,
						  &obj_id);
	if (ret) {
		mlx5dr_err(pool->dmn, "failed allocating object with range: %d:\n",
			   object_range);
		return -EAGAIN;
	}

	for (i = 0; i < num_of_objects; i++) {
		arg_obj = kzalloc(sizeof(*arg_obj), GFP_KERNEL);
		if (!arg_obj) {
			ret = -ENOMEM;
			goto clean_arg_obj;
		}

		arg_obj->log_chunk_size = pool->log_chunk_size;

		list_add_tail(&arg_obj->list_node, &cur_list);

		arg_obj->obj_id = obj_id;
		arg_obj->obj_offset = i * (1 << pool->log_chunk_size);
	}
	list_splice_tail_init(&cur_list, &pool->free_list);

	return 0;

clean_arg_obj:
	mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, obj_id);
	list_for_each_entry_safe(arg_obj, tmp_arg, &cur_list, list_node) {
		list_del(&arg_obj->list_node);
		kfree(arg_obj);
	}
	return ret;
}

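/* Pop an argument object from the pool's free list, allocating a fresh
 * range first if the list is empty. Returns NULL on allocation failure.
 */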
static struct mlx5dr_arg_obj *dr_arg_pool_get_arg_obj(struct dr_arg_pool *pool)
{
	struct mlx5dr_arg_obj *arg_obj = NULL;
	int ret;

	mutex_lock(&pool->mutex);
	if (list_empty(&pool->free_list)) {
		ret = dr_arg_pool_alloc_objs(pool);
		if (ret)
			goto out;
	}

	arg_obj = list_first_entry_or_null(&pool->free_list,
					   struct mlx5dr_arg_obj,
					   list_node);
	WARN(!arg_obj, "couldn't get dr arg obj from pool");

	if (arg_obj)
		list_del_init(&arg_obj->list_node);

out:
	mutex_unlock(&pool->mutex);
	return arg_obj;
}

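/* Return an argument object to the head of the pool's free list. */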
static void dr_arg_pool_put_arg_obj(struct dr_arg_pool *pool,
				    struct mlx5dr_arg_obj *arg_obj)
{
	mutex_lock(&pool->mutex);
	list_add(&arg_obj->list_node, &pool->free_list);
	mutex_unlock(&pool->mutex);
}

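/* Create an argument pool for a single chunk size and pre-allocate its
 * first range of argument objects.
 */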
static struct dr_arg_pool *dr_arg_pool_create(struct mlx5dr_domain *dmn,
					      enum dr_arg_chunk_size chunk_size)
{
	struct dr_arg_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->dmn = dmn;

	INIT_LIST_HEAD(&pool->free_list);
	mutex_init(&pool->mutex);

	pool->log_chunk_size = chunk_size;
	if (dr_arg_pool_alloc_objs(pool))
		goto free_pool;

	return pool;

free_pool:
	kfree(pool);

	return NULL;
}

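/* Free all argument objects left in the pool. The underlying devx object
 * of each range is destroyed only once, via the object at offset 0.
 */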
static void dr_arg_pool_destroy(struct dr_arg_pool *pool)
{
	struct mlx5dr_arg_obj *arg_obj, *tmp_arg;

	list_for_each_entry_safe(arg_obj, tmp_arg, &pool->free_list, list_node) {
		list_del(&arg_obj->list_node);
		if (!arg_obj->obj_offset) /* the first in range */
			mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, arg_obj->obj_id);
		kfree(arg_obj);
	}

	mutex_destroy(&pool->mutex);
	kfree(pool);
}

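/* Map the number of modify-header actions to the smallest chunk size that
 * can hold them: DR_ARG_CHUNK_SIZE_1 holds up to 8 actions and each step
 * doubles the capacity. Returns DR_ARG_CHUNK_SIZE_MAX when more than 64
 * actions are requested (unsupported).
 */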
static enum dr_arg_chunk_size dr_arg_get_chunk_size(u16 num_of_actions)
{
	if (num_of_actions <= 8)
		return DR_ARG_CHUNK_SIZE_1;
	if (num_of_actions <= 16)
		return DR_ARG_CHUNK_SIZE_2;
	if (num_of_actions <= 32)
		return DR_ARG_CHUNK_SIZE_3;
	if (num_of_actions <= 64)
		return DR_ARG_CHUNK_SIZE_4;

	return DR_ARG_CHUNK_SIZE_MAX;
}

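/* The id used to reference the argument object in HW is the devx object
 * id of its range plus the chunk's offset within that range.
 */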
u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj)
{
	return (arg_obj->obj_id + arg_obj->obj_offset);
}

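/* Get an argument object large enough for num_of_actions and write the
 * action data to HW. Returns NULL if the requested size is unsupported,
 * the pool cannot provide an object, or the HW write fails.
 */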
struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
					  u16 num_of_actions,
					  u8 *data)
{
	u32 size = dr_arg_get_chunk_size(num_of_actions);
	struct mlx5dr_arg_obj *arg_obj;
	int ret;

	if (size >= DR_ARG_CHUNK_SIZE_MAX)
		return NULL;

	arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);
	if (!arg_obj) {
		mlx5dr_err(mgr->dmn, "Failed allocating args object for modify header\n");
		return NULL;
	}

	/* write it into the hw */
	ret = mlx5dr_send_postsend_args(mgr->dmn,
					mlx5dr_arg_get_obj_id(arg_obj),
					num_of_actions, data);
	if (ret) {
		mlx5dr_err(mgr->dmn, "Failed writing args object\n");
		goto put_obj;
	}

	return arg_obj;

put_obj:
	mlx5dr_arg_put_obj(mgr, arg_obj);
	return NULL;
}

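/* Release an argument object back to the pool matching its chunk size. */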
void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
			struct mlx5dr_arg_obj *arg_obj)
{
	dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);
}

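/* Create the argument manager with one pool per supported chunk size.
 * Returns NULL if the domain doesn't support pattern/argument objects
 * or if any pool fails to initialize.
 */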
struct mlx5dr_arg_mgr *
mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_arg_mgr *pool_mgr;
	int i;

	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
		return NULL;

	pool_mgr = kzalloc(sizeof(*pool_mgr), GFP_KERNEL);
	if (!pool_mgr)
		return NULL;

	pool_mgr->dmn = dmn;

	for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++) {
		pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);
		if (!pool_mgr->pools[i])
			goto clean_pools;
	}

	return pool_mgr;

clean_pools:
	for (i--; i >= 0; i--)
		dr_arg_pool_destroy(pool_mgr->pools[i]);

	kfree(pool_mgr);
	return NULL;
}

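/* Destroy all per-chunk-size pools and the manager itself. */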
void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr)
{
	struct dr_arg_pool **pools;
	int i;

	if (!mgr)
		return;

	pools = mgr->pools;
	for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++)
		dr_arg_pool_destroy(pools[i]);

	kfree(mgr);
}
274